diff --git a/CalibMuon/DTCalibration/python/ALCARECODtCalibHI_cff.py b/CalibMuon/DTCalibration/python/ALCARECODtCalibHI_cff.py
index f6d6111f453e1..0b5ef7d79ce39 100644
--- a/CalibMuon/DTCalibration/python/ALCARECODtCalibHI_cff.py
+++ b/CalibMuon/DTCalibration/python/ALCARECODtCalibHI_cff.py
@@ -18,20 +18,6 @@
 dt4DSegmentsNoWire.Reco4DAlgoConfig.recAlgoConfig.tTrigModeConfig.doWirePropCorrection = False
 dt4DSegmentsNoWire.Reco4DAlgoConfig.Reco2DAlgoConfig.recAlgoConfig.tTrigModeConfig.doWirePropCorrection = False
-#this is to select collisions
-primaryVertexFilter = cms.EDFilter("VertexSelector",
- src = cms.InputTag("hiSelectedVertex"),
- cut = cms.string("!isFake && ndof > 4 && abs(z) <= 15 && position.Rho <= 2"),
- filter = cms.bool(True),
-)
-
-noscraping = cms.EDFilter("FilterOutScraping",
- applyfilter = cms.untracked.bool(True),
- debugOn = cms.untracked.bool(False),
- numtrack = cms.untracked.uint32(10),
- thresh = cms.untracked.double(0.25)
-)
-
 #seqALCARECODtCalibHI = cms.Sequence(ALCARECODtCalibHIHLTFilter * primaryVertexFilter * DTCalibMuonSelection * dt4DSegmentsNoWire)
 seqALCARECODtCalibHI = cms.Sequence(ALCARECODtCalibHIHLTFilter * dt4DSegmentsNoWire)
diff --git a/CalibMuon/DTCalibration/python/ALCARECODtCalib_cff.py b/CalibMuon/DTCalibration/python/ALCARECODtCalib_cff.py
index d24c23af59d05..3d764e620b552 100644
--- a/CalibMuon/DTCalibration/python/ALCARECODtCalib_cff.py
+++ b/CalibMuon/DTCalibration/python/ALCARECODtCalib_cff.py
@@ -18,17 +18,6 @@
 dt4DSegmentsNoWire.Reco4DAlgoConfig.Reco2DAlgoConfig.recAlgoConfig.tTrigModeConfig.doWirePropCorrection = False
 #this is to select collisions
-primaryVertexFilter = cms.EDFilter("VertexSelector",
- src = cms.InputTag("offlinePrimaryVertices"),
- cut = cms.string("!isFake && ndof > 4 && abs(z) <= 15 && position.Rho <= 2"), # tracksSize() > 3 for the older cut
- filter = cms.bool(True), # otherwise it won't filter the events, just produce an empty vertex collection.
-) - -noscraping = cms.EDFilter("FilterOutScraping", - applyfilter = cms.untracked.bool(True), - debugOn = cms.untracked.bool(False), - numtrack = cms.untracked.uint32(10), - thresh = cms.untracked.double(0.25) -) +from RecoMET.METFilters.metFilters_cff import primaryVertexFilter, noscraping seqALCARECODtCalib = cms.Sequence(primaryVertexFilter * noscraping * ALCARECODtCalibHLTFilter * DTCalibMuonSelection * dt4DSegmentsNoWire) diff --git a/CommonTools/ParticleFlow/python/pfTaus_cff.py b/CommonTools/ParticleFlow/python/pfTaus_cff.py index 5f1642b279515..2395d583c6bad 100644 --- a/CommonTools/ParticleFlow/python/pfTaus_cff.py +++ b/CommonTools/ParticleFlow/python/pfTaus_cff.py @@ -125,7 +125,6 @@ pfTauTagInfoProducer.PFCandidateProducer = jetConfig.ak4PFJets.src pfTauTagInfoProducer.PFJetTracksAssociatorProducer = 'pfJetTracksAssociatorAtVertex' - pfTausPreSequence = cms.Sequence( pfJetTracksAssociatorAtVertex + pfTauPFJets08Region + diff --git a/CommonTools/PileupAlgos/interface/PuppiAlgo.h b/CommonTools/PileupAlgos/interface/PuppiAlgo.h index 05c86f42b9eaa..f65323c297178 100644 --- a/CommonTools/PileupAlgos/interface/PuppiAlgo.h +++ b/CommonTools/PileupAlgos/interface/PuppiAlgo.h @@ -16,6 +16,7 @@ class PuppiAlgo{ void computeMedRMS(const unsigned int &iAlgo,const double &iPVFrac); //Get the Weight double compute(std::vector const &iVals,double iChi2) const; + const std::vector & alphas(){ return fPups; } //Helpers inline double ptMin() const { return fPtMin; } inline double etaMin() const { return fEtaMin; } @@ -26,6 +27,9 @@ class PuppiAlgo{ inline double coneSize ( unsigned int iAlgo) const { return fConeSize.at(iAlgo); } inline double neutralPt (int iNPV) const { return fNeutralPtMin + iNPV * fNeutralPtSlope; } + inline double rms( unsigned int i ) const {return fRMS[i];} + inline double median( unsigned int i ) const {return fMedian[i];} + private: unsigned int fNAlgos; float fEtaMax; @@ -33,6 +37,14 @@ class PuppiAlgo{ float fPtMin ; double fNeutralPtMin; double fNeutralPtSlope; + + double fRMSEtaSF; + double fMedEtaSF; + double fEtaMaxExtrap; + + std::vector fRMS; + std::vector fMedian; + std::vector fPups; std::vector fPupsPV; std::vector fAlgoId; @@ -42,8 +54,6 @@ class PuppiAlgo{ std::vector fConeSize; std::vector fRMSPtMin; std::vector fRMSScaleFactor; - std::vector fRMS; - std::vector fMedian; std::vector fMean; std::vector fNCount; }; diff --git a/CommonTools/PileupAlgos/interface/PuppiContainer.h b/CommonTools/PileupAlgos/interface/PuppiContainer.h index 909e9d9fc82f5..9d726f278acb4 100644 --- a/CommonTools/PileupAlgos/interface/PuppiContainer.h +++ b/CommonTools/PileupAlgos/interface/PuppiContainer.h @@ -37,25 +37,41 @@ class PuppiContainer{ PuppiContainer(const edm::ParameterSet &iConfig); ~PuppiContainer(); void initialize(const std::vector &iRecoObjects); + void setNPV(int iNPV){ fNPV = iNPV; } + std::vector const & pfParticles() const { return fPFParticles; } std::vector const & pvParticles() const { return fChargedPV; } - std::vector const & puppiWeights() ; + std::vector const & puppiWeights(); + const std::vector & puppiRawAlphas(){ return fRawAlphas; } + const std::vector & puppiAlphas(){ return fVals; } + // const std::vector puppiAlpha () {return fAlpha;} + const std::vector & puppiAlphasMed() {return fAlphaMed;} + const std::vector & puppiAlphasRMS() {return fAlphaRMS;} + + int puppiNAlgos(){ return fNAlgos; } std::vector const & puppiParticles() const { return fPupParticles;} protected: double goodVar (fastjet::PseudoJet const &iPart,std::vector const &iParts, 
int iOpt,double iRCone); void getRMSAvg (int iOpt,std::vector const &iConstits,std::vector const &iParticles,std::vector const &iChargeParticles); + void getRawAlphas (int iOpt,std::vector const &iConstits,std::vector const &iParticles,std::vector const &iChargeParticles); double getChi2FromdZ(double iDZ); int getPuppiId ( float iPt, float iEta); double var_within_R (int iId, const std::vector & particles, const fastjet::PseudoJet& centre, double R); + bool fPuppiDiagnostics; std::vector fRecoParticles; std::vector fPFParticles; std::vector fChargedPV; std::vector fPupParticles; std::vector fWeights; std::vector fVals; + std::vector fRawAlphas; + std::vector fAlphaMed; + std::vector fAlphaRMS; + bool fApplyCHS; + bool fInvert; bool fUseExp; double fNeutralMinPt; double fNeutralSlope; diff --git a/CommonTools/PileupAlgos/interface/RecoObj.h b/CommonTools/PileupAlgos/interface/RecoObj.h index bde2077758b3b..f1867fdaf5dcd 100644 --- a/CommonTools/PileupAlgos/interface/RecoObj.h +++ b/CommonTools/PileupAlgos/interface/RecoObj.h @@ -14,7 +14,7 @@ class RecoObj {} ~RecoObj(){} - float pt, eta, phi, m; // kinematics + float pt, eta, phi, m, rapidity; // kinematics int id; int pfType; int vtxId; // Vertex Id from Vertex Collection diff --git a/CommonTools/PileupAlgos/plugins/PuppiProducer.cc b/CommonTools/PileupAlgos/plugins/PuppiProducer.cc index 7388a0156d255..d535604f7660a 100644 --- a/CommonTools/PileupAlgos/plugins/PuppiProducer.cc +++ b/CommonTools/PileupAlgos/plugins/PuppiProducer.cc @@ -29,8 +29,14 @@ // ------------------------------------------------------------------------------------------ PuppiProducer::PuppiProducer(const edm::ParameterSet& iConfig) { + fPuppiDiagnostics = iConfig.getParameter("puppiDiagnostics"); + fPuppiForLeptons = iConfig.getParameter("puppiForLeptons"); fUseDZ = iConfig.getParameter("UseDeltaZCut"); fDZCut = iConfig.getParameter("DeltaZCut"); + fUseExistingWeights = iConfig.getParameter("useExistingWeights"); + fUseWeightsNoLep = iConfig.getParameter("useWeightsNoLep"); + fVtxNdofCut = iConfig.getParameter("vtxNdofCut"); + fVtxZCut = iConfig.getParameter("vtxZCut"); fPuppiContainer = std::unique_ptr ( new PuppiContainer(iConfig) ); tokenPFCandidates_ @@ -42,10 +48,16 @@ PuppiProducer::PuppiProducer(const edm::ParameterSet& iConfig) { produces > (); produces > (); produces< edm::ValueMap >(); - + produces(); - + if (fPuppiDiagnostics){ + produces ("PuppiNAlgos"); + produces> ("PuppiRawAlphas"); + produces> ("PuppiAlphas"); + produces> ("PuppiAlphasMed"); + produces> ("PuppiAlphasRms"); + } } // ------------------------------------------------------------------------------------------ PuppiProducer::~PuppiProducer(){ @@ -63,14 +75,23 @@ void PuppiProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { iEvent.getByToken(tokenVertices_,hVertexProduct); const reco::VertexCollection *pvCol = hVertexProduct.product(); + int npv = 0; + const reco::VertexCollection::const_iterator vtxEnd = pvCol->end(); + for (reco::VertexCollection::const_iterator vtxIter = pvCol->begin(); vtxEnd != vtxIter; ++vtxIter) { + if (!vtxIter->isFake() && vtxIter->ndof()>=fVtxNdofCut && fabs(vtxIter->z())<=fVtxZCut) + npv++; + } + //Fill the reco objects fRecoObjCollection.clear(); for(CandidateView::const_iterator itPF = pfCol->begin(); itPF!=pfCol->end(); itPF++) { + // std::cout << "itPF->pdgId() = " << itPF->pdgId() << std::endl; RecoObj pReco; pReco.pt = itPF->pt(); pReco.eta = itPF->eta(); pReco.phi = itPF->phi(); pReco.m = itPF->mass(); + pReco.rapidity = 
itPF->rapidity(); pReco.charge = itPF->charge(); const reco::Vertex *closestVtx = 0; double pDZ = -9999; @@ -78,62 +99,122 @@ void PuppiProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { int pVtxId = -9999; bool lFirst = true; const pat::PackedCandidate *lPack = dynamic_cast(&(*itPF)); - if(lPack == 0 ) { + if(lPack == 0 ) { + const reco::PFCandidate *pPF = dynamic_cast(&(*itPF)); + double curdz = 9999; + int closestVtxForUnassociateds = -9999; for(reco::VertexCollection::const_iterator iV = pvCol->begin(); iV!=pvCol->end(); ++iV) { - if(lFirst) { - if ( pPF->trackRef().isNonnull() ) pDZ = pPF->trackRef() ->dz(iV->position()); - else if ( pPF->gsfTrackRef().isNonnull() ) pDZ = pPF->gsfTrackRef()->dz(iV->position()); - if ( pPF->trackRef().isNonnull() ) pD0 = pPF->trackRef() ->d0(); - else if ( pPF->gsfTrackRef().isNonnull() ) pD0 = pPF->gsfTrackRef()->d0(); - lFirst = false; - if(pDZ > -9999) pVtxId = 0; - } - if(iV->trackWeight(pPF->trackRef())>0) { - closestVtx = &(*iV); - break; - } - pVtxId++; + if(lFirst) { + if ( pPF->trackRef().isNonnull() ) pDZ = pPF->trackRef() ->dz(iV->position()); + else if ( pPF->gsfTrackRef().isNonnull() ) pDZ = pPF->gsfTrackRef()->dz(iV->position()); + if ( pPF->trackRef().isNonnull() ) pD0 = pPF->trackRef() ->d0(); + else if ( pPF->gsfTrackRef().isNonnull() ) pD0 = pPF->gsfTrackRef()->d0(); + lFirst = false; + if(pDZ > -9999) pVtxId = 0; + } + if(iV->trackWeight(pPF->trackRef())>0) { + closestVtx = &(*iV); + break; + } + // in case it's unassocciated, keep more info + double tmpdz = 99999; + if ( pPF->trackRef().isNonnull() ) tmpdz = pPF->trackRef() ->dz(iV->position()); + else if ( pPF->gsfTrackRef().isNonnull() ) tmpdz = pPF->gsfTrackRef()->dz(iV->position()); + if (fabs(tmpdz) < curdz){ + curdz = fabs(tmpdz); + closestVtxForUnassociateds = pVtxId; + } + pVtxId++; + } - } else if(lPack->vertexRef().isNonnull() ) { - pDZ = lPack->dz(); - pD0 = lPack->dxy(); + int tmpFromPV = 0; + // mocking the miniAOD definitions + if (closestVtx != 0 && fabs(pReco.charge) > 0 && pVtxId > 0) tmpFromPV = 0; + if (closestVtx != 0 && fabs(pReco.charge) > 0 && pVtxId == 0) tmpFromPV = 3; + if (closestVtx == 0 && fabs(pReco.charge) > 0 && closestVtxForUnassociateds == 0) tmpFromPV = 2; + if (closestVtx == 0 && fabs(pReco.charge) > 0 && closestVtxForUnassociateds != 0) tmpFromPV = 1; + pReco.dZ = pDZ; + pReco.d0 = pD0; + pReco.id = 0; + if (fabs(pReco.charge) == 0){ pReco.id = 0; } + else{ + if (tmpFromPV == 0){ pReco.id = 2; } // 0 is associated to PU vertex + if (tmpFromPV == 3){ pReco.id = 1; } + if (tmpFromPV == 1 || tmpFromPV == 2){ + pReco.id = 0; + if (!fPuppiForLeptons && fUseDZ && (fabs(pDZ) < fDZCut)) pReco.id = 1; + if (!fPuppiForLeptons && fUseDZ && (fabs(pDZ) > fDZCut)) pReco.id = 2; + if (fPuppiForLeptons && tmpFromPV == 1) pReco.id = 2; + if (fPuppiForLeptons && tmpFromPV == 2) pReco.id = 1; + } + } + } + else if(lPack->vertexRef().isNonnull() ) { + pDZ = lPack->dz(); + pD0 = lPack->dxy(); closestVtx = &(*(lPack->vertexRef())); - pVtxId = (lPack->fromPV() != (pat::PackedCandidate::PVUsedInFit)); - if( (lPack->fromPV() == pat::PackedCandidate::PVLoose) || - (lPack->fromPV() == pat::PackedCandidate::PVTight) ) - closestVtx = 0; + pReco.dZ = pDZ; + pReco.d0 = pD0; + + pReco.id = 0; + if (fabs(pReco.charge) == 0){ pReco.id = 0; } + if (fabs(pReco.charge) > 0){ + if (lPack->fromPV() == 0){ pReco.id = 2; } // 0 is associated to PU vertex + if (lPack->fromPV() == (pat::PackedCandidate::PVUsedInFit)){ pReco.id = 1; } + if (lPack->fromPV() == 
(pat::PackedCandidate::PVTight) || lPack->fromPV() == (pat::PackedCandidate::PVLoose)){ + pReco.id = 0; + if (!fPuppiForLeptons && fUseDZ && (fabs(pDZ) < fDZCut)) pReco.id = 1; + if (!fPuppiForLeptons && fUseDZ && (fabs(pDZ) > fDZCut)) pReco.id = 2; + if (fPuppiForLeptons && lPack->fromPV() == (pat::PackedCandidate::PVLoose)) pReco.id = 2; + if (fPuppiForLeptons && lPack->fromPV() == (pat::PackedCandidate::PVTight)) pReco.id = 1; + } + } } - pReco.dZ = pDZ; - pReco.d0 = pD0; - - if(closestVtx == 0) pReco.vtxId = -1; - if(closestVtx != 0) pReco.vtxId = pVtxId; - //if(closestVtx != 0) pReco.vtxChi2 = closestVtx->trackWeight(itPF->trackRef()); - //Set the id for Puppi Algo: 0 is neutral pfCandidate, id = 1 for particles coming from PV and id = 2 for charged particles from non-leading vertex - pReco.id = 0; - - if(closestVtx != 0 && pVtxId == 0 && fabs(pReco.charge) > 0) pReco.id = 1; - if(closestVtx != 0 && pVtxId > 0 && fabs(pReco.charge) > 0) pReco.id = 2; - //Add a dZ cut if wanted (this helps) - if(fUseDZ && pDZ > -9999 && closestVtx == 0 && (fabs(pDZ) < fDZCut) && fabs(pReco.charge) > 0) pReco.id = 1; - if(fUseDZ && pDZ > -9999 && closestVtx == 0 && (fabs(pDZ) > fDZCut) && fabs(pReco.charge) > 0) pReco.id = 2; - - //std::cout << "pVtxId = " << pVtxId << ", and charge = " << itPF->charge() << ", and closestVtx = " << closestVtx << ", and id = " << pReco.id << std::endl; fRecoObjCollection.push_back(pReco); + } + fPuppiContainer->initialize(fRecoObjCollection); + fPuppiContainer->setNPV( npv ); + + std::vector lWeights; + std::vector lCandidates; + if (!fUseExistingWeights){ + //Compute the weights and get the particles + lWeights = fPuppiContainer->puppiWeights(); + lCandidates = fPuppiContainer->puppiParticles(); + } + else{ + //Use the existing weights + int lPackCtr = 0; + for(CandidateView::const_iterator itPF = pfCol->begin(); itPF!=pfCol->end(); itPF++) { + const pat::PackedCandidate *lPack = dynamic_cast(&(*itPF)); + float curpupweight = -1.; + if(lPack == 0 ) { + // throw error + throw edm::Exception(edm::errors::LogicError,"PuppiProducer: cannot get weights since inputs are not packedPFCandidates"); + } + else{ + // if (fUseWeightsNoLep){ curpupweight = itPF->puppiWeightNoLep(); } + // else{ curpupweight = itPF->puppiWeight(); } + curpupweight = lPack->puppiWeight(); + } + lWeights.push_back(curpupweight); + fastjet::PseudoJet curjet( curpupweight*lPack->px(), curpupweight*lPack->py(), curpupweight*lPack->pz(), curpupweight*lPack->energy()); + curjet.set_user_index(lPackCtr); + lCandidates.push_back(curjet); + lPackCtr++; + } + } - //Compute the weights - const std::vector lWeights = fPuppiContainer->puppiWeights(); //Fill it into the event std::auto_ptr > lPupOut(new edm::ValueMap()); edm::ValueMap::Filler lPupFiller(*lPupOut); lPupFiller.insert(hPFProduct,lWeights.begin(),lWeights.end()); lPupFiller.fill(); - // This is a dummy to access the "translate" method which is a // non-static member function even though it doesn't need to be. // Will fix in the future. @@ -143,7 +224,6 @@ void PuppiProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { // Since the size of the ValueMap must be equal to the input collection, we need // to search the "puppi" particles to find a match for each input. 
If none is found, // the input is set to have a four-vector of 0,0,0,0 - const std::vector lCandidates = fPuppiContainer->puppiParticles(); fPuppiCandidates.reset( new PFOutputCollection ); std::auto_ptr > p4PupOut(new edm::ValueMap()); LorentzVectorCollection puppiP4s; @@ -165,11 +245,14 @@ void PuppiProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { auto puppiMatched = find_if( lCandidates.begin(), lCandidates.end(), [&val]( fastjet::PseudoJet const & i ){ return i.user_index() == val; } ); if ( puppiMatched != lCandidates.end() ) { pVec.SetPxPyPzE(puppiMatched->px(),puppiMatched->py(),puppiMatched->pz(),puppiMatched->E()); + // fPuppiCandidates->push_back(pCand); } else { pVec.SetPxPyPzE( 0, 0, 0, 0); } pCand.setP4(pVec); puppiP4s.push_back( pVec ); + + pCand.setSourceCandidatePtr( i0->sourceCandidatePtr(0) ); fPuppiCandidates->push_back(pCand); } @@ -191,6 +274,25 @@ void PuppiProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { filler.insert(hPFProduct, values.begin(), values.end()); filler.fill(); iEvent.put(pfMap_p); + + + ////////////////////////////////////////////// + if (fPuppiDiagnostics && !fUseExistingWeights){ + + // all the different alphas per particle + // THE alpha per particle + std::auto_ptr > theAlphas(new std::vector(fPuppiContainer->puppiAlphas())); + std::auto_ptr > theAlphasMed(new std::vector(fPuppiContainer->puppiAlphasMed())); + std::auto_ptr > theAlphasRms(new std::vector(fPuppiContainer->puppiAlphasRMS())); + std::auto_ptr > alphas(new std::vector(fPuppiContainer->puppiRawAlphas())); + std::auto_ptr nalgos(new double(fPuppiContainer->puppiNAlgos())); + + iEvent.put(alphas,"PuppiRawAlphas"); + iEvent.put(nalgos,"PuppiNAlgos"); + iEvent.put(theAlphas,"PuppiAlphas"); + iEvent.put(theAlphasMed,"PuppiAlphasMed"); + iEvent.put(theAlphasRms,"PuppiAlphasRms"); + } } diff --git a/CommonTools/PileupAlgos/plugins/PuppiProducer.h b/CommonTools/PileupAlgos/plugins/PuppiProducer.h index 7252f6e2bad2b..c04496a97445f 100644 --- a/CommonTools/PileupAlgos/plugins/PuppiProducer.h +++ b/CommonTools/PileupAlgos/plugins/PuppiProducer.h @@ -46,8 +46,14 @@ class PuppiProducer : public edm::stream::EDProducer<> { std::string fPuppiName; std::string fPFName; std::string fPVName; + bool fPuppiDiagnostics; + bool fPuppiForLeptons; bool fUseDZ; - float fDZCut; + float fDZCut; + bool fUseExistingWeights; + bool fUseWeightsNoLep; + int fVtxNdofCut; + double fVtxZCut; std::unique_ptr fPuppiContainer; std::vector fRecoObjCollection; std::auto_ptr< PFOutputCollection > fPuppiCandidates; diff --git a/CommonTools/PileupAlgos/python/PUPuppi_cff.py b/CommonTools/PileupAlgos/python/PUPuppi_cff.py new file mode 100644 index 0000000000000..bbd9ec3e98518 --- /dev/null +++ b/CommonTools/PileupAlgos/python/PUPuppi_cff.py @@ -0,0 +1,7 @@ +import FWCore.ParameterSet.Config as cms + +from CommonTools.PileupAlgos.Puppi_cff import * + +pupuppi = puppi.clone() +pupuppi.invertPuppi = True + diff --git a/CommonTools/PileupAlgos/python/Puppi_cff.py b/CommonTools/PileupAlgos/python/Puppi_cff.py index 0e04d19a5f9aa..1128727cad778 100644 --- a/CommonTools/PileupAlgos/python/Puppi_cff.py +++ b/CommonTools/PileupAlgos/python/Puppi_cff.py @@ -6,7 +6,7 @@ useCharged = cms.bool(True), applyLowPUCorr = cms.bool(True), combOpt = cms.int32(0), - cone = cms.double(0.3), + cone = cms.double(0.4), rmsPtMin = cms.double(0.1), rmsScaleFactor = cms.double(1.0) ) @@ -18,45 +18,65 @@ useCharged = cms.bool(False), applyLowPUCorr = cms.bool(True), combOpt = cms.int32(0), - cone = 
cms.double(0.3), + cone = cms.double(0.4), rmsPtMin = cms.double(0.5), rmsScaleFactor = cms.double(1.0) ) ) puppi = cms.EDProducer("PuppiProducer",#cms.PSet(#"PuppiProducer", - UseDeltaZCut = cms.bool (False), + puppiDiagnostics = cms.bool(False), + puppiForLeptons = cms.bool(False), + UseDeltaZCut = cms.bool(True), DeltaZCut = cms.double(0.3), candName = cms.InputTag('particleFlow'), vertexName = cms.InputTag('offlinePrimaryVertices'), #candName = cms.string('packedPFCandidates'), #vertexName = cms.string('offlineSlimmedPrimaryVertices'), applyCHS = cms.bool (True), + invertPuppi = cms.bool (False), useExp = cms.bool (False), MinPuppiWeight = cms.double(0.01), + useExistingWeights = cms.bool(False), + useWeightsNoLep = cms.bool(False), + vtxNdofCut = cms.int32(4), + vtxZCut = cms.double(24), algos = cms.VPSet( cms.PSet( etaMin = cms.double(0.), - etaMax = cms.double( 2.5), + etaMax = cms.double(2.5), ptMin = cms.double(0.), - MinNeutralPt = cms.double(0.2), - MinNeutralPtSlope = cms.double(0.02), + MinNeutralPt = cms.double(0.1), + MinNeutralPtSlope = cms.double(0.015), + RMSEtaSF = cms.double(1.0), + MedEtaSF = cms.double(1.0), + EtaMaxExtrap = cms.double(2.0), puppiAlgos = puppiCentral ), cms.PSet( etaMin = cms.double(2.5), etaMax = cms.double(3.0), ptMin = cms.double(0.0), - MinNeutralPt = cms.double(1.0), - MinNeutralPtSlope = cms.double(0.005), + MinNeutralPt = cms.double(1.7), + MinNeutralPtSlope = cms.double(0.07), + # RMSEtaSF = cms.double(1.545), + # MedEtaSF = cms.double(0.845), + RMSEtaSF = cms.double(1.30), + MedEtaSF = cms.double(1.05), + EtaMaxExtrap = cms.double(2.0), puppiAlgos = puppiForward ), cms.PSet( etaMin = cms.double(3.0), etaMax = cms.double(10.0), ptMin = cms.double(0.0), - MinNeutralPt = cms.double(1.5), - MinNeutralPtSlope = cms.double(0.005), + MinNeutralPt = cms.double(2.0), + MinNeutralPtSlope = cms.double(0.07), + # RMSEtaSF = cms.double(1.18), + # MedEtaSF = cms.double(0.4397), + RMSEtaSF = cms.double(1.10), + MedEtaSF = cms.double(0.90), + EtaMaxExtrap = cms.double(2.0), puppiAlgos = puppiForward ) ) diff --git a/CommonTools/PileupAlgos/src/PuppiAlgo.cc b/CommonTools/PileupAlgos/src/PuppiAlgo.cc index 50be293ae4dd0..b2dcec2e84031 100644 --- a/CommonTools/PileupAlgos/src/PuppiAlgo.cc +++ b/CommonTools/PileupAlgos/src/PuppiAlgo.cc @@ -8,143 +8,177 @@ #include "TMath.h" -PuppiAlgo::PuppiAlgo(edm::ParameterSet &iConfig) { - fEtaMin = iConfig.getParameter("etaMin"); - fEtaMax = iConfig.getParameter("etaMax"); - fPtMin = iConfig.getParameter("ptMin"); - fNeutralPtMin = iConfig.getParameter("MinNeutralPt"); // Weighted Neutral Pt Cut - fNeutralPtSlope = iConfig.getParameter("MinNeutralPtSlope"); // Slope vs #pv - - std::vector lAlgos = iConfig.getParameter >("puppiAlgos"); - fNAlgos = lAlgos.size(); - //Uber Configurable Puppi - for(unsigned int i0 = 0; i0 < lAlgos.size(); i0++) { - int pAlgoId = lAlgos[i0].getParameter ("algoId"); - bool pCharged = lAlgos[i0].getParameter ("useCharged"); - bool pWeight0 = lAlgos[i0].getParameter ("applyLowPUCorr"); - int pComb = lAlgos[i0].getParameter ("combOpt"); // 0=> add in chi2/1=>Multiply p-values - double pConeSize = lAlgos[i0].getParameter("cone"); // Min Pt when computing pt and rms - double pRMSPtMin = lAlgos[i0].getParameter("rmsPtMin"); // Min Pt when computing pt and rms - double pRMSSF = lAlgos[i0].getParameter("rmsScaleFactor"); // Additional Tuning parameter for Jokers - fAlgoId .push_back(pAlgoId); - fCharged .push_back(pCharged); - fAdjust .push_back(pWeight0); - fCombId .push_back(pComb); - fConeSize 
.push_back(pConeSize); - fRMSPtMin .push_back(pRMSPtMin); - fRMSScaleFactor.push_back(pRMSSF); - double pRMS = 0; - double pMed = 0; - double pMean = 0; - int pNCount = 0; - fRMS .push_back(pRMS); - fMedian.push_back(pMed); - fMean .push_back(pMean); - fNCount.push_back(pNCount); - } +PuppiAlgo::PuppiAlgo(edm::ParameterSet &iConfig) { + fEtaMin = iConfig.getParameter("etaMin"); + fEtaMax = iConfig.getParameter("etaMax"); + fPtMin = iConfig.getParameter("ptMin"); + fNeutralPtMin = iConfig.getParameter("MinNeutralPt"); // Weighted Neutral Pt Cut + fNeutralPtSlope = iConfig.getParameter("MinNeutralPtSlope"); // Slope vs #pv + fRMSEtaSF = iConfig.getParameter("RMSEtaSF"); + fMedEtaSF = iConfig.getParameter("MedEtaSF"); + fEtaMaxExtrap = iConfig.getParameter("EtaMaxExtrap"); + + std::vector lAlgos = iConfig.getParameter >("puppiAlgos"); + fNAlgos = lAlgos.size(); + //Uber Configurable Puppi + for(unsigned int i0 = 0; i0 < lAlgos.size(); i0++) { + int pAlgoId = lAlgos[i0].getParameter ("algoId"); + bool pCharged = lAlgos[i0].getParameter ("useCharged"); + bool pWeight0 = lAlgos[i0].getParameter ("applyLowPUCorr"); + int pComb = lAlgos[i0].getParameter ("combOpt"); // 0=> add in chi2/1=>Multiply p-values + double pConeSize = lAlgos[i0].getParameter("cone"); // Min Pt when computing pt and rms + double pRMSPtMin = lAlgos[i0].getParameter("rmsPtMin"); // Min Pt when computing pt and rms + double pRMSSF = lAlgos[i0].getParameter("rmsScaleFactor"); // Additional Tuning parameter for Jokers + fAlgoId .push_back(pAlgoId); + fCharged .push_back(pCharged); + fAdjust .push_back(pWeight0); + fCombId .push_back(pComb); + fConeSize .push_back(pConeSize); + fRMSPtMin .push_back(pRMSPtMin); + fRMSScaleFactor.push_back(pRMSSF); + double pRMS = 0; + double pMed = 0; + double pMean = 0; + int pNCount = 0; + fRMS .push_back(pRMS); + fMedian.push_back(pMed); + fMean .push_back(pMean); + fNCount.push_back(pNCount); + } } -PuppiAlgo::~PuppiAlgo() { - fPups .clear(); - fPupsPV.clear(); +PuppiAlgo::~PuppiAlgo() { + fPups .clear(); + fPupsPV.clear(); } -void PuppiAlgo::reset() { - fPups .clear(); - fPupsPV.clear(); - for(unsigned int i0 = 0; i0 < fNAlgos; i0++) { - fMedian[i0] = 0; - fRMS [i0] = 0; - fMean [i0] = 0; - fNCount[i0] = 0; - } +void PuppiAlgo::reset() { + fPups .clear(); + fPupsPV.clear(); + for(unsigned int i0 = 0; i0 < fNAlgos; i0++) { + fMedian[i0] = 0; + fRMS [i0] = 0; + fMean [i0] = 0; + fNCount[i0] = 0; + } } -void PuppiAlgo::add(const fastjet::PseudoJet &iParticle,const double &iVal,const unsigned int iAlgo) { - if(iParticle.pt() < fRMSPtMin[iAlgo]) return; - // Change from SRR : Previously used fastjet::PseudoJet::user_index to decide the particle type. - // In CMSSW we use the user_index to specify the index in the input collection, so I invented - // a new mechanism using the fastjet UserInfo functionality. Of course, it's still just an integer - // but that interface could be changed (or augmented) if desired / needed. - int puppi_register = std::numeric_limits::lowest(); - if ( iParticle.has_user_info() ) { - PuppiContainer::PuppiUserInfo const * pInfo = dynamic_cast( iParticle.user_info_ptr() ); - if ( pInfo != 0 ) { - puppi_register = pInfo->puppi_register(); +void PuppiAlgo::add(const fastjet::PseudoJet &iParticle,const double &iVal,const unsigned int iAlgo) { + if(iParticle.pt() < fRMSPtMin[iAlgo]) return; + // Change from SRR : Previously used fastjet::PseudoJet::user_index to decide the particle type. 
+ // In CMSSW we use the user_index to specify the index in the input collection, so I invented + // a new mechanism using the fastjet UserInfo functionality. Of course, it's still just an integer + // but that interface could be changed (or augmented) if desired / needed. + int puppi_register = std::numeric_limits::lowest(); + if ( iParticle.has_user_info() ) { + PuppiContainer::PuppiUserInfo const * pInfo = dynamic_cast( iParticle.user_info_ptr() ); + if ( pInfo != 0 ) { + puppi_register = pInfo->puppi_register(); + } + } + if ( puppi_register == std::numeric_limits::lowest() ) { + throw cms::Exception("PuppiRegisterNotSet") << "The puppi register is not set. This must be set before use.\n"; } - } - if ( puppi_register == std::numeric_limits::lowest() ) { - throw cms::Exception("PuppiRegisterNotSet") << "The puppi register is not set. This must be set before use.\n"; - } - if(fCharged[iAlgo] && std::abs(puppi_register) < 1) return; - if(fCharged[iAlgo] && (std::abs(puppi_register) >=1 && std::abs(puppi_register) <=2)) fPupsPV.push_back(iVal); - if(fCharged[iAlgo] && std::abs(puppi_register) < 3) return; - fPups.push_back(iVal); - fNCount[iAlgo]++; + + //// original code + // if(fCharged[iAlgo] && std::abs(puppi_register) < 1) return; + // if(fCharged[iAlgo] && (std::abs(puppi_register) >=1 && std::abs(puppi_register) <=2)) fPupsPV.push_back(iVal); + //if(fCharged[iAlgo] && std::abs(puppi_register) < 3) return; + //// if used fCharged and not CHPU, just return + // fPups.push_back(iVal); //original + // fNCount[iAlgo]++; + + // added by Nhan -- for all eta regions, compute mean/RMS from the central charged PU + //std::cout << "std::abs(puppi_register) = " << std::abs(puppi_register) << std::endl; + if ((std::abs(iParticle.eta()) < fEtaMaxExtrap) && (std::abs(puppi_register) >= 3)){ + fPups.push_back(iVal); + // fPupsPV.push_back(iVal); + fNCount[iAlgo]++; + } + // for the low PU case, correction. for checking that the PU-only median will be below the PV particles + if(std::abs(iParticle.eta()) < fEtaMaxExtrap && (std::abs(puppi_register) >=1 && std::abs(puppi_register) <=2)) fPupsPV.push_back(iVal); + } -void PuppiAlgo::computeMedRMS(const unsigned int &iAlgo,const double &iPVFrac) { - if(iAlgo >= fNAlgos ) return; - if(fNCount[iAlgo] == 0) return; - int lNBefore = 0; - for(unsigned int i0 = 0; i0 < iAlgo; i0++) lNBefore += fNCount[i0]; - std::sort(fPups.begin()+lNBefore,fPups.begin()+lNBefore+fNCount[iAlgo]); - double lCorr = 1.; - //if(!fCharged[iAlgo] && fAdjust[iAlgo]) lCorr *= 1. - iPVFrac; - if(fAdjust[iAlgo]) lCorr *= 1. 
- iPVFrac; - int lNum0 = 0; - for(int i0 = lNBefore; i0 < lNBefore+fNCount[iAlgo]; i0++) { - if(fPups[i0] == 0) lNum0 = i0-lNBefore; - } - //lNum0 = 0; - int lNHalfway = lNBefore + lNum0 + int( double( fNCount[iAlgo]-lNum0 )*0.50*lCorr); - fMedian[iAlgo] = fPups[lNHalfway]; - double lMed = fMedian[iAlgo]; //Just to make the readability easier - int lNRMS = 0; - for(int i0 = lNBefore; i0 < lNBefore+fNCount[iAlgo]; i0++) { - fMean[iAlgo] += fPups[i0]; - if(fPups[i0] == 0) continue; - if(!fCharged[iAlgo] && fAdjust[iAlgo] && fPups[i0] > lMed) continue; - //if(fAdjust[iAlgo] && fPups[i0] > lMed) continue; - lNRMS++; - fRMS [iAlgo] += (fPups[i0]-lMed)*(fPups[i0]-lMed); - } - fMean[iAlgo]/=fNCount[iAlgo]; - if(lNRMS > 0) fRMS [iAlgo]/=lNRMS; - if(fRMS[iAlgo] == 0) fRMS[iAlgo] = 1e-5; +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// +//NHAN'S VERSION +void PuppiAlgo::computeMedRMS(const unsigned int &iAlgo,const double &iPVFrac) { + + //std::cout << "fNCount[iAlgo] = " << fNCount[iAlgo] << std::endl; + if(iAlgo >= fNAlgos ) return; + if(fNCount[iAlgo] == 0) return; + + // sort alphas + int lNBefore = 0; + for(unsigned int i0 = 0; i0 < iAlgo; i0++) lNBefore += fNCount[i0]; + std::sort(fPups.begin()+lNBefore,fPups.begin()+lNBefore+fNCount[iAlgo]); + + // in case you have alphas == 0 + int lNum0 = 0; + for(int i0 = lNBefore; i0 < lNBefore+fNCount[iAlgo]; i0++) { + if(fPups[i0] == 0) lNum0 = i0-lNBefore; + } + + // comput median, removed lCorr for now + int lNHalfway = lNBefore + lNum0 + int( double( fNCount[iAlgo]-lNum0 )*0.50); + fMedian[iAlgo] = fPups[lNHalfway]; + double lMed = fMedian[iAlgo]; //Just to make the readability easier + + int lNRMS = 0; + for(int i0 = lNBefore; i0 < lNBefore+fNCount[iAlgo]; i0++) { + fMean[iAlgo] += fPups[i0]; + if(fPups[i0] == 0) continue; + if(!fCharged[iAlgo] && fAdjust[iAlgo] && fPups[i0] > lMed) continue; + //if(fAdjust[iAlgo] && fPups[i0] > lMed) continue; + lNRMS++; + fRMS [iAlgo] += (fPups[i0]-lMed)*(fPups[i0]-lMed); + } + fMean[iAlgo]/=fNCount[iAlgo]; + if(lNRMS > 0) fRMS [iAlgo]/=lNRMS; + if(fRMS[iAlgo] == 0) fRMS[iAlgo] = 1e-5; + // here is the raw RMS + fRMS [iAlgo] = sqrt(fRMS[iAlgo]); + + // some ways to do corrections to fRMS and fMedian + fRMS [iAlgo] *= fRMSScaleFactor[iAlgo]; + + fRMS[iAlgo] *= fRMSEtaSF; + fMedian[iAlgo] *= fMedEtaSF; + + if(!fAdjust[iAlgo]) return; + //Adjust the p-value to correspond to the median + std::sort(fPupsPV.begin(),fPupsPV.end()); + int lNPV = 0; + for(unsigned int i0 = 0; i0 < fPupsPV.size(); i0++) if(fPupsPV[i0] <= lMed ) lNPV++; + double lAdjust = double(lNPV)/double(fPupsPV.size()+fNCount[iAlgo]); + if(lAdjust > 0) fMedian[iAlgo] -= sqrt(ROOT::Math::chisquared_quantile(lAdjust,1.)*fRMS[iAlgo]); - fRMS [iAlgo] = sqrt(fRMS[iAlgo]); - fRMS [iAlgo] *= fRMSScaleFactor[iAlgo]; - //if(!fCharged[iAlgo]) std::cout << " Process : " << iAlgo << " Median : " << fMedian[iAlgo] << " +/- " << fRMS[iAlgo] << " -- Begin : " << lNBefore << " -- Total : " << fNCount[iAlgo] << " -- 50% " << lNHalfway << " Fraction less than @ Median : " << std::endl; - if(!fAdjust[iAlgo]) return; - //Adjust the p-value to correspond to the median - std::sort(fPupsPV.begin(),fPupsPV.end()); - int lNPV = 0; for(unsigned int i0 = 0; i0 < fPupsPV.size(); i0++) if(fPupsPV[i0] <= lMed ) lNPV++; - double lAdjust = 1.5*double(lNPV)/double(fPupsPV.size()+fNCount[iAlgo]); - if(lAdjust > 0) fMedian[iAlgo] -= 
sqrt(ROOT::Math::chisquared_quantile(lAdjust,1.)*fRMS[iAlgo]); } +//////////////////////////////////////////////////////////////////////////////// + //This code is probably a bit confusing -double PuppiAlgo::compute(std::vector const &iVals,double iChi2) const { - if(fAlgoId[0] == -1) return 1; - double lVal = 0.; - double lPVal = 1.; - int lNDOF = 0; - for(unsigned int i0 = 0; i0 < fNAlgos; i0++) { - if(fNCount[i0] == 0) return 1.; //in the NoPU case return 1. - if(fCombId[i0] == 1 && i0 > 0) { //Compute the previous p-value so that p-values can be multiplieed - double pPVal = ROOT::Math::chisquared_cdf(lVal,lNDOF); - lPVal *= pPVal; - lNDOF = 0; - lVal = 0; +double PuppiAlgo::compute(std::vector const &iVals,double iChi2) const { + if(fAlgoId[0] == -1) return 1; + double lVal = 0.; + double lPVal = 1.; + int lNDOF = 0; + for(unsigned int i0 = 0; i0 < fNAlgos; i0++) { + if(fNCount[i0] == 0) return 1.; //in the NoPU case return 1. + if(fCombId[i0] == 1 && i0 > 0) { //Compute the previous p-value so that p-values can be multiplieed + double pPVal = ROOT::Math::chisquared_cdf(lVal,lNDOF); + lPVal *= pPVal; + lNDOF = 0; + lVal = 0; + } + double pVal = iVals[i0]; + //Special Check for any algo with log(0) + if(fAlgoId[i0] == 0 && iVals[i0] == 0) pVal = fMedian[i0]; + if(fAlgoId[i0] == 3 && iVals[i0] == 0) pVal = fMedian[i0]; + if(fAlgoId[i0] == 5 && iVals[i0] == 0) pVal = fMedian[i0]; + lVal += (pVal-fMedian[i0])*(fabs(pVal-fMedian[i0]))/fRMS[i0]/fRMS[i0]; + lNDOF++; + if(i0 == 0 && iChi2 != 0) lNDOF++; //Add external Chi2 to first element + if(i0 == 0 && iChi2 != 0) lVal+=iChi2; //Add external Chi2 to first element } - double pVal = iVals[i0]; - //Special Check for any algo with log(0) - if(fAlgoId[i0] == 0 && iVals[i0] == 0) pVal = fMedian[i0]; - if(fAlgoId[i0] == 3 && iVals[i0] == 0) pVal = fMedian[i0]; - if(fAlgoId[i0] == 5 && iVals[i0] == 0) pVal = fMedian[i0]; - lVal += (pVal-fMedian[i0])*(fabs(pVal-fMedian[i0]))/fRMS[i0]/fRMS[i0]; - lNDOF++; - if(i0 == 0 && iChi2 != 0) lNDOF++; //Add external Chi2 to first element - if(i0 == 0 && iChi2 != 0) lVal+=iChi2; //Add external Chi2 to first element - } - //Top it off with the last calc - lPVal *= ROOT::Math::chisquared_cdf(lVal,lNDOF); - return lPVal; + //Top it off with the last calc + lPVal *= ROOT::Math::chisquared_cdf(lVal,lNDOF); + return lPVal; } diff --git a/CommonTools/PileupAlgos/src/PuppiContainer.cc b/CommonTools/PileupAlgos/src/PuppiContainer.cc index 85b11e2c83db0..d1e7134184993 100644 --- a/CommonTools/PileupAlgos/src/PuppiContainer.cc +++ b/CommonTools/PileupAlgos/src/PuppiContainer.cc @@ -12,194 +12,250 @@ using namespace std; using namespace fastjet; PuppiContainer::PuppiContainer(const edm::ParameterSet &iConfig) { - fApplyCHS = iConfig.getParameter("applyCHS"); - fUseExp = iConfig.getParameter("useExp"); - fPuppiWeightCut = iConfig.getParameter("MinPuppiWeight"); - std::vector lAlgos = iConfig.getParameter >("algos"); - fNAlgos = lAlgos.size(); - for(unsigned int i0 = 0; i0 < lAlgos.size(); i0++) { - PuppiAlgo pPuppiConfig(lAlgos[i0]); - fPuppiAlgo.push_back(pPuppiConfig); - } + fPuppiDiagnostics = iConfig.getParameter("puppiDiagnostics"); + fApplyCHS = iConfig.getParameter("applyCHS"); + fInvert = iConfig.getParameter("invertPuppi"); + fUseExp = iConfig.getParameter("useExp"); + fPuppiWeightCut = iConfig.getParameter("MinPuppiWeight"); + std::vector lAlgos = iConfig.getParameter >("algos"); + fNAlgos = lAlgos.size(); + for(unsigned int i0 = 0; i0 < lAlgos.size(); i0++) { + PuppiAlgo pPuppiConfig(lAlgos[i0]); + 
fPuppiAlgo.push_back(pPuppiConfig); + } } -void PuppiContainer::initialize(const std::vector &iRecoObjects) { +void PuppiContainer::initialize(const std::vector &iRecoObjects) { //Clear everything - fRecoParticles.resize(0); - fPFParticles .resize(0); - fChargedPV .resize(0); - fPupParticles .resize(0); - fWeights .resize(0); - fVals.resize(0); + fRecoParticles.resize(0); + fPFParticles .resize(0); + fChargedPV .resize(0); + fPupParticles .resize(0); + fWeights .resize(0); + fVals.resize(0); + fRawAlphas.resize(0); + fAlphaMed .resize(0); + fAlphaRMS .resize(0); //fChargedNoPV.resize(0); //Link to the RecoObjects - fPVFrac = 0.; - fNPV = 1.; - fRecoParticles = iRecoObjects; - for (unsigned int i = 0; i < fRecoParticles.size(); i++){ - fastjet::PseudoJet curPseudoJet; - auto fRecoParticle = fRecoParticles[i]; - curPseudoJet.reset_PtYPhiM(fRecoParticle.pt,fRecoParticle.eta,fRecoParticle.phi,fRecoParticle.m); - int puppi_register = 0; - if(fRecoParticle.id == 0 or fRecoParticle.charge == 0) puppi_register = 0; // zero is neutral hadron - if(fRecoParticle.id == 1 and fRecoParticle.charge != 0) puppi_register = fRecoParticle.charge; // from PV use the - if(fRecoParticle.id == 2 and fRecoParticle.charge != 0) puppi_register = fRecoParticle.charge+5; // from NPV use the charge as key +5 as key - curPseudoJet.set_user_info( new PuppiUserInfo( puppi_register ) ); - // fill vector of pseudojets for internal references - fPFParticles.push_back(curPseudoJet); - //Take Charged particles associated to PV - if(std::abs(fRecoParticle.id) == 1) fChargedPV.push_back(curPseudoJet); - if(std::abs(fRecoParticle.id) >= 1 ) fPVFrac+=1.; - //if((fRecoParticle.id == 0) && (inParticles[i].id == 2)) _genParticles.push_back( curPseudoJet); - //if(fRecoParticle.id <= 2 && !(inParticles[i].pt < fNeutralMinE && fRecoParticle.id < 2)) _pfchsParticles.push_back(curPseudoJet); - //if(fRecoParticle.id == 3) _chargedNoPV.push_back(curPseudoJet); - if(fNPV < fRecoParticle.vtxId) fNPV = fRecoParticle.vtxId; - } - if (fPVFrac != 0) fPVFrac = double(fChargedPV.size())/fPVFrac; - else fPVFrac = 0; + fPVFrac = 0.; + fNPV = 1.; + fRecoParticles = iRecoObjects; + for (unsigned int i = 0; i < fRecoParticles.size(); i++){ + fastjet::PseudoJet curPseudoJet; + auto fRecoParticle = fRecoParticles[i]; + // float nom = sqrt((fRecoParticle.m)*(fRecoParticle.m) + (fRecoParticle.pt)*(fRecoParticle.pt)*(cosh(fRecoParticle.eta))*(cosh(fRecoParticle.eta))) + (fRecoParticle.pt)*sinh(fRecoParticle.eta);//hacked + // float denom = sqrt((fRecoParticle.m)*(fRecoParticle.m) + (fRecoParticle.pt)*(fRecoParticle.pt));//hacked + // float rapidity = log(nom/denom);//hacked + curPseudoJet.reset_PtYPhiM(fRecoParticle.pt,fRecoParticle.rapidity,fRecoParticle.phi,fRecoParticle.m);//hacked + //curPseudoJet.reset_PtYPhiM(fRecoParticle.pt,fRecoParticle.eta,fRecoParticle.phi,fRecoParticle.m); + int puppi_register = 0; + if(fRecoParticle.id == 0 or fRecoParticle.charge == 0) puppi_register = 0; // zero is neutral hadron + if(fRecoParticle.id == 1 and fRecoParticle.charge != 0) puppi_register = fRecoParticle.charge; // from PV use the + if(fRecoParticle.id == 2 and fRecoParticle.charge != 0) puppi_register = fRecoParticle.charge+5; // from NPV use the charge as key +5 as key + curPseudoJet.set_user_info( new PuppiUserInfo( puppi_register ) ); + // fill vector of pseudojets for internal references + fPFParticles.push_back(curPseudoJet); + //Take Charged particles associated to PV + if(std::abs(fRecoParticle.id) == 1) fChargedPV.push_back(curPseudoJet); + 
if(std::abs(fRecoParticle.id) >= 1 ) fPVFrac+=1.; + //if((fRecoParticle.id == 0) && (inParticles[i].id == 2)) _genParticles.push_back( curPseudoJet); + //if(fRecoParticle.id <= 2 && !(inParticles[i].pt < fNeutralMinE && fRecoParticle.id < 2)) _pfchsParticles.push_back(curPseudoJet); + //if(fRecoParticle.id == 3) _chargedNoPV.push_back(curPseudoJet); + // if(fNPV < fRecoParticle.vtxId) fNPV = fRecoParticle.vtxId; + } + if (fPVFrac != 0) fPVFrac = double(fChargedPV.size())/fPVFrac; + else fPVFrac = 0; } PuppiContainer::~PuppiContainer(){} double PuppiContainer::goodVar(PseudoJet const &iPart,std::vector const &iParts, int iOpt,double iRCone) { - double lPup = 0; - lPup = var_within_R(iOpt,iParts,iPart,iRCone); - return lPup; + double lPup = 0; + lPup = var_within_R(iOpt,iParts,iPart,iRCone); + return lPup; } double PuppiContainer::var_within_R(int iId, const vector & particles, const PseudoJet& centre, double R){ - if(iId == -1) return 1; - fastjet::Selector sel = fastjet::SelectorCircle(R); - sel.set_reference(centre); - vector near_particles = sel(particles); - double var = 0; - //double lSumPt = 0; - //if(iId == 1) for(unsigned int i=0; i 2.*M_PI-pDPhi) pDPhi = 2.*M_PI-pDPhi; - double pDR2 = pDEta*pDEta+pDPhi*pDPhi; - if(std::abs(pDR2) < 0.0001) continue; - if(iId == 0) var += (near_particles[i].pt()/pDR2); - if(iId == 1) var += near_particles[i].pt(); - if(iId == 2) var += (1./pDR2); - if(iId == 3) var += (1./pDR2); - if(iId == 4) var += near_particles[i].pt(); - if(iId == 5) var += (near_particles[i].pt() * near_particles[i].pt()/pDR2); - } - if(iId == 1) var += centre.pt(); //Sum in a cone - if(iId == 0 && var != 0) var = log(var); - if(iId == 3 && var != 0) var = log(var); - if(iId == 5 && var != 0) var = log(var); - return var; + if(iId == -1) return 1; + fastjet::Selector sel = fastjet::SelectorCircle(R); + sel.set_reference(centre); + vector near_particles = sel(particles); + double var = 0; + //double lSumPt = 0; + //if(iId == 1) for(unsigned int i=0; i 2.*M_PI-pDPhi) pDPhi = 2.*M_PI-pDPhi; + double pDR2 = pDEta*pDEta+pDPhi*pDPhi; + if(std::abs(pDR2) < 0.0001) continue; + if(iId == 0) var += (near_particles[i].pt()/pDR2); + if(iId == 1) var += near_particles[i].pt(); + if(iId == 2) var += (1./pDR2); + if(iId == 3) var += (1./pDR2); + if(iId == 4) var += near_particles[i].pt(); + if(iId == 5) var += (near_particles[i].pt() * near_particles[i].pt()/pDR2); + } + if(iId == 1) var += centre.pt(); //Sum in a cone + if(iId == 0 && var != 0) var = log(var); + if(iId == 3 && var != 0) var = log(var); + if(iId == 5 && var != 0) var = log(var); + return var; } //In fact takes the median not the average -void PuppiContainer::getRMSAvg(int iOpt,std::vector const &iConstits,std::vector const &iParticles,std::vector const &iChargedParticles) { - for(unsigned int i0 = 0; i0 < iConstits.size(); i0++ ) { - double pVal = -1; - //Calculate the Puppi Algo to use - int pPupId = getPuppiId(iConstits[i0].pt(),iConstits[i0].eta()); - if(pPupId == -1 || fPuppiAlgo[pPupId].numAlgos() <= iOpt){ - fVals.push_back(-1); - continue; +void PuppiContainer::getRMSAvg(int iOpt,std::vector const &iConstits,std::vector const &iParticles,std::vector const &iChargedParticles) { + for(unsigned int i0 = 0; i0 < iConstits.size(); i0++ ) { + double pVal = -1; + //Calculate the Puppi Algo to use + int pPupId = getPuppiId(iConstits[i0].pt(),iConstits[i0].eta()); + if(pPupId == -1 || fPuppiAlgo[pPupId].numAlgos() <= iOpt){ + fVals.push_back(-1); + continue; + } + //Get the Puppi Sub Algo (given iteration) + int pAlgo = 
fPuppiAlgo[pPupId].algoId (iOpt); + bool pCharged = fPuppiAlgo[pPupId].isCharged(iOpt); + double pCone = fPuppiAlgo[pPupId].coneSize (iOpt); + //Compute the Puppi Metric + if(!pCharged) pVal = goodVar(iConstits[i0],iParticles ,pAlgo,pCone); + if( pCharged) pVal = goodVar(iConstits[i0],iChargedParticles,pAlgo,pCone); + fVals.push_back(pVal); + //if(std::isnan(pVal) || std::isinf(pVal)) cerr << "====> Value is Nan " << pVal << " == " << iConstits[i0].pt() << " -- " << iConstits[i0].eta() << endl; + if( ! edm::isFinite(pVal)) { + LogDebug( "NotFound" ) << "====> Value is Nan " << pVal << " == " << iConstits[i0].pt() << " -- " << iConstits[i0].eta() << endl; + continue; + } + + // // fPuppiAlgo[pPupId].add(iConstits[i0],pVal,iOpt); + //code added by Nhan, now instead for every algorithm give it all the particles + for(int i1 = 0; i1 < fNAlgos; i1++){ + pAlgo = fPuppiAlgo[i1].algoId (iOpt); + pCharged = fPuppiAlgo[i1].isCharged(iOpt); + pCone = fPuppiAlgo[i1].coneSize (iOpt); + double curVal = -1; + if(!pCharged) curVal = goodVar(iConstits[i0],iParticles ,pAlgo,pCone); + if( pCharged) curVal = goodVar(iConstits[i0],iChargedParticles,pAlgo,pCone); + //std::cout << "i1 = " << i1 << ", curVal = " << curVal << ", eta = " << iConstits[i0].eta() << ", pupID = " << pPupId << std::endl; + fPuppiAlgo[i1].add(iConstits[i0],curVal,iOpt); + } + } - //Get the Puppi Sub Algo (given iteration) - int pAlgo = fPuppiAlgo[pPupId].algoId (iOpt); - bool pCharged = fPuppiAlgo[pPupId].isCharged(iOpt); - double pCone = fPuppiAlgo[pPupId].coneSize (iOpt); - //Compute the Puppi Metric - if(!pCharged) pVal = goodVar(iConstits[i0],iParticles ,pAlgo,pCone); - if( pCharged) pVal = goodVar(iConstits[i0],iChargedParticles,pAlgo,pCone); - fVals.push_back(pVal); - //if(std::isnan(pVal) || std::isinf(pVal)) cerr << "====> Value is Nan " << pVal << " == " << iConstits[i0].pt() << " -- " << iConstits[i0].eta() << endl; - if( ! edm::isFinite(pVal)) { - LogDebug( "NotFound" ) << "====> Value is Nan " << pVal << " == " << iConstits[i0].pt() << " -- " << iConstits[i0].eta() << endl; - continue; + for(int i0 = 0; i0 < fNAlgos; i0++) fPuppiAlgo[i0].computeMedRMS(iOpt,fPVFrac); +} +//In fact takes the median not the average +void PuppiContainer::getRawAlphas(int iOpt,std::vector const &iConstits,std::vector const &iParticles,std::vector const &iChargedParticles) { + for(int j0 = 0; j0 < fNAlgos; j0++){ + for(unsigned int i0 = 0; i0 < iConstits.size(); i0++ ) { + double pVal = -1; + //Get the Puppi Sub Algo (given iteration) + int pAlgo = fPuppiAlgo[j0].algoId (iOpt); + bool pCharged = fPuppiAlgo[j0].isCharged(iOpt); + double pCone = fPuppiAlgo[j0].coneSize (iOpt); + //Compute the Puppi Metric + if(!pCharged) pVal = goodVar(iConstits[i0],iParticles ,pAlgo,pCone); + if( pCharged) pVal = goodVar(iConstits[i0],iChargedParticles,pAlgo,pCone); + fRawAlphas.push_back(pVal); + if( ! 
edm::isFinite(pVal)) { + LogDebug( "NotFound" ) << "====> Value is Nan " << pVal << " == " << iConstits[i0].pt() << " -- " << iConstits[i0].eta() << endl; + continue; + } + } } - fPuppiAlgo[pPupId].add(iConstits[i0],pVal,iOpt); - } - for(int i0 = 0; i0 < fNAlgos; i0++) fPuppiAlgo[i0].computeMedRMS(iOpt,fPVFrac); } -int PuppiContainer::getPuppiId( float iPt, float iEta) { - int lId = -1; - for(int i0 = 0; i0 < fNAlgos; i0++) { - if(std::abs(iEta) < fPuppiAlgo[i0].etaMin()) continue; - if(std::abs(iEta) > fPuppiAlgo[i0].etaMax()) continue; - if(iPt < fPuppiAlgo[i0].ptMin()) continue; - lId = i0; - break; - } - //if(lId == -1) std::cerr << "Error : Full fiducial range is not defined " << std::endl; - return lId; +int PuppiContainer::getPuppiId( float iPt, float iEta) { + int lId = -1; + for(int i0 = 0; i0 < fNAlgos; i0++) { + if(std::abs(iEta) < fPuppiAlgo[i0].etaMin()) continue; + if(std::abs(iEta) > fPuppiAlgo[i0].etaMax()) continue; + if(iPt < fPuppiAlgo[i0].ptMin()) continue; + lId = i0; + break; + } + //if(lId == -1) std::cerr << "Error : Full fiducial range is not defined " << std::endl; + return lId; } -double PuppiContainer::getChi2FromdZ(double iDZ) { - //We need to obtain prob of PU + (1-Prob of LV) - // Prob(LV) = Gaus(dZ,sigma) where sigma = 1.5mm (its really more like 1mm) - //double lProbLV = ROOT::Math::normal_cdf_c(std::abs(iDZ),0.2)*2.; //*2 is to do it double sided - //Take iDZ to be corrected by sigma already - double lProbLV = ROOT::Math::normal_cdf_c(std::abs(iDZ),1.)*2.; //*2 is to do it double sided - double lProbPU = 1-lProbLV; - if(lProbPU <= 0) lProbPU = 1e-16; //Quick Trick to through out infs - if(lProbPU >= 0) lProbPU = 1-1e-16; //Ditto - double lChi2PU = TMath::ChisquareQuantile(lProbPU,1); - lChi2PU*=lChi2PU; - return lChi2PU; +double PuppiContainer::getChi2FromdZ(double iDZ) { + //We need to obtain prob of PU + (1-Prob of LV) + // Prob(LV) = Gaus(dZ,sigma) where sigma = 1.5mm (its really more like 1mm) + //double lProbLV = ROOT::Math::normal_cdf_c(std::abs(iDZ),0.2)*2.; //*2 is to do it double sided + //Take iDZ to be corrected by sigma already + double lProbLV = ROOT::Math::normal_cdf_c(std::abs(iDZ),1.)*2.; //*2 is to do it double sided + double lProbPU = 1-lProbLV; + if(lProbPU <= 0) lProbPU = 1e-16; //Quick Trick to through out infs + if(lProbPU >= 0) lProbPU = 1-1e-16; //Ditto + double lChi2PU = TMath::ChisquareQuantile(lProbPU,1); + lChi2PU*=lChi2PU; + return lChi2PU; } std::vector const & PuppiContainer::puppiWeights() { - fPupParticles .resize(0); - fWeights .resize(0); - fVals .resize(0); - for(int i0 = 0; i0 < fNAlgos; i0++) fPuppiAlgo[i0].reset(); - + fPupParticles .resize(0); + fWeights .resize(0); + fVals .resize(0); + for(int i0 = 0; i0 < fNAlgos; i0++) fPuppiAlgo[i0].reset(); + int lNMaxAlgo = 1; for(int i0 = 0; i0 < fNAlgos; i0++) lNMaxAlgo = std::max(fPuppiAlgo[i0].numAlgos(),lNMaxAlgo); //Run through all compute mean and RMS int lNParticles = fRecoParticles.size(); - for(int i0 = 0; i0 < lNMaxAlgo; i0++) { - getRMSAvg(i0,fPFParticles,fPFParticles,fChargedPV); - } - std::vector pVals; - for(int i0 = 0; i0 < lNParticles; i0++) { - //Refresh - pVals.clear(); - double pWeight = 1; - //Get the Puppi Id and if ill defined move on - int pPupId = getPuppiId(fRecoParticles[i0].pt,fRecoParticles[i0].eta); - if(pPupId == -1) { - fWeights .push_back(pWeight); - continue; - } - // fill the p-values - double pChi2 = 0; - if(fUseExp){ - //Compute an Experimental Puppi Weight with delta Z info (very simple example) - pChi2 = 
getChi2FromdZ(fRecoParticles[i0].dZ); - //Now make sure Neutrals are not set - if(fRecoParticles[i0].pfType > 3) pChi2 = 0; - } - //Fill and compute the PuppiWeight - int lNAlgos = fPuppiAlgo[pPupId].numAlgos(); - for(int i1 = 0; i1 < lNAlgos; i1++) pVals.push_back(fVals[lNParticles*i1+i0]); - pWeight = fPuppiAlgo[pPupId].compute(pVals,pChi2); - //Apply the CHS weights - if(fRecoParticles[i0].id == 1 && fApplyCHS ) pWeight = 1; - if(fRecoParticles[i0].id == 2 && fApplyCHS ) pWeight = 0; - //Basic Weight Checks - if( ! edm::isFinite(pWeight)) { - pWeight = 0.0; - LogDebug("PuppiWeightError") << "====> Weight is nan : " << pWeight << " : pt " << fRecoParticles[i0].pt << " -- eta : " << fRecoParticles[i0].eta << " -- Value" << fVals[i0] << " -- id : " << fRecoParticles[i0].id << " -- NAlgos: " << lNAlgos << std::endl; + for(int i0 = 0; i0 < lNMaxAlgo; i0++) { + getRMSAvg(i0,fPFParticles,fPFParticles,fChargedPV); + } + if (fPuppiDiagnostics) getRawAlphas(0,fPFParticles,fPFParticles,fChargedPV); + + std::vector pVals; + for(int i0 = 0; i0 < lNParticles; i0++) { + //Refresh + pVals.clear(); + double pWeight = 1; + //Get the Puppi Id and if ill defined move on + int pPupId = getPuppiId(fRecoParticles[i0].pt,fRecoParticles[i0].eta); + if(pPupId == -1) { + fWeights .push_back(pWeight); + fAlphaMed.push_back(-10); + fAlphaRMS.push_back(-10); + continue; + } + // fill the p-values + double pChi2 = 0; + if(fUseExp){ + //Compute an Experimental Puppi Weight with delta Z info (very simple example) + pChi2 = getChi2FromdZ(fRecoParticles[i0].dZ); + //Now make sure Neutrals are not set + if(fRecoParticles[i0].pfType > 3) pChi2 = 0; + } + //Fill and compute the PuppiWeight + int lNAlgos = fPuppiAlgo[pPupId].numAlgos(); + for(int i1 = 0; i1 < lNAlgos; i1++) pVals.push_back(fVals[lNParticles*i1+i0]); + + pWeight = fPuppiAlgo[pPupId].compute(pVals,pChi2); + //Apply the CHS weights + if(fRecoParticles[i0].id == 1 && fApplyCHS ) pWeight = 1; + if(fRecoParticles[i0].id == 2 && fApplyCHS ) pWeight = 0; + //Basic Weight Checks + if( ! edm::isFinite(pWeight)) { + pWeight = 0.0; + LogDebug("PuppiWeightError") << "====> Weight is nan : " << pWeight << " : pt " << fRecoParticles[i0].pt << " -- eta : " << fRecoParticles[i0].eta << " -- Value" << fVals[i0] << " -- id : " << fRecoParticles[i0].id << " -- NAlgos: " << lNAlgos << std::endl; + } + //Basic Cuts + if(pWeight < fPuppiWeightCut) pWeight = 0; //==> Elminate the low Weight stuff + if(pWeight*fPFParticles[i0].pt() < fPuppiAlgo[pPupId].neutralPt(fNPV) && fRecoParticles[i0].id == 0 ) pWeight = 0; //threshold cut on the neutral Pt + if(fInvert) pWeight = 1.-pWeight; + //std::cout << "fRecoParticles[i0].pt = " << fRecoParticles[i0].pt << ", fRecoParticles[i0].charge = " << fRecoParticles[i0].charge << ", fRecoParticles[i0].id = " << fRecoParticles[i0].id << ", weight = " << pWeight << std::endl; + + fWeights .push_back(pWeight); + fAlphaMed.push_back(fPuppiAlgo[pPupId].median(0)); + fAlphaRMS.push_back(fPuppiAlgo[pPupId].rms(0)); + //Now get rid of the thrown out weights for the particle collection + + // leave these lines in, in case want to move eventually to having no 1-to-1 correspondence between puppi and pf cands + // if( std::abs(pWeight) < std::numeric_limits::denorm_min() ) continue; // this line seems not to work like it's supposed to... + // if(std::abs(pWeight) <= 0. 
) continue; + + //Produce + PseudoJet curjet( pWeight*fPFParticles[i0].px(), pWeight*fPFParticles[i0].py(), pWeight*fPFParticles[i0].pz(), pWeight*fPFParticles[i0].e()); + curjet.set_user_index(i0); + fPupParticles.push_back(curjet); } - //Basic Cuts - if(pWeight < fPuppiWeightCut) pWeight = 0; //==> Elminate the low Weight stuff - if(pWeight*fPFParticles[i0].pt() < fPuppiAlgo[pPupId].neutralPt(fNPV) && fRecoParticles[i0].id == 0 ) pWeight = 0; //threshold cut on the neutral Pt - fWeights .push_back(pWeight); - //Now get rid of the thrown out weights for the particle collection - if(std::abs(pWeight) < std::numeric_limits::denorm_min() ) continue; - //Produce - PseudoJet curjet( pWeight*fPFParticles[i0].px(), pWeight*fPFParticles[i0].py(), pWeight*fPFParticles[i0].pz(), pWeight*fPFParticles[i0].e()); - curjet.set_user_index(i0); - fPupParticles.push_back(curjet); - } - return fWeights; + return fWeights; } diff --git a/CommonTools/PileupAlgos/test/testPUMods.py b/CommonTools/PileupAlgos/test/testPUMods.py index dd386cdaf0069..3efd6e1254ff4 100644 --- a/CommonTools/PileupAlgos/test/testPUMods.py +++ b/CommonTools/PileupAlgos/test/testPUMods.py @@ -5,14 +5,17 @@ process.load('FWCore/MessageService/MessageLogger_cfi') process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff') process.MessageLogger.cerr.FwkReport.reportEvery = 10 -process.GlobalTag.globaltag = 'START53_V7G::All' +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '') process.load('CommonTools/PileupAlgos/Puppi_cff') process.load('CommonTools/PileupAlgos/softKiller_cfi') -process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(50) ) +process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) ) process.source = cms.Source("PoolSource", - fileNames = cms.untracked.vstring('/store/relval/CMSSW_7_2_0_pre6/RelValProdTTbar/AODSIM/PRE_STA72_V4-v1/00000/BA8284B4-4F40-E411-9AA2-002590593878.root') + fileNames = cms.untracked.vstring( + '/store/mc/RunIISpring15DR74/QCD_Pt-15to7000_TuneCUETP8M1_Flat_13TeV_pythia8/GEN-SIM-RECO/AsymptFlat0to50bx25Reco_MCRUN2_74_V9-v3/10000/0009D30B-0207-E511-B581-0026182FD753.root' + ) ) process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*") @@ -23,13 +26,20 @@ fileMode = cms.untracked.string('NOMERGE') ) +process.puppi.candName = 'packedPFCandidates' +process.puppi.candName = cms.InputTag('packedPFCandidates') +process.puppi.vertexName = cms.InputTag('offlineSlimmedPrimaryVertices') -process.puSequence = cms.Sequence(process.puppi*process.softKiller) +process.packedPFCandidatesNoLep = cms.EDFilter("CandPtrSelector", src = cms.InputTag("packedPFCandidates"), cut = cms.string("abs(pdgId) != 13 && abs(pdgId) != 11")) +process.puppiNoLep = process.puppi.clone() +process.puppiNoLep.candName = cms.InputTag('packedPFCandidatesNoLep') +process.puppiNoLep.vertexName = cms.InputTag('offlineSlimmedPrimaryVertices') + + +process.puSequence = cms.Sequence(process.packedPFCandidatesNoLep+process.puppi+process.puppiNoLep) process.p = cms.Path(process.puSequence) process.output = cms.OutputModule("PoolOutputModule", - outputCommands = cms.untracked.vstring('drop *', - 'keep *_particleFlow_*_*', - 'keep *_*_*_TestPUMods'), + outputCommands = cms.untracked.vstring('keep *'), fileName = cms.untracked.string ("Output.root") ) # schedule definition diff --git a/CommonTools/RecoAlgos/python/HBHENoiseFilter_cfi.py b/CommonTools/RecoAlgos/python/HBHENoiseFilter_cfi.py index 
3f8072f93ea0b..0edef993ef497 100644 --- a/CommonTools/RecoAlgos/python/HBHENoiseFilter_cfi.py +++ b/CommonTools/RecoAlgos/python/HBHENoiseFilter_cfi.py @@ -12,3 +12,10 @@ inputLabel = cms.InputTag('HBHENoiseFilterResultProducer','HBHENoiseFilterResult'), reverseDecision = cms.bool(False) ) + +# Filter on the standard HCAL noise decision (isolation) +HBHENoiseIsoFilter = cms.EDFilter( + 'BooleanFlagFilter', + inputLabel = cms.InputTag('HBHENoiseFilterResultProducer','HBHEIsoNoiseFilterResult'), + reverseDecision = cms.bool(False) +) diff --git a/CommonTools/Utils/BuildFile.xml b/CommonTools/Utils/BuildFile.xml index a72a7baec88f7..c8c2be257353f 100644 --- a/CommonTools/Utils/BuildFile.xml +++ b/CommonTools/Utils/BuildFile.xml @@ -1,5 +1,7 @@ + + diff --git a/CommonTools/Utils/interface/TMVAEvaluator.h b/CommonTools/Utils/interface/TMVAEvaluator.h index 497fc22f71f5c..13ee5127d7d85 100644 --- a/CommonTools/Utils/interface/TMVAEvaluator.h +++ b/CommonTools/Utils/interface/TMVAEvaluator.h @@ -5,8 +5,12 @@ #include #include #include +#include #include "TMVA/Reader.h" +#include "TMVA/IMethod.h" +#include "CondFormats/EgammaObjects/interface/GBRForest.h" +#include "FWCore/Framework/interface/EventSetup.h" class TMVAEvaluator { @@ -16,17 +20,29 @@ class TMVAEvaluator { ~TMVAEvaluator(); void initialize(const std::string & options, const std::string & method, const std::string & weightFile, - const std::vector & variables, const std::vector & spectators); - float evaluate(const std::map & inputs, const bool useSpectators=false); + const std::vector & variables, const std::vector & spectators, bool useGBRForest=false, bool useAdaBoost=false); + void initializeGBRForest(const GBRForest* gbrForest, const std::vector & variables, + const std::vector & spectators, bool useAdaBoost=false); + void initializeGBRForest(const edm::EventSetup &iSetup, const std::string & label, + const std::vector & variables, const std::vector & spectators, bool useAdaBoost=false); + float evaluateTMVA(const std::map & inputs, bool useSpectators) const; + float evaluateGBRForest(const std::map & inputs) const; + float evaluate(const std::map & inputs, bool useSpectators=false) const; private: bool mIsInitialized; + bool mUsingGBRForest; + bool mUseAdaBoost; + bool mReleaseAtEnd; std::string mMethod; - std::unique_ptr mReader; + mutable std::mutex m_mutex; + [[cms::thread_guard("m_mutex")]] std::unique_ptr mReader; + std::unique_ptr mIMethod; + std::unique_ptr mGBRForest; - std::map mVariables; - std::map mSpectators; + [[cms::thread_guard("m_mutex")]] mutable std::map> mVariables; + [[cms::thread_guard("m_mutex")]] mutable std::map> mSpectators; }; #endif // CommonTools_Utils_TMVAEvaluator_h diff --git a/CommonTools/Utils/interface/TMVAZipReader.h b/CommonTools/Utils/interface/TMVAZipReader.h index 5a9bbab039fcb..59b86406f0460 100644 --- a/CommonTools/Utils/interface/TMVAZipReader.h +++ b/CommonTools/Utils/interface/TMVAZipReader.h @@ -24,6 +24,7 @@ #define TMVAZIPREADER_7RXIGO70 #include "TMVA/Reader.h" +#include "TMVA/IMethod.h" #include namespace reco { @@ -32,7 +33,7 @@ namespace reco { bool hasEnding(std::string const &fullString, std::string const &ending); char* readGzipFile(const std::string& weightFile); - void loadTMVAWeights(TMVA::Reader* reader, const std::string& method, + TMVA::IMethod* loadTMVAWeights(TMVA::Reader* reader, const std::string& method, const std::string& weightFile, bool verbose=false); diff --git a/CommonTools/Utils/src/TMVAEvaluator.cc b/CommonTools/Utils/src/TMVAEvaluator.cc index 
8ba7edc538182..a938fee34f1ef 100644 --- a/CommonTools/Utils/src/TMVAEvaluator.cc +++ b/CommonTools/Utils/src/TMVAEvaluator.cc @@ -2,21 +2,26 @@ #include "CommonTools/Utils/interface/TMVAZipReader.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "CondFormats/DataRecord/interface/GBRWrapperRcd.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "TMVA/MethodBDT.h" TMVAEvaluator::TMVAEvaluator() : - mIsInitialized(false) + mIsInitialized(false), mUsingGBRForest(false), mUseAdaBoost(false), mReleaseAtEnd(false) { } TMVAEvaluator::~TMVAEvaluator() { + if (mReleaseAtEnd) + mGBRForest.release(); } void TMVAEvaluator::initialize(const std::string & options, const std::string & method, const std::string & weightFile, - const std::vector & variables, const std::vector & spectators) + const std::vector & variables, const std::vector & spectators, bool useGBRForest, bool useAdaBoost) { // initialize the TMVA reader mReader.reset(new TMVA::Reader(options.c_str())); @@ -26,48 +31,80 @@ void TMVAEvaluator::initialize(const std::string & options, const std::string & // add input variables for(std::vector::const_iterator it = variables.begin(); it!=variables.end(); ++it) { - mVariables.insert( std::pair(*it,0.) ); - mReader->AddVariable(it->c_str(), &mVariables.at(*it)); + mVariables.insert( std::make_pair( *it, std::make_pair( it - variables.begin(), 0. ) ) ); + mReader->AddVariable(it->c_str(), &(mVariables.at(*it).second)); } // add spectator variables for(std::vector::const_iterator it = spectators.begin(); it!=spectators.end(); ++it) { - mSpectators.insert( std::pair(*it,0.) ); - mReader->AddSpectator(it->c_str(), &mSpectators.at(*it)); + mSpectators.insert( std::make_pair( *it, std::make_pair( it - spectators.begin(), 0. ) ) ); + mReader->AddSpectator(it->c_str(), &(mSpectators.at(*it).second)); } // load the TMVA weights - reco::details::loadTMVAWeights(mReader.get(), mMethod.c_str(), weightFile.c_str()); + mIMethod = std::unique_ptr( reco::details::loadTMVAWeights(mReader.get(), mMethod.c_str(), weightFile.c_str()) ); + + if (useGBRForest) + { + mGBRForest.reset( new GBRForest( dynamic_cast( mReader->FindMVA(mMethod.c_str()) ) ) ); + + // now can free some memory + mReader.reset(nullptr); + mIMethod.reset(nullptr); + + mUsingGBRForest = true; + mUseAdaBoost = useAdaBoost; + } mIsInitialized = true; } -float TMVAEvaluator::evaluate(const std::map & inputs, const bool useSpectators) +void TMVAEvaluator::initializeGBRForest(const GBRForest* gbrForest, const std::vector & variables, + const std::vector & spectators, bool useAdaBoost) { - if(!mIsInitialized) - { - edm::LogError("InitializationError") << "TMVAEvaluator not properly initialized."; - return -99.; - } + // add input variables + for(std::vector::const_iterator it = variables.begin(); it!=variables.end(); ++it) + mVariables.insert( std::make_pair( *it, std::make_pair( it - variables.begin(), 0. 
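The map rework above keeps, for every named input, both its insertion index and its current value, so the same named inputs can later be written into a flat array in the order the training expects (needed by the GBRForest path). A small Python sketch of that bookkeeping, with made-up variable names:

variables = ["TagVarCSV_jetPt", "TagVarCSV_jetEta", "TagVarCSV_vertexMass"]   # assumed training order
var_index = {name: i for i, name in enumerate(variables)}

def to_flat_array(inputs):
    # write named inputs into the slot their training order expects;
    # missing inputs fall back to 0, mirroring the warning path in the evaluator
    vals = [0.0] * len(var_index)
    for name, idx in var_index.items():
        vals[idx] = inputs.get(name, 0.0)
    return vals

print(to_flat_array({"TagVarCSV_jetEta": 1.2, "TagVarCSV_jetPt": 55.0}))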
) ) ); - if( useSpectators && inputs.size() < ( mVariables.size() + mSpectators.size() ) ) - { - edm::LogError("MissingInputs") << "Too few inputs provided (" << inputs.size() << " provided but " << mVariables.size() << " input and " << mSpectators.size() << " spectator variables expected)."; - return -99.; - } - else if( inputs.size() < mVariables.size() ) - { - edm::LogError("MissingInputVariable(s)") << "Too few input variables provided (" << inputs.size() << " provided but " << mVariables.size() << " expected)."; - return -99.; - } + // add spectator variables + for(std::vector::const_iterator it = spectators.begin(); it!=spectators.end(); ++it) + mSpectators.insert( std::make_pair( *it, std::make_pair( it - spectators.begin(), 0. ) ) ); + + mGBRForest.reset( gbrForest ); + + mIsInitialized = true; + mUsingGBRForest = true; + mUseAdaBoost = useAdaBoost; + mReleaseAtEnd = true; // need to release ownership at the end if getting GBRForest from an external source +} + + +void TMVAEvaluator::initializeGBRForest(const edm::EventSetup &iSetup, const std::string & label, + const std::vector & variables, const std::vector & spectators, bool useAdaBoost) +{ + edm::ESHandle gbrForestHandle; + + iSetup.get().get(label.c_str(), gbrForestHandle); + + initializeGBRForest(gbrForestHandle.product(), variables, spectators, useAdaBoost); +} + + +float TMVAEvaluator::evaluateTMVA(const std::map & inputs, bool useSpectators) const +{ + // default value + float value = -99.; + + // TMVA::Reader is not thread safe + std::lock_guard lock(m_mutex); // set the input variable values - for(std::map::iterator it = mVariables.begin(); it!=mVariables.end(); ++it) + for(auto it = mVariables.begin(); it!=mVariables.end(); ++it) { if (inputs.count(it->first)>0) - it->second = inputs.at(it->first); + it->second.second = inputs.at(it->first); else edm::LogError("MissingInputVariable") << "Input variable " << it->first << " is missing from the list of inputs. The returned discriminator value might not be sensible."; } @@ -76,17 +113,77 @@ float TMVAEvaluator::evaluate(const std::map & inputs, const if(useSpectators) { // set the spectator variable values - for(std::map::iterator it = mSpectators.begin(); it!=mSpectators.end(); ++it) + for(auto it = mSpectators.begin(); it!=mSpectators.end(); ++it) { if (inputs.count(it->first)>0) - it->second = inputs.at(it->first); + it->second.second = inputs.at(it->first); else edm::LogError("MissingSpectatorVariable") << "Spectator variable " << it->first << " is missing from the list of inputs. The returned discriminator value might not be sensible."; } } // evaluate the MVA - float value = mReader->EvaluateMVA(mMethod.c_str()); + value = mReader->EvaluateMVA(mMethod.c_str()); + + return value; +} + + +float TMVAEvaluator::evaluateGBRForest(const std::map & inputs) const +{ + // default value + float value = -99.; + + std::unique_ptr vars(new float[mVariables.size()]); // allocate n floats + + // set the input variable values + for(auto it = mVariables.begin(); it!=mVariables.end(); ++it) + { + if (inputs.count(it->first)>0) + vars[it->second.first] = inputs.at(it->first); + else + edm::LogError("MissingInputVariable") << "Input variable " << it->first << " is missing from the list of inputs. 
The returned discriminator value might not be sensible."; + } + + // evaluate the MVA + if (mUseAdaBoost) + value = mGBRForest->GetAdaBoostClassifier(vars.get()); + else + value = mGBRForest->GetGradBoostClassifier(vars.get()); + + return value; +} + +float TMVAEvaluator::evaluate(const std::map & inputs, bool useSpectators) const +{ + // default value + float value = -99.; + + if(!mIsInitialized) + { + edm::LogError("InitializationError") << "TMVAEvaluator not properly initialized."; + return value; + } + + if( useSpectators && inputs.size() < ( mVariables.size() + mSpectators.size() ) ) + { + edm::LogError("MissingInputs") << "Too few inputs provided (" << inputs.size() << " provided but " << mVariables.size() << " input and " << mSpectators.size() << " spectator variables expected)."; + return value; + } + else if( inputs.size() < mVariables.size() ) + { + edm::LogError("MissingInputVariable(s)") << "Too few input variables provided (" << inputs.size() << " provided but " << mVariables.size() << " expected)."; + return value; + } + + if (mUsingGBRForest) + { + if(useSpectators) + edm::LogWarning("UnsupportedFunctionality") << "Use of spectator variables with GBRForest is not supported. Spectator variables will be ignored."; + value = evaluateGBRForest(inputs); + } + else + value = evaluateTMVA(inputs, useSpectators); return value; } diff --git a/CommonTools/Utils/src/TMVAZipReader.cc b/CommonTools/Utils/src/TMVAZipReader.cc index 4e5fc8a3f59b3..03928464ae722 100644 --- a/CommonTools/Utils/src/TMVAZipReader.cc +++ b/CommonTools/Utils/src/TMVAZipReader.cc @@ -54,8 +54,11 @@ char* reco::details::readGzipFile(const std::string& weightFile) return buffer; } -void reco::details::loadTMVAWeights(TMVA::Reader* reader, const std::string& method, +TMVA::IMethod* reco::details::loadTMVAWeights(TMVA::Reader* reader, const std::string& method, const std::string& weightFile, bool verbose) { + + TMVA::IMethod* ptr = nullptr; + verbose = false; if (verbose) std::cout << "Booking TMVA Reader with " << method << " and weight file: " << weightFile @@ -65,7 +68,7 @@ void reco::details::loadTMVAWeights(TMVA::Reader* reader, const std::string& met if (verbose) std::cout << "Weight file is pure xml." << std::endl; // Let TMVA read the file - reader->BookMVA(method, weightFile); + ptr = reader->BookMVA(method, weightFile); } else if (reco::details::hasEnding(weightFile, ".gz") || reco::details::hasEnding(weightFile, ".gzip")) { if (verbose) std::cout << "Unzipping file." 
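The dispatch implemented by evaluate() above reduces to: return the default if the evaluator is uninitialized, otherwise use the GBRForest path (warning that spectators are ignored) or fall back to the mutex-protected TMVA::Reader path. A pure-Python stand-in of that control flow, with trivial placeholder functions in place of the real classifiers:

def evaluate_gbr(inputs):
    return sum(inputs.values()) / max(len(inputs), 1)           # placeholder score

def evaluate_tmva(inputs, use_spectators):
    return sum(inputs.values()) / max(len(inputs), 1)           # placeholder score

def evaluate(inputs, initialized, using_gbr_forest, use_spectators=False):
    if not initialized:
        return -99.0                                            # default value
    if using_gbr_forest:
        if use_spectators:
            print("warning: spectator variables are ignored with GBRForest")
        return evaluate_gbr(inputs)
    return evaluate_tmva(inputs, use_spectators)

print(evaluate({"x": 1.0, "y": 3.0}, initialized=True, using_gbr_forest=True))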
<< std::endl; @@ -86,7 +89,7 @@ void reco::details::loadTMVAWeights(TMVA::Reader* reader, const std::string& met close(fdToUselessFile); if (verbose) std::cout << "Booking MvA" << std::endl; - reader->BookMVA(method, weight_file_name); + ptr = reader->BookMVA(method, weight_file_name); if (verbose) std::cout << "Cleaning up" << std::endl; remove(weight_file_name.c_str()); @@ -103,4 +106,6 @@ void reco::details::loadTMVAWeights(TMVA::Reader* reader, const std::string& met << "I don't understand the extension on the filename: " << weightFile << ", it should be .xml, .gz, or .gzip" << std::endl; } + + return ptr; } diff --git a/DataFormats/EgammaCandidates/interface/Photon.h b/DataFormats/EgammaCandidates/interface/Photon.h index 5e9c16458e1e1..f2e76c8915b63 100644 --- a/DataFormats/EgammaCandidates/interface/Photon.h +++ b/DataFormats/EgammaCandidates/interface/Photon.h @@ -443,6 +443,9 @@ namespace reco { float sumPhotonEtHighThreshold() const {return pfIsolation_.sumPhotonEtHighThreshold;} float sumPUPt() const {return pfIsolation_.sumPUPt;} + /// Get Particle Flow Isolation variables block + const PflowIsolationVariables& getPflowIsolationVariables() const { return pfIsolation_; } + /// Set Particle Flow Isolation variables void setPflowIsolationVariables ( const PflowIsolationVariables& pfisol ) { pfIsolation_ = pfisol;} diff --git a/DataFormats/PatCandidates/interface/PackedCandidate.h b/DataFormats/PatCandidates/interface/PackedCandidate.h index 1bffd3b554898..d27baed55809c 100644 --- a/DataFormats/PatCandidates/interface/PackedCandidate.h +++ b/DataFormats/PatCandidates/interface/PackedCandidate.h @@ -346,8 +346,9 @@ namespace pat { virtual bool isJet() const { return false; } // puppiweight - void setPuppiWeight(float p); - float puppiWeight() const; + void setPuppiWeight(float p, float p_nolep = 0.0); /// Set both weights at once (with option for only full PUPPI) + float puppiWeight() const; /// Weight from full PUPPI + float puppiWeightNoLep() const; /// Weight from PUPPI removing leptons protected: uint16_t packedPt_, packedEta_, packedPhi_, packedM_; @@ -364,6 +365,7 @@ namespace pat { void unpackTrk() const ; int8_t packedPuppiweight_; + int8_t packedPuppiweightNoLepDiff_; // storing the DIFFERENCE of (all - "no lep") for compression optimization /// the four vector mutable PolarLorentzVector p4_; mutable LorentzVector p4c_; diff --git a/DataFormats/PatCandidates/src/PackedCandidate.cc b/DataFormats/PatCandidates/src/PackedCandidate.cc index 8660ec8a75462..e61943836c0e7 100644 --- a/DataFormats/PatCandidates/src/PackedCandidate.cc +++ b/DataFormats/PatCandidates/src/PackedCandidate.cc @@ -273,7 +273,13 @@ bool pat::PackedCandidate::longLived() const {return false;} bool pat::PackedCandidate::massConstraint() const {return false;} // puppiweight -void pat::PackedCandidate::setPuppiWeight(float p) { packedPuppiweight_ = pack8logClosed((p-0.5)*2,-2,0,64);} +// puppiweight +void pat::PackedCandidate::setPuppiWeight(float p, float p_nolep) { + // Set both weights at once to avoid misconfigured weights if called in the wrong order + packedPuppiweight_ = pack8logClosed((p-0.5)*2,-2,0,64); + packedPuppiweightNoLepDiff_ = pack8logClosed((p_nolep-0.5)*2,-2,0,64) - packedPuppiweight_; +} float pat::PackedCandidate::puppiWeight() const { return unpack8logClosed(packedPuppiweight_,-2,0,64)/2. + 0.5;} +float pat::PackedCandidate::puppiWeightNoLep() const { return unpack8logClosed(packedPuppiweightNoLepDiff_+packedPuppiweight_,-2,0,64)/2. 
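The new PackedCandidate packing stores the full-PUPPI weight code plus the difference between the packed no-lepton and packed full codes, so the second byte is small (often zero) whenever the two weights agree and compresses well. A toy illustration, with a simple linear quantizer standing in for the 8-bit log packing used in PackedCandidate:

def quantize(w, steps=255):
    # stand-in for pack8logClosed: clamp to [0, 1] and map to an integer code
    return int(round(max(0.0, min(1.0, w)) * steps))

def dequantize(code, steps=255):
    return code / steps

w_all, w_nolep = 0.83, 0.80
code_all = quantize(w_all)
code_diff = quantize(w_nolep) - code_all       # small when the weights agree
recovered_nolep = dequantize(code_all + code_diff)
print(code_all, code_diff, round(recovered_nolep, 3))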
+ 0.5;} diff --git a/DataFormats/PatCandidates/src/classes_def_objects.xml b/DataFormats/PatCandidates/src/classes_def_objects.xml index aeb2d13138037..f9778594c6fb5 100644 --- a/DataFormats/PatCandidates/src/classes_def_objects.xml +++ b/DataFormats/PatCandidates/src/classes_def_objects.xml @@ -275,7 +275,11 @@ - + + + + + diff --git a/DataFormats/TauReco/interface/PFTau.h b/DataFormats/TauReco/interface/PFTau.h index 16b374e2450c0..e5c63782bf753 100644 --- a/DataFormats/TauReco/interface/PFTau.h +++ b/DataFormats/TauReco/interface/PFTau.h @@ -148,9 +148,16 @@ class PFTau : public BaseTau { /// Retrieve the identified hadronic decay mode according to the number of /// charged and piZero candidates in the signal cone hadronicDecayMode decayMode() const; - hadronicDecayMode calculateDecayMode() const; void setDecayMode(const hadronicDecayMode&); + /// Effect of eta and phi correction of strip on mass of tau candidate + float bendCorrMass() const { return bendCorrMass_; } + void setBendCorrMass(float bendCorrMass) { bendCorrMass_ = bendCorrMass; } + + /// Size of signal cone + double signalConeSize() const { return signalConeSize_; } + void setSignalConeSize(double signalConeSize) { signalConeSize_ = signalConeSize; } + //Electron rejection float emFraction() const; // Ecal/Hcal Cluster Energy float hcalTotOverPLead() const; // total Hcal Cluster E / leadPFChargedHadron P @@ -192,7 +199,7 @@ class PFTau : public BaseTau { CandidatePtr sourceCandidatePtr( size_type i ) const; /// prints information on this PFTau - void dump(std::ostream& out=std::cout) const; + void dump(std::ostream& out = std::cout) const; private: friend class tau::RecoTauConstructor; @@ -226,6 +233,10 @@ class PFTau : public BaseTau { hadronicDecayMode decayMode_; + float bendCorrMass_; + + float signalConeSize_; + reco::PFJetRef jetRef_; PFTauTagInfoRef PFTauTagInfoRef_; reco::PFCandidatePtr leadPFChargedHadrCand_; diff --git a/DataFormats/TauReco/interface/RecoTauPiZero.h b/DataFormats/TauReco/interface/RecoTauPiZero.h index 244f3077c555d..cc0a9c8f6a719 100644 --- a/DataFormats/TauReco/interface/RecoTauPiZero.h +++ b/DataFormats/TauReco/interface/RecoTauPiZero.h @@ -14,32 +14,47 @@ class RecoTauPiZero : public CompositePtrCandidate { kStrips = 3 }; - RecoTauPiZero():CompositePtrCandidate(),algoName_(kUndefined){ - this->setPdgId(111); } - - RecoTauPiZero(PiZeroAlgorithm algoName): - CompositePtrCandidate(), algoName_(algoName) { this->setPdgId(111); } + RecoTauPiZero() + : CompositePtrCandidate(), + algoName_(kUndefined), bendCorrEta_ (0.), bendCorrPhi_ (0.) + { + this->setPdgId(111); + } + + RecoTauPiZero(PiZeroAlgorithm algoName) + : CompositePtrCandidate(), + algoName_(algoName), bendCorrEta_ (0.), bendCorrPhi_ (0.) + { + this->setPdgId(111); + } /// constructor from values RecoTauPiZero(Charge q, const LorentzVector& p4, const Point& vtx = Point( 0, 0, 0 ), int pdgId = 111, int status = 0, bool integerCharge = true, - PiZeroAlgorithm algoName=kUndefined): - CompositePtrCandidate( - q, p4, vtx, pdgId, status, integerCharge ),algoName_(algoName) {} + PiZeroAlgorithm algoName = kUndefined) + : CompositePtrCandidate(q, p4, vtx, pdgId, status, integerCharge ), + algoName_(algoName), bendCorrEta_ (0.), bendCorrPhi_ (0.) 
+ { + } /// constructor from values RecoTauPiZero(Charge q, const PolarLorentzVector& p4, const Point& vtx = Point( 0, 0, 0 ), int pdgId = 111, int status = 0, bool integerCharge = true, - PiZeroAlgorithm algoName=kUndefined): - CompositePtrCandidate( - q, p4, vtx, pdgId, status, integerCharge ),algoName_(algoName) {} + PiZeroAlgorithm algoName=kUndefined) + : CompositePtrCandidate(q, p4, vtx, pdgId, status, integerCharge ), + algoName_(algoName), bendCorrEta_ (0.), bendCorrPhi_ (0.) + { + } /// constructor from a Candidate - explicit RecoTauPiZero( - const Candidate & p, PiZeroAlgorithm algoName=kUndefined): - CompositePtrCandidate(p),algoName_(algoName) { this->setPdgId(111); } + explicit RecoTauPiZero(const Candidate& p, PiZeroAlgorithm algoName = kUndefined) + : CompositePtrCandidate(p), + algoName_(algoName), bendCorrEta_ (0.), bendCorrPhi_ (0.) + { + this->setPdgId(111); + } /// destructor ~RecoTauPiZero(){}; @@ -62,11 +77,20 @@ class RecoTauPiZero : public CompositePtrCandidate { /// Check whether a given algo produced this pi zero bool algoIs(PiZeroAlgorithm algo) const; - void print(std::ostream& out=std::cout) const; + /// Size of correction to account for spread of photon energy in eta and phi + /// in case charged pions make nuclear interactions or photons convert within the tracking detector + float bendCorrEta() const { return bendCorrEta_; } + float bendCorrPhi() const { return bendCorrPhi_; } + void setBendCorrEta(float bendCorrEta) { bendCorrEta_ = bendCorrEta; } + void setBendCorrPhi(float bendCorrPhi) { bendCorrPhi_ = bendCorrPhi; } + + void print(std::ostream& out = std::cout) const; private: PiZeroAlgorithm algoName_; + float bendCorrEta_; + float bendCorrPhi_; }; std::ostream & operator<<(std::ostream& out, const RecoTauPiZero& c); diff --git a/DataFormats/TauReco/src/PFTau.cc b/DataFormats/TauReco/src/PFTau.cc index f67560341df95..63f66e0984381 100644 --- a/DataFormats/TauReco/src/PFTau.cc +++ b/DataFormats/TauReco/src/PFTau.cc @@ -15,14 +15,16 @@ PFTau::PFTau() hcalTotOverPLead_ = NAN; hcalMaxOverPLead_ = NAN; hcal3x3OverPLead_ = NAN; - ecalStripSumEOverPLead_= NAN; + ecalStripSumEOverPLead_ = NAN; bremsRecoveryEOverPLead_ = NAN; electronPreIDOutput_ = NAN; - electronPreIDDecision_= NAN; + electronPreIDDecision_ = NAN; caloComp_ = NAN; segComp_ = NAN; muonDecision_ = NAN; - decayMode_=kNull; + decayMode_ = kNull; + bendCorrMass_ = 0.; + signalConeSize_ = 0.; } PFTau::PFTau(Charge q, const LorentzVector& p4, const Point& vtx) @@ -40,12 +42,14 @@ PFTau::PFTau(Charge q, const LorentzVector& p4, const Point& vtx) ecalStripSumEOverPLead_= NAN; bremsRecoveryEOverPLead_ = NAN; electronPreIDOutput_ = NAN; - electronPreIDDecision_= NAN; + electronPreIDDecision_ = NAN; caloComp_ = NAN; segComp_ = NAN; muonDecision_ = NAN; - decayMode_=kNull; + decayMode_ = kNull; + bendCorrMass_ = 0.; + signalConeSize_ = 0.; } PFTau* PFTau::clone() const { return new PFTau(*this); } @@ -177,22 +181,6 @@ void PFTau::setIsolationTauChargedHadronCandidatesRefs(const PFRecoTauChargedHad PFTau::hadronicDecayMode PFTau::decayMode() const { return decayMode_; } -PFTau::hadronicDecayMode PFTau::calculateDecayMode() const { - unsigned int nCharged = signalTauChargedHadronCandidates().size(); - unsigned int nPiZeros = signalPiZeroCandidates().size(); - // If no tracks exist, this is definitely not a tau! 
- if ( !nCharged ) return kNull; - // Find the maximum number of PiZeros our parameterization can hold - const unsigned int maxPiZeros = kOneProngNPiZero; - // Determine our track index - unsigned int trackIndex = (nCharged - 1)*(maxPiZeros + 1); - // Check if we handle the given number of tracks - if ( trackIndex >= kRareDecayMode ) return kRareDecayMode; - - if(nPiZeros>maxPiZeros) nPiZeros=maxPiZeros; - return static_cast(trackIndex + nPiZeros); -} - void PFTau::setDecayMode(const PFTau::hadronicDecayMode& dm){ decayMode_=dm;} // Setting information about the isolation region diff --git a/DataFormats/TauReco/src/RecoTauPiZero.cc b/DataFormats/TauReco/src/RecoTauPiZero.cc index 7b5fdf78dc7cb..a347175acc738 100644 --- a/DataFormats/TauReco/src/RecoTauPiZero.cc +++ b/DataFormats/TauReco/src/RecoTauPiZero.cc @@ -1,4 +1,5 @@ #include "DataFormats/TauReco/interface/RecoTauPiZero.h" +#include "DataFormats/ParticleFlowCandidate/interface/PFCandidate.h" #include "DataFormats/Math/interface/deltaPhi.h" namespace reco { @@ -55,31 +56,30 @@ bool RecoTauPiZero::algoIs(RecoTauPiZero::PiZeroAlgorithm algo) const { return (algoName_ == algo); } -namespace { -std::ostream& operator<<(std::ostream& out, const reco::Candidate::LorentzVector& p4) +namespace { - out << "(mass/pt/eta/phi) (" << std::setiosflags(std::ios::fixed) << std::setprecision(2) - << p4.mass() << "/" << std::setprecision(1) << p4.pt() << "/" << std::setprecision(2) << p4.eta() - << "/" << std::setprecision(2) << p4.phi() << ")"; - return out; -} + std::string getPFCandidateType(reco::PFCandidate::ParticleType pfCandidateType) + { + if ( pfCandidateType == reco::PFCandidate::X ) return "undefined"; + else if ( pfCandidateType == reco::PFCandidate::h ) return "PFChargedHadron"; + else if ( pfCandidateType == reco::PFCandidate::e ) return "PFElectron"; + else if ( pfCandidateType == reco::PFCandidate::mu ) return "PFMuon"; + else if ( pfCandidateType == reco::PFCandidate::gamma ) return "PFGamma"; + else if ( pfCandidateType == reco::PFCandidate::h0 ) return "PFNeutralHadron"; + else if ( pfCandidateType == reco::PFCandidate::h_HF ) return "HF_had"; + else if ( pfCandidateType == reco::PFCandidate::egamma_HF ) return "HF_em"; + else assert(0); + } } -void RecoTauPiZero::print(std::ostream& out) const { - if (!out) return; - - out << "RecoTauPiZero: " << this->p4() << - " nDaughters: " << this->numberOfDaughters() << - " (gamma/e) (" << this->numberOfGammas() << "/" << this->numberOfElectrons() << ")" << - " maxDeltaPhi: " << std::setprecision(3) << maxDeltaPhi() << - " maxDeltaEta: " << std::setprecision(3) << maxDeltaEta() << - " algo: " << algo() << - std::endl; - - for(size_t i = 0; i < this->numberOfDaughters(); ++i) - { - out << "--- daughter " << i << ": " << daughterPtr(i)->p4() << - " key: " << daughterPtr(i).key() << std::endl; +void RecoTauPiZero::print(std::ostream& stream) const +{ + std::cout << "Pt = " << this->pt() << ", eta = " << this->eta() << ", phi = " << this->phi() << std::endl; + size_t numDaughters = this->numberOfDaughters(); + for ( size_t iDaughter = 0; iDaughter < numDaughters; ++iDaughter ) { + const reco::PFCandidate* daughter = dynamic_cast(this->daughterPtr(iDaughter).get()); + std::cout << " daughter #" << iDaughter << " (" << getPFCandidateType(daughter->particleId()) << "):" + << " Pt = " << daughter->pt() << ", eta = " << daughter->eta() << ", phi = " << daughter->phi() << std::endl; } } diff --git a/DataFormats/TauReco/src/classes_def_2.xml b/DataFormats/TauReco/src/classes_def_2.xml index 
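For reference, the removed helper encoded the decay mode as (nCharged - 1) * (maxPiZeros + 1) + min(nPiZeros, maxPiZeros), falling back to the rare-decay sentinel when the track multiplicity is out of range; callers now set the mode explicitly via setDecayMode. A sketch of that encoding, with illustrative stand-ins for the enum constants:

MAX_PI_ZEROS = 2           # illustrative stand-in for kOneProngNPiZero
RARE_DECAY_MODE = 15       # illustrative stand-in for kRareDecayMode

def decay_mode(n_charged, n_pi_zeros):
    if n_charged == 0:
        return None                                   # no tracks: not a tau
    track_index = (n_charged - 1) * (MAX_PI_ZEROS + 1)
    if track_index >= RARE_DECAY_MODE:
        return RARE_DECAY_MODE
    return track_index + min(n_pi_zeros, MAX_PI_ZEROS)

print(decay_mode(1, 1), decay_mode(3, 0))             # one prong + pi0, three prong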
364601d4fe7c2..04a7a406f3000 100644 --- a/DataFormats/TauReco/src/classes_def_2.xml +++ b/DataFormats/TauReco/src/classes_def_2.xml @@ -1,6 +1,7 @@ - + + @@ -265,7 +266,8 @@ isolationTauChargedHadronCandidates_.clear(); - + + diff --git a/JetMETCorrections/Type1MET/python/correctionTermsPfMetType1Type2_cff.py b/JetMETCorrections/Type1MET/python/correctionTermsPfMetType1Type2_cff.py index 9d0efa39b18ef..0db3f4937e852 100644 --- a/JetMETCorrections/Type1MET/python/correctionTermsPfMetType1Type2_cff.py +++ b/JetMETCorrections/Type1MET/python/correctionTermsPfMetType1Type2_cff.py @@ -14,8 +14,7 @@ src = cms.InputTag("ak4PFJets") ) # this one is needed only if the input file doesn't have it -# solved automatically with unscheduled execution -from RecoParticleFlow.PFProducer.pfLinker_cff import particleFlowPtrs +#from RecoParticleFlow.PFProducer.pfLinker_cff import particleFlowPtrs # particleFlowPtrs = cms.EDProducer("PFCandidateFwdPtrProducer", # src = cms.InputTag("particleFlow") # ) @@ -67,7 +66,7 @@ ##____________________________________________________________________________|| correctionTermsPfMetType1Type2 = cms.Sequence( pfJetsPtrForMetCorr + - particleFlowPtrs + + #particleFlowPtrs + pfCandsNotInJetsPtrForMetCorr + pfCandsNotInJetsForMetCorr + pfCandMETcorr + diff --git a/PhysicsTools/HepMCCandAlgos/plugins/GenParticlePruner.cc b/PhysicsTools/HepMCCandAlgos/plugins/GenParticlePruner.cc index 417aca54948ad..413785e68fadb 100644 --- a/PhysicsTools/HepMCCandAlgos/plugins/GenParticlePruner.cc +++ b/PhysicsTools/HepMCCandAlgos/plugins/GenParticlePruner.cc @@ -36,8 +36,8 @@ class GenParticlePruner : public edm::EDProducer { void flagMothers(const reco::GenParticle &, int); void recursiveFlagDaughters(size_t, const reco::GenParticleCollection &, int, std::vector &); void recursiveFlagMothers(size_t, const reco::GenParticleCollection &, int, std::vector &); - void addDaughterRefs(std::vector &, reco::GenParticle&, reco::GenParticleRefProd, const reco::GenParticleRefVector&) const; - void addMotherRefs(std::vector &, reco::GenParticle&, reco::GenParticleRefProd, const reco::GenParticleRefVector&) const; + void getDaughterKeys(std::vector &, std::vector &, const reco::GenParticleRefVector&) const; + void getMotherKeys(std::vector &, std::vector &, const reco::GenParticleRefVector&) const; }; using namespace edm; @@ -247,10 +247,17 @@ void GenParticlePruner::produce(Event& evt, const EventSetup& es) { // parentage/descendency. In some cases, a circular referencing is encountered, // which would result in an infinite loop. The list is checked to // avoid this. 
- vector daIndxs; - addDaughterRefs(daIndxs, newGen, outRef, gen.daughterRefVector()); - vector moIndxs; - addMotherRefs(moIndxs, newGen, outRef, gen.motherRefVector()); + vector daIndxs, daNewIndxs; + getDaughterKeys(daIndxs, daNewIndxs, gen.daughterRefVector()); + std::sort(daNewIndxs.begin(),daNewIndxs.end()); + for(size_t i=0; i moIndxs, moNewIndxs; + getMotherKeys(moIndxs, moNewIndxs, gen.motherRefVector()); + std::sort(moNewIndxs.begin(),moNewIndxs.end()); + for(size_t i=0; i & daIndxs, - GenParticle& newGen, GenParticleRefProd outRef, +void GenParticlePruner::getDaughterKeys(vector & daIndxs, vector & daNewIndxs, const GenParticleRefVector& daughters) const { for(GenParticleRefVector::const_iterator j = daughters.begin(); j != daughters.end(); ++j) { GenParticleRef dau = *j; - if ( find(daIndxs.begin(), daIndxs.end(), dau.key()) == daIndxs.end() ) { - int idx = flags_[dau.key()]; + if (find(daIndxs.begin(), daIndxs.end(), dau.key()) == daIndxs.end()) { daIndxs.push_back( dau.key() ); - if(idx > 0) { - GenParticleRef newDau(outRef, static_cast(idx)); - newGen.addDaughter(newDau); + int idx = flags_[dau.key()]; + if (idx > 0 ) { + daNewIndxs.push_back( idx ); } else { - const GenParticleRefVector daus = dau->daughterRefVector(); - if(daus.size()>0) { - addDaughterRefs(daIndxs, newGen, outRef, daus); - } + const GenParticleRefVector & daus = dau->daughterRefVector(); + if(daus.size()>0) + getDaughterKeys(daIndxs, daNewIndxs, daus); } } } @@ -289,22 +293,20 @@ void GenParticlePruner::addDaughterRefs(vector & daIndxs, -void GenParticlePruner::addMotherRefs(vector & moIndxs, - GenParticle& newGen, GenParticleRefProd outRef, +void GenParticlePruner::getMotherKeys(vector & moIndxs, vector & moNewIndxs, const GenParticleRefVector& mothers) const { for(GenParticleRefVector::const_iterator j = mothers.begin(); j != mothers.end(); ++j) { GenParticleRef mom = *j; - if ( find(moIndxs.begin(), moIndxs.end(), mom.key()) == moIndxs.end() ) { - int idx = flags_[mom.key()]; + if (find(moIndxs.begin(), moIndxs.end(), mom.key()) == moIndxs.end()) { moIndxs.push_back( mom.key() ); - if(idx >= 0) { - GenParticleRef newMom(outRef, static_cast(idx)); - newGen.addMother(newMom); + int idx = flags_[mom.key()]; + if (idx >= 0 ) { + moNewIndxs.push_back( idx ); } else { - const GenParticleRefVector moms = mom->motherRefVector(); - if(moms.size()>0) - addMotherRefs(moIndxs, newGen, outRef, moms); + const GenParticleRefVector & moms = mom->motherRefVector(); + if(moms.size()>0) + getMotherKeys(moIndxs, moNewIndxs, moms); } } } diff --git a/PhysicsTools/JetMCAlgos/plugins/HadronAndPartonSelector.cc b/PhysicsTools/JetMCAlgos/plugins/HadronAndPartonSelector.cc index a04254a94ab66..a701029f968ea 100644 --- a/PhysicsTools/JetMCAlgos/plugins/HadronAndPartonSelector.cc +++ b/PhysicsTools/JetMCAlgos/plugins/HadronAndPartonSelector.cc @@ -118,7 +118,8 @@ HadronAndPartonSelector::HadronAndPartonSelector(const edm::ParameterSet& iConfi //register your products produces( "bHadrons" ); produces( "cHadrons" ); - produces( "partons" ); + produces( "algorithmicPartons" ); + produces( "physicsPartons" ); produces( "leptons" ); } @@ -206,6 +207,7 @@ HadronAndPartonSelector::produce(edm::Event& iEvent, const edm::EventSetup& iSet std::auto_ptr bHadrons ( new reco::GenParticleRefVector ); std::auto_ptr cHadrons ( new reco::GenParticleRefVector ); std::auto_ptr partons ( new reco::GenParticleRefVector ); + std::auto_ptr physicsPartons ( new reco::GenParticleRefVector ); std::auto_ptr leptons ( new reco::GenParticleRefVector ); // 
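The pruner rework above first walks the parentage recursively, collecting the indices of kept relatives while a visited list breaks circular references, and only afterwards sorts those indices and attaches the corresponding refs. A compact sketch of the collection step on a toy tree (flag values are illustrative; positive means the particle survives pruning):

def collect_kept(idx, daughters_of, flags, visited, kept):
    for d in daughters_of[idx]:
        if d in visited:
            continue                      # guard against circular parentage
        visited.add(d)
        if flags[d] > 0:
            kept.append(flags[d])         # index of the daughter in the pruned collection
        else:
            collect_kept(d, daughters_of, flags, visited, kept)

daughters_of = {0: [1, 2], 1: [3], 2: [], 3: [0]}     # note the 3 -> 0 back edge
flags = {0: 0, 1: -1, 2: 4, 3: 7}
kept = []
collect_kept(0, daughters_of, flags, {0}, kept)
print(sorted(kept))                       # refs are attached in sorted order, as above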
loop over particles and select b and c hadrons and leptons @@ -249,12 +251,20 @@ HadronAndPartonSelector::produce(edm::Event& iEvent, const edm::EventSetup& iSet } // select partons - if ( partonMode_!="Undefined" ) + if ( partonMode_!="Undefined" ) { partonSelector_->run(particles,partons); + for(reco::GenParticleCollection::const_iterator it = particles->begin(); it != particles->end(); ++it) + { + if( !(it->status()==3 || (( partonMode_=="Pythia8" ) && (it->status()==23)))) continue; + if( !CandMCTagUtils::isParton( *it ) ) continue; // skip particle if not a parton + physicsPartons->push_back( reco::GenParticleRef( particles, it - particles->begin() ) ); + } + } iEvent.put( bHadrons, "bHadrons" ); iEvent.put( cHadrons, "cHadrons" ); - iEvent.put( partons, "partons" ); + iEvent.put( partons, "algorithmicPartons" ); + iEvent.put( physicsPartons, "physicsPartons" ); iEvent.put( leptons, "leptons" ); } diff --git a/PhysicsTools/JetMCAlgos/python/AK4PFJetsMCFlavourInfos_cfi.py b/PhysicsTools/JetMCAlgos/python/AK4PFJetsMCFlavourInfos_cfi.py index 6bff3b31fe023..bcbe821370f89 100644 --- a/PhysicsTools/JetMCAlgos/python/AK4PFJetsMCFlavourInfos_cfi.py +++ b/PhysicsTools/JetMCAlgos/python/AK4PFJetsMCFlavourInfos_cfi.py @@ -4,7 +4,8 @@ jets = cms.InputTag("ak4PFJets"), bHadrons = cms.InputTag("selectedHadronsAndPartons","bHadrons"), cHadrons = cms.InputTag("selectedHadronsAndPartons","cHadrons"), - partons = cms.InputTag("selectedHadronsAndPartons","partons"), + partons = cms.InputTag("selectedHadronsAndPartons","physicsPartons"), + leptons = cms.InputTag("selectedHadronsAndPartons","leptons"), jetAlgorithm = cms.string("AntiKt"), rParam = cms.double(0.4), ghostRescaling = cms.double(1e-18), diff --git a/PhysicsTools/JetMCAlgos/python/AK5PFJetsMCFlavourInfos_cfi.py b/PhysicsTools/JetMCAlgos/python/AK5PFJetsMCFlavourInfos_cfi.py index 92d8936ca45c6..5bec65672a08e 100644 --- a/PhysicsTools/JetMCAlgos/python/AK5PFJetsMCFlavourInfos_cfi.py +++ b/PhysicsTools/JetMCAlgos/python/AK5PFJetsMCFlavourInfos_cfi.py @@ -4,7 +4,8 @@ jets = cms.InputTag("ak5PFJets"), bHadrons = cms.InputTag("selectedHadronsAndPartons","bHadrons"), cHadrons = cms.InputTag("selectedHadronsAndPartons","cHadrons"), - partons = cms.InputTag("selectedHadronsAndPartons","partons"), + partons = cms.InputTag("selectedHadronsAndPartons","physicsPartons"), + leptons = cms.InputTag("selectedHadronsAndPartons","leptons"), jetAlgorithm = cms.string("AntiKt"), rParam = cms.double(0.5), ghostRescaling = cms.double(1e-18), diff --git a/PhysicsTools/PatAlgos/plugins/PATElectronSlimmer.cc b/PhysicsTools/PatAlgos/plugins/PATElectronSlimmer.cc index 396a31812adb9..1dd0d3490dbb9 100644 --- a/PhysicsTools/PatAlgos/plugins/PATElectronSlimmer.cc +++ b/PhysicsTools/PatAlgos/plugins/PATElectronSlimmer.cc @@ -89,7 +89,6 @@ pat::PATElectronSlimmer::PATElectronSlimmer(const edm::ParameterSet & iConfig) : void pat::PATElectronSlimmer::beginLuminosityBlock(const edm::LuminosityBlock&, const edm::EventSetup& iSetup) { - if( modifyElectron_ ) electronModifier_->setEventContent(iSetup); } void @@ -114,6 +113,7 @@ pat::PATElectronSlimmer::produce(edm::Event & iEvent, const edm::EventSetup & iS out->reserve(src->size()); if( modifyElectron_ ) { electronModifier_->setEvent(iEvent); } + if( modifyElectron_ ) electronModifier_->setEventContent(iSetup); std::vector keys; for (View::const_iterator it = src->begin(), ed = src->end(); it != ed; ++it) { diff --git a/PhysicsTools/PatAlgos/plugins/PATPackedCandidateProducer.cc 
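The new physicsPartons collection above keeps partons selected by the "physics definition" (status 3, or status 23 for Pythia8 samples), alongside the existing algorithmic selection. A hedged sketch of that selection; is_parton() is a simple stand-in for CandMCTagUtils::isParton:

def is_parton(p):
    # stand-in: quarks and gluons only
    return abs(p["pdgId"]) in (1, 2, 3, 4, 5, 6, 21)

def select_physics_partons(particles, parton_mode):
    selected = []
    for i, p in enumerate(particles):
        if not (p["status"] == 3 or (parton_mode == "Pythia8" and p["status"] == 23)):
            continue
        if not is_parton(p):
            continue
        selected.append(i)                # stored as refs into the input collection
    return selected

particles = [{"pdgId": 5, "status": 23}, {"pdgId": 11, "status": 23}, {"pdgId": 21, "status": 1}]
print(select_physics_partons(particles, "Pythia8"))   # -> [0]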
b/PhysicsTools/PatAlgos/plugins/PATPackedCandidateProducer.cc index fa4c010449f51..1bfe5bcde6c51 100644 --- a/PhysicsTools/PatAlgos/plugins/PATPackedCandidateProducer.cc +++ b/PhysicsTools/PatAlgos/plugins/PATPackedCandidateProducer.cc @@ -70,8 +70,11 @@ namespace pat { edm::EDGetTokenT PVOrigs_; edm::EDGetTokenT TKOrigs_; edm::EDGetTokenT< edm::ValueMap > PuppiWeight_; + edm::EDGetTokenT< edm::ValueMap > PuppiWeightNoLep_; edm::EDGetTokenT > PuppiCandsMap_; edm::EDGetTokenT > PuppiCands_; + edm::EDGetTokenT > PuppiCandsNoLep_; + edm::EDGetTokenT > SVWhiteList_; double minPtForTrackProperties_; // for debugging @@ -92,8 +95,11 @@ pat::PATPackedCandidateProducer::PATPackedCandidateProducer(const edm::Parameter PVOrigs_(consumes(iConfig.getParameter("originalVertices"))), TKOrigs_(consumes(iConfig.getParameter("originalTracks"))), PuppiWeight_(consumes >(iConfig.getParameter("PuppiSrc"))), + PuppiWeightNoLep_(consumes >(iConfig.getParameter("PuppiNoLepSrc"))), PuppiCandsMap_(consumes >(iConfig.getParameter("PuppiSrc"))), PuppiCands_(consumes >(iConfig.getParameter("PuppiSrc"))), + PuppiCandsNoLep_(consumes >(iConfig.getParameter("PuppiNoLepSrc"))), + SVWhiteList_(consumes >(iConfig.getParameter("secondaryVerticesForWhiteList"))), minPtForTrackProperties_(iConfig.getParameter("minPtForTrackProperties")) { produces< std::vector > (); @@ -119,6 +125,19 @@ void pat::PATPackedCandidateProducer::produce(edm::Event& iEvent, const edm::Eve iEvent.getByToken( PuppiCands_, puppiCands ); std::vector mappingPuppi(puppiCands->size()); + edm::Handle< edm::ValueMap > puppiWeightNoLep; + iEvent.getByToken( PuppiWeightNoLep_, puppiWeightNoLep ); + edm::Handle > puppiCandsNoLep; + iEvent.getByToken( PuppiCandsNoLep_, puppiCandsNoLep ); + + std::vector puppiCandsNoLepPtrs; + if (puppiCandsNoLep.isValid()){ + for (auto pup : *puppiCandsNoLep){ + puppiCandsNoLepPtrs.push_back(pup.sourceCandidatePtr(0)); + } + } + auto const& puppiCandsNoLepV = puppiCandsNoLep.product(); + edm::Handle PVOrigs; iEvent.getByToken( PVOrigs_, PVOrigs ); @@ -129,7 +148,19 @@ void pat::PATPackedCandidateProducer::produce(edm::Event& iEvent, const edm::Eve const edm::Association & associatedPV=*(assoHandle.product()); const edm::ValueMap & associationQuality=*(assoQualityHandle.product()); + edm::Handle > svWhiteListHandle; + iEvent.getByToken(SVWhiteList_,svWhiteListHandle); + const edm::View & svWhiteList=*(svWhiteListHandle.product()); + std::set whiteList; + for(unsigned int i=0; i & c = svWhiteList[i].sourceCandidatePtr(j); + if(c.id() == cands.id()) whiteList.insert(c.key()); + } + } + edm::Handle PVs; iEvent.getByToken( PVs_, PVs ); reco::VertexRef PV(PVs.id()); @@ -188,7 +219,7 @@ void pat::PATPackedCandidateProducer::produce(edm::Event& iEvent, const edm::Eve } // properties of the best track outPtrP->back().setLostInnerHits( lostHits ); - if(outPtrP->back().pt() > minPtForTrackProperties_) { + if(outPtrP->back().pt() > minPtForTrackProperties_ || whiteList.find(ic)!=whiteList.end()) { outPtrP->back().setTrackProperties(*ctrack); //outPtrP->back().setTrackProperties(*ctrack,tsos.curvilinearError()); } @@ -209,11 +240,36 @@ void pat::PATPackedCandidateProducer::produce(edm::Event& iEvent, const edm::Eve outPtrP->back().setAssociationQuality(pat::PackedCandidate::PVAssociationQuality(pat::PackedCandidate::UsedInFitTight)); } - if (puppiWeight.isValid()){ - reco::PFCandidateRef pkref( cands, ic ); - outPtrP->back().setPuppiWeight( (*puppiWeight)[pkref]); - mappingPuppi[((*puppiCandsMap)[pkref]).key()]=ic; - } + if 
(puppiWeight.isValid()){ + reco::PFCandidateRef pkref( cands, ic ); + // outPtrP->back().setPuppiWeight( (*puppiWeight)[pkref]); + + float puppiWeightVal = (*puppiWeight)[pkref]; + float puppiWeightNoLepVal = 0.0; + + // Check the "no lepton" puppi weights. + // If present, then it is not a lepton, use stored weight + // If absent, it is a lepton, so set the weight to 1.0 + if ( puppiWeightNoLep.isValid() ) { + // Look for the pointer inside the "no lepton" candidate collection. + auto pkrefPtr = pkref->sourceCandidatePtr(0); + + bool foundNoLep = false; + for ( size_t ipcnl = 0; ipcnl < puppiCandsNoLepPtrs.size(); ipcnl++){ + if (puppiCandsNoLepPtrs[ipcnl] == pkrefPtr){ + foundNoLep = true; + puppiWeightNoLepVal = puppiCandsNoLepV->at(ipcnl).pt()/cand.pt(); // a hack for now, should use the value map + break; + } + } + if ( !foundNoLep || puppiWeightNoLepVal > 1 ) { + puppiWeightNoLepVal = 1.0; + } + } + outPtrP->back().setPuppiWeight( puppiWeightVal, puppiWeightNoLepVal ); + + mappingPuppi[((*puppiCandsMap)[pkref]).key()]=ic; + } mapping[ic] = ic; // trivial at the moment! if (cand.trackRef().isNonnull() && cand.trackRef().id() == TKOrigs.id()) { diff --git a/PhysicsTools/PatAlgos/plugins/PATPhotonSlimmer.cc b/PhysicsTools/PatAlgos/plugins/PATPhotonSlimmer.cc index 953cdb668b96d..251bdfeca409c 100644 --- a/PhysicsTools/PatAlgos/plugins/PATPhotonSlimmer.cc +++ b/PhysicsTools/PatAlgos/plugins/PATPhotonSlimmer.cc @@ -83,7 +83,6 @@ pat::PATPhotonSlimmer::PATPhotonSlimmer(const edm::ParameterSet & iConfig) : void pat::PATPhotonSlimmer::beginLuminosityBlock(const edm::LuminosityBlock&, const edm::EventSetup& iSetup) { - if( modifyPhoton_ ) photonModifier_->setEventContent(iSetup); } void @@ -108,6 +107,7 @@ pat::PATPhotonSlimmer::produce(edm::Event & iEvent, const edm::EventSetup & iSet out->reserve(src->size()); if( modifyPhoton_ ) { photonModifier_->setEvent(iEvent); } + if( modifyPhoton_ ) photonModifier_->setEventContent(iSetup); std::vector keys; for (View::const_iterator it = src->begin(), ed = src->end(); it != ed; ++it) { diff --git a/PhysicsTools/PatAlgos/plugins/PileupSummaryInfoSlimmer.cc b/PhysicsTools/PatAlgos/plugins/PileupSummaryInfoSlimmer.cc new file mode 100644 index 0000000000000..47e367f477fcd --- /dev/null +++ b/PhysicsTools/PatAlgos/plugins/PileupSummaryInfoSlimmer.cc @@ -0,0 +1,76 @@ +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/global/EDProducer.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" + +#include "SimDataFormats/PileupSummaryInfo/interface/PileupSummaryInfo.h" + +#include + +class PileupSummaryInfoSlimmer : public edm::global::EDProducer<> { +public: + PileupSummaryInfoSlimmer(const edm::ParameterSet& conf) : + src_(consumes >(conf.getParameter("src"))), + keepDetailedInfoFor_(conf.getParameter >("keepDetailedInfoFor")) { + produces >(); + } + + void produce(edm::StreamID, edm::Event &, edm::EventSetup const &) const override final; + +private: + const edm::EDGetTokenT > src_; + const std::vector keepDetailedInfoFor_; +}; + +void PileupSummaryInfoSlimmer::produce(edm::StreamID, + edm::Event& evt, + const edm::EventSetup& es ) const { + edm::Handle > input; + std::auto_ptr > output( new std::vector ); + + evt.getByToken(src_,input); + + for( const auto& psu : *input ) { + const int bunchCrossing = psu.getBunchCrossing(); + const int bunchSpacing = psu.getBunchSpacing(); + const int num_PU_vertices = psu.getPU_NumInteractions(); + const float 
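The weight lookup above encodes the following rule: if the candidate is found in the lepton-removed PUPPI collection, its no-lepton weight is taken as pt(reweighted)/pt(original) (the stated temporary hack in place of a value map); candidates absent from that collection are leptons and get weight 1, and values above 1 are reset to 1. A small stand-alone sketch:

def no_lep_weight(cand_key, cand_pt, nolep_keys, nolep_pts):
    # cand_key identifies the original candidate; nolep_keys/nolep_pts are
    # parallel lists describing the lepton-removed PUPPI collection
    for key, pt in zip(nolep_keys, nolep_pts):
        if key == cand_key:
            w = pt / cand_pt
            return w if w <= 1.0 else 1.0
    return 1.0                            # not found: lepton, keep full weight

print(no_lep_weight("cand7", 20.0, ["cand3", "cand7"], [5.0, 14.0]))   # -> 0.7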
TrueNumInteractions = psu.getTrueNumInteractions(); + + std::vector zpositions; + std::vector sumpT_lowpT; + std::vector sumpT_highpT; + std::vector ntrks_lowpT; + std::vector ntrks_highpT; + std::vector eventInfo; + std::vector pT_hats; + + const bool keep_details = std::find(keepDetailedInfoFor_.begin(), + keepDetailedInfoFor_.end(), + bunchCrossing) != keepDetailedInfoFor_.end(); + + if( keep_details ) { + zpositions = psu.getPU_zpositions(); + sumpT_lowpT = psu.getPU_sumpT_lowpT(); + sumpT_highpT = psu.getPU_sumpT_highpT(); + ntrks_lowpT = psu.getPU_ntrks_lowpT(); + ntrks_highpT = psu.getPU_ntrks_highpT(); + eventInfo = psu.getPU_EventID(); + pT_hats = psu.getPU_pT_hats(); + } + // insert the slimmed vertex info + output->emplace_back(num_PU_vertices, + zpositions, + sumpT_lowpT, sumpT_highpT, + ntrks_lowpT, ntrks_highpT, + eventInfo, + pT_hats, + bunchCrossing, + TrueNumInteractions, + bunchSpacing); + } + + evt.put(output); +} + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(PileupSummaryInfoSlimmer); diff --git a/PhysicsTools/PatAlgos/python/cleaningLayer1/tauCleaner_cfi.py b/PhysicsTools/PatAlgos/python/cleaningLayer1/tauCleaner_cfi.py index 0a1f241e92fdb..bd961ba20904e 100644 --- a/PhysicsTools/PatAlgos/python/cleaningLayer1/tauCleaner_cfi.py +++ b/PhysicsTools/PatAlgos/python/cleaningLayer1/tauCleaner_cfi.py @@ -7,8 +7,8 @@ preselection = cms.string( 'tauID("decayModeFinding") > 0.5 &' ' tauID("byLooseCombinedIsolationDeltaBetaCorr3Hits") > 0.5 &' - ' tauID("againstMuonTight") > 0.5 &' - ' tauID("againstElectronMedium") > 0.5' + ' tauID("againstMuonTight3") > 0.5 &' + ' tauID("againstElectronVLooseMVA5") > 0.5' ), # overlap checking configurables @@ -34,5 +34,5 @@ ), # finalCut (any string-based cut on pat::Tau) - finalCut = cms.string('pt > 20. & abs(eta) < 2.3'), + finalCut = cms.string('pt > 18. & abs(eta) < 2.3'), ) diff --git a/PhysicsTools/PatAlgos/python/mcMatchLayer0/jetFlavourId_cff.py b/PhysicsTools/PatAlgos/python/mcMatchLayer0/jetFlavourId_cff.py index 1434fea9bb4f2..6b798b45afaab 100644 --- a/PhysicsTools/PatAlgos/python/mcMatchLayer0/jetFlavourId_cff.py +++ b/PhysicsTools/PatAlgos/python/mcMatchLayer0/jetFlavourId_cff.py @@ -26,11 +26,12 @@ jets = cms.InputTag("ak4PFJetsCHS"), bHadrons = cms.InputTag("patJetPartons","bHadrons"), cHadrons = cms.InputTag("patJetPartons","cHadrons"), - partons = cms.InputTag("patJetPartons","partons"), + partons = cms.InputTag("patJetPartons","physicsPartons"), + leptons = cms.InputTag("patJetPartons","leptons"), jetAlgorithm = cms.string("AntiKt"), rParam = cms.double(0.4), ghostRescaling = cms.double(1e-18), - hadronFlavourHasPriority = cms.bool(True) + hadronFlavourHasPriority = cms.bool(False) ) # default PAT sequence for jet flavour identification diff --git a/PhysicsTools/PatAlgos/python/producersLayer1/jetProducer_cfi.py b/PhysicsTools/PatAlgos/python/producersLayer1/jetProducer_cfi.py index c74a0a5adbdea..3a67f59cbc9c3 100644 --- a/PhysicsTools/PatAlgos/python/producersLayer1/jetProducer_cfi.py +++ b/PhysicsTools/PatAlgos/python/producersLayer1/jetProducer_cfi.py @@ -45,7 +45,8 @@ cms.InputTag("pfSimpleSecondaryVertexHighPurBJetTags"), cms.InputTag("pfCombinedSecondaryVertexV2BJetTags"), cms.InputTag("pfCombinedInclusiveSecondaryVertexV2BJetTags"), - cms.InputTag("pfCombinedSecondaryVertexSoftLeptonBJetTags"), + cms.InputTag("softPFMuonBJetTags"), + cms.InputTag("softPFElectronBJetTags"), cms.InputTag("pfCombinedMVABJetTags") ), # clone tag infos ATTENTION: these take lots of space! 
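PileupSummaryInfoSlimmer above keeps the per-vertex detail vectors only for the bunch crossings listed in keepDetailedInfoFor (typically just the in-time crossing), while the interaction counts and the true number of interactions survive for every crossing. A sketch of that rule, using dictionaries as stand-ins for PileupSummaryInfo:

KEEP_DETAILED_FOR = [0]                   # illustrative configuration: in-time BX only

def slim(pileup_summaries):
    out = []
    for psu in pileup_summaries:
        slimmed = {"bx": psu["bx"], "nPU": psu["nPU"],
                   "trueNumInteractions": psu["trueNumInteractions"]}
        if psu["bx"] in KEEP_DETAILED_FOR:
            slimmed["zpositions"] = psu["zpositions"]    # detailed info survives
        out.append(slimmed)
    return out

print(slim([{"bx": 0, "nPU": 23, "trueNumInteractions": 21.4, "zpositions": [0.1, -2.3]},
            {"bx": 1, "nPU": 25, "trueNumInteractions": 21.4, "zpositions": [1.7]}]))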
@@ -74,7 +75,7 @@ # jet flavour idetification configurables getJetMCFlavour = cms.bool(True), useLegacyJetMCFlavour = cms.bool(False), - addJetFlavourInfo = cms.bool(False), + addJetFlavourInfo = cms.bool(True), JetPartonMapSource = cms.InputTag("patJetFlavourAssociationLegacy"), JetFlavourInfoSource = cms.InputTag("patJetFlavourAssociation"), # efficiencies diff --git a/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py b/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py index 0e660c704fec0..8f5e1854a3286 100644 --- a/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py +++ b/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py @@ -50,42 +50,12 @@ embedIsolationPFGammaCands = cms.bool(False), ## embed in AOD externally stored isolation PFGammaCandidates # embed IsoDeposits - isoDeposits = cms.PSet( - pfAllParticles = cms.InputTag("tauIsoDepositPFCandidates"), - pfChargedHadron = cms.InputTag("tauIsoDepositPFChargedHadrons"), - pfNeutralHadron = cms.InputTag("tauIsoDepositPFNeutralHadrons"), - pfGamma = cms.InputTag("tauIsoDepositPFGammas") - ), + isoDeposits = cms.PSet(), # user defined isolation variables the variables defined here will be accessible # via pat::Tau::userIsolation(IsolationKeys key) with the key as defined in # DataFormats/PatCandidates/interface/Isolation.h - # - # (set Pt thresholds for PFChargedHadrons (PFGammas) to 1.0 (1.5) GeV, - # matching the thresholds used when computing the tau iso. discriminators - # in RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByIsolation_cfi.py) - userIsolation = cms.PSet( - pfAllParticles = cms.PSet( - src = cms.InputTag("tauIsoDepositPFCandidates"), - deltaR = cms.double(0.5), - threshold = cms.double(0.) - ), - pfChargedHadron = cms.PSet( - src = cms.InputTag("tauIsoDepositPFChargedHadrons"), - deltaR = cms.double(0.5), - threshold = cms.double(0.) - ), - pfNeutralHadron = cms.PSet( - src = cms.InputTag("tauIsoDepositPFNeutralHadrons"), - deltaR = cms.double(0.5), - threshold = cms.double(0.) - ), - pfGamma = cms.PSet( - src = cms.InputTag("tauIsoDepositPFGammas"), - deltaR = cms.double(0.5), - threshold = cms.double(0.) 
- ) - ), + userIsolation = cms.PSet(), # tau ID (for efficiency studies) addTauID = cms.bool(True), @@ -95,22 +65,12 @@ # disk space decayModeFinding = cms.InputTag("hpsPFTauDiscriminationByDecayModeFinding"), decayModeFindingNewDMs =cms.InputTag("hpsPFTauDiscriminationByDecayModeFindingNewDMs"), -# decayModeFindingOldDMs = cms.InputTag("hpsPFTauDiscriminationByDecayModeFindingOldDMs"), - # byCombinedIsolationDeltaBetaCorrRaw = cms.InputTag("hpsPFTauDiscriminationByRawCombinedIsolationDBSumPtCorr"), - # byVLooseCombinedIsolationDeltaBetaCorr = cms.InputTag("hpsPFTauDiscriminationByVLooseCombinedIsolationDBSumPtCorr"), - # byLooseCombinedIsolationDeltaBetaCorr = cms.InputTag("hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr"), - # byMediumCombinedIsolationDeltaBetaCorr = cms.InputTag("hpsPFTauDiscriminationByMediumCombinedIsolationDBSumPtCorr"), - # byTightCombinedIsolationDeltaBetaCorr = cms.InputTag("hpsPFTauDiscriminationByTightCombinedIsolationDBSumPtCorr"), - chargedIsoPtSum = cms.InputTag("hpsPFTauMVA3IsolationChargedIsoPtSum"), - neutralIsoPtSum = cms.InputTag("hpsPFTauMVA3IsolationNeutralIsoPtSum"), - puCorrPtSum = cms.InputTag("hpsPFTauMVA3IsolationPUcorrPtSum"), - byIsolationMVA3oldDMwoLTraw = cms.InputTag('hpsPFTauDiscriminationByIsolationMVA3oldDMwoLTraw'), - byVLooseIsolationMVA3oldDMwoLT = cms.InputTag('hpsPFTauDiscriminationByVLooseIsolationMVA3oldDMwoLT'), - byLooseIsolationMVA3oldDMwoLT = cms.InputTag('hpsPFTauDiscriminationByLooseIsolationMVA3oldDMwoLT'), - byMediumIsolationMVA3oldDMwoLT = cms.InputTag('hpsPFTauDiscriminationByMediumIsolationMVA3oldDMwoLT'), - byTightIsolationMVA3oldDMwoLT = cms.InputTag('hpsPFTauDiscriminationByTightIsolationMVA3oldDMwoLT'), - byVTightIsolationMVA3oldDMwoLT = cms.InputTag('hpsPFTauDiscriminationByVTightIsolationMVA3oldDMwoLT'), - byVVTightIsolationMVA3oldDMwoLT = cms.InputTag('hpsPFTauDiscriminationByVVTightIsolationMVA3oldDMwoLT'), + chargedIsoPtSum = cms.InputTag("hpsPFTauChargedIsoPtSum"), + neutralIsoPtSum = cms.InputTag("hpsPFTauNeutralIsoPtSum"), + puCorrPtSum = cms.InputTag("hpsPFTauPUcorrPtSum"), + neutralIsoPtSumWeight = cms.InputTag("hpsPFTauNeutralIsoPtSumWeight"), + footprintCorrection = cms.InputTag("hpsPFTauFootprintCorrection"), + photonPtSumOutsideSignalCone = cms.InputTag("hpsPFTauPhotonPtSumOutsideSignalCone"), byIsolationMVA3oldDMwLTraw = cms.InputTag('hpsPFTauDiscriminationByIsolationMVA3oldDMwLTraw'), byVLooseIsolationMVA3oldDMwLT = cms.InputTag('hpsPFTauDiscriminationByVLooseIsolationMVA3oldDMwLT'), byLooseIsolationMVA3oldDMwLT = cms.InputTag('hpsPFTauDiscriminationByLooseIsolationMVA3oldDMwLT'), @@ -118,13 +78,6 @@ byTightIsolationMVA3oldDMwLT = cms.InputTag('hpsPFTauDiscriminationByTightIsolationMVA3oldDMwLT'), byVTightIsolationMVA3oldDMwLT = cms.InputTag('hpsPFTauDiscriminationByVTightIsolationMVA3oldDMwLT'), byVVTightIsolationMVA3oldDMwLT = cms.InputTag('hpsPFTauDiscriminationByVVTightIsolationMVA3oldDMwLT'), - byIsolationMVA3newDMwoLTraw = cms.InputTag('hpsPFTauDiscriminationByIsolationMVA3newDMwoLTraw'), - byVLooseIsolationMVA3newDMwoLT = cms.InputTag('hpsPFTauDiscriminationByVLooseIsolationMVA3newDMwoLT'), - byLooseIsolationMVA3newDMwoLT = cms.InputTag('hpsPFTauDiscriminationByLooseIsolationMVA3newDMwoLT'), - byMediumIsolationMVA3newDMwoLT = cms.InputTag('hpsPFTauDiscriminationByMediumIsolationMVA3newDMwoLT'), - byTightIsolationMVA3newDMwoLT = cms.InputTag('hpsPFTauDiscriminationByTightIsolationMVA3newDMwoLT'), - byVTightIsolationMVA3newDMwoLT = 
cms.InputTag('hpsPFTauDiscriminationByVTightIsolationMVA3newDMwoLT'), - byVVTightIsolationMVA3newDMwoLT = cms.InputTag('hpsPFTauDiscriminationByVVTightIsolationMVA3newDMwoLT'), byIsolationMVA3newDMwLTraw = cms.InputTag('hpsPFTauDiscriminationByIsolationMVA3newDMwLTraw'), byVLooseIsolationMVA3newDMwLT = cms.InputTag('hpsPFTauDiscriminationByVLooseIsolationMVA3newDMwLT'), byLooseIsolationMVA3newDMwLT = cms.InputTag('hpsPFTauDiscriminationByLooseIsolationMVA3newDMwLT'), @@ -132,25 +85,17 @@ byTightIsolationMVA3newDMwLT = cms.InputTag('hpsPFTauDiscriminationByTightIsolationMVA3newDMwLT'), byVTightIsolationMVA3newDMwLT = cms.InputTag('hpsPFTauDiscriminationByVTightIsolationMVA3newDMwLT'), byVVTightIsolationMVA3newDMwLT = cms.InputTag('hpsPFTauDiscriminationByVVTightIsolationMVA3newDMwLT'), - againstElectronLoose = cms.InputTag("hpsPFTauDiscriminationByLooseElectronRejection"), - againstElectronMedium = cms.InputTag("hpsPFTauDiscriminationByMediumElectronRejection"), - againstElectronTight = cms.InputTag("hpsPFTauDiscriminationByTightElectronRejection"), - againstMuonLoose = cms.InputTag("hpsPFTauDiscriminationByLooseMuonRejection"), - againstMuonMedium = cms.InputTag("hpsPFTauDiscriminationByMediumMuonRejection"), - againstMuonTight = cms.InputTag("hpsPFTauDiscriminationByTightMuonRejection"), - againstMuonLoose2 = cms.InputTag("hpsPFTauDiscriminationByLooseMuonRejection2"), - againstMuonMedium2 = cms.InputTag("hpsPFTauDiscriminationByMediumMuonRejection2"), - againstMuonTight2 = cms.InputTag("hpsPFTauDiscriminationByTightMuonRejection2"), againstMuonLoose3 = cms.InputTag("hpsPFTauDiscriminationByLooseMuonRejection3"), againstMuonTight3 = cms.InputTag("hpsPFTauDiscriminationByTightMuonRejection3"), - againstMuonMVAraw = cms.InputTag('hpsPFTauDiscriminationByMVArawMuonRejection'), - againstMuonLooseMVA = cms.InputTag('hpsPFTauDiscriminationByMVALooseMuonRejection'), - againstMuonMediumMVA = cms.InputTag('hpsPFTauDiscriminationByMVAMediumMuonRejection'), - againstMuonTightMVA = cms.InputTag('hpsPFTauDiscriminationByMVATightMuonRejection'), - byCombinedIsolationDeltaBetaCorrRaw3Hits = cms.InputTag("hpsPFTauDiscriminationByRawCombinedIsolationDBSumPtCorr3Hits"), byLooseCombinedIsolationDeltaBetaCorr3Hits = cms.InputTag("hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits"), byMediumCombinedIsolationDeltaBetaCorr3Hits = cms.InputTag("hpsPFTauDiscriminationByMediumCombinedIsolationDBSumPtCorr3Hits"), byTightCombinedIsolationDeltaBetaCorr3Hits = cms.InputTag("hpsPFTauDiscriminationByTightCombinedIsolationDBSumPtCorr3Hits"), + byCombinedIsolationDeltaBetaCorrRaw3Hits = cms.InputTag("hpsPFTauDiscriminationByRawCombinedIsolationDBSumPtCorr3Hits"), + byLoosePileupWeightedIsolation3Hits = cms.InputTag("hpsPFTauDiscriminationByLoosePileupWeightedIsolation3Hits"), + byMediumPileupWeightedIsolation3Hits = cms.InputTag("hpsPFTauDiscriminationByMediumPileupWeightedIsolation3Hits"), + byTightPileupWeightedIsolation3Hits = cms.InputTag("hpsPFTauDiscriminationByTightPileupWeightedIsolation3Hits"), + byPhotonPtSumOutsideSignalCone = cms.InputTag("hpsPFTauDiscriminationByPhotonPtSumOutsideSignalCone"), + byPileupWeightedIsolationRaw3Hits = cms.InputTag("hpsPFTauDiscriminationByRawPileupWeightedIsolation3Hits"), againstElectronMVA5raw = cms.InputTag("hpsPFTauDiscriminationByMVA5rawElectronRejection"), againstElectronMVA5category = cms.InputTag("hpsPFTauDiscriminationByMVA5rawElectronRejection:category"), againstElectronVLooseMVA5 = cms.InputTag("hpsPFTauDiscriminationByMVA5VLooseElectronRejection"), @@ 
-158,7 +103,6 @@ againstElectronMediumMVA5 = cms.InputTag("hpsPFTauDiscriminationByMVA5MediumElectronRejection"), againstElectronTightMVA5 = cms.InputTag("hpsPFTauDiscriminationByMVA5TightElectronRejection"), againstElectronVTightMVA5 = cms.InputTag("hpsPFTauDiscriminationByMVA5VTightElectronRejection"), -# againstElectronDeadECAL = cms.InputTag("hpsPFTauDiscriminationByDeadECALElectronRejection"), ), # mc matching configurables diff --git a/PhysicsTools/PatAlgos/python/recoLayer0/pfParticleSelectionForIso_cff.py b/PhysicsTools/PatAlgos/python/recoLayer0/pfParticleSelectionForIso_cff.py index f857d3ea080d7..7497051200047 100644 --- a/PhysicsTools/PatAlgos/python/recoLayer0/pfParticleSelectionForIso_cff.py +++ b/PhysicsTools/PatAlgos/python/recoLayer0/pfParticleSelectionForIso_cff.py @@ -1,12 +1,10 @@ import FWCore.ParameterSet.Config as cms -from CommonTools.ParticleFlow.PFBRECO_cff import particleFlowPtrs from CommonTools.ParticleFlow.PFBRECO_cff import pfPileUpIsoPFBRECO, pfNoPileUpIsoPFBRECO, pfNoPileUpIsoPFBRECOSequence from CommonTools.ParticleFlow.PFBRECO_cff import pfPileUpPFBRECO, pfNoPileUpPFBRECO, pfNoPileUpPFBRECOSequence from CommonTools.ParticleFlow.PFBRECO_cff import pfAllNeutralHadronsPFBRECO, pfAllChargedHadronsPFBRECO, pfAllPhotonsPFBRECO, pfAllChargedParticlesPFBRECO, pfPileUpAllChargedParticlesPFBRECO, pfAllNeutralHadronsAndPhotonsPFBRECO, pfSortByTypePFBRECOSequence from CommonTools.ParticleFlow.PFBRECO_cff import pfParticleSelectionPFBRECOSequence pfParticleSelectionForIsoSequence = cms.Sequence( - particleFlowPtrs + pfParticleSelectionPFBRECOSequence ) \ No newline at end of file diff --git a/PhysicsTools/PatAlgos/python/slimming/MicroEventContent_cff.py b/PhysicsTools/PatAlgos/python/slimming/MicroEventContent_cff.py index 74b455e74840d..c5c114ee336cc 100644 --- a/PhysicsTools/PatAlgos/python/slimming/MicroEventContent_cff.py +++ b/PhysicsTools/PatAlgos/python/slimming/MicroEventContent_cff.py @@ -59,8 +59,9 @@ 'keep patPackedGenParticles_packedGenParticles_*_*', 'keep recoGenParticles_prunedGenParticles_*_*', 'keep LHEEventProduct_*_*_*', - 'keep PileupSummaryInfos_*_*_*', + 'keep PileupSummaryInfos_slimmedAddPileupInfo_*_*', 'keep GenFilterInfo_*_*_*', + 'keep GenLumiInfoProduct_*_*_*', 'keep GenEventInfoProduct_generator_*_*', # RUN 'keep LHERunInfoProduct_*_*_*', diff --git a/PhysicsTools/PatAlgos/python/slimming/applySubstructure_cff.py b/PhysicsTools/PatAlgos/python/slimming/applySubstructure_cff.py index fb63bd362b33d..6d875a429f197 100644 --- a/PhysicsTools/PatAlgos/python/slimming/applySubstructure_cff.py +++ b/PhysicsTools/PatAlgos/python/slimming/applySubstructure_cff.py @@ -25,14 +25,14 @@ def applySubstructure( process ) : genJetCollection = cms.InputTag('slimmedGenJetsAK8') ) process.patJetsAK8.userData.userFloats.src = [] # start with empty list of user floats - process.selectedPatJetsAK8.cut = cms.string("pt > 200") + process.selectedPatJetsAK8.cut = cms.string("pt > 170") ## AK8 groomed masses from RecoJets.Configuration.RecoPFJets_cff import ak8PFJetsCHSPruned, ak8PFJetsCHSSoftDrop, ak8PFJetsCHSFiltered, ak8PFJetsCHSTrimmed process.ak8PFJetsCHSPruned = ak8PFJetsCHSPruned.clone() - process.ak8PFJetsCHSSoftDrop = ak8PFJetsCHSSoftDrop.clone() + #process.ak8PFJetsCHSSoftDrop = ak8PFJetsCHSSoftDrop.clone() # already in AOD process.ak8PFJetsCHSTrimmed = ak8PFJetsCHSTrimmed.clone() process.ak8PFJetsCHSFiltered = ak8PFJetsCHSFiltered.clone() process.load("RecoJets.JetProducers.ak8PFJetsCHS_groomingValueMaps_cfi") @@ -77,7 +77,7 @@ def applySubstructure( 
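Each entry of the tauIDs PSet above becomes a (name, value) pair on the pat::Tau, retrievable by the same key, so renamed or dropped discriminators change the strings analysts query. A minimal usage sketch against objects exposing a tauID(name) method (illustrative only, not tied to a particular framework setup):

def select_loose_taus(taus):
    # taus: iterable of pat::Tau-like objects exposing pt() and tauID(name)
    return [t for t in taus
            if t.tauID("decayModeFinding") > 0.5
            and t.tauID("byLooseCombinedIsolationDeltaBetaCorr3Hits") > 0.5
            and t.tauID("againstMuonTight3") > 0.5]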
process ) : fatJets=cms.InputTag('ak8PFJetsCHS'), # needed for subjet flavor clustering groomedFatJets=cms.InputTag('ak8PFJetsCHSSoftDrop') # needed for subjet flavor clustering ) - process.selectedPatJetsAK8PFCHSSoftDrop.cut = cms.string("pt > 200") + process.selectedPatJetsAK8PFCHSSoftDrop.cut = cms.string("pt > 170") process.slimmedJetsAK8PFCHSSoftDropSubjets = cms.EDProducer("PATJetSlimmer", src = cms.InputTag("selectedPatJetsAK8PFCHSSoftDropSubjets"), diff --git a/PhysicsTools/PatAlgos/python/slimming/metFilterPaths_cff.py b/PhysicsTools/PatAlgos/python/slimming/metFilterPaths_cff.py index 60acef746b604..e76d7fe4e4d06 100644 --- a/PhysicsTools/PatAlgos/python/slimming/metFilterPaths_cff.py +++ b/PhysicsTools/PatAlgos/python/slimming/metFilterPaths_cff.py @@ -2,16 +2,18 @@ ## We don't use "import *" because the cff contains some modules for which the C++ class doesn't exist ## and this triggers an error under unscheduled mode -from RecoMET.METFilters.metFilters_cff import HBHENoiseFilterResultProducer, HBHENoiseFilter, CSCTightHaloFilter, hcalLaserEventFilter, EcalDeadCellTriggerPrimitiveFilter, eeBadScFilter, ecalLaserCorrFilter +from RecoMET.METFilters.metFilters_cff import HBHENoiseFilterResultProducer, HBHENoiseFilter, HBHENoiseIsoFilter, CSCTightHaloFilter, hcalLaserEventFilter, EcalDeadCellTriggerPrimitiveFilter, eeBadScFilter, ecalLaserCorrFilter, EcalDeadCellBoundaryEnergyFilter, primaryVertexFilter from RecoMET.METFilters.metFilters_cff import goodVertices, trackingFailureFilter, trkPOGFilters, manystripclus53X, toomanystripclus53X, logErrorTooManyClusters from RecoMET.METFilters.metFilters_cff import metFilters # individual filters Flag_HBHENoiseFilter = cms.Path(HBHENoiseFilterResultProducer * HBHENoiseFilter) +Flag_HBHENoiseIsoFilter = cms.Path(HBHENoiseFilterResultProducer * HBHENoiseIsoFilter) Flag_CSCTightHaloFilter = cms.Path(CSCTightHaloFilter) Flag_hcalLaserEventFilter = cms.Path(hcalLaserEventFilter) Flag_EcalDeadCellTriggerPrimitiveFilter = cms.Path(EcalDeadCellTriggerPrimitiveFilter) -Flag_goodVertices = cms.Path(goodVertices) +Flag_EcalDeadCellBoundaryEnergyFilter = cms.Path(EcalDeadCellBoundaryEnergyFilter) +Flag_goodVertices = cms.Path(primaryVertexFilter) Flag_trackingFailureFilter = cms.Path(goodVertices + trackingFailureFilter) Flag_eeBadScFilter = cms.Path(eeBadScFilter) Flag_ecalLaserCorrFilter = cms.Path(ecalLaserCorrFilter) @@ -26,13 +28,13 @@ Flag_METFilters = cms.Path(metFilters) #add your new path here!! 
-allMetFilterPaths=['HBHENoiseFilter','CSCTightHaloFilter','hcalLaserEventFilter','EcalDeadCellTriggerPrimitiveFilter','goodVertices','eeBadScFilter', +allMetFilterPaths=['HBHENoiseFilter','HBHENoiseIsoFilter','CSCTightHaloFilter','hcalLaserEventFilter','EcalDeadCellTriggerPrimitiveFilter','EcalDeadCellBoundaryEnergyFilter','goodVertices','eeBadScFilter', 'ecalLaserCorrFilter','trkPOGFilters','trkPOG_manystripclus53X','trkPOG_toomanystripclus53X','trkPOG_logErrorTooManyClusters','METFilters'] def miniAOD_customizeMETFiltersFastSim(process): """Replace some MET filters that don't work in FastSim with trivial bools""" - for X in 'CSCTightHaloFilter', 'HBHENoiseFilter', 'HBHENoiseFilterResultProducer': + for X in 'CSCTightHaloFilter', 'HBHENoiseFilter', 'HBHENoiseIsoFilter', 'HBHENoiseFilterResultProducer': process.globalReplace(X, cms.EDFilter("HLTBool", result=cms.bool(True))) for X in 'manystripclus53X', 'toomanystripclus53X', 'logErrorTooManyClusters': process.globalReplace(X, cms.EDFilter("HLTBool", result=cms.bool(False))) diff --git a/PhysicsTools/PatAlgos/python/slimming/miniAOD_tools.py b/PhysicsTools/PatAlgos/python/slimming/miniAOD_tools.py index 186efcd2a4980..a85f97e73eb6d 100644 --- a/PhysicsTools/PatAlgos/python/slimming/miniAOD_tools.py +++ b/PhysicsTools/PatAlgos/python/slimming/miniAOD_tools.py @@ -148,23 +148,39 @@ def miniAOD_customizeCommon(process): process.slimmedPhotons.modifierConfig.modifications = egamma_modifications #VID Electron IDs - electron_ids = ['RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_CSA14_50ns_V1_cff', - 'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_CSA14_PU20bx25_V0_cff', - 'RecoEgamma.ElectronIdentification.Identification.heepElectronID_HEEPV50_CSA14_25ns_cff', - 'RecoEgamma.ElectronIdentification.Identification.heepElectronID_HEEPV50_CSA14_startup_cff'] - switchOnVIDElectronIdProducer(process, DataFormat.MiniAOD) + electron_ids = ['RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_PHYS14_PU20bx25_V2_cff', + 'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Spring15_25ns_V1_cff', + 'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Spring15_50ns_V1_cff', + 'RecoEgamma.ElectronIdentification.Identification.heepElectronID_HEEPV60_cff', + 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Spring15_25ns_nonTrig_V1_cff'] + switchOnVIDElectronIdProducer(process,DataFormat.MiniAOD) process.egmGsfElectronIDs.physicsObjectSrc = \ cms.InputTag("reducedEgamma","reducedGedGsfElectrons") - process.electronIDValueMapProducer.src = \ - cms.InputTag("reducedEgamma","reducedGedGsfElectrons") - process.electronIDValueMapProducer.ebReducedRecHitCollection = \ - cms.InputTag("reducedEgamma","reducedEBRecHits") - process.electronIDValueMapProducer.eeReducedRecHitCollection = \ - cms.InputTag("reducedEgamma","reducedEERecHits") - process.electronIDValueMapProducer.esReducedRecHitCollection = \ - cms.InputTag("reducedEgamma","reducedESRecHits") + process.electronMVAValueMapProducer.src = \ + cms.InputTag('reducedEgamma','reducedGedGsfElectrons') + process.electronRegressionValueMapProducer.src = \ + cms.InputTag('reducedEgamma','reducedGedGsfElectrons') for idmod in electron_ids: setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection,None,False) + + #VID Photon IDs + photon_ids = ['RecoEgamma.PhotonIdentification.Identification.cutBasedPhotonID_PHYS14_PU20bx25_V2_cff', + 
'RecoEgamma.PhotonIdentification.Identification.cutBasedPhotonID_Spring15_50ns_V1_cff', + 'RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring15_25ns_nonTrig_V2_cff', + 'RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring15_50ns_nonTrig_V2_cff'] + switchOnVIDPhotonIdProducer(process,DataFormat.MiniAOD) + process.egmPhotonIDs.physicsObjectSrc = \ + cms.InputTag("reducedEgamma","reducedGedPhotons") + process.photonIDValueMapProducer.src = \ + cms.InputTag("reducedEgamma","reducedGedPhotons") + process.photonRegressionValueMapProducer.src = \ + cms.InputTag("reducedEgamma","reducedGedPhotons") + process.photonIDValueMapProducer.particleBasedIsolation = \ + cms.InputTag("reducedEgamma","reducedPhotonPfCandMap") + process.photonMVAValueMapProducer.src = \ + cms.InputTag('reducedEgamma','reducedGedPhotons') + for idmod in photon_ids: + setupAllVIDIdsInModule(process,idmod,setupVIDPhotonSelection,None,False) # Adding puppi jets process.load('CommonTools.PileupAlgos.Puppi_cff') @@ -172,6 +188,17 @@ def miniAOD_customizeCommon(process): process.ak4PFJetsPuppi.doAreaFastjet = True # even for standard ak4PFJets this is overwritten in RecoJets/Configuration/python/RecoPFJets_cff #process.puppi.candName = cms.InputTag('packedPFCandidates') #process.puppi.vertexName = cms.InputTag('offlineSlimmedPrimaryVertices') + # kind of ugly, is there a better way to do this? + process.pfNoLepPUPPI = cms.EDFilter("PdgIdCandViewSelector", + src = cms.InputTag("particleFlow"), + pdgId = cms.vint32( 1,2,22,111,130,310,2112,211,-211,321,-321,999211,2212,-2212 ) + ) + process.pfLeptonsPUPPET = cms.EDFilter("PdgIdCandViewSelector", + src = cms.InputTag("particleFlow"), + pdgId = cms.vint32(-11,11,-13,13), + ) + process.puppiNoLep = process.puppi.clone() + process.puppiNoLep.candName = cms.InputTag('pfNoLepPUPPI') from RecoJets.JetAssociationProducers.j2tParametersVX_cfi import j2tParametersVX process.ak4PFJetsPuppiTracksAssociatorAtVertex = cms.EDProducer("JetTracksAssociatorAtVertex", @@ -202,9 +229,12 @@ def miniAOD_customizeCommon(process): process.slimmedJetsPuppi.packedPFCandidates = cms.InputTag("packedPFCandidates") ## puppi met - process.load('RecoMET.METProducers.PFMET_cfi') - process.pfMetPuppi = process.pfMet.clone() - process.pfMetPuppi.src = cms.InputTag("puppi") + process.puppiForMET = cms.EDProducer("CandViewMerger", + src = cms.VInputTag( "pfLeptonsPUPPET", "puppiNoLep") + ) + import RecoMET.METProducers.PFMET_cfi + process.pfMetPuppi = RecoMET.METProducers.PFMET_cfi.pfMet.clone() + process.pfMetPuppi.src = cms.InputTag("puppiForMET") process.pfMetPuppi.alias = cms.string('pfMetPuppi') ## type1 correction, from puppi jets process.corrPfMetType1Puppi = process.corrPfMetType1.clone( @@ -228,7 +258,13 @@ def miniAOD_customizeCommon(process): process.slimmedMETsPuppi.type1Uncertainties = cms.InputTag("patPFMetT1") # only central value for now del process.slimmedMETsPuppi.type1p2Uncertainties # not available + ## Force a re-run of the tau id during MiniAOD production stage + process.load('RecoTauTag.Configuration.RecoPFTauTag_cff') + def miniAOD_customizeMC(process): + #slimmed pileup information + process.load('PhysicsTools.PatAlgos.slimming.slimmedAddPileupInfo_cfi') + process.muonMatch.matched = "prunedGenParticles" process.electronMatch.matched = "prunedGenParticles" process.electronMatch.src = cms.InputTag("reducedEgamma","reducedGedGsfElectrons") @@ -236,6 +272,7 @@ def miniAOD_customizeMC(process): process.photonMatch.src = cms.InputTag("reducedEgamma","reducedGedPhotons") 
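For the PUPPI MET changes introduced above in miniAOD_tools.py, the pattern is: split the particleFlow candidates by PDG id into leptons and everything else, run PUPPI only on the lepton-free collection (puppiNoLep), and merge the PF leptons back in before the MET producer. A condensed sketch of that chain, with the module labels taken from this patch:

import FWCore.ParameterSet.Config as cms

# Hadrons, photons and other non-lepton candidates: input to the lepton-free PUPPI run.
pfNoLepPUPPI = cms.EDFilter("PdgIdCandViewSelector",
    src = cms.InputTag("particleFlow"),
    pdgId = cms.vint32(1, 2, 22, 111, 130, 310, 2112, 211, -211, 321, -321, 999211, 2212, -2212)
)
# Electrons and muons, kept aside from the PUPPI weighting and merged back below.
pfLeptonsPUPPET = cms.EDFilter("PdgIdCandViewSelector",
    src = cms.InputTag("particleFlow"),
    pdgId = cms.vint32(-11, 11, -13, 13)
)
# puppiNoLep is a clone of the standard puppi module with candName switched to
# pfNoLepPUPPI (see above); its output plus the leptons forms the MET input.
puppiForMET = cms.EDProducer("CandViewMerger",
    src = cms.VInputTag("pfLeptonsPUPPET", "puppiNoLep")
)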
process.tauMatch.matched = "prunedGenParticles" process.tauGenJets.GenParticles = "prunedGenParticles" + process.patJetPartons.particles = "prunedGenParticles" process.patJetPartonMatch.matched = "prunedGenParticles" process.patJetPartonMatch.mcStatus = [ 3, 23 ] process.patJetGenJetMatch.matched = "slimmedGenJets" @@ -245,7 +282,7 @@ def miniAOD_customizeMC(process): process.patPhotons.embedGenMatch = False process.patTaus.embedGenMatch = False process.patJets.embedGenPartonMatch = False - #also jet flavour must be switched to ak4 + #also jet flavour must be switched process.patJetFlavourAssociation.rParam = 0.4 def miniAOD_customizeOutput(out): diff --git a/PhysicsTools/PatAlgos/python/slimming/packedPFCandidates_cfi.py b/PhysicsTools/PatAlgos/python/slimming/packedPFCandidates_cfi.py index dff9d50202046..febbf0dc240eb 100644 --- a/PhysicsTools/PatAlgos/python/slimming/packedPFCandidates_cfi.py +++ b/PhysicsTools/PatAlgos/python/slimming/packedPFCandidates_cfi.py @@ -7,5 +7,7 @@ originalTracks = cms.InputTag("generalTracks"), vertexAssociator = cms.InputTag("primaryVertexAssociation","original"), PuppiSrc = cms.InputTag("puppi"), + PuppiNoLepSrc = cms.InputTag("puppiNoLep"), + secondaryVerticesForWhiteList = cms.InputTag("inclusiveCandidateSecondaryVertices"), minPtForTrackProperties = cms.double(0.95) ) diff --git a/PhysicsTools/PatAlgos/python/slimming/slimmedAddPileupInfo_cfi.py b/PhysicsTools/PatAlgos/python/slimming/slimmedAddPileupInfo_cfi.py new file mode 100644 index 0000000000000..42ed12c60d4f8 --- /dev/null +++ b/PhysicsTools/PatAlgos/python/slimming/slimmedAddPileupInfo_cfi.py @@ -0,0 +1,7 @@ +import FWCore.ParameterSet.Config as cms + +slimmedAddPileupInfo = cms.EDProducer( + 'PileupSummaryInfoSlimmer', + src = cms.InputTag('addPileupInfo'), + keepDetailedInfoFor = cms.vint32(0) +) diff --git a/PhysicsTools/PatAlgos/python/slimming/slimmedGenJets_cfi.py b/PhysicsTools/PatAlgos/python/slimming/slimmedGenJets_cfi.py index ff3838ee7f7f8..1efddf224201b 100644 --- a/PhysicsTools/PatAlgos/python/slimming/slimmedGenJets_cfi.py +++ b/PhysicsTools/PatAlgos/python/slimming/slimmedGenJets_cfi.py @@ -10,7 +10,7 @@ slimmedGenJetsAK8 = cms.EDProducer("PATGenJetSlimmer", - src = cms.InputTag("ak8GenJets"), + src = cms.InputTag("ak8GenJetsNoNu"), packedGenParticles = cms.InputTag("packedGenParticles"), cut = cms.string("pt > 150"), clearDaughters = cms.bool(False), #False means rekeying diff --git a/PhysicsTools/PatAlgos/python/tools/jetTools.py b/PhysicsTools/PatAlgos/python/tools/jetTools.py index c3e1985eb6098..76ef4d3093f36 100644 --- a/PhysicsTools/PatAlgos/python/tools/jetTools.py +++ b/PhysicsTools/PatAlgos/python/tools/jetTools.py @@ -318,7 +318,8 @@ def toolCode(self, process): _newPatJetFlavourAssociation.rParam=rParam _newPatJetFlavourAssociation.bHadrons=cms.InputTag("patJetPartons"+postfix,"bHadrons") _newPatJetFlavourAssociation.cHadrons=cms.InputTag("patJetPartons"+postfix,"cHadrons") - _newPatJetFlavourAssociation.partons=cms.InputTag("patJetPartons"+postfix,"partons") + _newPatJetFlavourAssociation.partons=cms.InputTag("patJetPartons"+postfix,"physicsPartons") + _newPatJetFlavourAssociation.leptons=cms.InputTag("patJetPartons"+postfix,"leptons") else : setattr(process, 'patJetFlavourAssociation'+_labelName+postfix, patJetFlavourAssociation.clone( @@ -327,7 +328,8 @@ def toolCode(self, process): rParam=rParam, bHadrons = cms.InputTag("patJetPartons"+postfix,"bHadrons"), cHadrons = cms.InputTag("patJetPartons"+postfix,"cHadrons"), - partons = 
cms.InputTag("patJetPartons"+postfix,"partons") + partons = cms.InputTag("patJetPartons"+postfix,"physicsPartons"), + leptons = cms.InputTag("patJetPartons"+postfix,"leptons") ) ) knownModules.append('patJetFlavourAssociation'+_labelName+postfix) diff --git a/PhysicsTools/PatAlgos/python/tools/tauTools.py b/PhysicsTools/PatAlgos/python/tools/tauTools.py index 167dce4cfe9b6..6de2055df4d5f 100644 --- a/PhysicsTools/PatAlgos/python/tools/tauTools.py +++ b/PhysicsTools/PatAlgos/python/tools/tauTools.py @@ -102,28 +102,29 @@ def _switchToPFTau(process, # Hadron-plus-strip(s) (HPS) Tau Discriminators hpsTauIDSources = [ ("decayModeFindingNewDMs", "DiscriminationByDecayModeFindingNewDMs"), - ("decayModeFindingOldDMs", "DiscriminationByDecayModeFindingOldDMs"), ("decayModeFinding", "DiscriminationByDecayModeFinding"), # CV: kept for backwards compatibility - ("byLooseIsolation", "DiscriminationByLooseIsolation"), - ("byVLooseCombinedIsolationDeltaBetaCorr", "DiscriminationByVLooseCombinedIsolationDBSumPtCorr"), - ("byLooseCombinedIsolationDeltaBetaCorr", "DiscriminationByLooseCombinedIsolationDBSumPtCorr"), - ("byMediumCombinedIsolationDeltaBetaCorr", "DiscriminationByMediumCombinedIsolationDBSumPtCorr"), - ("byTightCombinedIsolationDeltaBetaCorr", "DiscriminationByTightCombinedIsolationDBSumPtCorr"), - ("byCombinedIsolationDeltaBetaCorrRaw", "DiscriminationByRawCombinedIsolationDBSumPtCorr"), ("byLooseCombinedIsolationDeltaBetaCorr3Hits", "DiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits"), ("byMediumCombinedIsolationDeltaBetaCorr3Hits", "DiscriminationByMediumCombinedIsolationDBSumPtCorr3Hits"), ("byTightCombinedIsolationDeltaBetaCorr3Hits", "DiscriminationByTightCombinedIsolationDBSumPtCorr3Hits"), ("byCombinedIsolationDeltaBetaCorrRaw3Hits", "DiscriminationByRawCombinedIsolationDBSumPtCorr3Hits"), - ("chargedIsoPtSum", "MVA3IsolationChargedIsoPtSum"), - ("neutralIsoPtSum", "MVA3IsolationNeutralIsoPtSum"), - ("puCorrPtSum", "MVA3IsolationPUcorrPtSum"), - ("byIsolationMVA3oldDMwoLTraw", "DiscriminationByIsolationMVA3oldDMwoLTraw"), - ("byVLooseIsolationMVA3oldDMwoLT", "DiscriminationByVLooseIsolationMVA3oldDMwoLT"), - ("byLooseIsolationMVA3oldDMwoLT", "DiscriminationByLooseIsolationMVA3oldDMwoLT"), - ("byMediumIsolationMVA3oldDMwoLT", "DiscriminationByMediumIsolationMVA3oldDMwoLT"), - ("byTightIsolationMVA3oldDMwoLT", "DiscriminationByTightIsolationMVA3oldDMwoLT"), - ("byVTightIsolationMVA3oldDMwoLT", "DiscriminationByVTightIsolationMVA3oldDMwoLT"), - ("byVVTightIsolationMVA3oldDMwoLT", "DiscriminationByVVTightIsolationMVA3oldDMwoLT"), + ("byLoosePileupWeightedIsolation3Hits", "DiscriminationByLoosePileupWeightedIsolation3Hits"), + ("byMediumPileupWeightedIsolation3Hits", "DiscriminationByMediumPileupWeightedIsolation3Hits"), + ("byTightPileupWeightedIsolation3Hits", "DiscriminationByTightPileupWeightedIsolation3Hits"), + ("byPhotonPtSumOutsideSignalCone", "DiscriminationByPhotonPtSumOutsideSignalCone"), + ("byPileupWeightedIsolationRaw3Hits", "DiscriminationByRawPileupWeightedIsolation3Hits"), + ("chargedIsoPtSum", "ChargedIsoPtSum"), + ("neutralIsoPtSum", "NeutralIsoPtSum"), + ("puCorrPtSum", "PUcorrPtSum"), + ("neutralIsoPtSumWeight", "NeutralIsoPtSumWeight"), + ("footprintCorrection", "FootprintCorrection"), + ("photonPtSumOutsideSignalCone", "PhotonPtSumOutsideSignalCone"), + ##("byIsolationMVA3oldDMwoLTraw", "DiscriminationByIsolationMVA3oldDMwoLTraw"), + ##("byVLooseIsolationMVA3oldDMwoLT", "DiscriminationByVLooseIsolationMVA3oldDMwoLT"), + ##("byLooseIsolationMVA3oldDMwoLT", 
"DiscriminationByLooseIsolationMVA3oldDMwoLT"), + ##("byMediumIsolationMVA3oldDMwoLT", "DiscriminationByMediumIsolationMVA3oldDMwoLT"), + ##("byTightIsolationMVA3oldDMwoLT", "DiscriminationByTightIsolationMVA3oldDMwoLT"), + ##("byVTightIsolationMVA3oldDMwoLT", "DiscriminationByVTightIsolationMVA3oldDMwoLT"), + ##("byVVTightIsolationMVA3oldDMwoLT", "DiscriminationByVVTightIsolationMVA3oldDMwoLT"), ("byIsolationMVA3oldDMwLTraw", "DiscriminationByIsolationMVA3oldDMwLTraw"), ("byVLooseIsolationMVA3oldDMwLT", "DiscriminationByVLooseIsolationMVA3oldDMwLT"), ("byLooseIsolationMVA3oldDMwLT", "DiscriminationByLooseIsolationMVA3oldDMwLT"), @@ -132,12 +133,12 @@ def _switchToPFTau(process, ("byVTightIsolationMVA3oldDMwLT", "DiscriminationByVTightIsolationMVA3oldDMwLT"), ("byVVTightIsolationMVA3oldDMwLT", "DiscriminationByVVTightIsolationMVA3oldDMwLT"), ("byIsolationMVA3newDMwoLTraw", "DiscriminationByIsolationMVA3newDMwoLTraw"), - ("byVLooseIsolationMVA3newDMwoLT", "DiscriminationByVLooseIsolationMVA3newDMwoLT"), - ("byLooseIsolationMVA3newDMwoLT", "DiscriminationByLooseIsolationMVA3newDMwoLT"), - ("byMediumIsolationMVA3newDMwoLT", "DiscriminationByMediumIsolationMVA3newDMwoLT"), - ("byTightIsolationMVA3newDMwoLT", "DiscriminationByTightIsolationMVA3newDMwoLT"), - ("byVTightIsolationMVA3newDMwoLT", "DiscriminationByVTightIsolationMVA3newDMwoLT"), - ("byVVTightIsolationMVA3newDMwoLT", "DiscriminationByVVTightIsolationMVA3newDMwoLT"), + ##("byVLooseIsolationMVA3newDMwoLT", "DiscriminationByVLooseIsolationMVA3newDMwoLT"), + ##("byLooseIsolationMVA3newDMwoLT", "DiscriminationByLooseIsolationMVA3newDMwoLT"), + ##("byMediumIsolationMVA3newDMwoLT", "DiscriminationByMediumIsolationMVA3newDMwoLT"), + ##("byTightIsolationMVA3newDMwoLT", "DiscriminationByTightIsolationMVA3newDMwoLT"), + ##("byVTightIsolationMVA3newDMwoLT", "DiscriminationByVTightIsolationMVA3newDMwoLT"), + ##("byVVTightIsolationMVA3newDMwoLT", "DiscriminationByVVTightIsolationMVA3newDMwoLT"), ("byIsolationMVA3newDMwLTraw", "DiscriminationByIsolationMVA3newDMwLTraw"), ("byVLooseIsolationMVA3newDMwLT", "DiscriminationByVLooseIsolationMVA3newDMwLT"), ("byLooseIsolationMVA3newDMwLT", "DiscriminationByLooseIsolationMVA3newDMwLT"), @@ -145,9 +146,9 @@ def _switchToPFTau(process, ("byTightIsolationMVA3newDMwLT", "DiscriminationByTightIsolationMVA3newDMwLT"), ("byVTightIsolationMVA3newDMwLT", "DiscriminationByVTightIsolationMVA3newDMwLT"), ("byVVTightIsolationMVA3newDMwLT", "DiscriminationByVVTightIsolationMVA3newDMwLT"), - ("againstElectronLoose", "DiscriminationByLooseElectronRejection"), - ("againstElectronMedium", "DiscriminationByMediumElectronRejection"), - ("againstElectronTight", "DiscriminationByTightElectronRejection"), + ##("againstElectronLoose", "DiscriminationByLooseElectronRejection"), + ##("againstElectronMedium", "DiscriminationByMediumElectronRejection"), + ##("againstElectronTight", "DiscriminationByTightElectronRejection"), ("againstElectronMVA5raw", "DiscriminationByMVA5rawElectronRejection"), ("againstElectronMVA5category", "DiscriminationByMVA5rawElectronRejection:category"), ("againstElectronVLooseMVA5", "DiscriminationByMVA5VLooseElectronRejection"), @@ -155,20 +156,20 @@ def _switchToPFTau(process, ("againstElectronMediumMVA5", "DiscriminationByMVA5MediumElectronRejection"), ("againstElectronTightMVA5", "DiscriminationByMVA5TightElectronRejection"), ("againstElectronVTightMVA5", "DiscriminationByMVA5VTightElectronRejection"), - ("againstElectronDeadECAL", "DiscriminationByDeadECALElectronRejection"), - ("againstMuonLoose", 
"DiscriminationByLooseMuonRejection"), - ("againstMuonMedium", "DiscriminationByMediumMuonRejection"), - ("againstMuonTight", "DiscriminationByTightMuonRejection"), - ("againstMuonLoose2", "DiscriminationByLooseMuonRejection2"), - ("againstMuonMedium2", "DiscriminationByMediumMuonRejection2"), - ("againstMuonTight2", "DiscriminationByTightMuonRejection2"), + ##("againstElectronDeadECAL", "DiscriminationByDeadECALElectronRejection"), + ##("againstMuonLoose", "DiscriminationByLooseMuonRejection"), + ##("againstMuonMedium", "DiscriminationByMediumMuonRejection"), + ##("againstMuonTight", "DiscriminationByTightMuonRejection"), + ##("againstMuonLoose2", "DiscriminationByLooseMuonRejection2"), + ##("againstMuonMedium2", "DiscriminationByMediumMuonRejection2"), + ##("againstMuonTight2", "DiscriminationByTightMuonRejection2"), ("againstMuonLoose3", "DiscriminationByLooseMuonRejection3"), ("againstMuonTight3", "DiscriminationByTightMuonRejection3"), - ("againstMuonMVAraw", "DiscriminationByMVArawMuonRejection"), - ("againstMuonLooseMVA", "DiscriminationByMVALooseMuonRejection"), - ("againstMuonMediumMVA", "DiscriminationByMVAMediumMuonRejection"), - ("againstMuonTightMVA", "DiscriminationByMVATightMuonRejection") ] - + ##("againstMuonMVAraw", "DiscriminationByMVArawMuonRejection"), + ##("againstMuonLooseMVA", "DiscriminationByMVALooseMuonRejection"), + ##("againstMuonMediumMVA", "DiscriminationByMVAMediumMuonRejection"), + ##("againstMuonTightMVA", "DiscriminationByMVATightMuonRejection") + ] # switch to PFTau collection produced for fixed dR = 0.07 signal cone size def switchToPFTauFixedCone(process, @@ -208,8 +209,8 @@ def switchToPFTauHPS(process, ## adapt cleanPatTaus if hasattr(process, "cleanPatTaus" + patTauLabel + postfix): getattr(process, "cleanPatTaus" + patTauLabel + postfix).preselection = \ - 'pt > 20 & abs(eta) < 2.3 & tauID("decayModeFindingOldDMs") > 0.5 & tauID("byLooseCombinedIsolationDeltaBetaCorr3Hits") > 0.5' \ - + ' & tauID("againstMuonTight3") > 0.5 & tauID("againstElectronLoose") > 0.5' + 'pt > 18 & abs(eta) < 2.3 & tauID("decayModeFinding") > 0.5 & tauID("byLooseCombinedIsolationDeltaBetaCorr3Hits") > 0.5' \ + + ' & tauID("againstMuonTight3") > 0.5 & tauID("againstElectronVLooseMVA5") > 0.5' # Select switcher by string def switchToPFTauByType(process, diff --git a/RecoBTag/SoftLepton/interface/ElectronTagger.h b/RecoBTag/SoftLepton/interface/ElectronTagger.h index c094661d82c57..8eec54b44ff7f 100755 --- a/RecoBTag/SoftLepton/interface/ElectronTagger.h +++ b/RecoBTag/SoftLepton/interface/ElectronTagger.h @@ -2,10 +2,9 @@ #define RecoBTag_SoftLepton_ElectronTagger_h #include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "CommonTools/Utils/interface/TMVAEvaluator.h" #include "RecoBTau/JetTagComputer/interface/JetTagComputer.h" #include "RecoBTag/SoftLepton/interface/LeptonSelector.h" -#include "RecoBTag/SoftLepton/interface/MvaSoftElectronEstimator.h" -#include /** \class ElectronTagger * @@ -18,23 +17,19 @@ class ElectronTagger : public JetTagComputer { public: /// explicit ctor - ElectronTagger(const edm::ParameterSet & ); + ElectronTagger(const edm::ParameterSet & ); + void initialize(const JetTagComputerRecord &) override; virtual float discriminator(const TagInfoHelper & tagInfo) const override; -// std::vector vecstr; -// string path_mvaWeightFileEleID; + private: - btag::LeptonSelector m_selector; - edm::FileInPath WeightFile; - mutable std::mutex m_mutex; - std::unique_ptr mvaID; -}; + const btag::LeptonSelector m_selector; + const bool m_useCondDB; + const 
std::string m_gbrForestLabel; + const edm::FileInPath m_weightFile; + const bool m_useGBRForest; + const bool m_useAdaBoost; -ElectronTagger::ElectronTagger(const edm::ParameterSet & configuration): - m_selector(configuration) - { - uses("seTagInfos"); - WeightFile=configuration.getParameter("weightFile"); - mvaID.reset(new MvaSoftEleEstimator(WeightFile.fullPath())); - } + std::unique_ptr mvaID; +}; #endif diff --git a/RecoBTag/SoftLepton/interface/MuonTagger.h b/RecoBTag/SoftLepton/interface/MuonTagger.h index 7647c2116f15e..45a1ed8377571 100644 --- a/RecoBTag/SoftLepton/interface/MuonTagger.h +++ b/RecoBTag/SoftLepton/interface/MuonTagger.h @@ -6,10 +6,9 @@ #define RecoBTag_SoftLepton_MuonTagger_h #include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "CommonTools/Utils/interface/TMVAEvaluator.h" #include "RecoBTau/JetTagComputer/interface/JetTagComputer.h" #include "RecoBTag/SoftLepton/interface/LeptonSelector.h" -#include "RecoBTag/SoftLepton/interface/MvaSoftMuonEstimator.h" -#include #include class MuonTagger : public JetTagComputer { @@ -17,15 +16,18 @@ class MuonTagger : public JetTagComputer { public: MuonTagger(const edm::ParameterSet&); - + void initialize(const JetTagComputerRecord &) override; virtual float discriminator(const TagInfoHelper& tagInfo) const override; private: - btag::LeptonSelector m_selector; - edm::FileInPath WeightFile; - mutable std::mutex m_mutex; - [[cms::thread_guard("m_mutex")]] std::unique_ptr mvaID; + const bool m_useCondDB; + const std::string m_gbrForestLabel; + const edm::FileInPath m_weightFile; + const bool m_useGBRForest; + const bool m_useAdaBoost; + + std::unique_ptr mvaID; }; #endif diff --git a/RecoBTag/SoftLepton/interface/MvaSoftElectronEstimator.h b/RecoBTag/SoftLepton/interface/MvaSoftElectronEstimator.h deleted file mode 100644 index 6ebbac4acb842..0000000000000 --- a/RecoBTag/SoftLepton/interface/MvaSoftElectronEstimator.h +++ /dev/null @@ -1,48 +0,0 @@ -//-------------------------------------------------------------------------------------------------- -// $Id $ -// -// MvaSoftEleEstimator -// -// Helper Class for applying MVA electron ID selection -// -// Authors: S. 
de Visscher -//-------------------------------------------------------------------------------------------------- - - -/// --> NOTE if you want to use this class as standalone without the CMSSW part -/// you need to uncomment the below line and compile normally with scramv1 b -/// Then you need just to load it in your root macro the lib with the correct path, eg: -/// gSystem->Load("/data/benedet/CMSSW_5_2_2/lib/slc5_amd64_gcc462/pluginEGammaEGammaAnalysisTools.so"); - -//#define STANDALONE // <---- this line - -#ifndef MvaSoftEleEstimator_H -#define MvaSoftEleEstimator_H - -#include "DataFormats/PatCandidates/interface/Electron.h" -#include "DataFormats/EgammaCandidates/interface/GsfElectronFwd.h" -#include "DataFormats/MuonReco/interface/MuonFwd.h" -#include "DataFormats/VertexReco/interface/VertexFwd.h" -#include "DataFormats/ParticleFlowCandidate/interface/PFCandidateFwd.h" -#include "RecoEcal/EgammaCoreTools/interface/EcalClusterLazyTools.h" -#include "TrackingTools/TransientTrack/interface/TransientTrackBuilder.h" -#include "EgammaAnalysis/ElectronTools/interface/ElectronEffectiveArea.h" -#include -#include -#include "TMVA/Factory.h" -#include "TMVA/Tools.h" -#include "TMVA/Reader.h" - -class MvaSoftEleEstimator{ - public: - MvaSoftEleEstimator(std::string); - ~MvaSoftEleEstimator(); - - Double_t mvaValue(Float_t, Float_t, Float_t,Float_t,Float_t,Float_t); - - private: - TMVA::Reader* TMVAReader; - float mva_sip3d, mva_sip2d, mva_ptRel, mva_deltaR, mva_ratio, mva_e_pi; -}; - -#endif diff --git a/RecoBTag/SoftLepton/interface/MvaSoftMuonEstimator.h b/RecoBTag/SoftLepton/interface/MvaSoftMuonEstimator.h deleted file mode 100644 index 4349e67d15d4a..0000000000000 --- a/RecoBTag/SoftLepton/interface/MvaSoftMuonEstimator.h +++ /dev/null @@ -1,35 +0,0 @@ -// * Author: Alberto Zucchetta -// * Mail: a.zucchetta@cern.ch -// * January 16, 2015 - -#ifndef RecoBTag_SoftLepton_MvaSoftMuonEstimator_h -#define RecoBTag_SoftLepton_MvaSoftMuonEstimator_h - -#include "FWCore/ParameterSet/interface/ParameterSet.h" - - -#include -#include "TMVA/Factory.h" -#include "TMVA/Tools.h" -#include "TMVA/Reader.h" - -class MvaSoftMuonEstimator { - - public: - - MvaSoftMuonEstimator(std::string); - ~MvaSoftMuonEstimator(); - - float mvaValue(float, float, float, float, float); - - private: - - TMVA::Reader* TMVAReader; - - std::string weightFile; - float mva_sip3d, mva_sip2d, mva_ptRel, mva_deltaR, mva_ratio; - -}; - -#endif - diff --git a/RecoBTag/SoftLepton/python/SoftLeptonByMVA_cff.py b/RecoBTag/SoftLepton/python/SoftLeptonByMVA_cff.py index a059c440a6103..71e1ef1a1ed54 100644 --- a/RecoBTag/SoftLepton/python/SoftLeptonByMVA_cff.py +++ b/RecoBTag/SoftLepton/python/SoftLeptonByMVA_cff.py @@ -1,50 +1,66 @@ import FWCore.ParameterSet.Config as cms +softPFElectronCommon = cms.PSet( + useCondDB = cms.bool(False), + gbrForestLabel = cms.string("btag_SoftPFElectron_TMVA420_BDT_74X_v1"), + weightFile = cms.FileInPath('RecoBTag/SoftLepton/data/SoftPFElectron_BDT.weights.xml.gz'), + useGBRForest = cms.bool(True), + useAdaBoost = cms.bool(False) +) + +softPFMuonCommon = cms.PSet( + useCondDB = cms.bool(False), + gbrForestLabel = cms.string("btag_SoftPFMuon_TMVA420_BDT_74X_v1"), + weightFile = cms.FileInPath('RecoBTag/SoftLepton/data/SoftPFMuon_BDT.weights.xml.gz'), + useGBRForest = cms.bool(True), + useAdaBoost = cms.bool(True) +) + softPFElectronBJetTags = cms.EDProducer("JetTagProducer", jetTagComputer = cms.string('softPFElectronComputer'), tagInfos = cms.VInputTag(cms.InputTag("softPFElectronsTagInfos")) ) 
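The softPFElectronCommon and softPFMuonCommon blocks above exist so that the MVA settings (conditions-DB switch and label, weight file, GBRForest flags) are written once; each of the six computer ESProducers below then expands the shared PSet positionally and adds only its own ipSign. A minimal restatement of that pattern for one electron computer, with the values as in this file:

import FWCore.ParameterSet.Config as cms

# Shared MVA settings, declared once.
softPFElectronCommon = cms.PSet(
    useCondDB = cms.bool(False),
    gbrForestLabel = cms.string("btag_SoftPFElectron_TMVA420_BDT_74X_v1"),
    weightFile = cms.FileInPath('RecoBTag/SoftLepton/data/SoftPFElectron_BDT.weights.xml.gz'),
    useGBRForest = cms.bool(True),
    useAdaBoost = cms.bool(False)
)

# A positional PSet is expanded into the producer's own top-level parameters,
# so this computer differs from the others only by its ipSign.
softPFElectronComputer = cms.ESProducer("ElectronTaggerESProducer",
    softPFElectronCommon,
    ipSign = cms.string("any")
)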
softPFElectronComputer = cms.ESProducer("ElectronTaggerESProducer", + softPFElectronCommon, ipSign = cms.string("any"), - weightFile = cms.FileInPath('RecoBTag/SoftLepton/data/SoftPFElectron_BDT.weights.xml.gz') ) negativeSoftPFElectronBJetTags = cms.EDProducer("JetTagProducer", jetTagComputer = cms.string('negativeSoftPFElectronComputer'), tagInfos = cms.VInputTag(cms.InputTag("softPFElectronsTagInfos")) ) negativeSoftPFElectronComputer = cms.ESProducer("ElectronTaggerESProducer", - ipSign = cms.string("negative"), - weightFile = cms.FileInPath('RecoBTag/SoftLepton/data/SoftPFElectron_BDT.weights.xml.gz') + softPFElectronCommon, + ipSign = cms.string("negative") ) positiveSoftPFElectronBJetTags = cms.EDProducer("JetTagProducer", jetTagComputer = cms.string('positiveSoftPFElectronComputer'), tagInfos = cms.VInputTag(cms.InputTag("softPFElectronsTagInfos")) ) positiveSoftPFElectronComputer = cms.ESProducer("ElectronTaggerESProducer", - ipSign = cms.string("positive"), - weightFile = cms.FileInPath('RecoBTag/SoftLepton/data/SoftPFElectron_BDT.weights.xml.gz') + softPFElectronCommon, + ipSign = cms.string("positive") ) softPFMuonBJetTags = cms.EDProducer("JetTagProducer", jetTagComputer = cms.string('softPFMuonComputer'), tagInfos = cms.VInputTag(cms.InputTag("softPFMuonsTagInfos")) ) softPFMuonComputer = cms.ESProducer("MuonTaggerESProducer", - ipSign = cms.string("any"), - weightFile = cms.FileInPath('RecoBTag/SoftLepton/data/SoftPFMuon_BDT.weights.xml.gz') + softPFMuonCommon, + ipSign = cms.string("any") ) negativeSoftPFMuonBJetTags = cms.EDProducer("JetTagProducer", jetTagComputer = cms.string('negativeSoftPFMuonComputer'), tagInfos = cms.VInputTag(cms.InputTag("softPFMuonsTagInfos")) ) negativeSoftPFMuonComputer = cms.ESProducer("MuonTaggerESProducer", - ipSign = cms.string("negative"), - weightFile = cms.FileInPath('RecoBTag/SoftLepton/data/SoftPFMuon_BDT.weights.xml.gz') + softPFMuonCommon, + ipSign = cms.string("negative") ) positiveSoftPFMuonBJetTags = cms.EDProducer("JetTagProducer", jetTagComputer = cms.string('positiveSoftPFMuonComputer'), tagInfos = cms.VInputTag(cms.InputTag("softPFMuonsTagInfos")) ) positiveSoftPFMuonComputer = cms.ESProducer("MuonTaggerESProducer", - ipSign = cms.string("positive"), - weightFile = cms.FileInPath('RecoBTag/SoftLepton/data/SoftPFMuon_BDT.weights.xml.gz') + softPFMuonCommon, + ipSign = cms.string("positive") ) diff --git a/RecoBTag/SoftLepton/src/ElectronTagger.cc b/RecoBTag/SoftLepton/src/ElectronTagger.cc index 4c4ac6dcd7540..ebfe283b4e0b5 100644 --- a/RecoBTag/SoftLepton/src/ElectronTagger.cc +++ b/RecoBTag/SoftLepton/src/ElectronTagger.cc @@ -1,12 +1,47 @@ #include #include +#include "FWCore/Framework/interface/ESHandle.h" +#include "CondFormats/DataRecord/interface/BTauGenericMVAJetTagComputerRcd.h" +#include "CondFormats/DataRecord/interface/GBRWrapperRcd.h" +#include "RecoBTau/JetTagComputer/interface/JetTagComputerRecord.h" #include "DataFormats/BTauReco/interface/SoftLeptonTagInfo.h" #include "RecoBTag/SoftLepton/interface/LeptonSelector.h" #include "RecoBTag/SoftLepton/interface/ElectronTagger.h" #include "DataFormats/BTauReco/interface/CandSoftLeptonTagInfo.h" #include +ElectronTagger::ElectronTagger(const edm::ParameterSet & cfg): + m_selector(cfg), + m_useCondDB(cfg.getParameter("useCondDB")), + m_gbrForestLabel(cfg.existsAs("gbrForestLabel") ? cfg.getParameter("gbrForestLabel") : ""), + m_weightFile(cfg.existsAs("weightFile") ? cfg.getParameter("weightFile") : edm::FileInPath()), + m_useGBRForest(cfg.existsAs("useGBRForest") ? 
cfg.getParameter("useGBRForest") : false), + m_useAdaBoost(cfg.existsAs("useAdaBoost") ? cfg.getParameter("useAdaBoost") : false) + { + uses("seTagInfos"); + mvaID.reset(new TMVAEvaluator()); + } + +void ElectronTagger::initialize(const JetTagComputerRecord & record) +{ + // variable names and order need to be the same as in the training + std::vector variables({"sip3d", "sip2d", "ptRel", "deltaR", "ratio", "mva_e_pi"}); + std::vector spectators; + + if (m_useCondDB) + { + const GBRWrapperRcd & gbrWrapperRecord = record.getRecord(); + + edm::ESHandle gbrForestHandle; + gbrWrapperRecord.get(m_gbrForestLabel.c_str(), gbrForestHandle); + + mvaID->initializeGBRForest(gbrForestHandle.product(), variables, spectators, m_useAdaBoost); + } + else + mvaID->initialize("Color:Silent:Error", "BDT", m_weightFile.fullPath(), variables, spectators, m_useGBRForest, m_useAdaBoost); +} + /// b-tag a jet based on track-to-jet parameters in the extened info collection float ElectronTagger::discriminator(const TagInfoHelper & tagInfo) const { // default value, used if there are no leptons associated to this jet @@ -16,8 +51,6 @@ float ElectronTagger::discriminator(const TagInfoHelper & tagInfo) const { std::mt19937_64 random; std::uniform_real_distribution dist(0.f,1.f); - //MvaSofEleEstimator is not thread safe - std::lock_guard lock(m_mutex); // if there are multiple leptons, look for the highest tag result for (unsigned int i = 0; i < info.leptons(); i++) { const reco::SoftLeptonProperties & properties = info.properties(i); @@ -27,7 +60,19 @@ float ElectronTagger::discriminator(const TagInfoHelper & tagInfo) const { float rndm = dist(random); //for negative tagger, flip 50% of the negative signs to positive value float sip3d = (m_selector.isNegative() && rndm<0.5) ? -properties.sip3d : properties.sip3d; - float tag = mvaID->mvaValue( properties.sip2d, sip3d, properties.ptRel, properties.deltaR, properties.ratio,properties.elec_mva); + float sip2d = (m_selector.isNegative() && rndm<0.5) ? -properties.sip2d : properties.sip2d; + + std::map inputs; + inputs["sip3d"] = sip3d; + inputs["sip2d"] = sip2d; + inputs["ptRel"] = properties.ptRel; + inputs["deltaR"] = properties.deltaR; + inputs["ratio"] = properties.ratio; + inputs["mva_e_pi"] = properties.elec_mva; + + float tag = mvaID->evaluate(inputs); + // Transform output between 0 and 1 + tag = (tag+1.0)/2.0; if (tag > bestTag) bestTag = tag; } diff --git a/RecoBTag/SoftLepton/src/MuonTagger.cc b/RecoBTag/SoftLepton/src/MuonTagger.cc index 252ef5f5d5995..da03af35d3d60 100644 --- a/RecoBTag/SoftLepton/src/MuonTagger.cc +++ b/RecoBTag/SoftLepton/src/MuonTagger.cc @@ -5,18 +5,46 @@ #include #include +#include "FWCore/Framework/interface/ESHandle.h" +#include "CondFormats/DataRecord/interface/BTauGenericMVAJetTagComputerRcd.h" +#include "CondFormats/DataRecord/interface/GBRWrapperRcd.h" +#include "RecoBTau/JetTagComputer/interface/JetTagComputerRecord.h" #include "DataFormats/BTauReco/interface/SoftLeptonTagInfo.h" #include "DataFormats/BTauReco/interface/CandSoftLeptonTagInfo.h" #include "RecoBTag/SoftLepton/interface/LeptonSelector.h" #include "RecoBTag/SoftLepton/interface/MuonTagger.h" -MuonTagger::MuonTagger(const edm::ParameterSet& conf): m_selector(conf) { +MuonTagger::MuonTagger(const edm::ParameterSet& cfg): + m_selector(cfg), + m_useCondDB(cfg.getParameter("useCondDB")), + m_gbrForestLabel(cfg.existsAs("gbrForestLabel") ? cfg.getParameter("gbrForestLabel") : ""), + m_weightFile(cfg.existsAs("weightFile") ? 
cfg.getParameter("weightFile") : edm::FileInPath()), + m_useGBRForest(cfg.existsAs("useGBRForest") ? cfg.getParameter("useGBRForest") : false), + m_useAdaBoost(cfg.existsAs("useAdaBoost") ? cfg.getParameter("useAdaBoost") : false) +{ uses("smTagInfos"); - WeightFile=conf.getParameter("weightFile"); - mvaID.reset(new MvaSoftMuonEstimator(WeightFile.fullPath())); + mvaID.reset(new TMVAEvaluator()); } +void MuonTagger::initialize(const JetTagComputerRecord & record) +{ + // variable names and order need to be the same as in the training + std::vector variables({"TagInfo1.sip3d", "TagInfo1.sip2d", "TagInfo1.ptRel", "TagInfo1.deltaR", "TagInfo1.ratio"}); + std::vector spectators; + + if (m_useCondDB) + { + const GBRWrapperRcd & gbrWrapperRecord = record.getRecord(); + + edm::ESHandle gbrForestHandle; + gbrWrapperRecord.get(m_gbrForestLabel.c_str(), gbrForestHandle); + + mvaID->initializeGBRForest(gbrForestHandle.product(), variables, spectators, m_useAdaBoost); + } + else + mvaID->initialize("Color:Silent:Error", "BDT", m_weightFile.fullPath(), variables, spectators, m_useGBRForest, m_useAdaBoost); +} // b-tag a jet based on track-to-jet parameters in the extened info collection float MuonTagger::discriminator(const TagInfoHelper& tagInfo) const { @@ -27,9 +55,6 @@ float MuonTagger::discriminator(const TagInfoHelper& tagInfo) const { std::mt19937_64 random; std::uniform_real_distribution dist(0.f,1.f); - //MvaSoftMuonEstimator is not thread safe - std::lock_guard lock(m_mutex); - // If there are multiple leptons, look for the highest tag result for (unsigned int i=0; imvaValue(sip3d, sip2d, properties.ptRel, properties.deltaR, properties.ratio); + + std::map inputs; + inputs["TagInfo1.sip3d"] = sip3d; + inputs["TagInfo1.sip2d"] = sip2d; + inputs["TagInfo1.ptRel"] = properties.ptRel; + inputs["TagInfo1.deltaR"] = properties.deltaR; + inputs["TagInfo1.ratio"] = properties.ratio; + + float tag = mvaID->evaluate(inputs); + // Transform output between 0 and 1 + tag = (tag+1.0)/2.0; if(tag>bestTag) bestTag = tag; } diff --git a/RecoBTag/SoftLepton/src/MvaSoftElectronEstimator.cc b/RecoBTag/SoftLepton/src/MvaSoftElectronEstimator.cc deleted file mode 100644 index 9a043a8c0baed..0000000000000 --- a/RecoBTag/SoftLepton/src/MvaSoftElectronEstimator.cc +++ /dev/null @@ -1,62 +0,0 @@ -#include -#include "RecoBTag/SoftLepton/interface/MvaSoftElectronEstimator.h" -#include -#include -using namespace std; - -#include "DataFormats/TrackReco/interface/Track.h" -#include "DataFormats/GsfTrackReco/interface/GsfTrack.h" -#include "DataFormats/TrackReco/interface/TrackFwd.h" -#include "DataFormats/EgammaCandidates/interface/GsfElectron.h" -#include "DataFormats/EgammaReco/interface/SuperCluster.h" -#include "DataFormats/MuonReco/interface/Muon.h" -#include "DataFormats/ParticleFlowCandidate/interface/PFCandidate.h" -#include "DataFormats/ParticleFlowCandidate/interface/PFCandidateFwd.h" -#include "DataFormats/Common/interface/RefToPtr.h" -#include "DataFormats/VertexReco/interface/Vertex.h" -#include "RecoEcal/EgammaCoreTools/interface/EcalClusterLazyTools.h" -#include "TrackingTools/TransientTrack/interface/TransientTrackBuilder.h" -#include "TrackingTools/IPTools/interface/IPTools.h" -//#include "EgammaAnalysis/ElectronTools/interface/ElectronEffectiveArea.h" -#include "DataFormats/Common/interface/RefToPtr.h" -#include "CommonTools/Utils/interface/TMVAZipReader.h" -using namespace reco; - -//-------------------------------------------------------------------------------------------------- 
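The TMVAEvaluator-based ElectronTagger and MuonTagger above read the packaged weight file only when useCondDB is False, so moving a tagger to a conditions-database payload becomes a pure Python-side change. A hedged sketch of such a customization (whether the quoted payload label is actually present in a given global tag is an assumption, not something this patch checks):

import FWCore.ParameterSet.Config as cms
from RecoBTag.SoftLepton.SoftLeptonByMVA_cff import softPFMuonComputer

# Hypothetical customization: fetch the GBRForest from the conditions DB under the
# label declared in SoftLeptonByMVA_cff, instead of reading the local .xml.gz file.
softPFMuonComputer.useCondDB = cms.bool(True)
softPFMuonComputer.gbrForestLabel = cms.string("btag_SoftPFMuon_TMVA420_BDT_74X_v1")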
-MvaSoftEleEstimator::MvaSoftEleEstimator(std::string weightFile) -{ - TMVAReader = new TMVA::Reader("Color:Silent:Error"); - TMVAReader->SetVerbose(false); - TMVAReader->AddVariable("sip3d", &mva_sip3d); - TMVAReader->AddVariable("sip2d", &mva_sip2d); - TMVAReader->AddVariable("ptRel", &mva_ptRel); - TMVAReader->AddVariable("deltaR", &mva_deltaR); - TMVAReader->AddVariable("ratio", &mva_ratio); - TMVAReader->AddVariable("mva_e_pi", &mva_e_pi); - reco::details::loadTMVAWeights(TMVAReader, "BDT", weightFile.c_str()); - -} -//-------------------------------------------------------------------------------------------------- -MvaSoftEleEstimator::~MvaSoftEleEstimator() -{ - delete TMVAReader; -} - -//-------------------------------------------------------------------------------------------------- - -Double_t MvaSoftEleEstimator::mvaValue(Float_t sip2d, Float_t sip3d, Float_t ptRel, float deltaR, Float_t ratio, Float_t mva_e_pi) { - - mva_sip3d = sip3d; - mva_sip2d = sip2d; - mva_ptRel = ptRel; - mva_deltaR = deltaR; - mva_ratio = ratio; - mva_e_pi = mva_e_pi; - - float tag = TMVAReader->EvaluateMVA("BDT"); - // Transform output between 0 and 1 - tag = (tag+1.0)/2.0; - - return tag; -} -//-------------------------------------------------------------------------------------------------------- diff --git a/RecoBTag/SoftLepton/src/MvaSoftMuonEstimator.cc b/RecoBTag/SoftLepton/src/MvaSoftMuonEstimator.cc deleted file mode 100644 index ad53b1341a638..0000000000000 --- a/RecoBTag/SoftLepton/src/MvaSoftMuonEstimator.cc +++ /dev/null @@ -1,42 +0,0 @@ -// * Author: Alberto Zucchetta -// * Mail: a.zucchetta@cern.ch -// * January 16, 2015 - -#include - -#include "DataFormats/BTauReco/interface/SoftLeptonTagInfo.h" -#include "RecoBTag/SoftLepton/interface/LeptonSelector.h" -#include "RecoBTag/SoftLepton/interface/MvaSoftMuonEstimator.h" -#include "CommonTools/Utils/interface/TMVAZipReader.h" - -MvaSoftMuonEstimator::MvaSoftMuonEstimator(std::string weightFile) { - TMVAReader = new TMVA::Reader("Color:Silent:Error"); - TMVAReader->SetVerbose(false); - TMVAReader->AddVariable("TagInfo1.sip3d", &mva_sip3d); - TMVAReader->AddVariable("TagInfo1.sip2d", &mva_sip2d); - TMVAReader->AddVariable("TagInfo1.ptRel", &mva_ptRel); - TMVAReader->AddVariable("TagInfo1.deltaR", &mva_deltaR); - TMVAReader->AddVariable("TagInfo1.ratio", &mva_ratio); - reco::details::loadTMVAWeights(TMVAReader, "BDT", weightFile.c_str()); -} - -MvaSoftMuonEstimator::~MvaSoftMuonEstimator() { - delete TMVAReader; -} - - -// b-tag a jet based on track-to-jet parameters in the extened info collection -float MvaSoftMuonEstimator::mvaValue(float sip3d, float sip2d, float ptRel, float deltaR, float ratio) { - mva_sip3d = sip3d; - mva_sip2d = sip2d; - mva_ptRel = ptRel; - mva_deltaR = deltaR; - mva_ratio = ratio; - // Evaluate tagger - float tag = TMVAReader->EvaluateMVA("BDT"); - // Transform output between approximately 0 and 1 - tag = (tag+1.)/2.; - - return tag; -} - diff --git a/RecoBTau/JetTagComputer/interface/JetTagComputerRecord.h b/RecoBTau/JetTagComputer/interface/JetTagComputerRecord.h index 293ef3c63a2ff..9472ceba48444 100644 --- a/RecoBTau/JetTagComputer/interface/JetTagComputerRecord.h +++ b/RecoBTau/JetTagComputer/interface/JetTagComputerRecord.h @@ -5,10 +5,11 @@ #include class BTauGenericMVAJetTagComputerRcd; +class GBRWrapperRcd; class JetTagComputerRecord : public edm::eventsetup::DependentRecordImplementation< JetTagComputerRecord, - boost::mpl::vector > {}; + boost::mpl::vector > {}; #endif diff --git 
a/RecoBTau/JetTagComputer/python/candidateCombinedMVAComputer_cfi.py b/RecoBTau/JetTagComputer/python/candidateCombinedMVAComputer_cfi.py index 7aa4347abb7c9..726b4d7cdd96b 100644 --- a/RecoBTau/JetTagComputer/python/candidateCombinedMVAComputer_cfi.py +++ b/RecoBTau/JetTagComputer/python/candidateCombinedMVAComputer_cfi.py @@ -12,7 +12,7 @@ cms.PSet( discriminator = cms.bool(True), variables = cms.bool(False), - jetTagComputer = cms.string('candidateCombinedSecondaryVertexComputer') + jetTagComputer = cms.string('candidateCombinedSecondaryVertexV2Computer') ), cms.PSet( discriminator = cms.bool(True), diff --git a/RecoBTau/JetTagComputer/python/combinedMVAComputer_cfi.py b/RecoBTau/JetTagComputer/python/combinedMVAComputer_cfi.py index 0d4b255882af9..5d9f8d4230962 100644 --- a/RecoBTau/JetTagComputer/python/combinedMVAComputer_cfi.py +++ b/RecoBTau/JetTagComputer/python/combinedMVAComputer_cfi.py @@ -12,7 +12,7 @@ cms.PSet( discriminator = cms.bool(True), variables = cms.bool(False), - jetTagComputer = cms.string('combinedSecondaryVertexComputer') + jetTagComputer = cms.string('combinedSecondaryVertexV2Computer') ), cms.PSet( discriminator = cms.bool(True), diff --git a/RecoEgamma/EgammaPhotonProducers/python/reducedEgamma_cfi.py b/RecoEgamma/EgammaPhotonProducers/python/reducedEgamma_cfi.py index 6293ff0d8af23..16a19c46afa78 100644 --- a/RecoEgamma/EgammaPhotonProducers/python/reducedEgamma_cfi.py +++ b/RecoEgamma/EgammaPhotonProducers/python/reducedEgamma_cfi.py @@ -1,8 +1,8 @@ import FWCore.ParameterSet.Config as cms reducedEgamma = cms.EDProducer("ReducedEGProducer", - keepPhotons = cms.string("pt > 14 && hadTowOverEm()<0.15"), #keep in output - slimRelinkPhotons = cms.string("pt > 14 && hadTowOverEm()<0.15"), #keep only slimmed SuperCluster plus seed cluster + keepPhotons = cms.string("(pt > 14 && hadTowOverEm()<0.15) || (pt>10 && pt<=14 && hadTowOverEm()<0.15 && chargedHadronIso()<10)"), #keep in output + slimRelinkPhotons = cms.string("(pt > 14 && hadTowOverEm()<0.15) || (pt>10 && pt<=14 && hadTowOverEm()<0.15 && chargedHadronIso()<10)"), #keep only slimmed SuperCluster plus seed cluster relinkPhotons = cms.string("(r9()>0.8 || chargedHadronIso()<20 || chargedHadronIso()<0.3*pt())"), #keep all associated clusters/rechits/conversions keepGsfElectrons = cms.string(""), #keep in output slimRelinkGsfElectrons = cms.string(""), #keep only slimmed SuperCluster plus seed cluster @@ -57,5 +57,3 @@ "eleHcalPFClusIso", ), ) - - diff --git a/RecoEgamma/EgammaTools/interface/AnyMVAEstimatorRun2Base.h b/RecoEgamma/EgammaTools/interface/AnyMVAEstimatorRun2Base.h index 7b06d42e9d585..d795c43f1a6ff 100644 --- a/RecoEgamma/EgammaTools/interface/AnyMVAEstimatorRun2Base.h +++ b/RecoEgamma/EgammaTools/interface/AnyMVAEstimatorRun2Base.h @@ -13,27 +13,37 @@ class AnyMVAEstimatorRun2Base { public: // Constructor, destructor - AnyMVAEstimatorRun2Base(const edm::ParameterSet& conf) : _conf(conf){}; + AnyMVAEstimatorRun2Base(const edm::ParameterSet& conf) : _conf(conf) {} virtual ~AnyMVAEstimatorRun2Base(){}; - + // Functions that must be provided in derived classes // These function should work on electrons or photons // of the reco or pat type - virtual float mvaValue( const edm::Ptr& particle) = 0; + virtual float mvaValue( const edm::Ptr& particle, const edm::Event&) const = 0; // A specific implementation of MVA is expected to have data members // that will contain particle's quantities on which the MVA operates. // This function fill their value for a given particle. 
- virtual void fillMVAVariables(const edm::Ptr& particle) = 0; + virtual std::vector fillMVAVariables(const edm::Ptr& particle, const edm::Event&) const = 0; // A specific implementation of MVA is expected to have one or more categories // defined with respect to eta, pt, etc. // This function determines the category for a given particle. - virtual int findCategory( const edm::Ptr& particle) = 0; - virtual int getNCategories() = 0; + virtual int findCategory( const edm::Ptr& particle) const = 0; + virtual int getNCategories() const = 0; // The name is a unique name associated with a particular MVA implementation, // it is found as a const data member in a derived class. - virtual const std::string getName() = 0; + virtual const std::string& getName() const = 0; + // An extra variable string set during construction that can be used + // to distinguish different instances of the estimator configured with + // different weight files. The tag can be used to construct names of ValueMaps, etc. + virtual const std::string& getTag() const = 0; + + // fills a vector of floats in the order that arguments are provided + template + std::vector packMVAVariables(const Args... args) const { + return std::vector({ args... }); + } // // Extra event content - if needed. @@ -41,10 +51,12 @@ class AnyMVAEstimatorRun2Base { // Some MVA implementation may require direct access to event content. // Implement these methods only if needed in the derived classes (use "override" // for certainty). + + // DEPRECATED // This method needs to be used only once after this MVA estimator is constructed - virtual void setConsumes(edm::ConsumesCollector &&cc){}; + virtual void setConsumes(edm::ConsumesCollector &&cc) const {}; // This method needs to be called for each event - virtual void getEventContent(const edm::Event& iEvent){}; + virtual void getEventContent(const edm::Event& iEvent) const final {}; // // Data members diff --git a/RecoEgamma/EgammaTools/interface/MVAObjectCache.h b/RecoEgamma/EgammaTools/interface/MVAObjectCache.h new file mode 100644 index 0000000000000..a47c8174618ea --- /dev/null +++ b/RecoEgamma/EgammaTools/interface/MVAObjectCache.h @@ -0,0 +1,30 @@ +#ifndef __RecoEgamma_EgammaTools_MVAObjectCache_H__ +#define __RecoEgamma_EgammaTools_MVAObjectCache_H__ + +#include "FWCore/ParameterSet/interface/ParameterSet.h" + +#include "RecoEgamma/EgammaTools/interface/AnyMVAEstimatorRun2Base.h" + +#include +#include +#include + +namespace egamma { + + class MVAObjectCache { + public: + typedef std::unique_ptr MVAPtr; + + MVAObjectCache(const edm::ParameterSet& conf); + + const MVAPtr& getMVA(const std::string& mva) const; + + const std::unordered_map& allMVAs() const { + return mvas_; + } + private: + std::unordered_map mvas_; + }; + +} +#endif diff --git a/RecoEgamma/EgammaTools/interface/MVAValueMapProducer.h b/RecoEgamma/EgammaTools/interface/MVAValueMapProducer.h index 826fbb50e60bf..2207d0938b432 100644 --- a/RecoEgamma/EgammaTools/interface/MVAValueMapProducer.h +++ b/RecoEgamma/EgammaTools/interface/MVAValueMapProducer.h @@ -1,3 +1,6 @@ +#ifndef __RecoEgamma_EgammaTools_MVAValueMapProducer_H__ +#define __RecoEgamma_EgammaTools_MVAValueMapProducer_H__ + #include "FWCore/Framework/interface/Frameworkfwd.h" #include "FWCore/Framework/interface/stream/EDProducer.h" @@ -10,18 +13,27 @@ #include "DataFormats/Common/interface/View.h" #include "RecoEgamma/EgammaTools/interface/AnyMVAEstimatorRun2Base.h" +#include "RecoEgamma/EgammaTools/interface/MVAObjectCache.h" #include #include template -class 
MVAValueMapProducer : public edm::stream::EDProducer<> { +class MVAValueMapProducer : public edm::stream::EDProducer< edm::GlobalCache > { public: - explicit MVAValueMapProducer(const edm::ParameterSet&); + MVAValueMapProducer(const edm::ParameterSet&, const egamma::MVAObjectCache*); ~MVAValueMapProducer(); + static std::unique_ptr + initializeGlobalCache(const edm::ParameterSet& conf) { + return std::unique_ptr(new egamma::MVAObjectCache(conf)); + } + + static void globalEndJob(const egamma::MVAObjectCache * ) { + } + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: @@ -40,9 +52,8 @@ class MVAValueMapProducer : public edm::stream::EDProducer<> { // for miniAOD case edm::EDGetToken srcMiniAOD_; - // MVA estimator - std::vector> mvaEstimators_; - + // MVA estimators are now stored in MVAObjectCache! + // Value map names std::vector mvaValueMapNames_; std::vector mvaCategoriesMapNames_; @@ -50,7 +61,8 @@ class MVAValueMapProducer : public edm::stream::EDProducer<> { }; template -MVAValueMapProducer::MVAValueMapProducer(const edm::ParameterSet& iConfig) +MVAValueMapProducer::MVAValueMapProducer(const edm::ParameterSet& iConfig, + const egamma::MVAObjectCache* mva_cache) { // @@ -60,54 +72,32 @@ MVAValueMapProducer::MVAValueMapProducer(const edm::ParameterSet& srcMiniAOD_ = mayConsume >(iConfig.getParameter("srcMiniAOD")); // Loop over the list of MVA configurations passed here from python and - // construct all requested MVA esimtators. - const std::vector& mvaEstimatorConfigs - = iConfig.getParameterSetVector("mvaConfigurations"); - for( auto &imva : mvaEstimatorConfigs ){ - - std::unique_ptr thisEstimator; - thisEstimator.reset(NULL); - if( !imva.empty() ) { - const std::string& pName = imva.getParameter("mvaName"); - // The factory below constructs the MVA of the appropriate type based - // on the "mvaName" which is the name of the derived MVA class (plugin) - AnyMVAEstimatorRun2Base *estimator = AnyMVAEstimatorRun2Factory::get()->create(pName, imva); - // Declare all event content, such as ValueMaps produced upstream or other, - // original event data pieces, that is needed (if any is implemented in the specific - // MVA classes) - //edm::ConsumesCollector &cc = consumesCollector(); - estimator->setConsumes( consumesCollector() ); - - thisEstimator.reset(estimator); - - } else - throw cms::Exception(" MVA configuration not found: ") - << " failed to find proper configuration for one of the MVAs in the main python script " << std::endl; - - // The unique pointer control is passed to the vector in the line below. - // Don't use thisEstimator pointer beyond the next line. - mvaEstimators_.emplace_back( thisEstimator.release() ); - + // construct all requested MVA esimtators. 
+ const auto& all_mvas = mva_cache->allMVAs(); + for( auto mvaItr = all_mvas.begin(); mvaItr != all_mvas.end(); ++mvaItr ) { + // set the consumes + mvaItr->second->setConsumes(consumesCollector()); // // Compose and save the names of the value maps to be produced // - const auto& currentEstimator = mvaEstimators_.back(); - std::string thisValueMapName = currentEstimator->getName() + "Values"; - std::string thisCategoriesMapName = currentEstimator->getName() + "Categories"; + const auto& currentEstimator = mvaItr->second; + const std::string full_name = ( currentEstimator->getName() + + currentEstimator->getTag() ); + std::string thisValueMapName = full_name + "Values"; + std::string thisCategoriesMapName = full_name + "Categories"; mvaValueMapNames_.push_back( thisValueMapName ); mvaCategoriesMapNames_.push_back( thisCategoriesMapName ); // Declare the maps to the framework produces >(thisValueMapName); - produces >(thisCategoriesMapName); - + produces >(thisCategoriesMapName); } } template - MVAValueMapProducer::~MVAValueMapProducer() { +MVAValueMapProducer::~MVAValueMapProducer() { } template @@ -130,27 +120,27 @@ void MVAValueMapProducer::produce(edm::Event& iEvent, const edm::E // Loop over MVA estimators - for( unsigned iEstimator = 0; iEstimator < mvaEstimators_.size(); iEstimator++ ){ - + const auto& all_mvas = globalCache()->allMVAs(); + for( auto mva_itr = all_mvas.begin(); mva_itr != all_mvas.end(); ++mva_itr ){ + const int iEstimator = std::distance(all_mvas.begin(),mva_itr); + // Set up all event content, such as ValueMaps produced upstream or other, // original event data pieces, that is needed (if any is implemented in the specific // MVA classes) - mvaEstimators_[iEstimator]->getEventContent( iEvent ); + const auto& thisEstimator = mva_itr->second; std::vector mvaValues; std::vector mvaCategories; - + // Loop over particles for (size_t i = 0; i < src->size(); ++i){ - auto iCand = src->ptrAt(i); - - mvaValues.push_back( mvaEstimators_[iEstimator]->mvaValue( iCand ) ); - mvaCategories.push_back( mvaEstimators_[iEstimator]->findCategory( iCand ) ); + auto iCand = src->ptrAt(i); + mvaValues.push_back( thisEstimator->mvaValue( iCand, iEvent ) ); + mvaCategories.push_back( thisEstimator->findCategory( iCand ) ); } // end loop over particles writeValueMap(iEvent, src, mvaValues, mvaValueMapNames_[iEstimator] ); - writeValueMap(iEvent, src, mvaCategories, mvaCategoriesMapNames_[iEstimator] ); - + writeValueMap(iEvent, src, mvaCategories, mvaCategoriesMapNames_[iEstimator] ); } // end loop over estimators @@ -158,9 +148,9 @@ void MVAValueMapProducer::produce(edm::Event& iEvent, const edm::E template template void MVAValueMapProducer::writeValueMap(edm::Event &iEvent, - const edm::Handle > & handle, - const std::vector & values, - const std::string & label) const + const edm::Handle > & handle, + const std::vector & values, + const std::string & label) const { using namespace edm; using namespace std; @@ -180,3 +170,4 @@ template descriptions.addDefault(desc); } +#endif diff --git a/RecoEgamma/EgammaTools/plugins/EGExtraInfoModifierFromDB.cc b/RecoEgamma/EgammaTools/plugins/EGExtraInfoModifierFromDB.cc new file mode 100644 index 0000000000000..480d62bc6c098 --- /dev/null +++ b/RecoEgamma/EgammaTools/plugins/EGExtraInfoModifierFromDB.cc @@ -0,0 +1,659 @@ +#include "CommonTools/CandAlgos/interface/ModifyObjectValueBase.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "DataFormats/Common/interface/ValueMap.h" +#include 
"FWCore/Framework/interface/ESHandle.h" + +#include "DataFormats/EgammaCandidates/interface/GsfElectron.h" +#include "DataFormats/EgammaCandidates/interface/Photon.h" + +#include "CondFormats/DataRecord/interface/GBRDWrapperRcd.h" +#include "CondFormats/EgammaObjects/interface/GBRForestD.h" +#include "CondFormats/DataRecord/interface/GBRWrapperRcd.h" +#include "CondFormats/EgammaObjects/interface/GBRForest.h" + +#include "DataFormats/EcalDetId/interface/EBDetId.h" +#include "DataFormats/EcalDetId/interface/EEDetId.h" + +#include + +namespace { + const edm::InputTag empty_tag; +} + +#include + +class EGExtraInfoModifierFromDB : public ModifyObjectValueBase { +public: + typedef edm::EDGetTokenT > ValMapFloatToken; + typedef edm::EDGetTokenT > ValMapIntToken; + typedef std::pair ValMapFloatTagTokenPair; + typedef std::pair ValMapIntTagTokenPair; + + struct electron_config { + edm::InputTag electron_src; + edm::EDGetTokenT > tok_electron_src; + std::unordered_map tag_float_token_map; + std::unordered_map tag_int_token_map; + + std::vector condnames_mean_50ns; + std::vector condnames_sigma_50ns; + std::vector condnames_mean_25ns; + std::vector condnames_sigma_25ns; + std::string condnames_weight_50ns; + std::string condnames_weight_25ns; + }; + + struct photon_config { + edm::InputTag photon_src; + edm::EDGetTokenT > tok_photon_src; + std::unordered_map tag_float_token_map; + std::unordered_map tag_int_token_map; + + std::vector condnames_mean_50ns; + std::vector condnames_sigma_50ns; + std::vector condnames_mean_25ns; + std::vector condnames_sigma_25ns; + }; + + EGExtraInfoModifierFromDB(const edm::ParameterSet& conf); + ~EGExtraInfoModifierFromDB() {}; + + void setEvent(const edm::Event&) override final; + void setEventContent(const edm::EventSetup&) override final; + void setConsumes(edm::ConsumesCollector&) override final; + + void modifyObject(pat::Electron&) const override final; + void modifyObject(pat::Photon&) const override final; + +private: + electron_config e_conf; + photon_config ph_conf; + std::unordered_map > eles_by_oop; // indexed by original object ptr + std::unordered_map > > ele_vmaps; + std::unordered_map > > ele_int_vmaps; + std::unordered_map > phos_by_oop; + std::unordered_map > > pho_vmaps; + std::unordered_map > > pho_int_vmaps; + + bool autoDetectBunchSpacing_; + int bunchspacing_; + edm::InputTag bunchspacingTag_; + edm::EDGetTokenT bunchSpacingToken_; + float rhoValue_; + edm::InputTag rhoTag_; + edm::EDGetTokenT rhoToken_; + int nVtx_; + edm::InputTag vtxTag_; + edm::EDGetTokenT vtxToken_; + edm::Handle vtxH_; + + std::vector ph_forestH_mean_; + std::vector ph_forestH_sigma_; + std::vector e_forestH_mean_; + std::vector e_forestH_sigma_; + const GBRForest* ep_forestH_weight_; +}; + +DEFINE_EDM_PLUGIN(ModifyObjectValueFactory, + EGExtraInfoModifierFromDB, + "EGExtraInfoModifierFromDB"); + +EGExtraInfoModifierFromDB::EGExtraInfoModifierFromDB(const edm::ParameterSet& conf) : + ModifyObjectValueBase(conf) { + + bunchspacing_ = 450; + autoDetectBunchSpacing_ = conf.getParameter("autoDetectBunchSpacing"); + + rhoTag_ = conf.getParameter("rhoCollection"); + vtxTag_ = conf.getParameter("vertexCollection"); + + if (autoDetectBunchSpacing_) { + bunchspacingTag_ = conf.getParameter("bunchSpacingTag"); + } else { + bunchspacing_ = conf.getParameter("manualBunchSpacing"); + } + + constexpr char electronSrc[] = "electronSrc"; + constexpr char photonSrc[] = "photonSrc"; + + if(conf.exists("electron_config")) { + const edm::ParameterSet& electrons = 
conf.getParameter("electron_config"); + if( electrons.exists(electronSrc) ) + e_conf.electron_src = electrons.getParameter(electronSrc); + + std::vector intValueMaps; + if ( electrons.existsAs >("intValueMaps")) + intValueMaps = electrons.getParameter >("intValueMaps"); + + const std::vector parameters = electrons.getParameterNames(); + for( const std::string& name : parameters ) { + if( std::string(electronSrc) == name ) + continue; + if( electrons.existsAs(name)) { + for (auto vmp : intValueMaps) { + if (name == vmp) { + e_conf.tag_int_token_map[name] = ValMapIntTagTokenPair(electrons.getParameter(name), ValMapIntToken()); + break; + } + } + e_conf.tag_float_token_map[name] = ValMapFloatTagTokenPair(electrons.getParameter(name), ValMapFloatToken()); + } + } + + e_conf.condnames_mean_50ns = electrons.getParameter >("regressionKey_50ns"); + e_conf.condnames_sigma_50ns = electrons.getParameter >("uncertaintyKey_50ns"); + e_conf.condnames_mean_25ns = electrons.getParameter >("regressionKey_25ns"); + e_conf.condnames_sigma_25ns = electrons.getParameter >("uncertaintyKey_25ns"); + e_conf.condnames_weight_50ns = electrons.getParameter("combinationKey_50ns"); + e_conf.condnames_weight_25ns = electrons.getParameter("combinationKey_25ns"); + } + + if( conf.exists("photon_config") ) { + const edm::ParameterSet& photons = conf.getParameter("photon_config"); + + if( photons.exists(photonSrc) ) + ph_conf.photon_src = photons.getParameter(photonSrc); + + std::vector intValueMaps; + if ( photons.existsAs >("intValueMaps")) + intValueMaps = photons.getParameter >("intValueMaps"); + + const std::vector parameters = photons.getParameterNames(); + for( const std::string& name : parameters ) { + if( std::string(photonSrc) == name ) + continue; + if( photons.existsAs(name)) { + for (auto vmp : intValueMaps) { + if (name == vmp) { + ph_conf.tag_int_token_map[name] = ValMapIntTagTokenPair(photons.getParameter(name), ValMapIntToken()); + break; + } + } + ph_conf.tag_float_token_map[name] = ValMapFloatTagTokenPair(photons.getParameter(name), ValMapFloatToken()); + } + } + + ph_conf.condnames_mean_50ns = photons.getParameter>("regressionKey_50ns"); + ph_conf.condnames_sigma_50ns = photons.getParameter>("uncertaintyKey_50ns"); + ph_conf.condnames_mean_25ns = photons.getParameter>("regressionKey_25ns"); + ph_conf.condnames_sigma_25ns = photons.getParameter>("uncertaintyKey_25ns"); + } +} + +template +inline void get_product(const edm::Event& evt, + const edm::EDGetTokenT >& tok, + std::unordered_map > >& map) { + evt.getByToken(tok,map[tok.index()]); +} + +void EGExtraInfoModifierFromDB::setEvent(const edm::Event& evt) { + eles_by_oop.clear(); + phos_by_oop.clear(); + ele_vmaps.clear(); + ele_int_vmaps.clear(); + pho_vmaps.clear(); + pho_int_vmaps.clear(); + + if( !e_conf.tok_electron_src.isUninitialized() ) { + edm::Handle > eles; + evt.getByToken(e_conf.tok_electron_src, eles); + + for( unsigned i = 0; i < eles->size(); ++i ) { + edm::Ptr ptr = eles->ptrAt(i); + eles_by_oop[ptr->originalObjectRef().key()] = ptr; + } + } + + for (std::unordered_map::iterator imap = e_conf.tag_float_token_map.begin(); + imap != e_conf.tag_float_token_map.end(); + imap++) { + get_product(evt, imap->second.second, ele_vmaps); + } + + for (std::unordered_map::iterator imap = e_conf.tag_int_token_map.begin(); + imap != e_conf.tag_int_token_map.end(); + imap++) { + get_product(evt, imap->second.second, ele_int_vmaps); + } + + if( !ph_conf.tok_photon_src.isUninitialized() ) { + edm::Handle > phos; + 
evt.getByToken(ph_conf.tok_photon_src,phos); + + for( unsigned i = 0; i < phos->size(); ++i ) { + edm::Ptr ptr = phos->ptrAt(i); + phos_by_oop[ptr->originalObjectRef().key()] = ptr; + } + } + + + for (std::unordered_map::iterator imap = ph_conf.tag_float_token_map.begin(); + imap != ph_conf.tag_float_token_map.end(); + imap++) { + get_product(evt, imap->second.second, pho_vmaps); + } + + for (std::unordered_map::iterator imap = ph_conf.tag_int_token_map.begin(); + imap != ph_conf.tag_int_token_map.end(); + imap++) { + get_product(evt, imap->second.second, pho_int_vmaps); + } + + if (autoDetectBunchSpacing_) { + if (evt.isRealData()) { + edm::RunNumber_t run = evt.run(); + if (run == 178003 || + run == 178004 || + run == 209089 || + run == 209106 || + run == 209109 || + run == 209146 || + run == 209148 || + run == 209151) { + bunchspacing_ = 25; + } + else if (run < 253000) { + bunchspacing_ = 50; + } + else { + bunchspacing_ = 25; + } + } else { + edm::Handle bunchSpacingH; + evt.getByToken(bunchSpacingToken_,bunchSpacingH); + bunchspacing_ = *bunchSpacingH; + } + } + + edm::Handle rhoH; + evt.getByToken(rhoToken_, rhoH); + rhoValue_ = *rhoH; + + evt.getByToken(vtxToken_, vtxH_); + nVtx_ = vtxH_->size(); +} + +void EGExtraInfoModifierFromDB::setEventContent(const edm::EventSetup& evs) { + + edm::ESHandle forestDEH; + edm::ESHandle forestEH; + + const std::vector ph_condnames_mean = (bunchspacing_ == 25) ? ph_conf.condnames_mean_25ns : ph_conf.condnames_mean_50ns; + const std::vector ph_condnames_sigma = (bunchspacing_ == 25) ? ph_conf.condnames_sigma_25ns : ph_conf.condnames_sigma_50ns; + + unsigned int ncor = ph_condnames_mean.size(); + for (unsigned int icor=0; icor().get(ph_condnames_mean[icor], forestDEH); + ph_forestH_mean_.push_back(forestDEH.product()); + evs.get().get(ph_condnames_sigma[icor], forestDEH); + ph_forestH_sigma_.push_back(forestDEH.product()); + } + + const std::vector e_condnames_mean = (bunchspacing_ == 25) ? e_conf.condnames_mean_25ns : e_conf.condnames_mean_50ns; + const std::vector e_condnames_sigma = (bunchspacing_ == 25) ? e_conf.condnames_sigma_25ns : e_conf.condnames_sigma_50ns; + const std::string ep_condnames_weight = (bunchspacing_ == 25) ? 
e_conf.condnames_weight_25ns : e_conf.condnames_weight_50ns; + + unsigned int encor = e_condnames_mean.size(); + evs.get().get(ep_condnames_weight, forestEH); + ep_forestH_weight_ = forestEH.product(); + + for (unsigned int icor=0; icor().get(e_condnames_mean[icor], forestDEH); + e_forestH_mean_.push_back(forestDEH.product()); + evs.get().get(e_condnames_sigma[icor], forestDEH); + e_forestH_sigma_.push_back(forestDEH.product()); + } +} + +template +inline void make_consumes(T& tag,U& tok,V& sume) { + if(!(empty_tag == tag)) + tok = sume.template consumes >(tag); +} + +template +inline void make_int_consumes(T& tag,U& tok,V& sume) { + if(!(empty_tag == tag)) + tok = sume.template consumes >(tag); +} + +void EGExtraInfoModifierFromDB::setConsumes(edm::ConsumesCollector& sumes) { + + rhoToken_ = sumes.consumes(rhoTag_); + vtxToken_ = sumes.consumes(vtxTag_); + + if (autoDetectBunchSpacing_) + bunchSpacingToken_ = sumes.consumes(bunchspacingTag_); + + //setup electrons + if(!(empty_tag == e_conf.electron_src)) + e_conf.tok_electron_src = sumes.consumes >(e_conf.electron_src); + + for ( std::unordered_map::iterator imap = e_conf.tag_float_token_map.begin(); + imap != e_conf.tag_float_token_map.end(); + imap++) { + make_consumes(imap->second.first, imap->second.second, sumes); + } + + for ( std::unordered_map::iterator imap = e_conf.tag_int_token_map.begin(); + imap != e_conf.tag_int_token_map.end(); + imap++) { + make_int_consumes(imap->second.first, imap->second.second, sumes); + } + + // setup photons + if(!(empty_tag == ph_conf.photon_src)) + ph_conf.tok_photon_src = sumes.consumes >(ph_conf.photon_src); + + for ( std::unordered_map::iterator imap = ph_conf.tag_float_token_map.begin(); + imap != ph_conf.tag_float_token_map.end(); + imap++) { + make_consumes(imap->second.first, imap->second.second, sumes); + } + + for ( std::unordered_map::iterator imap = ph_conf.tag_int_token_map.begin(); + imap != ph_conf.tag_int_token_map.end(); + imap++) { + make_int_consumes(imap->second.first, imap->second.second, sumes); + } +} + +template +inline void assignValue(const T& ptr, const U& tok, const V& map, Z& value) { + if( !tok.isUninitialized() ) value = map.find(tok.index())->second->get(ptr.id(),ptr.key()); +} + +void EGExtraInfoModifierFromDB::modifyObject(pat::Electron& ele) const { + // we encounter two cases here, either we are running AOD -> MINIAOD + // and the value maps are to the reducedEG object, can use original object ptr + // or we are running MINIAOD->MINIAOD and we need to fetch the pat objects to reference + + edm::Ptr ptr(ele.originalObjectRef()); + if( !e_conf.tok_electron_src.isUninitialized() ) { + auto key = eles_by_oop.find(ele.originalObjectRef().key()); + if( key != eles_by_oop.end() ) { + ptr = key->second; + } else { + throw cms::Exception("BadElectronKey") + << "Original object pointer with key = " << ele.originalObjectRef().key() + << " not found in cache!"; + } + } + std::array eval; + + reco::SuperClusterRef sc = ele.superCluster(); + edm::Ptr theseed = sc->seed(); + + // SET INPUTS + eval[0] = nVtx_; + eval[1] = sc->rawEnergy(); + eval[2] = sc->eta(); + eval[3] = sc->phi(); + eval[4] = sc->etaWidth(); + eval[5] = sc->phiWidth(); + eval[6] = ele.r9(); + eval[7] = theseed->energy()/sc->rawEnergy(); + + float sieip=0, cryPhi=0, cryEta=0; + int iPhi=0, iEta=0; + float eMax=0, e2nd=0, eTop=0, eBottom=0, eLeft=0, eRight=0; + float clusterMaxDR=0, clusterMaxDRDPhi=0, clusterMaxDRDEta=0, clusterMaxDRRawEnergy=0; + float clusterRawEnergy0=0, clusterRawEnergy1=0, 
clusterRawEnergy2=0; + float clusterDPhiToSeed0=0, clusterDPhiToSeed1=0, clusterDPhiToSeed2=0; + float clusterDEtaToSeed0=0, clusterDEtaToSeed1=0, clusterDEtaToSeed2=0; + + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("sigmaIetaIphi"))->second.second, ele_vmaps, sieip); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("eMax"))->second.second, ele_vmaps, eMax); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("e2nd"))->second.second, ele_vmaps, e2nd); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("eTop"))->second.second, ele_vmaps, eTop); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("eBottom"))->second.second, ele_vmaps, eBottom); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("eLeft"))->second.second, ele_vmaps, eLeft); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("eRight"))->second.second, ele_vmaps, eRight); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterMaxDR"))->second.second, ele_vmaps, clusterMaxDR); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterMaxDRDPhi"))->second.second, ele_vmaps, clusterMaxDRDPhi); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterMaxDRDEta"))->second.second, ele_vmaps, clusterMaxDRDEta); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterMaxDRRawEnergy"))->second.second, ele_vmaps, clusterMaxDRRawEnergy); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterRawEnergy0"))->second.second, ele_vmaps, clusterRawEnergy0); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterRawEnergy1"))->second.second, ele_vmaps, clusterRawEnergy1); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterRawEnergy2"))->second.second, ele_vmaps, clusterRawEnergy2); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterDPhiToSeed0"))->second.second, ele_vmaps, clusterDPhiToSeed0); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterDPhiToSeed1"))->second.second, ele_vmaps, clusterDPhiToSeed1); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterDPhiToSeed2"))->second.second, ele_vmaps, clusterDPhiToSeed2); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterDEtaToSeed0"))->second.second, ele_vmaps, clusterDEtaToSeed0); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterDEtaToSeed1"))->second.second, ele_vmaps, clusterDEtaToSeed1); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("clusterDEtaToSeed2"))->second.second, ele_vmaps, clusterDEtaToSeed2); + assignValue(ptr, e_conf.tag_int_token_map.find(std::string("iPhi"))->second.second, ele_int_vmaps, iPhi); + assignValue(ptr, e_conf.tag_int_token_map.find(std::string("iEta"))->second.second, ele_int_vmaps, iEta); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("cryPhi"))->second.second, ele_vmaps, cryPhi); + assignValue(ptr, e_conf.tag_float_token_map.find(std::string("cryEta"))->second.second, ele_vmaps, cryEta); + + eval[8] = eMax/sc->rawEnergy(); + eval[9] = e2nd/sc->rawEnergy(); + eval[10] = (eLeft+eRight!=0. ? (eLeft-eRight)/(eLeft+eRight) : 0.); + eval[11] = (eTop+eBottom!=0. ? 
(eTop-eBottom)/(eTop+eBottom) : 0.); + eval[12] = ele.sigmaIetaIeta(); + eval[13] = sieip; + eval[14] = ele.sigmaIphiIphi(); + const int N_ECAL = sc->clustersEnd() - sc->clustersBegin(); + eval[15] = std::max(0,N_ECAL - 1); + eval[16] = clusterMaxDR; + eval[17] = clusterMaxDRDPhi; + eval[18] = clusterMaxDRDEta; + eval[19] = clusterMaxDRRawEnergy/sc->rawEnergy(); + eval[20] = clusterRawEnergy0/sc->rawEnergy(); + eval[21] = clusterRawEnergy1/sc->rawEnergy(); + eval[22] = clusterRawEnergy2/sc->rawEnergy(); + eval[23] = clusterDPhiToSeed0; + eval[24] = clusterDPhiToSeed1; + eval[25] = clusterDPhiToSeed2; + eval[26] = clusterDEtaToSeed0; + eval[27] = clusterDEtaToSeed1; + eval[28] = clusterDEtaToSeed2; + + bool iseb = ele.isEB(); + + if (iseb) { + eval[29] = cryEta; + eval[30] = cryPhi; + eval[31] = iEta; + eval[32] = iPhi; + } else { + eval[29] = sc->preshowerEnergy()/sc->rawEnergy(); + } + + //magic numbers for MINUIT-like transformation of BDT output onto limited range + //(These should be stored inside the conditions object in the future as well) + constexpr double meanlimlow = 0.2; + constexpr double meanlimhigh = 2.0; + constexpr double meanoffset = meanlimlow + 0.5*(meanlimhigh-meanlimlow); + constexpr double meanscale = 0.5*(meanlimhigh-meanlimlow); + + constexpr double sigmalimlow = 0.0002; + constexpr double sigmalimhigh = 0.5; + constexpr double sigmaoffset = sigmalimlow + 0.5*(sigmalimhigh-sigmalimlow); + constexpr double sigmascale = 0.5*(sigmalimhigh-sigmalimlow); + + int coridx = 0; + if (!iseb) + coridx = 1; + + //these are the actual BDT responses + double rawmean = e_forestH_mean_[coridx]->GetResponse(eval.data()); + double rawsigma = e_forestH_sigma_[coridx]->GetResponse(eval.data()); + + //apply transformation to limited output range (matching the training) + double mean = meanoffset + meanscale*vdt::fast_sin(rawmean); + double sigma = sigmaoffset + sigmascale*vdt::fast_sin(rawsigma); + + //regression target is ln(Etrue/Eraw) + //so corrected energy is ecor=exp(mean)*e, uncertainty is exp(mean)*eraw*sigma=ecor*sigma + double ecor = mean*(eval[1]); + if (!iseb) + ecor = mean*(eval[1]+sc->preshowerEnergy()); + const double sigmacor = sigma*ecor; + + ele.setCorrectedEcalEnergy(ecor); + ele.setCorrectedEcalEnergyError(sigmacor); + + // E-p combination + //std::array eval_ep; + float eval_ep[11]; + + const float ep = ele.trackMomentumAtVtx().R(); + const float tot_energy = sc->rawEnergy()+sc->preshowerEnergy(); + const float momentumError = ele.trackMomentumError(); + const float trkMomentumRelError = ele.trackMomentumError()/ep; + const float eOverP = tot_energy*mean/ep; + eval_ep[0] = tot_energy*mean; + eval_ep[1] = sigma/mean; + eval_ep[2] = ep; + eval_ep[3] = trkMomentumRelError; + eval_ep[4] = sigma/mean/trkMomentumRelError; + eval_ep[5] = tot_energy*mean/ep; + eval_ep[6] = tot_energy*mean/ep*sqrt(sigma/mean*sigma/mean+trkMomentumRelError*trkMomentumRelError); + eval_ep[7] = ele.ecalDriven(); + eval_ep[8] = ele.trackerDrivenSeed(); + eval_ep[9] = int(ele.classification());//eleClass; + eval_ep[10] = iseb; + + // CODE FOR FUTURE SEMI_PARAMETRIC + //double rawweight = ep_forestH_mean_[coridx]->GetResponse(eval_ep.data()); + ////rawsigma = ep_forestH_sigma_[coridx]->GetResponse(eval.data()); + //double weight = meanoffset + meanscale*vdt::fast_sin(rawweight); + ////sigma = sigmaoffset + sigmascale*vdt::fast_sin(rawsigma); + + // CODE FOR STANDARD BDT + double weight = 0.; + if ( eOverP > 0.025 && + std::abs(ep-ecor) < 15.*std::sqrt( momentumError*momentumError + 
sigmacor*sigmacor ) ) { + // protection against crazy track measurement + weight = ep_forestH_weight_->GetResponse(eval_ep); + if(weight>1.) + weight = 1.; + else if(weight<0.) + weight = 0.; + } + + double combinedMomentum = weight*ele.trackMomentumAtVtx().R() + (1.-weight)*ecor; + double combinedMomentumError = sqrt(weight*weight*ele.trackMomentumError()*ele.trackMomentumError() + (1.-weight)*(1.-weight)*sigmacor*sigmacor); + + math::XYZTLorentzVector oldMomentum = ele.p4(); + math::XYZTLorentzVector newMomentum = math::XYZTLorentzVector(oldMomentum.x()*combinedMomentum/oldMomentum.t(), + oldMomentum.y()*combinedMomentum/oldMomentum.t(), + oldMomentum.z()*combinedMomentum/oldMomentum.t(), + combinedMomentum); + + //ele.correctEcalEnergy(combinedMomentum, combinedMomentumError); + ele.correctMomentum(newMomentum, ele.trackMomentumError(), combinedMomentumError); +} + +void EGExtraInfoModifierFromDB::modifyObject(pat::Photon& pho) const { + // we encounter two cases here, either we are running AOD -> MINIAOD + // and the value maps are to the reducedEG object, can use original object ptr + // or we are running MINIAOD->MINIAOD and we need to fetch the pat objects to reference + edm::Ptr ptr(pho.originalObjectRef()); + + if(!ph_conf.tok_photon_src.isUninitialized()) { + auto key = phos_by_oop.find(pho.originalObjectRef().key()); + if( key != phos_by_oop.end() ) { + ptr = key->second; + } else { + throw cms::Exception("BadPhotonKey") + << "Original object pointer with key = " << pho.originalObjectRef().key() << " not found in cache!"; + } + } + + std::array eval; + reco::SuperClusterRef sc = pho.superCluster(); + edm::Ptr theseed = sc->seed(); + + // SET INPUTS + eval[0] = sc->rawEnergy(); + //eval[1] = sc->position().Eta(); + //eval[2] = sc->position().Phi(); + eval[1] = pho.r9(); + eval[2] = sc->etaWidth(); + eval[3] = sc->phiWidth(); + const int N_ECAL = sc->clustersEnd() - sc->clustersBegin(); + eval[4] = std::max(0,N_ECAL - 1); + eval[5] = pho.hadronicOverEm(); + eval[6] = rhoValue_; + eval[7] = nVtx_; + eval[8] = theseed->eta()-sc->position().Eta(); + eval[9] = reco::deltaPhi(theseed->phi(),sc->position().Phi()); + eval[10] = pho.seedEnergy()/sc->rawEnergy(); + eval[11] = pho.e3x3()/pho.e5x5(); + eval[12] = pho.sigmaIetaIeta(); + + float sipip=0, sieip=0, e2x5Max=0, e2x5Left=0, e2x5Right=0, e2x5Top=0, e2x5Bottom=0; + assignValue(ptr, ph_conf.tag_float_token_map.find(std::string("sigmaIphiIphi"))->second.second, pho_vmaps, sipip); + assignValue(ptr, ph_conf.tag_float_token_map.find(std::string("e2x5Max"))->second.second, pho_vmaps, e2x5Max); + assignValue(ptr, ph_conf.tag_float_token_map.find(std::string("e2x5Left"))->second.second, pho_vmaps, e2x5Left); + assignValue(ptr, ph_conf.tag_float_token_map.find(std::string("e2x5Right"))->second.second, pho_vmaps, e2x5Right); + assignValue(ptr, ph_conf.tag_float_token_map.find(std::string("e2x5Top"))->second.second, pho_vmaps, e2x5Top); + assignValue(ptr, ph_conf.tag_float_token_map.find(std::string("e2x5Bottom"))->second.second, pho_vmaps, e2x5Bottom); + + eval[13] = sipip; + eval[14] = sieip; + eval[15] = pho.maxEnergyXtal()/pho.e5x5(); + eval[16] = pho.e2nd()/pho.e5x5(); + eval[17] = pho.eTop()/pho.e5x5(); + eval[18] = pho.eBottom()/pho.e5x5(); + eval[19] = pho.eLeft()/pho.e5x5(); + eval[20] = pho.eRight()/pho.e5x5(); + eval[21] = e2x5Max/pho.e5x5(); + eval[22] = e2x5Left/pho.e5x5(); + eval[23] = e2x5Right/pho.e5x5(); + eval[24] = e2x5Top/pho.e5x5(); + eval[25] = e2x5Bottom/pho.e5x5(); + + bool iseb = pho.isEB(); + + if (iseb) { + EBDetId 
ebseedid(theseed->seed()); + eval[26] = pho.e5x5()/pho.seedEnergy(); + eval[27] = ebseedid.ieta(); + eval[28] = ebseedid.iphi(); + } else { + EEDetId eeseedid(theseed->seed()); + eval[26] = sc->preshowerEnergy()/sc->rawEnergy(); + eval[27] = sc->preshowerEnergyPlane1()/sc->rawEnergy(); + eval[28] = sc->preshowerEnergyPlane2()/sc->rawEnergy(); + eval[29] = eeseedid.ix(); + eval[30] = eeseedid.iy(); + } + + //magic numbers for MINUIT-like transformation of BDT output onto limited range + //(These should be stored inside the conditions object in the future as well) + const double meanlimlow = 0.2; + const double meanlimhigh = 2.0; + const double meanoffset = meanlimlow + 0.5*(meanlimhigh-meanlimlow); + const double meanscale = 0.5*(meanlimhigh-meanlimlow); + + const double sigmalimlow = 0.0002; + const double sigmalimhigh = 0.5; + const double sigmaoffset = sigmalimlow + 0.5*(sigmalimhigh-sigmalimlow); + const double sigmascale = 0.5*(sigmalimhigh-sigmalimlow); + + int coridx = 0; + if (!iseb) + coridx = 1; + + //these are the actual BDT responses + double rawmean = ph_forestH_mean_[coridx]->GetResponse(eval.data()); + double rawsigma = ph_forestH_sigma_[coridx]->GetResponse(eval.data()); + //apply transformation to limited output range (matching the training) + double mean = meanoffset + meanscale*vdt::fast_sin(rawmean); + double sigma = sigmaoffset + sigmascale*vdt::fast_sin(rawsigma); + + //regression target is ln(Etrue/Eraw) + //so corrected energy is ecor=exp(mean)*e, uncertainty is exp(mean)*eraw*sigma=ecor*sigma + double ecor = mean*eval[0]; + if (!iseb) + ecor = mean*(eval[0]+sc->preshowerEnergy()); + + double sigmacor = sigma*ecor; + pho.setCorrectedEnergy(reco::Photon::P4type::regression2, ecor, sigmacor, true); +} diff --git a/RecoEgamma/EgammaTools/plugins/EGPfIsolationModifier.cc b/RecoEgamma/EgammaTools/plugins/EGPfIsolationModifier.cc new file mode 100644 index 0000000000000..48b9c1b005c3d --- /dev/null +++ b/RecoEgamma/EgammaTools/plugins/EGPfIsolationModifier.cc @@ -0,0 +1,255 @@ +#include "CommonTools/CandAlgos/interface/ModifyObjectValueBase.h" + +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "DataFormats/Common/interface/ValueMap.h" + +#include "DataFormats/EgammaCandidates/interface/GsfElectron.h" +#include "DataFormats/EgammaCandidates/interface/Photon.h" + +#include +#include + +namespace { + const edm::EDGetTokenT > empty_token; + const static edm::InputTag empty_tag(""); + + const static std::array electron_vars = { { "sumChargedHadronPt", + "sumNeutralHadronEt", + "sumPhotonEt", + "sumChargedParticlePt", + "sumNeutralHadronEtHighThreshold", + "sumPhotonEtHighThreshold", + "sumPUPt" } }; + + const static std::array photon_vars = { { "chargedHadronIso", + "chargedHadronIsoWrongVtx", + "neutralHadronIso", + "photonIso", + "modFrixione", + "sumChargedParticlePt", + "sumNeutralHadronEtHighThreshold", + "sumPhotonEtHighThreshold", + "sumPUPt" } }; +} + +#include + +class EGPfIsolationModifierFromValueMaps : public ModifyObjectValueBase { +public: + typedef std::tuple > > tag_and_token; + typedef std::unordered_map input_map; + + struct electron_config { + edm::InputTag electron_src; + edm::EDGetTokenT > tok_electron_src; + input_map electron_inputs; + }; + + struct photon_config { + edm::InputTag photon_src; + edm::EDGetTokenT > tok_photon_src; + input_map photon_inputs; + }; + + EGPfIsolationModifierFromValueMaps(const edm::ParameterSet& conf); + + void setEvent(const edm::Event&) override final; + void 
setEventContent(const edm::EventSetup&) override final; + void setConsumes(edm::ConsumesCollector&) override final; + + void modifyObject(pat::Electron&) const override final; + void modifyObject(pat::Photon&) const override final; + +private: + electron_config e_conf; + photon_config ph_conf; + std::unordered_map > eles_by_oop; // indexed by original object ptr + std::unordered_map > > ele_vmaps; + std::unordered_map > phos_by_oop; + std::unordered_map > > pho_vmaps; + mutable unsigned ele_idx,pho_idx; // hack here until we figure out why some slimmedPhotons don't have original object ptrs +}; + +DEFINE_EDM_PLUGIN(ModifyObjectValueFactory, + EGPfIsolationModifierFromValueMaps, + "EGPfIsolationModifierFromValueMaps"); + +EGPfIsolationModifierFromValueMaps:: +EGPfIsolationModifierFromValueMaps(const edm::ParameterSet& conf) : + ModifyObjectValueBase(conf) { + if( conf.exists("electron_config") ) { + const edm::ParameterSet& electrons = conf.getParameter("electron_config"); + if( electrons.exists("electronSrc") ) e_conf.electron_src = electrons.getParameter("electronSrc"); + for( const std::string& varname : electron_vars ) { + if( electrons.exists(varname) ) { + std::get<0>(e_conf.electron_inputs[varname]) = electrons.getParameter(varname); + } + } + } + if( conf.exists("photon_config") ) { + const edm::ParameterSet& photons = conf.getParameter("photon_config"); + if( photons.exists("photonSrc") ) ph_conf.photon_src = photons.getParameter("photonSrc"); + for( const std::string& varname : photon_vars ) { + if( photons.exists(varname) ) { + std::get<0>(ph_conf.photon_inputs[varname]) = photons.getParameter(varname); + } + } + } + + ele_idx = pho_idx = 0; +} + +inline void get_product(const edm::Event& evt, + const edm::EDGetTokenT >& tok, + std::unordered_map > >& map) { + if( !tok.isUninitialized() ) evt.getByToken(tok,map[tok.index()]); +} + +void EGPfIsolationModifierFromValueMaps:: +setEvent(const edm::Event& evt) { + eles_by_oop.clear(); + phos_by_oop.clear(); + ele_vmaps.clear(); + pho_vmaps.clear(); + + ele_idx = pho_idx = 0; + + if( !e_conf.tok_electron_src.isUninitialized() ) { + edm::Handle > eles; + evt.getByToken(e_conf.tok_electron_src,eles); + + for( unsigned i = 0; i < eles->size(); ++i ) { + edm::Ptr ptr = eles->ptrAt(i); + eles_by_oop[i] = ptr; + } + } + + for( const std::string& varname : electron_vars ) { + auto& inputs = e_conf.electron_inputs; + if( inputs.find(varname) == inputs.end() ) continue; + get_product(evt,std::get<1>(inputs[varname]),ele_vmaps); + } + + if( !ph_conf.tok_photon_src.isUninitialized() ) { + edm::Handle > phos; + evt.getByToken(ph_conf.tok_photon_src,phos); + + for( unsigned i = 0; i < phos->size(); ++i ) { + edm::Ptr ptr = phos->ptrAt(i); + phos_by_oop[i] = ptr; + } + } + + for( const std::string& varname : photon_vars ) { + auto& inputs = ph_conf.photon_inputs; + if( inputs.find(varname) == inputs.end() ) continue; + get_product(evt,std::get<1>(inputs[varname]),pho_vmaps); + } +} + +void EGPfIsolationModifierFromValueMaps:: +setEventContent(const edm::EventSetup& evs) { +} + +template +inline void make_consumes(T& tag,U& tok,V& sume) { if( !(empty_tag == tag) ) tok = sume.template consumes >(tag); } + +void EGPfIsolationModifierFromValueMaps:: +setConsumes(edm::ConsumesCollector& sumes) { + //setup electrons + if( !(empty_tag == e_conf.electron_src) ) e_conf.tok_electron_src = sumes.consumes >(e_conf.electron_src); + + for( const std::string& varname : electron_vars ) { + auto& inputs = e_conf.electron_inputs; + if( inputs.find(varname) == 
inputs.end() ) continue; + auto& the_tuple = inputs[varname]; + make_consumes(std::get<0>(the_tuple),std::get<1>(the_tuple),sumes); + } + + // setup photons + if( !(empty_tag == ph_conf.photon_src) ) ph_conf.tok_photon_src = sumes.consumes >(ph_conf.photon_src); + + for( const std::string& varname : photon_vars ) { + auto& inputs = ph_conf.photon_inputs; + if( inputs.find(varname) == inputs.end() ) continue; + auto& the_tuple = inputs[varname]; + make_consumes(std::get<0>(the_tuple),std::get<1>(the_tuple),sumes); + } +} + +template +inline void assignValue(const T& ptr, const U& input_map, const std::string& name, const V& map, float& value) { + auto itr = input_map.find(name); + if( itr == input_map.end() ) return; + const auto& tok = std::get<1>(itr->second); + if( !tok.isUninitialized() ) value = map.find(tok.index())->second->get(ptr.id(),ptr.key()); +} + +void EGPfIsolationModifierFromValueMaps:: +modifyObject(pat::Electron& ele) const { + // we encounter two cases here, either we are running AOD -> MINIAOD + // and the value maps are to the reducedEG object, can use original object ptr + // or we are running MINIAOD->MINIAOD and we need to fetch the pat objects to reference + edm::Ptr ptr(ele.originalObjectRef()); + if( !e_conf.tok_electron_src.isUninitialized() ) { + auto key = eles_by_oop.find(ele_idx); + if( key != eles_by_oop.end() ) { + ptr = key->second; + } else { + throw cms::Exception("BadElectronKey") + << "Original object pointer with key = " << ele.originalObjectRef().key() << " not found in cache!"; + } + } + //now we go through and modify the objects using the valuemaps we read in + auto pfIso = ele.pfIsolationVariables(); + + const auto& e_inputs = e_conf.electron_inputs; + + assignValue(ptr,e_inputs,electron_vars[0],ele_vmaps,pfIso.sumChargedHadronPt); + assignValue(ptr,e_inputs,electron_vars[1],ele_vmaps,pfIso.sumNeutralHadronEt); + assignValue(ptr,e_inputs,electron_vars[2],ele_vmaps,pfIso.sumPhotonEt); + assignValue(ptr,e_inputs,electron_vars[3],ele_vmaps,pfIso.sumChargedParticlePt); + assignValue(ptr,e_inputs,electron_vars[4],ele_vmaps,pfIso.sumNeutralHadronEtHighThreshold); + assignValue(ptr,e_inputs,electron_vars[5],ele_vmaps,pfIso.sumPhotonEtHighThreshold); + assignValue(ptr,e_inputs,electron_vars[6],ele_vmaps,pfIso.sumPUPt); + + ele.setPfIsolationVariables(pfIso); + ++ele_idx; +} + + +void EGPfIsolationModifierFromValueMaps:: +modifyObject(pat::Photon& pho) const { + // we encounter two cases here, either we are running AOD -> MINIAOD + // and the value maps are to the reducedEG object, can use original object ptr + // or we are running MINIAOD->MINIAOD and we need to fetch the pat objects to reference + edm::Ptr ptr(pho.originalObjectRef()); + if( !ph_conf.tok_photon_src.isUninitialized() ) { + auto key = phos_by_oop.find(pho_idx); + if( key != phos_by_oop.end() ) { + ptr = key->second; + } else { + throw cms::Exception("BadPhotonKey") + << "Original object pointer with key = " << pho.originalObjectRef().key() << " not found in cache!"; + } + } + + //now we go through and modify the objects using the valuemaps we read in + auto pfIso = pho.getPflowIsolationVariables(); + + const auto& ph_inputs = ph_conf.photon_inputs; + + assignValue(ptr,ph_inputs,photon_vars[0],pho_vmaps,pfIso.chargedHadronIso); + assignValue(ptr,ph_inputs,photon_vars[1],pho_vmaps,pfIso.chargedHadronIsoWrongVtx); + assignValue(ptr,ph_inputs,photon_vars[2],pho_vmaps,pfIso.neutralHadronIso); + assignValue(ptr,ph_inputs,photon_vars[3],pho_vmaps,pfIso.photonIso); + 
assignValue(ptr,ph_inputs,photon_vars[4],pho_vmaps,pfIso.modFrixione); + assignValue(ptr,ph_inputs,photon_vars[5],pho_vmaps,pfIso.sumChargedParticlePt); + assignValue(ptr,ph_inputs,photon_vars[6],pho_vmaps,pfIso.sumNeutralHadronEtHighThreshold); + assignValue(ptr,ph_inputs,photon_vars[7],pho_vmaps,pfIso.sumPhotonEtHighThreshold); + assignValue(ptr,ph_inputs,photon_vars[8],pho_vmaps,pfIso.sumPUPt); + + pho.setPflowIsolationVariables(pfIso); + ++pho_idx; +} diff --git a/RecoEgamma/EgammaTools/python/egammaObjectModificationsInMiniAOD_cff.py b/RecoEgamma/EgammaTools/python/egammaObjectModificationsInMiniAOD_cff.py index a52e73bf7cd76..8fcb0120eb10b 100644 --- a/RecoEgamma/EgammaTools/python/egammaObjectModificationsInMiniAOD_cff.py +++ b/RecoEgamma/EgammaTools/python/egammaObjectModificationsInMiniAOD_cff.py @@ -1,3 +1,69 @@ import FWCore.ParameterSet.Config as cms -egamma_modifications = cms.VPSet( ) +#electron mva ids +import RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Spring15_25ns_nonTrig_V1_cff as ele_spring15_nt + +#photon mva ids +import RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring15_25ns_nonTrig_V2_cff as pho_spring15_25_nt +import RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring15_50ns_nonTrig_V2_cff as pho_spring15_50_nt + +ele_mva_prod_name = 'electronMVAValueMapProducer' +pho_mva_prod_name = 'photonMVAValueMapProducer' + +def setup_mva(val_pset,cat_pset,prod_name,mva_name): + value_name = '%s:%sValues'%(prod_name,mva_name) + cat_name = '%s:%sCategories'%(prod_name,mva_name) + setattr( val_pset, '%sValues'%mva_name, cms.InputTag(value_name) ) + setattr( cat_pset, '%sCategories'%mva_name, cms.InputTag(cat_name) ) + +egamma_modifications = cms.VPSet( + cms.PSet( modifierName = cms.string('EGFull5x5ShowerShapeModifierFromValueMaps'), + photon_config = cms.PSet( sigmaIetaIeta = cms.InputTag('photonIDValueMapProducer:phoFull5x5SigmaIEtaIEta'), + e5x5 = cms.InputTag('photonIDValueMapProducer:phoFull5x5E5x5') + ) + ), + cms.PSet( modifierName = cms.string('EGPfIsolationModifierFromValueMaps'), + photon_config = cms.PSet( chargedHadronIso = cms.InputTag('photonIDValueMapProducer:phoChargedIsolation'), + neutralHadronIsolation = cms.InputTag('photonIDValueMapProducer:phoNeutralHadronIsolation'), + photonIso = cms.InputTag('photonIDValueMapProducer:phoPhotonIsolation'), + chargedHadronIsoWrongVtx = cms.InputTag('photonIDValueMapProducer:phoWorstChargedIsolation') + ) + ), + cms.PSet( modifierName = cms.string('EGExtraInfoModifierFromFloatValueMaps'), + electron_config = cms.PSet( ), + photon_config = cms.PSet( phoFull5x5SigmaIEtaIPhi = cms.InputTag('photonIDValueMapProducer:phoFull5x5SigmaIEtaIPhi'), + phoFull5x5E1x3 = cms.InputTag('photonIDValueMapProducer:phoFull5x5E1x3'), + phoFull5x5E2x2 = cms.InputTag('photonIDValueMapProducer:phoFull5x5E2x2'), + phoFull5x5E2x5Max = cms.InputTag('photonIDValueMapProducer:phoFull5x5E2x5Max'), + phoESEffSigmaRR = cms.InputTag('photonIDValueMapProducer:phoESEffSigmaRR'), + ) + ), + cms.PSet( modifierName = cms.string('EGExtraInfoModifierFromIntValueMaps'), + electron_config = cms.PSet( ), + photon_config = cms.PSet( ) + ) +) + +#setup the mva value maps to embed +setup_mva(egamma_modifications[2].electron_config, + egamma_modifications[3].electron_config, + ele_mva_prod_name, + ele_spring15_nt.mvaSpring15NonTrigClassName+ele_spring15_nt.mvaTag) + +setup_mva(egamma_modifications[2].photon_config, + egamma_modifications[3].photon_config, + pho_mva_prod_name, + 
pho_spring15_25_nt.mvaSpring15NonTrigClassName+pho_spring15_25_nt.mvaTag) + +setup_mva(egamma_modifications[2].photon_config, + egamma_modifications[3].photon_config, + pho_mva_prod_name, + pho_spring15_50_nt.mvaSpring15NonTrigClassName+pho_spring15_50_nt.mvaTag) + +############################################################# +# REGRESSION MODIFIERS +############################################################# + +from RecoEgamma.EgammaTools.regressionModifier_cfi import * + +egamma_modifications.append( regressionModifier ) diff --git a/RecoEgamma/EgammaTools/python/egammaObjectModificationsPatches_cff.py b/RecoEgamma/EgammaTools/python/egammaObjectModificationsPatches_cff.py index 07373a71ba432..a52e73bf7cd76 100644 --- a/RecoEgamma/EgammaTools/python/egammaObjectModificationsPatches_cff.py +++ b/RecoEgamma/EgammaTools/python/egammaObjectModificationsPatches_cff.py @@ -1,63 +1,3 @@ import FWCore.ParameterSet.Config as cms -#electron mva ids -import RecoEgamma.ElectronIdentification.Identification.mvaElectronID_PHYS14_PU20bx25_nonTrig_V1_cff as ele_phys14_nt - -#photon mva ids -import RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_PHYS14_PU20bx25_nonTrig_V1_cff as pho_phys14_nt -import RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring15_50ns_nonTrig_V0_cff as pho_spring15_nt - -ele_mva_prod_name = 'electronMVAValueMapProducer' -pho_mva_prod_name = 'photonMVAValueMapProducer' - -def setup_mva(val_pset,cat_pset,prod_name,mva_name): - value_name = '%s:%sValues'%(prod_name,mva_name) - cat_name = '%s:%sCategories'%(prod_name,mva_name) - setattr( val_pset, '%sValues'%mva_name, cms.InputTag(value_name) ) - setattr( cat_pset, '%sCategories'%mva_name, cms.InputTag(cat_name) ) - -egamma_modifications = cms.VPSet( - cms.PSet( modifierName = cms.string('EGFull5x5ShowerShapeModifierFromValueMaps'), - photon_config = cms.PSet( photonSrc = cms.InputTag('slimmedPhotons',processName=cms.InputTag.skipCurrentProcess()), - sigmaIetaIeta = cms.InputTag('photonIDValueMapProducer:phoFull5x5SigmaIEtaIEta'), - e5x5 = cms.InputTag('photonIDValueMapProducer:phoFull5x5E5x5') - ) - ), - cms.PSet( modifierName = cms.string('EGExtraInfoModifierFromFloatValueMaps'), - electron_config = cms.PSet( electronSrc = cms.InputTag('slimmedElectrons',processName=cms.InputTag.skipCurrentProcess()) - ), - photon_config = cms.PSet( photonSrc = cms.InputTag('slimmedPhotons',processName=cms.InputTag.skipCurrentProcess()), - phoFull5x5SigmaIEtaIPhi = cms.InputTag('photonIDValueMapProducer:phoFull5x5SigmaIEtaIPhi'), - phoFull5x5E1x3 = cms.InputTag('photonIDValueMapProducer:phoFull5x5E1x3'), - phoFull5x5E2x2 = cms.InputTag('photonIDValueMapProducer:phoFull5x5E2x2'), - phoFull5x5E2x5Max = cms.InputTag('photonIDValueMapProducer:phoFull5x5E2x5Max'), - phoESEffSigmaRR = cms.InputTag('photonIDValueMapProducer:phoESEffSigmaRR'), - phoChargedIsolation = cms.InputTag('photonIDValueMapProducer:phoChargedIsolation'), - phoNeutralHadronIsolation = cms.InputTag('photonIDValueMapProducer:phoNeutralHadronIsolation'), - phoPhotonIsolation = cms.InputTag('photonIDValueMapProducer:phoPhotonIsolation'), - phoWorstChargedIsolation = cms.InputTag('photonIDValueMapProducer:phoWorstChargedIsolation') - ) - ), - cms.PSet( modifierName = cms.string('EGExtraInfoModifierFromIntValueMaps'), - electron_config = cms.PSet( electronSrc = cms.InputTag('slimmedElectrons',processName=cms.InputTag.skipCurrentProcess()) - ), - photon_config = cms.PSet( photonSrc = cms.InputTag('slimmedPhotons',processName=cms.InputTag.skipCurrentProcess()) - ) 
- ) -) - -#setup the mva value maps to embed -setup_mva(egamma_modifications[1].electron_config, - egamma_modifications[2].electron_config, - ele_mva_prod_name, - ele_phys14_nt.mvaPhys14NonTrigClassName) - -setup_mva(egamma_modifications[1].photon_config, - egamma_modifications[2].photon_config, - pho_mva_prod_name, - pho_phys14_nt.mvaPhys14NonTrigClassName) - -setup_mva(egamma_modifications[1].photon_config, - egamma_modifications[2].photon_config, - pho_mva_prod_name, - pho_spring15_nt.mvaSpring15NonTrigClassName) +egamma_modifications = cms.VPSet( ) diff --git a/RecoEgamma/EgammaTools/python/regressionModifier_cfi.py b/RecoEgamma/EgammaTools/python/regressionModifier_cfi.py new file mode 100644 index 0000000000000..4e357c09b1fec --- /dev/null +++ b/RecoEgamma/EgammaTools/python/regressionModifier_cfi.py @@ -0,0 +1,62 @@ +import FWCore.ParameterSet.Config as cms + +regressionModifier = \ + cms.PSet( modifierName = cms.string('EGExtraInfoModifierFromDB'), + autoDetectBunchSpacing = cms.bool(True), + bunchSpacingTag = cms.InputTag("addPileupInfo:bunchSpacing"), + manualBunchSpacing = cms.int32(50), + rhoCollection = cms.InputTag("fixedGridRhoFastjetAll"), + vertexCollection = cms.InputTag("offlinePrimaryVertices"), + electron_config = cms.PSet( sigmaIetaIphi = cms.InputTag('electronRegressionValueMapProducer:sigmaIEtaIPhi'), + eMax = cms.InputTag("electronRegressionValueMapProducer:eMax"), + e2nd = cms.InputTag("electronRegressionValueMapProducer:e2nd"), + eTop = cms.InputTag("electronRegressionValueMapProducer:eTop"), + eBottom = cms.InputTag("electronRegressionValueMapProducer:eBottom"), + eLeft = cms.InputTag("electronRegressionValueMapProducer:eLeft"), + eRight = cms.InputTag("electronRegressionValueMapProducer:eRight"), + clusterMaxDR = cms.InputTag("electronRegressionValueMapProducer:clusterMaxDR"), + clusterMaxDRDPhi = cms.InputTag("electronRegressionValueMapProducer:clusterMaxDRDPhi"), + clusterMaxDRDEta = cms.InputTag("electronRegressionValueMapProducer:clusterMaxDRDEta"), + clusterMaxDRRawEnergy = cms.InputTag("electronRegressionValueMapProducer:clusterMaxDRRawEnergy"), + clusterRawEnergy0 = cms.InputTag("electronRegressionValueMapProducer:clusterRawEnergy0"), + clusterRawEnergy1 = cms.InputTag("electronRegressionValueMapProducer:clusterRawEnergy1"), + clusterRawEnergy2 = cms.InputTag("electronRegressionValueMapProducer:clusterRawEnergy2"), + clusterDPhiToSeed0 = cms.InputTag("electronRegressionValueMapProducer:clusterDPhiToSeed0"), + clusterDPhiToSeed1 = cms.InputTag("electronRegressionValueMapProducer:clusterDPhiToSeed1"), + clusterDPhiToSeed2 = cms.InputTag("electronRegressionValueMapProducer:clusterDPhiToSeed2"), + clusterDEtaToSeed0 = cms.InputTag("electronRegressionValueMapProducer:clusterDEtaToSeed0"), + clusterDEtaToSeed1 = cms.InputTag("electronRegressionValueMapProducer:clusterDEtaToSeed1"), + clusterDEtaToSeed2 = cms.InputTag("electronRegressionValueMapProducer:clusterDEtaToSeed2"), + iPhi = cms.InputTag("electronRegressionValueMapProducer:iPhi"), + iEta = cms.InputTag("electronRegressionValueMapProducer:iEta"), + cryPhi = cms.InputTag("electronRegressionValueMapProducer:cryPhi"), + cryEta = cms.InputTag("electronRegressionValueMapProducer:cryEta"), + intValueMaps = cms.vstring("iPhi", "iEta"), + + # EB, EE + regressionKey_25ns = cms.vstring('gedelectron_EBCorrection_25ns', 'gedelectron_EECorrection_25ns'), + uncertaintyKey_25ns = cms.vstring('gedelectron_EBUncertainty_25ns', 'gedelectron_EEUncertainty_25ns'), + combinationKey_25ns = 
cms.string('gedelectron_p4combination_25ns'), + + regressionKey_50ns = cms.vstring('gedelectron_EBCorrection_50ns', 'gedelectron_EECorrection_50ns'), + uncertaintyKey_50ns = cms.vstring('gedelectron_EBUncertainty_50ns', 'gedelectron_EEUncertainty_50ns'), + combinationKey_50ns = cms.string('gedelectron_p4combination_50ns'), + ), + + photon_config = cms.PSet( sigmaIetaIphi = cms.InputTag('photonRegressionValueMapProducer:sigmaIEtaIPhi'), + sigmaIphiIphi = cms.InputTag('photonRegressionValueMapProducer:sigmaIPhiIPhi'), + e2x5Max = cms.InputTag('photonRegressionValueMapProducer:e2x5Max'), + e2x5Left = cms.InputTag('photonRegressionValueMapProducer:e2x5Left'), + e2x5Right = cms.InputTag('photonRegressionValueMapProducer:e2x5Right'), + e2x5Top = cms.InputTag('photonRegressionValueMapProducer:e2x5Top'), + e2x5Bottom = cms.InputTag('photonRegressionValueMapProducer:e2x5Bottom'), + + # EB, EE + regressionKey_25ns = cms.vstring('gedphoton_EBCorrection_25ns', 'gedphoton_EECorrection_25ns'), + uncertaintyKey_25ns = cms.vstring('gedphoton_EBUncertainty_25ns', 'gedphoton_EEUncertainty_25ns'), + + regressionKey_50ns = cms.vstring('gedphoton_EBCorrection_50ns', 'gedphoton_EECorrection_50ns'), + uncertaintyKey_50ns = cms.vstring('gedphoton_EBUncertainty_50ns', 'gedphoton_EEUncertainty_50ns'), + ) + ) + diff --git a/RecoEgamma/EgammaTools/src/MVAObjectCache.cc b/RecoEgamma/EgammaTools/src/MVAObjectCache.cc new file mode 100644 index 0000000000000..e86cb5a67609e --- /dev/null +++ b/RecoEgamma/EgammaTools/src/MVAObjectCache.cc @@ -0,0 +1,45 @@ +#include "RecoEgamma/EgammaTools/interface/MVAObjectCache.h" + +using namespace egamma; + +MVAObjectCache::MVAObjectCache(const edm::ParameterSet& conf) { + const std::vector& mvaEstimatorConfigs + = conf.getParameterSetVector("mvaConfigurations"); + + for( auto &imva : mvaEstimatorConfigs ){ + // building the mva class is now done in the ObjectCache, + // so we loop over what's in that. + std::unique_ptr thisEstimator; + thisEstimator.reset(nullptr); + if( !imva.empty() ) { + const std::string& pName = imva.getParameter("mvaName"); + // The factory below constructs the MVA of the appropriate type based + // on the "mvaName" which is the name of the derived MVA class (plugin) + const AnyMVAEstimatorRun2Base *estimator = AnyMVAEstimatorRun2Factory::get()->create( pName, imva ); + // Declare all event content, such as ValueMaps produced upstream or other, + // original event data pieces, that is needed (if any is implemented in the specific + // MVA classes) + const std::string full_name = estimator->getName() + estimator->getTag(); + auto diditwork = mvas_.emplace( full_name, MVAPtr(estimator) ); + if( !diditwork.second ) { + throw cms::Exception("MVA configured twice: ") + << "Tried already to make an mva of name: " << estimator->getName() + << " please ensure that the name of the MVA is unique!" 
<< std::endl; + } + } else { + throw cms::Exception(" MVA configuration not found: ") + << " failed to find proper configuration for " + <<"one of the MVAs in the main python script " << std::endl; + } + } +} + +const MVAObjectCache::MVAPtr& +MVAObjectCache::getMVA(const std::string& mva) const { + auto itr = mvas_.find(mva); + if( itr == mvas_.end() ) { + throw cms::Exception("InvalidMVAName") + << mva << " is not managed by this evaluator!"; + } + return itr->second; +} diff --git a/RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_25ns.txt b/RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_25ns.txt new file mode 100644 index 0000000000000..00e8420f6436c --- /dev/null +++ b/RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_25ns.txt @@ -0,0 +1,14 @@ +# This file contains Effective Area constants for +# computing pile-up corrections for the neutral hadron and photon +# isolation for an electron object. +# Documentation: +# https://indico.cern.ch/event/369239/contribution/4/attachments/1134761/1623262/talk_effective_areas_25ns.pdf +# +# |eta| min |eta| max effective area +0.0000 1.0000 0.1752 +1.0000 1.4790 0.1862 +1.4790 2.0000 0.1411 +2.0000 2.2000 0.1534 +2.2000 2.3000 0.1903 +2.3000 2.4000 0.2243 +2.4000 5.0000 0.2687 diff --git a/RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_50ns.txt b/RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_50ns.txt new file mode 100644 index 0000000000000..1f32ec396832b --- /dev/null +++ b/RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_50ns.txt @@ -0,0 +1,12 @@ +# This file contains Effective Area constants for +# computing pile-up corrections for the neutral hadron and photon +# isolation for an electron object. 
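# Illustration of the intended use (assumed, not defined in this file): the
# constants below typically enter a pile-up correction of the form
#     isoCorrected = max(0., neutralHadronIso + photonIso - rho * EA(|etaSC|))
# where EA(|etaSC|) is the effective area of the row whose [|eta| min, |eta| max)
# bin contains the electron supercluster |eta|.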
+# Documentation: +# https://indico.cern.ch/event/369235/contribution/4/attachments/734635/1007867/Rami_EffAreas.pdf +# +# |eta| min |eta| max effective area +0.0000 0.8000 0.0973 +0.8000 1.3000 0.0954 +1.3000 2.0000 0.0632 +2.0000 2.2000 0.0727 +2.2000 5.0000 0.1337 diff --git a/RecoEgamma/ElectronIdentification/interface/ElectronMVAEstimatorRun2Phys14NonTrig.h b/RecoEgamma/ElectronIdentification/interface/ElectronMVAEstimatorRun2Phys14NonTrig.h index 61e8001e43322..8977ed2b41034 100644 --- a/RecoEgamma/ElectronIdentification/interface/ElectronMVAEstimatorRun2Phys14NonTrig.h +++ b/RecoEgamma/ElectronIdentification/interface/ElectronMVAEstimatorRun2Phys14NonTrig.h @@ -5,12 +5,16 @@ #include "DataFormats/EgammaCandidates/interface/GsfElectron.h" +#include "CondFormats/EgammaObjects/interface/GBRForest.h" + #include #include +#include #include #include "TMVA/Factory.h" #include "TMVA/Tools.h" #include "TMVA/Reader.h" +#include "TMVA/MethodBDT.h" class ElectronMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base{ @@ -18,7 +22,7 @@ class ElectronMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base{ // Define here the number and the meaning of the categories // for this specific MVA - const int nCategories = 6; + static constexpr int nCategories = 6; enum mvaCategories { UNDEFINED = -1, CAT_EB1_PT5to10 = 0, @@ -31,34 +35,34 @@ class ElectronMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base{ // Define the struct that contains all necessary for MVA variables struct AllVariables { - float kfhits; + float kfhits; // 0 // Pure ECAL -> shower shapes - float see; - float spp; - float OneMinusE1x5E5x5; - float R9; - float etawidth; - float phiwidth; - float HoE; + float see; // 1 + float spp; // 2 + float OneMinusE1x5E5x5; // 3 + float R9; // 4 + float etawidth; // 5 + float phiwidth; // 6 + float HoE; // 7 // Endcap only variables - float PreShowerOverRaw; + float PreShowerOverRaw; // 8 //Pure tracking variables - float kfchi2; - float gsfchi2; + float kfchi2; // 9 + float gsfchi2; // 10 // Energy matching - float fbrem; - float EoP; - float eleEoPout; - float IoEmIoP; + float fbrem; // 11 + float EoP; // 12 + float eleEoPout; // 13 + float IoEmIoP; // 14 // Geometrical matchings - float deta; - float dphi; - float detacalo; + float deta; // 15 + float dphi; // 16 + float detacalo; // 17 // Spectator variables - float pt; - float isBarrel; - float isEndcap; - float SCeta; + float pt; // 18 + float isBarrel; // 19 + float isEndcap; // 20 + float SCeta; // 21 }; // Constructor and destructor @@ -66,32 +70,37 @@ class ElectronMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base{ ~ElectronMVAEstimatorRun2Phys14NonTrig(); // Calculation of the MVA value - float mvaValue( const edm::Ptr& particle); + float mvaValue( const edm::Ptr& particle, const edm::Event& evt) const; // Utility functions - TMVA::Reader *createSingleReader(const int iCategory, const edm::FileInPath &weightFile); - - inline int getNCategories(){return nCategories;}; - bool isEndcapCategory( int category ); - const inline std::string getName(){return name_;}; - + std::unique_ptr createSingleReader(const int iCategory, const edm::FileInPath &weightFile) ; + + virtual int getNCategories() const override final { return nCategories; } + bool isEndcapCategory( int category ) const; + virtual const std::string& getName() const override final { return _name; } + virtual const std::string& getTag() const override final { return _tag; } + // Functions that should work on both pat and reco electrons // (use 
the fact that pat::Electron inherits from reco::GsfElectron) - void fillMVAVariables(const edm::Ptr& particle); - int findCategory( const edm::Ptr& particle); + std::vector fillMVAVariables(const edm::Ptr& particle, const edm::Event&) const; + int findCategory(const edm::Ptr& particle) const; // The function below ensures that the variables passed to MVA are // within reasonable bounds - void constrainMVAVariables(); + void constrainMVAVariables(AllVariables& vars) const; private: // MVA name. This is a unique name for this MVA implementation. // It will be used as part of ValueMap names. // For simplicity, keep it set to the class name. - const std::string name_ = "ElectronMVAEstimatorRun2Phys14NonTrig"; + const std::string _name = "ElectronMVAEstimatorRun2Phys14NonTrig"; + // MVA tag. This is an additional string variable to distinguish + // instances of the estimator of this class configured with different + // weight files. + std::string _tag; // Data members - std::vector< std::unique_ptr > _tmvaReaders; + std::vector< std::unique_ptr > _gbrForests; // All variables needed by this MVA std::string _MethodName; diff --git a/RecoEgamma/ElectronIdentification/interface/ElectronMVAEstimatorRun2Spring15NonTrig.h b/RecoEgamma/ElectronIdentification/interface/ElectronMVAEstimatorRun2Spring15NonTrig.h new file mode 100644 index 0000000000000..4b6a6ee7e2671 --- /dev/null +++ b/RecoEgamma/ElectronIdentification/interface/ElectronMVAEstimatorRun2Spring15NonTrig.h @@ -0,0 +1,151 @@ +#ifndef RecoEgamma_ElectronIdentification_ElectronMVAEstimatorRun2Spring15NonTrig_H +#define RecoEgamma_ElectronIdentification_ElectronMVAEstimatorRun2Spring15NonTrig_H + +#include "RecoEgamma/EgammaTools/interface/AnyMVAEstimatorRun2Base.h" + +#include "DataFormats/EgammaCandidates/interface/GsfElectron.h" + +#include "DataFormats/BeamSpot/interface/BeamSpot.h" + +#include "DataFormats/EgammaCandidates/interface/ConversionFwd.h" +#include "DataFormats/EgammaCandidates/interface/Conversion.h" +#include "RecoEgamma/EgammaTools/interface/ConversionTools.h" + +#include "CondFormats/EgammaObjects/interface/GBRForest.h" + +#include +#include +#include +#include "TMVA/Factory.h" +#include "TMVA/Tools.h" +#include "TMVA/Reader.h" + +class ElectronMVAEstimatorRun2Spring15NonTrig : public AnyMVAEstimatorRun2Base{ + + public: + + // Define here the number and the meaning of the categories + // for this specific MVA + const int nCategories = 6; + enum mvaCategories { + UNDEFINED = -1, + CAT_EB1_PT5to10 = 0, + CAT_EB2_PT5to10 = 1, + CAT_EE_PT5to10 = 2, + CAT_EB1_PT10plus = 3, + CAT_EB2_PT10plus = 4, + CAT_EE_PT10plus = 5 + }; + + // Define the struct that contains all necessary for MVA variables + // Note: all variables have to be floats for TMVA Reader, even if + // the training was done with ints. 
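  // Illustration (assumed, not part of the original header): integer inputs are
  // cast to float when this struct is filled, so the same buffer can be handed to
  // either a TMVA::Reader or a GBRForest, e.g. (nGsfHits and
  // nExpectedMissingInnerHits are hypothetical intermediate variables):
  //   allVars.gsfhits = static_cast<float>( nGsfHits );
  //   allVars.expectedMissingInnerHits = static_cast<float>( nExpectedMissingInnerHits );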
+ struct AllVariables { + // Pure ECAL -> shower shapes + float see; // 0 + float spp; // 1 + float OneMinusE1x5E5x5; // 2 + float R9; // 3 + float etawidth; // 4 + float phiwidth; // 5 + float HoE; // 6 + // Endcap only variables + float PreShowerOverRaw; // 7 + //Pure tracking variables + float kfhits; // 8 + float kfchi2; // 9 + float gsfchi2; // 10 + // Energy matching + float fbrem; // 11 + + float gsfhits; // 12 + float expectedMissingInnerHits; // 13 + float convVtxFitProbability; // 14 + + float EoP; // 15 + float eleEoPout; // 16 + float IoEmIoP; // 17 + // Geometrical matchings + float deta; // 18 + float dphi; // 19 + float detacalo; // 20 + // Spectator variables + float pt; // 21 + float isBarrel; // 22 + float isEndcap; // 23 + float SCeta; // 24 + // + float eClass; // 25 + float pfRelIso; // 26 + float expectedInnerHits; // 27 + float vtxconv; // 28 + float mcEventWeight; // 29 + float mcCBmatchingCategory; // 30 + + }; + + // Constructor and destructor + ElectronMVAEstimatorRun2Spring15NonTrig(const edm::ParameterSet& conf); + ~ElectronMVAEstimatorRun2Spring15NonTrig(); + + // Calculation of the MVA value + float mvaValue( const edm::Ptr& particle, const edm::Event&) const override; + + // Utility functions + std::unique_ptr createSingleReader(const int iCategory, + const edm::FileInPath &weightFile); + + virtual int getNCategories() const override { return nCategories; } + bool isEndcapCategory( int category ) const; + virtual const std::string& getName() const override final { return _name; } + virtual const std::string& getTag() const override final { return _tag; } + + // Functions that should work on both pat and reco electrons + // (use the fact that pat::Electron inherits from reco::GsfElectron) + std::vector fillMVAVariables(const edm::Ptr& particle, const edm::Event&) const override; + int findCategory( const edm::Ptr& particle) const override; + // The function below ensures that the variables passed to MVA are + // within reasonable bounds + void constrainMVAVariables(AllVariables&) const; + + // Call this function once after the constructor to declare + // the needed event content pieces to the framework + void setConsumes(edm::ConsumesCollector&&) const override final; + // Call this function once per event to retrieve all needed + // event content pices + + private: + + // MVA name. This is a unique name for this MVA implementation. + // It will be used as part of ValueMap names. + // For simplicity, keep it set to the class name. + const std::string _name = "ElectronMVAEstimatorRun2Spring15NonTrig"; + // MVA tag. This is an additional string variable to distinguish + // instances of the estimator of this class configured with different + // weight files. 
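  // Illustration (assumed labels, for orientation only): the producer in this PR
  // builds its ValueMap names as getName() + getTag() + "Values"/"Categories",
  // so with a hypothetical tag "25nsV1" this estimator would publish
  //   electronMVAValueMapProducer:ElectronMVAEstimatorRun2Spring15NonTrig25nsV1Values
  //   electronMVAValueMapProducer:ElectronMVAEstimatorRun2Spring15NonTrig25nsV1Categories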
+ const std::string _tag; + + // Data members + std::vector< std::unique_ptr > _gbrForests; + + // All variables needed by this MVA + const std::string _MethodName; + AllVariables _allMVAVars; + + // + // Declare all tokens that will be needed to retrieve misc + // data from the event content required by this MVA + // + const edm::InputTag _beamSpotLabel; + // Conversions in AOD and miniAOD have different names + const edm::InputTag _conversionsLabelAOD; + const edm::InputTag _conversionsLabelMiniAOD; + + +}; + +DEFINE_EDM_PLUGIN(AnyMVAEstimatorRun2Factory, + ElectronMVAEstimatorRun2Spring15NonTrig, + "ElectronMVAEstimatorRun2Spring15NonTrig"); + +#endif diff --git a/RecoEgamma/ElectronIdentification/plugins/ElectronMVAEstimatorRun2Phys14NonTrig.cc b/RecoEgamma/ElectronIdentification/plugins/ElectronMVAEstimatorRun2Phys14NonTrig.cc index fee844edbf679..c806768cd434c 100644 --- a/RecoEgamma/ElectronIdentification/plugins/ElectronMVAEstimatorRun2Phys14NonTrig.cc +++ b/RecoEgamma/ElectronIdentification/plugins/ElectronMVAEstimatorRun2Phys14NonTrig.cc @@ -8,8 +8,10 @@ #include "FWCore/ParameterSet/interface/FileInPath.h" ElectronMVAEstimatorRun2Phys14NonTrig::ElectronMVAEstimatorRun2Phys14NonTrig(const edm::ParameterSet& conf): - AnyMVAEstimatorRun2Base(conf){ + AnyMVAEstimatorRun2Base(conf) { + _tag = conf.getParameter("mvaTag"); + const std::vector weightFileNames = conf.getParameter >("weightFileNames"); @@ -17,7 +19,7 @@ ElectronMVAEstimatorRun2Phys14NonTrig::ElectronMVAEstimatorRun2Phys14NonTrig(con throw cms::Exception("MVA config failure: ") << "wrong number of weightfiles" << std::endl; - _tmvaReaders.clear(); + _gbrForests.clear(); _MethodName = "BDTG method"; // Create a TMVA reader object for each category for(int i=0; i ( createSingleReader(i, weightFile ) ) ); + _gbrForests.push_back( createSingleReader(i, weightFile ) ); } @@ -34,50 +36,50 @@ ElectronMVAEstimatorRun2Phys14NonTrig::ElectronMVAEstimatorRun2Phys14NonTrig(con ElectronMVAEstimatorRun2Phys14NonTrig:: ~ElectronMVAEstimatorRun2Phys14NonTrig(){ - - _tmvaReaders.clear(); } float ElectronMVAEstimatorRun2Phys14NonTrig:: -mvaValue( const edm::Ptr& particle){ +mvaValue( const edm::Ptr& particle, const edm::Event& evt) const { - int iCategory = findCategory( particle ); - fillMVAVariables( particle ); - constrainMVAVariables(); - float result = _tmvaReaders.at(iCategory)->EvaluateMVA(_MethodName); + const int iCategory = findCategory( particle ); + const std::vector vars = std::move( fillMVAVariables( particle, evt ) ); + const float result = _gbrForests.at(iCategory)->GetClassifier(vars.data()); - bool debug = false; + constexpr bool debug = false; if(debug) { std::cout << " *** Inside the class _MethodName " << _MethodName << std::endl; std::cout << " bin " << iCategory - << " fbrem " << _allMVAVars.fbrem - << " kfchi2 " << _allMVAVars.kfchi2 - << " mykfhits " << _allMVAVars.kfhits - << " gsfchi2 " << _allMVAVars.gsfchi2 - << " deta " << _allMVAVars.deta - << " dphi " << _allMVAVars.dphi - << " detacalo " << _allMVAVars.detacalo - << " see " << _allMVAVars.see - << " spp " << _allMVAVars.spp - << " etawidth " << _allMVAVars.etawidth - << " phiwidth " << _allMVAVars.phiwidth - << " OneMinusE1x5E5x5 " << _allMVAVars.OneMinusE1x5E5x5 - << " R9 " << _allMVAVars.R9 - << " HoE " << _allMVAVars.HoE - << " EoP " << _allMVAVars.EoP - << " IoEmIoP " << _allMVAVars.IoEmIoP - << " eleEoPout " << _allMVAVars.eleEoPout + << " fbrem " << vars[11] //_allMVAVars.fbrem + << " kfchi2 " << vars[9] //_allMVAVars.kfchi2 + << " mykfhits " << 
vars[0] //_allMVAVars.kfhits + << " gsfchi2 " << vars[10] //_allMVAVars.gsfchi2 + << " deta " << vars[15] //_allMVAVars.deta + << " dphi " << vars[16] //_allMVAVars.dphi + << " detacalo " << vars[17] //_allMVAVars.detacalo + << " see " << vars[1] //_allMVAVars.see + << " spp " << vars[2] //_allMVAVars.spp + << " etawidth " << vars[5] //_allMVAVars.etawidth + << " phiwidth " << vars[6] // _allMVAVars.phiwidth + << " OneMinusE1x5E5x5 " << vars[3] //_allMVAVars.OneMinusE1x5E5x5 + << " R9 " << vars[4] //_allMVAVars.R9 + << " HoE " << vars[7] //_allMVAVars.HoE + << " EoP " << vars[12] //_allMVAVars.EoP + << " IoEmIoP " << vars[14] //_allMVAVars.IoEmIoP + << " eleEoPout " << vars[13] // _allMVAVars.eleEoPout //<< " d0 " << _allMVAVars.d0 // << " ip3d " << _allMVAVars.ip3d - << " eta " << _allMVAVars.SCeta - << " pt " << _allMVAVars.pt << std::endl; + << " eta " << vars[21] //_allMVAVars.SCeta + << " isBarrel " << vars[19] //_allMVAVars.isBarrel + << " isEndcap " << vars[20] //_allMVAVars.isEndcap + << " pt " << vars[18] //_allMVAVars.pt + << std::endl; std::cout << " ### MVA " << result << std::endl; } return result; } -int ElectronMVAEstimatorRun2Phys14NonTrig::findCategory( const edm::Ptr& particle){ +int ElectronMVAEstimatorRun2Phys14NonTrig::findCategory(const edm::Ptr& particle) const { // Try to cast the particle into a reco particle. // This should work for both reco and pat. @@ -87,16 +89,16 @@ int ElectronMVAEstimatorRun2Phys14NonTrig::findCategory( const edm::Ptrpt(); - float eta = eleRecoPtr->superCluster()->eta(); + const float pt = eleRecoPtr->pt(); + const float eta = eleRecoPtr->superCluster()->eta(); // // Determine the category // int iCategory = UNDEFINED; - const float ptSplit = 10; // we have above and below 10 GeV categories - const float ebSplit = 0.800;// barrel is split into two regions - const float ebeeSplit = 1.479; // division between barrel and endcap + constexpr float ptSplit = 10; // we have above and below 10 GeV categories + constexpr float ebSplit = 0.800;// barrel is split into two regions + constexpr float ebeeSplit = 1.479; // division between barrel and endcap if (pt < ptSplit && std::abs(eta) < ebSplit) iCategory = CAT_EB1_PT5to10; @@ -120,7 +122,7 @@ int ElectronMVAEstimatorRun2Phys14NonTrig::findCategory( const edm::Ptr +ElectronMVAEstimatorRun2Phys14NonTrig:: +createSingleReader(const int iCategory, const edm::FileInPath &weightFile) { // // Create the reader // - TMVA::Reader *tmpTMVAReader = new TMVA::Reader( "!Color:Silent:Error" ); + TMVA::Reader tmpTMVAReader( "!Color:Silent:Error" ); // // Configure all variables and spectators. Note: the order and names // must match what is found in the xml weights file! 
// - tmpTMVAReader->AddVariable("ele_kfhits", &_allMVAVars.kfhits); + tmpTMVAReader.AddVariable("ele_kfhits", &_allMVAVars.kfhits); // Pure ECAL -> shower shapes - tmpTMVAReader->AddVariable("ele_oldsigmaietaieta", &_allMVAVars.see); - tmpTMVAReader->AddVariable("ele_oldsigmaiphiiphi", &_allMVAVars.spp); - tmpTMVAReader->AddVariable("ele_oldcircularity", &_allMVAVars.OneMinusE1x5E5x5); - tmpTMVAReader->AddVariable("ele_oldr9", &_allMVAVars.R9); - tmpTMVAReader->AddVariable("ele_scletawidth", &_allMVAVars.etawidth); - tmpTMVAReader->AddVariable("ele_sclphiwidth", &_allMVAVars.phiwidth); - tmpTMVAReader->AddVariable("ele_he", &_allMVAVars.HoE); + tmpTMVAReader.AddVariable("ele_oldsigmaietaieta", &_allMVAVars.see); + tmpTMVAReader.AddVariable("ele_oldsigmaiphiiphi", &_allMVAVars.spp); + tmpTMVAReader.AddVariable("ele_oldcircularity", &_allMVAVars.OneMinusE1x5E5x5); + tmpTMVAReader.AddVariable("ele_oldr9", &_allMVAVars.R9); + tmpTMVAReader.AddVariable("ele_scletawidth", &_allMVAVars.etawidth); + tmpTMVAReader.AddVariable("ele_sclphiwidth", &_allMVAVars.phiwidth); + tmpTMVAReader.AddVariable("ele_he", &_allMVAVars.HoE); // Endcap only variables if( isEndcapCategory(iCategory) ) - tmpTMVAReader->AddVariable("ele_psEoverEraw", &_allMVAVars.PreShowerOverRaw); + tmpTMVAReader.AddVariable("ele_psEoverEraw", &_allMVAVars.PreShowerOverRaw); //Pure tracking variables - tmpTMVAReader->AddVariable("ele_kfchi2", &_allMVAVars.kfchi2); - tmpTMVAReader->AddVariable("ele_chi2_hits", &_allMVAVars.gsfchi2); + tmpTMVAReader.AddVariable("ele_kfchi2", &_allMVAVars.kfchi2); + tmpTMVAReader.AddVariable("ele_chi2_hits", &_allMVAVars.gsfchi2); // Energy matching - tmpTMVAReader->AddVariable("ele_fbrem", &_allMVAVars.fbrem); - tmpTMVAReader->AddVariable("ele_ep", &_allMVAVars.EoP); - tmpTMVAReader->AddVariable("ele_eelepout", &_allMVAVars.eleEoPout); - tmpTMVAReader->AddVariable("ele_IoEmIop", &_allMVAVars.IoEmIoP); + tmpTMVAReader.AddVariable("ele_fbrem", &_allMVAVars.fbrem); + tmpTMVAReader.AddVariable("ele_ep", &_allMVAVars.EoP); + tmpTMVAReader.AddVariable("ele_eelepout", &_allMVAVars.eleEoPout); + tmpTMVAReader.AddVariable("ele_IoEmIop", &_allMVAVars.IoEmIoP); // Geometrical matchings - tmpTMVAReader->AddVariable("ele_deltaetain", &_allMVAVars.deta); - tmpTMVAReader->AddVariable("ele_deltaphiin", &_allMVAVars.dphi); - tmpTMVAReader->AddVariable("ele_deltaetaseed", &_allMVAVars.detacalo); + tmpTMVAReader.AddVariable("ele_deltaetain", &_allMVAVars.deta); + tmpTMVAReader.AddVariable("ele_deltaphiin", &_allMVAVars.dphi); + tmpTMVAReader.AddVariable("ele_deltaetaseed", &_allMVAVars.detacalo); // Spectator variables - tmpTMVAReader->AddSpectator("ele_pT", &_allMVAVars.pt); - tmpTMVAReader->AddSpectator("ele_isbarrel", &_allMVAVars.isBarrel); - tmpTMVAReader->AddSpectator("ele_isendcap", &_allMVAVars.isEndcap); - tmpTMVAReader->AddSpectator("scl_eta", &_allMVAVars.SCeta); + tmpTMVAReader.AddSpectator("ele_pT", &_allMVAVars.pt); + tmpTMVAReader.AddSpectator("ele_isbarrel", &_allMVAVars.isBarrel); + tmpTMVAReader.AddSpectator("ele_isendcap", &_allMVAVars.isEndcap); + tmpTMVAReader.AddSpectator("scl_eta", &_allMVAVars.SCeta); // // Book the method and set up the weights file // - tmpTMVAReader->BookMVA(_MethodName , weightFile.fullPath() ); - - return tmpTMVAReader; + std::unique_ptr temp( tmpTMVAReader.BookMVA(_MethodName , weightFile.fullPath() ) ); + + return std::unique_ptr ( new GBRForest( dynamic_cast( tmpTMVAReader.FindMVA(_MethodName) ) ) ); } // A function that should work on both pat and reco objects -void 
ElectronMVAEstimatorRun2Phys14NonTrig::fillMVAVariables(const edm::Ptr& particle){ +std::vector +ElectronMVAEstimatorRun2Phys14NonTrig::fillMVAVariables(const edm::Ptr& particle, + const edm::Event& ) const { // Try to cast the particle into a reco particle. // This should work for both reco and pat. @@ -197,6 +202,9 @@ void ElectronMVAEstimatorRun2Phys14NonTrig::fillMVAVariables(const edm::PtrsuperCluster(); @@ -213,85 +221,131 @@ void ElectronMVAEstimatorRun2Phys14NonTrig::fillMVAVariables(const edm::PtrclosestCtfTrackRef(); validKF = (myTrackRef.isAvailable() && (myTrackRef.isNonnull()) ); - _allMVAVars.kfhits = (validKF) ? myTrackRef->hitPattern().trackerLayersWithMeasurement() : -1. ; + allMVAVars.kfhits = (validKF) ? myTrackRef->hitPattern().trackerLayersWithMeasurement() : -1. ; // Pure ECAL -> shower shapes - _allMVAVars.see = eleRecoPtr->full5x5_sigmaIetaIeta(); - _allMVAVars.spp = eleRecoPtr->full5x5_sigmaIphiIphi(); - _allMVAVars.OneMinusE1x5E5x5 = 1. - eleRecoPtr->full5x5_e1x5() / eleRecoPtr->full5x5_e5x5(); - _allMVAVars.R9 = eleRecoPtr->full5x5_r9(); - _allMVAVars.etawidth = superCluster->etaWidth(); - _allMVAVars.phiwidth = superCluster->phiWidth(); - _allMVAVars.HoE = eleRecoPtr->hadronicOverEm(); + allMVAVars.see = eleRecoPtr->full5x5_sigmaIetaIeta(); + allMVAVars.spp = eleRecoPtr->full5x5_sigmaIphiIphi(); + allMVAVars.OneMinusE1x5E5x5 = 1. - eleRecoPtr->full5x5_e1x5() / eleRecoPtr->full5x5_e5x5(); + allMVAVars.R9 = eleRecoPtr->full5x5_r9(); + allMVAVars.etawidth = superCluster->etaWidth(); + allMVAVars.phiwidth = superCluster->phiWidth(); + allMVAVars.HoE = eleRecoPtr->hadronicOverEm(); // Endcap only variables - _allMVAVars.PreShowerOverRaw = superCluster->preshowerEnergy() / superCluster->rawEnergy(); + allMVAVars.PreShowerOverRaw = superCluster->preshowerEnergy() / superCluster->rawEnergy(); //Pure tracking variables - _allMVAVars.kfchi2 = (validKF) ? myTrackRef->normalizedChi2() : 0; - _allMVAVars.gsfchi2 = eleRecoPtr->gsfTrack()->normalizedChi2(); + allMVAVars.kfchi2 = (validKF) ? 
myTrackRef->normalizedChi2() : 0; + allMVAVars.gsfchi2 = eleRecoPtr->gsfTrack()->normalizedChi2(); // Energy matching - _allMVAVars.fbrem = eleRecoPtr->fbrem(); - _allMVAVars.EoP = eleRecoPtr->eSuperClusterOverP(); - _allMVAVars.eleEoPout = eleRecoPtr->eEleClusterOverPout(); - _allMVAVars.IoEmIoP = (1.0/eleRecoPtr->ecalEnergy()) - (1.0 / eleRecoPtr->p()); + allMVAVars.fbrem = eleRecoPtr->fbrem(); + allMVAVars.EoP = eleRecoPtr->eSuperClusterOverP(); + allMVAVars.eleEoPout = eleRecoPtr->eEleClusterOverPout(); + allMVAVars.IoEmIoP = (1.0/eleRecoPtr->ecalEnergy()) - (1.0 / eleRecoPtr->p()); // Geometrical matchings - _allMVAVars.deta = eleRecoPtr->deltaEtaSuperClusterTrackAtVtx(); - _allMVAVars.dphi = eleRecoPtr->deltaPhiSuperClusterTrackAtVtx(); - _allMVAVars.detacalo = eleRecoPtr->deltaEtaSeedClusterTrackAtCalo(); + allMVAVars.deta = eleRecoPtr->deltaEtaSuperClusterTrackAtVtx(); + allMVAVars.dphi = eleRecoPtr->deltaPhiSuperClusterTrackAtVtx(); + allMVAVars.detacalo = eleRecoPtr->deltaEtaSeedClusterTrackAtCalo(); // Spectator variables - _allMVAVars.pt = eleRecoPtr->pt(); - float scEta = superCluster->eta(); - _allMVAVars.isBarrel = ( std::abs(scEta) < 1.479 ); - _allMVAVars.isEndcap = ( std::abs(scEta) >= 1.479); - _allMVAVars.SCeta = scEta; - + allMVAVars.pt = eleRecoPtr->pt(); + const float scEta = superCluster->eta(); + constexpr float ebeeSplit = 1.479; + allMVAVars.isBarrel = ( std::abs(scEta) < ebeeSplit ); + allMVAVars.isEndcap = ( std::abs(scEta) >= ebeeSplit ); + allMVAVars.SCeta = scEta; + + constrainMVAVariables(allMVAVars); + + std::vector vars; + + if( isEndcapCategory( findCategory(particle) ) ) { + vars = std::move( packMVAVariables( allMVAVars.kfhits, + allMVAVars.see, + allMVAVars.spp, + allMVAVars.OneMinusE1x5E5x5, + allMVAVars.R9, + allMVAVars.etawidth, + allMVAVars.phiwidth, + allMVAVars.HoE, + allMVAVars.PreShowerOverRaw, + allMVAVars.kfchi2, + allMVAVars.gsfchi2, + allMVAVars.fbrem, + allMVAVars.EoP, + allMVAVars.eleEoPout, + allMVAVars.IoEmIoP, + allMVAVars.deta, + allMVAVars.dphi, + allMVAVars.detacalo, + allMVAVars.pt, + allMVAVars.isBarrel, + allMVAVars.isEndcap, + allMVAVars.SCeta ) + ); + } else { + vars = std::move( packMVAVariables( allMVAVars.kfhits, + allMVAVars.see, + allMVAVars.spp, + allMVAVars.OneMinusE1x5E5x5, + allMVAVars.R9, + allMVAVars.etawidth, + allMVAVars.phiwidth, + allMVAVars.HoE, + allMVAVars.kfchi2, + allMVAVars.gsfchi2, + allMVAVars.fbrem, + allMVAVars.EoP, + allMVAVars.eleEoPout, + allMVAVars.IoEmIoP, + allMVAVars.deta, + allMVAVars.dphi, + allMVAVars.detacalo, + allMVAVars.pt, + allMVAVars.isBarrel, + allMVAVars.isEndcap, + allMVAVars.SCeta ) + ); + } + return vars; } -void ElectronMVAEstimatorRun2Phys14NonTrig::constrainMVAVariables(){ +void ElectronMVAEstimatorRun2Phys14NonTrig::constrainMVAVariables(AllVariables& vars) const { // Check that variables do not have crazy values - if(_allMVAVars.fbrem < -1.) - _allMVAVars.fbrem = -1.; + if(vars.fbrem < -1.) + vars.fbrem = -1.; - _allMVAVars.deta = fabs(_allMVAVars.deta); - if(_allMVAVars.deta > 0.06) - _allMVAVars.deta = 0.06; + vars.deta = std::abs(vars.deta); + if(vars.deta > 0.06) + vars.deta = 0.06; + vars.dphi = std::abs(vars.dphi); + if(vars.dphi > 0.6) + vars.dphi = 0.6; - _allMVAVars.dphi = fabs(_allMVAVars.dphi); - if(_allMVAVars.dphi > 0.6) - _allMVAVars.dphi = 0.6; - - - if(_allMVAVars.EoP > 20.) - _allMVAVars.EoP = 20.; + if(vars.EoP > 20.) + vars.EoP = 20.; - if(_allMVAVars.eleEoPout > 20.) - _allMVAVars.eleEoPout = 20.; + if(vars.eleEoPout > 20.) 
+ vars.eleEoPout = 20.; + + vars.detacalo = std::abs(vars.detacalo); + if(vars.detacalo > 0.2) + vars.detacalo = 0.2; + if(vars.OneMinusE1x5E5x5 < -1.) + vars.OneMinusE1x5E5x5 = -1; - _allMVAVars.detacalo = fabs(_allMVAVars.detacalo); - if(_allMVAVars.detacalo > 0.2) - _allMVAVars.detacalo = 0.2; + if(vars.OneMinusE1x5E5x5 > 2.) + vars.OneMinusE1x5E5x5 = 2.; + + if(vars.R9 > 5) + vars.R9 = 5; - if(_allMVAVars.OneMinusE1x5E5x5 < -1.) - _allMVAVars.OneMinusE1x5E5x5 = -1; - - if(_allMVAVars.OneMinusE1x5E5x5 > 2.) - _allMVAVars.OneMinusE1x5E5x5 = 2.; - - - - if(_allMVAVars.R9 > 5) - _allMVAVars.R9 = 5; - - if(_allMVAVars.gsfchi2 > 200.) - _allMVAVars.gsfchi2 = 200; - - - if(_allMVAVars.kfchi2 > 10.) - _allMVAVars.kfchi2 = 10.; - - + if(vars.gsfchi2 > 200.) + vars.gsfchi2 = 200; + + if(vars.kfchi2 > 10.) + vars.kfchi2 = 10.; } diff --git a/RecoEgamma/ElectronIdentification/plugins/ElectronMVAEstimatorRun2Spring15NonTrig.cc b/RecoEgamma/ElectronIdentification/plugins/ElectronMVAEstimatorRun2Spring15NonTrig.cc new file mode 100644 index 0000000000000..1129f64b52885 --- /dev/null +++ b/RecoEgamma/ElectronIdentification/plugins/ElectronMVAEstimatorRun2Spring15NonTrig.cc @@ -0,0 +1,465 @@ +#include "RecoEgamma/ElectronIdentification/interface/ElectronMVAEstimatorRun2Spring15NonTrig.h" + +#include "DataFormats/TrackReco/interface/Track.h" +#include "DataFormats/GsfTrackReco/interface/GsfTrack.h" + +#include "DataFormats/PatCandidates/interface/Electron.h" + +#include "FWCore/ParameterSet/interface/FileInPath.h" + +#include "TMath.h" +#include "TMVA/MethodBDT.h" + +ElectronMVAEstimatorRun2Spring15NonTrig::ElectronMVAEstimatorRun2Spring15NonTrig(const edm::ParameterSet& conf): + AnyMVAEstimatorRun2Base(conf), + _tag(conf.getParameter("mvaTag")), + _MethodName("BDTG method"), + _beamSpotLabel(conf.getParameter("beamSpot")), + _conversionsLabelAOD(conf.getParameter("conversionsAOD")), + _conversionsLabelMiniAOD(conf.getParameter("conversionsMiniAOD")) { + + const std::vector weightFileNames + = conf.getParameter >("weightFileNames"); + + if( (int)(weightFileNames.size()) != nCategories ) + throw cms::Exception("MVA config failure: ") + << "wrong number of weightfiles" << std::endl; + + _gbrForests.clear(); + // Create a TMVA reader object for each category + for(int i=0; i(_beamSpotLabel); + + // Conversions collection (different names in AOD and miniAOD) + cc.mayConsume(_conversionsLabelAOD); + cc.mayConsume(_conversionsLabelMiniAOD); + + +} + +float ElectronMVAEstimatorRun2Spring15NonTrig:: +mvaValue( const edm::Ptr& particle, const edm::Event& iEvent) const { + + const int iCategory = findCategory( particle ); + const std::vector vars = std::move( fillMVAVariables( particle, iEvent ) ); + const float result = _gbrForests.at(iCategory)->GetClassifier(vars.data()); + + const bool debug = false; + if(debug) { + std::cout << " *** Inside the class _MethodName " << _MethodName << std::endl; + std::cout << " bin " << iCategory + << " fbrem " << vars[11] + << " kfchi2 " << vars[9] + << " mykfhits " << vars[8] + << " gsfchi2 " << vars[10] + << " deta " << vars[18] + << " dphi " << vars[19] + << " detacalo " << vars[20] + << " see " << vars[0] + << " spp " << vars[1] + << " etawidth " << vars[4] + << " phiwidth " << vars[5] + << " OneMinusE1x5E5x5 " << vars[2] + << " R9 " << vars[3] + << " HoE " << vars[6] + << " EoP " << vars[15] + << " IoEmIoP " << vars[17] + << " eleEoPout " << vars[16] + << " eta " << vars[24] + << " pt " << vars[21] << std::endl; + std::cout << " ### MVA " << result << std::endl; + } + + 
return result; +} + +int ElectronMVAEstimatorRun2Spring15NonTrig::findCategory( const edm::Ptr& particle) const { + + // Try to cast the particle into a reco particle. + // This should work for both reco and pat. + const edm::Ptr eleRecoPtr = ( edm::Ptr )particle; + if( eleRecoPtr.get() == NULL ) + throw cms::Exception("MVA failure: ") + << " given particle is expected to be reco::GsfElectron or pat::Electron," << std::endl + << " but appears to be neither" << std::endl; + + float pt = eleRecoPtr->pt(); + float eta = eleRecoPtr->superCluster()->eta(); + + // + // Determine the category + // + int iCategory = UNDEFINED; + const float ptSplit = 10; // we have above and below 10 GeV categories + const float ebSplit = 0.800;// barrel is split into two regions + const float ebeeSplit = 1.479; // division between barrel and endcap + + if (pt < ptSplit && std::abs(eta) < ebSplit) + iCategory = CAT_EB1_PT5to10; + + if (pt < ptSplit && std::abs(eta) >= ebSplit && std::abs(eta) < ebeeSplit) + iCategory = CAT_EB2_PT5to10; + + if (pt < ptSplit && std::abs(eta) >= ebeeSplit) + iCategory = CAT_EE_PT5to10; + + if (pt >= ptSplit && std::abs(eta) < ebSplit) + iCategory = CAT_EB1_PT10plus; + + if (pt >= ptSplit && std::abs(eta) >= ebSplit && std::abs(eta) < ebeeSplit) + iCategory = CAT_EB2_PT10plus; + + if (pt >= ptSplit && std::abs(eta) >= ebeeSplit) + iCategory = CAT_EE_PT10plus; + + return iCategory; +} + +bool ElectronMVAEstimatorRun2Spring15NonTrig:: +isEndcapCategory(int category ) const { + + bool isEndcap = false; + if( category == CAT_EE_PT5to10 || category == CAT_EE_PT10plus ) + isEndcap = true; + + return isEndcap; +} + + +std::unique_ptr ElectronMVAEstimatorRun2Spring15NonTrig:: +createSingleReader(const int iCategory, const edm::FileInPath &weightFile){ + + // + // Create the reader + // + TMVA::Reader tmpTMVAReader( "!Color:Silent:!Error" ); + + // + // Configure all variables and spectators. Note: the order and names + // must match what is found in the xml weights file! 
+ // + // Pure ECAL -> shower shapes + tmpTMVAReader.AddVariable("ele_oldsigmaietaieta", &_allMVAVars.see); + tmpTMVAReader.AddVariable("ele_oldsigmaiphiiphi", &_allMVAVars.spp); + tmpTMVAReader.AddVariable("ele_oldcircularity", &_allMVAVars.OneMinusE1x5E5x5); + tmpTMVAReader.AddVariable("ele_oldr9", &_allMVAVars.R9); + tmpTMVAReader.AddVariable("ele_scletawidth", &_allMVAVars.etawidth); + tmpTMVAReader.AddVariable("ele_sclphiwidth", &_allMVAVars.phiwidth); + tmpTMVAReader.AddVariable("ele_he", &_allMVAVars.HoE); + // Endcap only variables + if( isEndcapCategory(iCategory) ) + tmpTMVAReader.AddVariable("ele_psEoverEraw", &_allMVAVars.PreShowerOverRaw); + + //Pure tracking variables + tmpTMVAReader.AddVariable("ele_kfhits", &_allMVAVars.kfhits); + tmpTMVAReader.AddVariable("ele_kfchi2", &_allMVAVars.kfchi2); + tmpTMVAReader.AddVariable("ele_gsfchi2", &_allMVAVars.gsfchi2); + + // Energy matching + tmpTMVAReader.AddVariable("ele_fbrem", &_allMVAVars.fbrem); + + tmpTMVAReader.AddVariable("ele_gsfhits", &_allMVAVars.gsfhits); + tmpTMVAReader.AddVariable("ele_expected_inner_hits", &_allMVAVars.expectedMissingInnerHits); + tmpTMVAReader.AddVariable("ele_conversionVertexFitProbability", &_allMVAVars.convVtxFitProbability); + + tmpTMVAReader.AddVariable("ele_ep", &_allMVAVars.EoP); + tmpTMVAReader.AddVariable("ele_eelepout", &_allMVAVars.eleEoPout); + tmpTMVAReader.AddVariable("ele_IoEmIop", &_allMVAVars.IoEmIoP); + + // Geometrical matchings + tmpTMVAReader.AddVariable("ele_deltaetain", &_allMVAVars.deta); + tmpTMVAReader.AddVariable("ele_deltaphiin", &_allMVAVars.dphi); + tmpTMVAReader.AddVariable("ele_deltaetaseed", &_allMVAVars.detacalo); + + // Spectator variables + tmpTMVAReader.AddSpectator("ele_pT", &_allMVAVars.pt); + tmpTMVAReader.AddSpectator("ele_isbarrel", &_allMVAVars.isBarrel); + tmpTMVAReader.AddSpectator("ele_isendcap", &_allMVAVars.isEndcap); + tmpTMVAReader.AddSpectator("scl_eta", &_allMVAVars.SCeta); + + tmpTMVAReader.AddSpectator("ele_eClass", &_allMVAVars.eClass); + tmpTMVAReader.AddSpectator("ele_pfRelIso", &_allMVAVars.pfRelIso); + tmpTMVAReader.AddSpectator("ele_expected_inner_hits", &_allMVAVars.expectedInnerHits); + tmpTMVAReader.AddSpectator("ele_vtxconv", &_allMVAVars.vtxconv); + tmpTMVAReader.AddSpectator("mc_event_weight", &_allMVAVars.mcEventWeight); + tmpTMVAReader.AddSpectator("mc_ele_CBmatching_category", &_allMVAVars.mcCBmatchingCategory); + + // + // Book the method and set up the weights file + // + std::unique_ptr temp( tmpTMVAReader.BookMVA(_MethodName , weightFile.fullPath() ) ); + + return std::unique_ptr ( new GBRForest( dynamic_cast( tmpTMVAReader.FindMVA(_MethodName) ) ) ); +} + +// A function that should work on both pat and reco objects +std::vector ElectronMVAEstimatorRun2Spring15NonTrig:: +fillMVAVariables(const edm::Ptr& particle, + const edm::Event& iEvent ) const { + + // + // Declare all value maps corresponding to the products we defined earlier + // + edm::Handle theBeamSpot; + edm::Handle conversions; + + // Get data needed for conversion rejection + iEvent.getByLabel(_beamSpotLabel, theBeamSpot); + + // Conversions in miniAOD and AOD have different names, + // but the same type, so we use the same handle with different tokens. + iEvent.getByLabel(_conversionsLabelAOD, conversions); + if( !conversions.isValid() ) + iEvent.getByLabel(_conversionsLabelMiniAOD, conversions); + + // Make sure everything is retrieved successfully + if(! 
(theBeamSpot.isValid() + && conversions.isValid() ) + ) + throw cms::Exception("MVA failure: ") + << "Failed to retrieve event content needed for this MVA" + << std::endl + << "Check python MVA configuration file." + << std::endl; + + // Try to cast the particle into a reco particle. + // This should work for both reco and pat. + const edm::Ptr eleRecoPtr = ( edm::Ptr )particle; + if( eleRecoPtr.get() == NULL ) + throw cms::Exception("MVA failure: ") + << " given particle is expected to be reco::GsfElectron or pat::Electron," << std::endl + << " but appears to be neither" << std::endl; + + // Both pat and reco particles have exactly the same accessors, so we use a reco ptr + // throughout the code, with a single exception as of this writing, handled separately below. + auto superCluster = eleRecoPtr->superCluster(); + + AllVariables allMVAVars; + + // Pure ECAL -> shower shapes + allMVAVars.see = eleRecoPtr->full5x5_sigmaIetaIeta(); + allMVAVars.spp = eleRecoPtr->full5x5_sigmaIphiIphi(); + allMVAVars.OneMinusE1x5E5x5 = 1. - eleRecoPtr->full5x5_e1x5() / eleRecoPtr->full5x5_e5x5(); + allMVAVars.R9 = eleRecoPtr->full5x5_r9(); + allMVAVars.etawidth = superCluster->etaWidth(); + allMVAVars.phiwidth = superCluster->phiWidth(); + allMVAVars.HoE = eleRecoPtr->hadronicOverEm(); + // Endcap only variables + allMVAVars.PreShowerOverRaw = superCluster->preshowerEnergy() / superCluster->rawEnergy(); + + // To get to CTF track information in pat::Electron, we have to have the pointer + // to pat::Electron, it is not accessible from the pointer to reco::GsfElectron. + // This behavior is reported and is expected to change in the future (post-7.4.5 some time). + bool validKF= false; + reco::TrackRef myTrackRef = eleRecoPtr->closestCtfTrackRef(); + const edm::Ptr elePatPtr(eleRecoPtr); + // Check if this is really a pat::Electron, and if yes, get the track ref from this new + // pointer instead + if( elePatPtr.get() != NULL ) + myTrackRef = elePatPtr->closestCtfTrackRef(); + validKF = (myTrackRef.isAvailable() && (myTrackRef.isNonnull()) ); + + //Pure tracking variables + allMVAVars.kfhits = (validKF) ? myTrackRef->hitPattern().trackerLayersWithMeasurement() : -1. ; + allMVAVars.kfchi2 = (validKF) ? 
myTrackRef->normalizedChi2() : 0; + allMVAVars.gsfchi2 = eleRecoPtr->gsfTrack()->normalizedChi2(); + + // Energy matching + allMVAVars.fbrem = eleRecoPtr->fbrem(); + + allMVAVars.gsfhits = eleRecoPtr->gsfTrack()->found(); + allMVAVars.expectedMissingInnerHits = eleRecoPtr->gsfTrack() + ->hitPattern().numberOfHits(reco::HitPattern::MISSING_INNER_HITS); + + reco::ConversionRef conv_ref = ConversionTools::matchedConversion(*eleRecoPtr, + conversions, + theBeamSpot->position()); + double vertexFitProbability = -1.; + if(!conv_ref.isNull()) { + const reco::Vertex &vtx = conv_ref.get()->conversionVertex(); if (vtx.isValid()) { + vertexFitProbability = TMath::Prob( vtx.chi2(), vtx.ndof()); + } + } + allMVAVars.convVtxFitProbability = vertexFitProbability; + + allMVAVars.EoP = eleRecoPtr->eSuperClusterOverP(); + allMVAVars.eleEoPout = eleRecoPtr->eEleClusterOverPout(); + allMVAVars.IoEmIoP = (1.0/eleRecoPtr->ecalEnergy()) - (1.0 / eleRecoPtr->p()); + + // Geometrical matchings + allMVAVars.deta = eleRecoPtr->deltaEtaSuperClusterTrackAtVtx(); + allMVAVars.dphi = eleRecoPtr->deltaPhiSuperClusterTrackAtVtx(); + allMVAVars.detacalo = eleRecoPtr->deltaEtaSeedClusterTrackAtCalo(); + + // Spectator variables + allMVAVars.pt = eleRecoPtr->pt(); + float scEta = superCluster->eta(); + constexpr float ebeeSplit = 1.479; + allMVAVars.isBarrel = ( std::abs(scEta) < ebeeSplit ); + allMVAVars.isEndcap = ( std::abs(scEta) >= ebeeSplit ); + allMVAVars.SCeta = scEta; + // The spectator variables below were examined for training, but + // are not necessary for evaluating the discriminator, so they are + // given dummy values (the spectator variables above are also unimportant). + // They are introduced only to match the definition of the discriminator + // in the weights file.
+ constexpr unsigned nines = 999; + allMVAVars.eClass = nines; + allMVAVars.pfRelIso = nines; + allMVAVars.expectedInnerHits = nines; + allMVAVars.vtxconv = nines; + allMVAVars.mcEventWeight = nines; + allMVAVars.mcCBmatchingCategory = nines; + + constrainMVAVariables(allMVAVars); + + std::vector vars; + + if( isEndcapCategory( findCategory( particle ) ) ) { + vars = std::move( packMVAVariables(allMVAVars.see, + allMVAVars.spp, + allMVAVars.OneMinusE1x5E5x5, + allMVAVars.R9, + allMVAVars.etawidth, + allMVAVars.phiwidth, + allMVAVars.HoE, + // Endcap only variables + allMVAVars.PreShowerOverRaw, + //Pure tracking variables + allMVAVars.kfhits, + allMVAVars.kfchi2, + allMVAVars.gsfchi2, + // Energy matching + allMVAVars.fbrem, + allMVAVars.gsfhits, + allMVAVars.expectedMissingInnerHits, + allMVAVars.convVtxFitProbability, + allMVAVars.EoP, + allMVAVars.eleEoPout, + allMVAVars.IoEmIoP, + // Geometrical matchings + allMVAVars.deta, + allMVAVars.dphi, + allMVAVars.detacalo, + // Spectator variables + allMVAVars.pt, + allMVAVars.isBarrel, + allMVAVars.isEndcap, + allMVAVars.SCeta, + allMVAVars.eClass, + allMVAVars.pfRelIso, + allMVAVars.expectedInnerHits, + allMVAVars.vtxconv, + allMVAVars.mcEventWeight, + allMVAVars.mcCBmatchingCategory) + ); + } else { + vars = std::move( packMVAVariables(allMVAVars.see, + allMVAVars.spp, + allMVAVars.OneMinusE1x5E5x5, + allMVAVars.R9, + allMVAVars.etawidth, + allMVAVars.phiwidth, + allMVAVars.HoE, + //Pure tracking variables + allMVAVars.kfhits, + allMVAVars.kfchi2, + allMVAVars.gsfchi2, + // Energy matching + allMVAVars.fbrem, + allMVAVars.gsfhits, + allMVAVars.expectedMissingInnerHits, + allMVAVars.convVtxFitProbability, + allMVAVars.EoP, + allMVAVars.eleEoPout, + allMVAVars.IoEmIoP, + // Geometrical matchings + allMVAVars.deta, + allMVAVars.dphi, + allMVAVars.detacalo, + // Spectator variables + allMVAVars.pt, + allMVAVars.isBarrel, + allMVAVars.isEndcap, + allMVAVars.SCeta, + allMVAVars.eClass, + allMVAVars.pfRelIso, + allMVAVars.expectedInnerHits, + allMVAVars.vtxconv, + allMVAVars.mcEventWeight, + allMVAVars.mcCBmatchingCategory) + ); + } + return vars; +} + +void ElectronMVAEstimatorRun2Spring15NonTrig::constrainMVAVariables(AllVariables& allMVAVars) const { + + // Check that variables do not have crazy values + + if(allMVAVars.fbrem < -1.) + allMVAVars.fbrem = -1.; + + allMVAVars.deta = fabs(allMVAVars.deta); + if(allMVAVars.deta > 0.06) + allMVAVars.deta = 0.06; + + + allMVAVars.dphi = fabs(allMVAVars.dphi); + if(allMVAVars.dphi > 0.6) + allMVAVars.dphi = 0.6; + + + if(allMVAVars.EoP > 20.) + allMVAVars.EoP = 20.; + + if(allMVAVars.eleEoPout > 20.) + allMVAVars.eleEoPout = 20.; + + + allMVAVars.detacalo = fabs(allMVAVars.detacalo); + if(allMVAVars.detacalo > 0.2) + allMVAVars.detacalo = 0.2; + + if(allMVAVars.OneMinusE1x5E5x5 < -1.) + allMVAVars.OneMinusE1x5E5x5 = -1; + + if(allMVAVars.OneMinusE1x5E5x5 > 2.) + allMVAVars.OneMinusE1x5E5x5 = 2.; + + + + if(allMVAVars.R9 > 5) + allMVAVars.R9 = 5; + + if(allMVAVars.gsfchi2 > 200.) + allMVAVars.gsfchi2 = 200; + + + if(allMVAVars.kfchi2 > 10.) 
+ allMVAVars.kfchi2 = 10.; + + +} + diff --git a/RecoEgamma/ElectronIdentification/plugins/ElectronRegressionValueMapProducer.cc b/RecoEgamma/ElectronIdentification/plugins/ElectronRegressionValueMapProducer.cc new file mode 100644 index 0000000000000..05190e7d92905 --- /dev/null +++ b/RecoEgamma/ElectronIdentification/plugins/ElectronRegressionValueMapProducer.cc @@ -0,0 +1,462 @@ +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" + +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" + +#include "FWCore/ParameterSet/interface/ParameterSet.h" + +#include "DataFormats/Common/interface/ValueMap.h" +#include "DataFormats/Common/interface/View.h" + +#include "DataFormats/EgammaCandidates/interface/GsfElectron.h" +#include "DataFormats/EgammaCandidates/interface/GsfElectronFwd.h" + +#include "DataFormats/PatCandidates/interface/Electron.h" +#include "RecoEcal/EgammaCoreTools/interface/EcalClusterLazyTools.h" +#include "RecoEgamma/EgammaTools/interface/EcalClusterLocal.h" + +#include "TVector2.h" + +#include +#include + +namespace { + constexpr char sigmaIEtaIPhi_[] = "sigmaIEtaIPhi"; + constexpr char eMax_[] = "eMax"; + constexpr char e2nd_[] = "e2nd"; + constexpr char eTop_[] = "eTop"; + constexpr char eBottom_[] = "eBottom"; + constexpr char eLeft_[] = "eLeft"; + constexpr char eRight_[] = "eRight"; + constexpr char clusterMaxDR_[] = "clusterMaxDR"; + constexpr char clusterMaxDRDPhi_[] = "clusterMaxDRDPhi"; + constexpr char clusterMaxDRDEta_[] = "clusterMaxDRDEta"; + constexpr char clusterMaxDRRawEnergy_[] = "clusterMaxDRRawEnergy"; + constexpr char clusterRawEnergy0_[] = "clusterRawEnergy0"; + constexpr char clusterRawEnergy1_[] = "clusterRawEnergy1"; + constexpr char clusterRawEnergy2_[] = "clusterRawEnergy2"; + constexpr char clusterDPhiToSeed0_[] = "clusterDPhiToSeed0"; + constexpr char clusterDPhiToSeed1_[] = "clusterDPhiToSeed1"; + constexpr char clusterDPhiToSeed2_[] = "clusterDPhiToSeed2"; + constexpr char clusterDEtaToSeed0_[] = "clusterDEtaToSeed0"; + constexpr char clusterDEtaToSeed1_[] = "clusterDEtaToSeed1"; + constexpr char clusterDEtaToSeed2_[] = "clusterDEtaToSeed2"; + constexpr char eleIPhi_[] = "iPhi"; + constexpr char eleIEta_[] = "iEta"; + constexpr char eleCryPhi_[] = "cryPhi"; + constexpr char eleCryEta_[] = "cryEta"; +} + +class ElectronRegressionValueMapProducer : public edm::stream::EDProducer<> { + + public: + + explicit ElectronRegressionValueMapProducer(const edm::ParameterSet&); + ~ElectronRegressionValueMapProducer(); + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + private: + + virtual void produce(edm::Event&, const edm::EventSetup&) override; + + void writeValueMap(edm::Event &iEvent, + const edm::Handle > & handle, + const std::vector & values, + const std::string & label) const ; + + void writeValueMap(edm::Event &iEvent, + const edm::Handle > & handle, + const std::vector & values, + const std::string & label) const ; + + std::unique_ptr lazyTools; + + // for AOD case + edm::EDGetTokenT ebReducedRecHitCollection_; + edm::EDGetTokenT eeReducedRecHitCollection_; + edm::EDGetTokenT esReducedRecHitCollection_; + edm::EDGetToken src_; + + // for miniAOD case + edm::EDGetTokenT ebReducedRecHitCollectionMiniAOD_; + edm::EDGetTokenT eeReducedRecHitCollectionMiniAOD_; + edm::EDGetTokenT esReducedRecHitCollectionMiniAOD_; + edm::EDGetToken srcMiniAOD_; + + const bool use_full5x5_; +}; + 
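The class declaration closed just above lists every per-electron regression input that the new producer writes out as an individual ValueMap, keyed by the labels defined in the anonymous namespace at the top of the file (sigmaIEtaIPhi, eMax, iPhi, and so on). A minimal configuration sketch of how a downstream module could pick up a few of these maps is given here; the producer label, cfi path, and map names are taken from this patch, while the consuming analyzer ("MyRegressionNtupleMaker") and its parameter names are purely illustrative.

import FWCore.ParameterSet.Config as cms

process = cms.Process("REGMAPS")
process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring())

# Producer introduced by this patch; its cfi configures both the AOD and miniAOD input
# collections, and the producer falls back to the miniAOD ones if the AOD ones are absent.
process.load("RecoEgamma.ElectronIdentification.ElectronRegressionValueMapProducer_cfi")

# Hypothetical consumer: any module can read a map via the "<module label>:<map label>" convention.
process.myRegressionNtupleMaker = cms.EDAnalyzer(
    "MyRegressionNtupleMaker",   # illustrative analyzer, not part of this pull request
    electrons        = cms.InputTag("slimmedElectrons"),
    eMaxMap          = cms.InputTag("electronRegressionValueMapProducer", "eMax"),
    sigmaIEtaIPhiMap = cms.InputTag("electronRegressionValueMapProducer", "sigmaIEtaIPhi"),
    iPhiMap          = cms.InputTag("electronRegressionValueMapProducer", "iPhi"),
)

process.p = cms.Path(process.electronRegressionValueMapProducer * process.myRegressionNtupleMaker)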
+ElectronRegressionValueMapProducer::ElectronRegressionValueMapProducer(const edm::ParameterSet& iConfig) : + use_full5x5_(iConfig.getParameter("useFull5x5")) { + + // + // Declare consummables, handle both AOD and miniAOD case + // + ebReducedRecHitCollection_ = mayConsume(iConfig.getParameter + ("ebReducedRecHitCollection")); + ebReducedRecHitCollectionMiniAOD_ = mayConsume(iConfig.getParameter + ("ebReducedRecHitCollectionMiniAOD")); + + eeReducedRecHitCollection_ = mayConsume(iConfig.getParameter + ("eeReducedRecHitCollection")); + eeReducedRecHitCollectionMiniAOD_ = mayConsume(iConfig.getParameter + ("eeReducedRecHitCollectionMiniAOD")); + + esReducedRecHitCollection_ = mayConsume(iConfig.getParameter + ("esReducedRecHitCollection")); + esReducedRecHitCollectionMiniAOD_ = mayConsume(iConfig.getParameter + ("esReducedRecHitCollectionMiniAOD")); + + src_ = mayConsume >(iConfig.getParameter("src")); + srcMiniAOD_ = mayConsume >(iConfig.getParameter("srcMiniAOD")); + + produces >(sigmaIEtaIPhi_); + produces >(eMax_); + produces >(e2nd_); + produces >(eTop_); + produces >(eBottom_); + produces >(eLeft_); + produces >(eRight_); + produces >(clusterMaxDR_); + produces >(clusterMaxDRDPhi_); + produces >(clusterMaxDRDEta_); + produces >(clusterMaxDRRawEnergy_); + produces >(clusterRawEnergy0_); + produces >(clusterRawEnergy1_); + produces >(clusterRawEnergy2_); + produces >(clusterDPhiToSeed0_); + produces >(clusterDPhiToSeed1_); + produces >(clusterDPhiToSeed2_); + produces >(clusterDEtaToSeed0_); + produces >(clusterDEtaToSeed1_); + produces >(clusterDEtaToSeed2_); + produces >(eleIPhi_); + produces >(eleIEta_); + produces >(eleCryPhi_); + produces >(eleCryEta_); +} + +ElectronRegressionValueMapProducer::~ElectronRegressionValueMapProducer() { +} + +template +inline void calculateValues(EcalClusterLazyToolsBase* tools_tocast, + const edm::Ptr& iEle, + const edm::EventSetup& iSetup, + std::vector& vsigmaIEtaIPhi, + std::vector& veMax, + std::vector& ve2nd, + std::vector& veTop, + std::vector& veBottom, + std::vector& veLeft, + std::vector& veRight, + std::vector& vclusterMaxDR, + std::vector& vclusterMaxDRDPhi, + std::vector& vclusterMaxDRDEta, + std::vector& vclusterMaxDRRawEnergy, + std::vector& vclusterRawEnergy0, + std::vector& vclusterRawEnergy1, + std::vector& vclusterRawEnergy2, + std::vector& vclusterDPhiToSeed0, + std::vector& vclusterDPhiToSeed1, + std::vector& vclusterDPhiToSeed2, + std::vector& vclusterDEtaToSeed0, + std::vector& vclusterDEtaToSeed1, + std::vector& vclusterDEtaToSeed2, + std::vector& veleIPhi, + std::vector& veleIEta, + std::vector& veleCryPhi, + std::vector& veleCryEta) { + LazyTools* tools = static_cast(tools_tocast); + + const auto& the_sc = iEle->superCluster(); + const auto& theseed = the_sc->seed(); + + std::vector vCov = tools->localCovariances( *theseed ); + + const float eMax = tools->eMax( *theseed ); + const float e2nd = tools->e2nd( *theseed ); + const float eTop = tools->eTop( *theseed ); + const float eLeft = tools->eLeft( *theseed ); + const float eRight = tools->eRight( *theseed ); + const float eBottom = tools->eBottom( *theseed ); + + float dummy; + int iPhi; + int iEta; + float cryPhi; + float cryEta; + EcalClusterLocal _ecalLocal; + if (iEle->isEB()) + _ecalLocal.localCoordsEB(*theseed, iSetup, cryEta, cryPhi, iEta, iPhi, dummy, dummy); + else + _ecalLocal.localCoordsEE(*theseed, iSetup, cryEta, cryPhi, iEta, iPhi, dummy, dummy); + + double see = (isnan(vCov[0]) ? 0. : sqrt(vCov[0])); + double spp = (isnan(vCov[2]) ? 0. 
: sqrt(vCov[2])); + double sep; + if (see*spp > 0) + sep = vCov[1] / (see * spp); + else if (vCov[1] > 0) + sep = 1.0; + else + sep = -1.0; + + vsigmaIEtaIPhi.push_back(sep); + veMax.push_back(eMax); + ve2nd.push_back(e2nd); + veTop.push_back(eTop); + veBottom.push_back(eBottom); + veLeft.push_back(eLeft); + veRight.push_back(eRight); + veleIPhi.push_back(iPhi); + veleIEta.push_back(iEta); + veleCryPhi.push_back(cryPhi); + veleCryEta.push_back(cryEta); + + // loop over all clusters that aren't the seed + auto clusend = the_sc->clustersEnd(); + int numberOfClusters = the_sc->clusters().size(); + + std::vector _clusterRawEnergy; + _clusterRawEnergy.resize(std::max(3, numberOfClusters), 0); + std::vector _clusterDEtaToSeed; + _clusterDEtaToSeed.resize(std::max(3, numberOfClusters), 0); + std::vector _clusterDPhiToSeed; + _clusterDPhiToSeed.resize(std::max(3, numberOfClusters), 0); + float _clusterMaxDR = 999.; + float _clusterMaxDRDPhi = 999.; + float _clusterMaxDRDEta = 999.; + float _clusterMaxDRRawEnergy = 0.; + + size_t iclus = 0; + float maxDR = 0; + edm::Ptr pclus; + for( auto clus = the_sc->clustersBegin(); clus != clusend; ++clus ) { + pclus = *clus; + + if(theseed == pclus ) + continue; + _clusterRawEnergy.push_back(pclus->energy()); + _clusterDPhiToSeed.push_back(reco::deltaPhi(pclus->phi(),theseed->phi())); + _clusterDEtaToSeed.push_back(pclus->eta() - theseed->eta()); + + // find cluster with max dR + if(reco::deltaR(*pclus, *theseed) > maxDR) { + maxDR = reco::deltaR(*pclus, *theseed); + _clusterMaxDR = maxDR; + _clusterMaxDRDPhi = _clusterDPhiToSeed[iclus]; + _clusterMaxDRDEta = _clusterDEtaToSeed[iclus]; + _clusterMaxDRRawEnergy = _clusterRawEnergy[iclus]; + } + + ++iclus; + } + + vclusterMaxDR.push_back(_clusterMaxDR); + vclusterMaxDRDPhi.push_back(_clusterMaxDRDPhi); + vclusterMaxDRDEta.push_back(_clusterMaxDRDEta); + vclusterMaxDRRawEnergy.push_back(_clusterMaxDRRawEnergy); + vclusterRawEnergy0.push_back(_clusterRawEnergy[0]); + vclusterRawEnergy1.push_back(_clusterRawEnergy[1]); + vclusterRawEnergy2.push_back(_clusterRawEnergy[2]); + vclusterDPhiToSeed0.push_back(_clusterDPhiToSeed[0]); + vclusterDPhiToSeed1.push_back(_clusterDPhiToSeed[1]); + vclusterDPhiToSeed2.push_back(_clusterDPhiToSeed[2]); + vclusterDEtaToSeed0.push_back(_clusterDEtaToSeed[0]); + vclusterDEtaToSeed1.push_back(_clusterDEtaToSeed[1]); + vclusterDEtaToSeed2.push_back(_clusterDEtaToSeed[2]); +} + +void ElectronRegressionValueMapProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { + + using namespace edm; + + edm::Handle > src; + + // Retrieve the collection of electrons from the event. + // If we fail to retrieve the collection with the standard AOD + // name, we next look for the one with the standard miniAOD name.
+ bool isAOD = true; + iEvent.getByToken(src_, src); + + if( !src.isValid() ) { + isAOD = false; + iEvent.getByToken(srcMiniAOD_,src); + } + + // configure lazy tools + edm::EDGetTokenT ebrh, eerh, esrh; + + if( isAOD ) { + ebrh = ebReducedRecHitCollection_; + eerh = eeReducedRecHitCollection_; + esrh = esReducedRecHitCollection_; + } else { + ebrh = ebReducedRecHitCollectionMiniAOD_; + eerh = eeReducedRecHitCollectionMiniAOD_; + esrh = esReducedRecHitCollectionMiniAOD_; + } + + if( use_full5x5_ ) { + lazyTools.reset( new noZS::EcalClusterLazyTools(iEvent, iSetup, + ebrh, eerh, esrh ) ); + } else { + lazyTools.reset( new EcalClusterLazyTools(iEvent, iSetup, + ebrh, eerh, esrh ) ); + } + + std::vector sigmaIEtaIPhi; + std::vector eMax; + std::vector e2nd; + std::vector eTop; + std::vector eBottom; + std::vector eLeft; + std::vector eRight; + std::vector clusterMaxDR; + std::vector clusterMaxDRDPhi; + std::vector clusterMaxDRDEta; + std::vector clusterMaxDRRawEnergy; + std::vector clusterRawEnergy0; + std::vector clusterRawEnergy1; + std::vector clusterRawEnergy2; + std::vector clusterDPhiToSeed0; + std::vector clusterDPhiToSeed1; + std::vector clusterDPhiToSeed2; + std::vector clusterDEtaToSeed0; + std::vector clusterDEtaToSeed1; + std::vector clusterDEtaToSeed2; + std::vector eleIPhi; + std::vector eleIEta; + std::vector eleCryPhi; + std::vector eleCryEta; + + // reco::GsfElectron::superCluster() is virtual so we can exploit polymorphism + for (size_t i = 0; i < src->size(); ++i){ + auto iEle = src->ptrAt(i); + + if( use_full5x5_ ) { + calculateValues(lazyTools.get(), + iEle, + iSetup, + sigmaIEtaIPhi, + eMax, + e2nd, + eTop, + eBottom, + eLeft, + eRight, + clusterMaxDR, + clusterMaxDRDPhi, + clusterMaxDRDEta, + clusterMaxDRRawEnergy, + clusterRawEnergy0, + clusterRawEnergy1, + clusterRawEnergy2, + clusterDPhiToSeed0, + clusterDPhiToSeed1, + clusterDPhiToSeed2, + clusterDEtaToSeed0, + clusterDEtaToSeed1, + clusterDEtaToSeed2, + eleIPhi, + eleIEta, + eleCryPhi, + eleCryEta); + } else { + calculateValues(lazyTools.get(), + iEle, + iSetup, + sigmaIEtaIPhi, + eMax, + e2nd, + eTop, + eBottom, + eLeft, + eRight, + clusterMaxDR, + clusterMaxDRDPhi, + clusterMaxDRDEta, + clusterMaxDRRawEnergy, + clusterRawEnergy0, + clusterRawEnergy1, + clusterRawEnergy2, + clusterDPhiToSeed0, + clusterDPhiToSeed1, + clusterDPhiToSeed2, + clusterDEtaToSeed0, + clusterDEtaToSeed1, + clusterDEtaToSeed2, + eleIPhi, + eleIEta, + eleCryPhi, + eleCryEta); + } + } + + writeValueMap(iEvent, src, sigmaIEtaIPhi, sigmaIEtaIPhi_); + writeValueMap(iEvent, src, eMax ,eMax_); + writeValueMap(iEvent, src, e2nd ,e2nd_); + writeValueMap(iEvent, src, eTop ,eTop_); + writeValueMap(iEvent, src, eBottom ,eBottom_); + writeValueMap(iEvent, src, eLeft ,eLeft_); + writeValueMap(iEvent, src, eRight ,eRight_); + writeValueMap(iEvent, src, clusterMaxDR, clusterMaxDR_); + writeValueMap(iEvent, src, clusterMaxDRDPhi, clusterMaxDRDPhi_); + writeValueMap(iEvent, src, clusterMaxDRDEta, clusterMaxDRDEta_); + writeValueMap(iEvent, src, clusterMaxDRRawEnergy,clusterMaxDRRawEnergy_); + writeValueMap(iEvent, src, clusterRawEnergy0, clusterRawEnergy0_); + writeValueMap(iEvent, src, clusterRawEnergy1, clusterRawEnergy1_); + writeValueMap(iEvent, src, clusterRawEnergy2, clusterRawEnergy2_); + writeValueMap(iEvent, src, clusterDPhiToSeed0, clusterDPhiToSeed0_); + writeValueMap(iEvent, src, clusterDPhiToSeed1, clusterDPhiToSeed1_); + writeValueMap(iEvent, src, clusterDPhiToSeed2, clusterDPhiToSeed2_); + writeValueMap(iEvent, src, clusterDEtaToSeed0, 
clusterDEtaToSeed0_); + writeValueMap(iEvent, src, clusterDEtaToSeed1, clusterDEtaToSeed1_); + writeValueMap(iEvent, src, clusterDEtaToSeed2, clusterDEtaToSeed2_); + writeValueMap(iEvent, src, eleIPhi ,eleIPhi_); + writeValueMap(iEvent, src, eleIEta ,eleIEta_); + writeValueMap(iEvent, src, eleCryPhi ,eleCryPhi_); + writeValueMap(iEvent, src, eleCryEta ,eleCryEta_); + lazyTools.reset(); +} + +void ElectronRegressionValueMapProducer::writeValueMap(edm::Event &iEvent, + const edm::Handle > & handle, + const std::vector & values, + const std::string & label) const +{ + using namespace edm; + using namespace std; + auto_ptr > valMap(new ValueMap()); + edm::ValueMap::Filler filler(*valMap); + filler.insert(handle, values.begin(), values.end()); + filler.fill(); + iEvent.put(valMap, label); +} + +void ElectronRegressionValueMapProducer::writeValueMap(edm::Event &iEvent, + const edm::Handle > & handle, + const std::vector & values, + const std::string & label) const +{ + using namespace edm; + using namespace std; + auto_ptr > valMap(new ValueMap()); + edm::ValueMap::Filler filler(*valMap); + filler.insert(handle, values.begin(), values.end()); + filler.fill(); + iEvent.put(valMap, label); +} + +void ElectronRegressionValueMapProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + //The following says we do not know what parameters are allowed so do no validation + // Please change this to state exactly what you do use, even if it is no parameters + edm::ParameterSetDescription desc; + desc.setUnknown(); + descriptions.addDefault(desc); +} + +DEFINE_FWK_MODULE(ElectronRegressionValueMapProducer); diff --git a/RecoEgamma/ElectronIdentification/python/ElectronMVAValueMapProducer_cfi.py b/RecoEgamma/ElectronIdentification/python/ElectronMVAValueMapProducer_cfi.py index 0d2b9ace07ee1..9023e9dd60616 100644 --- a/RecoEgamma/ElectronIdentification/python/ElectronMVAValueMapProducer_cfi.py +++ b/RecoEgamma/ElectronIdentification/python/ElectronMVAValueMapProducer_cfi.py @@ -6,6 +6,9 @@ from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_PHYS14_PU20bx25_nonTrig_V1_cff import * mvaConfigsForEleProducer.append( mvaEleID_PHYS14_PU20bx25_nonTrig_V1_producer_config ) +from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Spring15_25ns_nonTrig_V1_cff import * +mvaConfigsForEleProducer.append( mvaEleID_Spring15_25ns_nonTrig_V1_producer_config ) + electronMVAValueMapProducer = cms.EDProducer('ElectronMVAValueMapProducer', # The module automatically detects AOD vs miniAOD, so we configure both # diff --git a/RecoEgamma/ElectronIdentification/python/ElectronRegressionValueMapProducer_cfi.py b/RecoEgamma/ElectronIdentification/python/ElectronRegressionValueMapProducer_cfi.py new file mode 100644 index 0000000000000..e72bfd917276b --- /dev/null +++ b/RecoEgamma/ElectronIdentification/python/ElectronRegressionValueMapProducer_cfi.py @@ -0,0 +1,21 @@ +import FWCore.ParameterSet.Config as cms + +electronRegressionValueMapProducer = cms.EDProducer('ElectronRegressionValueMapProducer', + #presently the electron regressions use the "zero-suppressed" shower shapes + useFull5x5 = cms.bool(False), + # The module automatically detects AOD vs miniAOD, so we configure both + # + # AOD case + # + ebReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsEB"), + eeReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsEE"), + esReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsES"), + src = cms.InputTag('gedGsfElectrons'), + # + # miniAOD case + # + 
ebReducedRecHitCollectionMiniAOD = cms.InputTag("reducedEgamma:reducedEBRecHits"), + eeReducedRecHitCollectionMiniAOD = cms.InputTag("reducedEgamma:reducedEERecHits"), + esReducedRecHitCollectionMiniAOD = cms.InputTag("reducedEgamma:reducedESRecHits"), + srcMiniAOD = cms.InputTag('slimmedElectrons',processName=cms.InputTag.skipCurrentProcess()), + ) diff --git a/RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_Spring15_25ns_V1_cff.py b/RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_Spring15_25ns_V1_cff.py new file mode 100644 index 0000000000000..594e156783b8b --- /dev/null +++ b/RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_Spring15_25ns_V1_cff.py @@ -0,0 +1,186 @@ +from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry + +# Common functions and classes for ID definition are imported here: +from RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_tools import * + +# +# This is the first round of Spring15 25ns cuts, optimized on Spring15 25ns samples. +# +# The ID cuts below are optimized IDs for Spring15 Scenario with 25ns bunch spacing +# The cut values are taken from the twiki: +# https://twiki.cern.ch/twiki/bin/view/CMS/CutBasedElectronIdentificationRun2 +# (where they may not stay, if a newer version of cuts becomes available for these +# conditions) +# See also the presentation explaining these working points (this will not change): +# https://indico.cern.ch/event/370507/contribution/1/attachments/1140657/1633761/Rami_eleCB_ID_25ns.pdf +# +# First, define cut values +# + +# Veto working point Barrel and Endcap +idName = "cutBasedElectronID-Spring15-25ns-V1-standalone-veto" +WP_Veto_EB = EleWorkingPoint_V2( + idName , # idName + 0.0152 , # dEtaInCut + 0.216 , # dPhiInCut + 0.0114 , # full5x5_sigmaIEtaIEtaCut + 0.181 , # hOverECut + 0.0564 , # dxyCut + 0.472 , # dzCut + 0.207 , # absEInverseMinusPInverseCut + 0.126 , # relCombIsolationWithEALowPtCut + 0.126 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 2 # missingHitsCut + ) + +WP_Veto_EE = EleWorkingPoint_V2( + idName , # idName + 0.0113 , # dEtaInCut + 0.237 , # dPhiInCut + 0.0352 , # full5x5_sigmaIEtaIEtaCut + 0.116 , # hOverECut + 0.222 , # dxyCut + 0.921 , # dzCut + 0.174 , # absEInverseMinusPInverseCut + 0.144 , # relCombIsolationWithEALowPtCut + 0.144 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 3 # missingHitsCut + ) + +# Loose working point Barrel and Endcap +idName = "cutBasedElectronID-Spring15-25ns-V1-standalone-loose" +WP_Loose_EB = EleWorkingPoint_V2( + idName , # idName + 0.0105 , # dEtaInCut + 0.115 , # dPhiInCut + 0.0103 , # full5x5_sigmaIEtaIEtaCut + 0.104 , # hOverECut + 0.0261 , # dxyCut + 0.41 , # dzCut + 0.102 , # absEInverseMinusPInverseCut + 0.0893 , # relCombIsolationWithEALowPtCut + 0.0893 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 2 # missingHitsCut + ) + +WP_Loose_EE = EleWorkingPoint_V2( + idName , # idName + 0.00814 , # dEtaInCut + 0.182 , # dPhiInCut + 0.0301 , # full5x5_sigmaIEtaIEtaCut + 0.0897 , # hOverECut + 0.118 , # dxyCut + 0.822 , # dzCut + 0.126 , # absEInverseMinusPInverseCut + 0.121 , # relCombIsolationWithEALowPtCut + 0.121 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 1 # missingHitsCut + ) + +# Medium working point Barrel and Endcap +idName = 
"cutBasedElectronID-Spring15-25ns-V1-standalone-medium" +WP_Medium_EB = EleWorkingPoint_V2( + idName , # idName + 0.0103 , # dEtaInCut + 0.0336 , # dPhiInCut + 0.0101 , # full5x5_sigmaIEtaIEtaCut + 0.0876 , # hOverECut + 0.0118 , # dxyCut + 0.373 , # dzCut + 0.0174 , # absEInverseMinusPInverseCut + 0.0766 , # relCombIsolationWithEALowPtCut + 0.0766 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 2 # missingHitsCut + ) + +WP_Medium_EE = EleWorkingPoint_V2( + idName , # idName + 0.00733 , # dEtaInCut + 0.114 , # dPhiInCut + 0.0283 , # full5x5_sigmaIEtaIEtaCut + 0.0678 , # hOverECut + 0.0739 , # dxyCut + 0.602 , # dzCut + 0.0898 , # absEInverseMinusPInverseCut + 0.0678 , # relCombIsolationWithEALowPtCut + 0.0678 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 1 # missingHitsCut + ) + +# Tight working point Barrel and Endcap +idName = "cutBasedElectronID-Spring15-25ns-V1-standalone-tight" +WP_Tight_EB = EleWorkingPoint_V2( + idName , # idName + 0.00926 , # dEtaInCut + 0.0336 , # dPhiInCut + 0.0101 , # full5x5_sigmaIEtaIEtaCut + 0.0597 , # hOverECut + 0.0111 , # dxyCut + 0.0466 , # dzCut + 0.012 , # absEInverseMinusPInverseCut + 0.0354 , # relCombIsolationWithEALowPtCut + 0.0354 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 2 # missingHitsCut + ) + +WP_Tight_EE = EleWorkingPoint_V2( + idName , # idName + 0.00724 , # dEtaInCut + 0.0918 , # dPhiInCut + 0.0279 , # full5x5_sigmaIEtaIEtaCut + 0.0615 , # hOverECut + 0.0351 , # dxyCut + 0.417 , # dzCut + 0.00999 , # absEInverseMinusPInverseCut + 0.0646 , # relCombIsolationWithEALowPtCut + 0.0646 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 1 # missingHitsCut + ) + +# Second, define what effective areas to use for pile-up correction +isoInputs = IsolationCutInputs_V2( + # phoIsolationEffAreas + "RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_25ns.txt" +) + + +# +# Set up VID configuration for all cuts and working points +# + +cutBasedElectronID_Spring15_25ns_V1_standalone_veto = configureVIDCutBasedEleID_V2(WP_Veto_EB, WP_Veto_EE, isoInputs) +cutBasedElectronID_Spring15_25ns_V1_standalone_loose = configureVIDCutBasedEleID_V2(WP_Loose_EB, WP_Loose_EE, isoInputs) +cutBasedElectronID_Spring15_25ns_V1_standalone_medium = configureVIDCutBasedEleID_V2(WP_Medium_EB, WP_Medium_EE, isoInputs) +cutBasedElectronID_Spring15_25ns_V1_standalone_tight = configureVIDCutBasedEleID_V2(WP_Tight_EB, WP_Tight_EE, isoInputs) + + +# The MD5 sum numbers below reflect the exact set of cut variables +# and values above. If anything changes, one has to +# 1) comment out the lines below about the registry, +# 2) run "calculateMD5 +# 3) update the MD5 sum strings below and uncomment the lines again. +# + +central_id_registry.register(cutBasedElectronID_Spring15_25ns_V1_standalone_veto.idName, + '202030579ee3eec90fdc2d236ba3de7e') +central_id_registry.register(cutBasedElectronID_Spring15_25ns_V1_standalone_loose.idName, + '4fab9e4d09a2c1a36cbbd2279deb3627') +central_id_registry.register(cutBasedElectronID_Spring15_25ns_V1_standalone_medium.idName, + 'aa291aba714c148fcba156544907c440') +central_id_registry.register(cutBasedElectronID_Spring15_25ns_V1_standalone_tight.idName, + '4e13b87c0573d3c8ebf91d446fa1d90f') + + +### for now until we have a database... 
+cutBasedElectronID_Spring15_25ns_V1_standalone_veto.isPOGApproved = cms.untracked.bool(True) +cutBasedElectronID_Spring15_25ns_V1_standalone_loose.isPOGApproved = cms.untracked.bool(True) +cutBasedElectronID_Spring15_25ns_V1_standalone_medium.isPOGApproved = cms.untracked.bool(True) +cutBasedElectronID_Spring15_25ns_V1_standalone_tight.isPOGApproved = cms.untracked.bool(True) diff --git a/RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_Spring15_50ns_V1_cff.py b/RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_Spring15_50ns_V1_cff.py new file mode 100644 index 0000000000000..0bc58a67052ee --- /dev/null +++ b/RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_Spring15_50ns_V1_cff.py @@ -0,0 +1,179 @@ +from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry + +# Common functions and classes for ID definition are imported here: +from RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_tools import * + +# +# This is the first round of Spring15 50ns cuts, optimized on Spring15 50ns samples. +# +# The ID cuts below are optimized IDs for Spring15 Scenario with 50ns bunch spacing +# The cut values are taken from the twiki: +# https://twiki.cern.ch/twiki/bin/view/CMS/CutBasedElectronIdentificationRun2 +# (where they may not stay, if a newer version of cuts becomes available for these +# conditions) +# See also the presentation explaining these working points (this will not change): +# https://indico.cern.ch/event/369239/contribution/6/attachments/1134836/1623383/Rami_eleCB_ID_50ns.pdf +# +# First, define cut values +# + +# Veto working point Barrel and Endcap +idName = "cutBasedElectronID-Spring15-50ns-V1-standalone-veto" +WP_Veto_EB = EleWorkingPoint_V2( + idName , # idName + 0.0126 , # dEtaInCut + 0.107 , # dPhiInCut + 0.012 , # full5x5_sigmaIEtaIEtaCut + 0.186 , # hOverECut + 0.0621 , # dxyCut + 0.613 , # dzCut + 0.239 , # absEInverseMinusPInverseCut + 0.161 , # relCombIsolationWithEALowPtCut + 0.161 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 2 # missingHitsCut + ) + +WP_Veto_EE = EleWorkingPoint_V2( + idName , # idName + 0.0109 , # dEtaInCut + 0.219 , # dPhiInCut + 0.0339 , # full5x5_sigmaIEtaIEtaCut + 0.0962 , # hOverECut + 0.279 , # dxyCut + 0.947 , # dzCut + 0.141 , # absEInverseMinusPInverseCut + 0.193 , # relCombIsolationWithEALowPtCut + 0.193 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 3 # missingHitsCut + ) + +# Loose working point Barrel and Endcap +idName = "cutBasedElectronID-Spring15-50ns-V1-standalone-loose" +WP_Loose_EB = EleWorkingPoint_V2( + idName , # idName + 0.0098 , # dEtaInCut + 0.0929 , # dPhiInCut + 0.0105 , # full5x5_sigmaIEtaIEtaCut + 0.0765 , # hOverECut + 0.0227 , # dxyCut + 0.379 , # dzCut + 0.184 , # absEInverseMinusPInverseCut + 0.118 , # relCombIsolationWithEALowPtCut + 0.118 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 2 # missingHitsCut + ) + +WP_Loose_EE = EleWorkingPoint_V2( + idName , # idName + 0.00950 , # dEtaInCut + 0.181 , # dPhiInCut + 0.0318 , # full5x5_sigmaIEtaIEtaCut + 0.0824 , # hOverECut + 0.242 , # dxyCut + 0.921 , # dzCut + 0.125 , # absEInverseMinusPInverseCut + 0.118 , # relCombIsolationWithEALowPtCut + 0.118 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 1 # missingHitsCut + ) + +# Medium working point Barrel and 
Endcap +idName = "cutBasedElectronID-Spring15-50ns-V1-standalone-medium" +WP_Medium_EB = EleWorkingPoint_V2( + idName , # idName + 0.00945 , # dEtaInCut + 0.0296 , # dPhiInCut + 0.0101 , # full5x5_sigmaIEtaIEtaCut + 0.0372 , # hOverECut + 0.0151 , # dxyCut + 0.238 , # dzCut + 0.118 , # absEInverseMinusPInverseCut + 0.0987 , # relCombIsolationWithEALowPtCut + 0.0987 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 2 # missingHitsCut + ) + +WP_Medium_EE = EleWorkingPoint_V2( + idName , # idName + 0.00773 , # dEtaInCut + 0.148 , # dPhiInCut + 0.0287 , # full5x5_sigmaIEtaIEtaCut + 0.0546 , # hOverECut + 0.0535 , # dxyCut + 0.572 , # dzCut + 0.104 , # absEInverseMinusPInverseCut + 0.0902 , # relCombIsolationWithEALowPtCut + 0.0902 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 1 # missingHitsCut + ) + +# Tight working point Barrel and Endcap +idName = "cutBasedElectronID-Spring15-50ns-V1-standalone-tight" +WP_Tight_EB = EleWorkingPoint_V2( + idName , # idName + 0.00950 , # dEtaInCut + 0.0291 , # dPhiInCut + 0.0101 , # full5x5_sigmaIEtaIEtaCut + 0.0372 , # hOverECut + 0.0144 , # dxyCut + 0.323 , # dzCut + 0.0174 , # absEInverseMinusPInverseCut + 0.0468 , # relCombIsolationWithEALowPtCut + 0.0468 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 2 # missingHitsCut + ) + +WP_Tight_EE = EleWorkingPoint_V2( + idName , # idName + 0.00762 , # dEtaInCut + 0.0439 , # dPhiInCut + 0.0287 , # full5x5_sigmaIEtaIEtaCut + 0.0544 , # hOverECut + 0.0377 , # dxyCut + 0.571 , # dzCut + 0.0100 , # absEInverseMinusPInverseCut + 0.0759 , # relCombIsolationWithEALowPtCut + 0.0759 , # relCombIsolationWithEAHighPtCut + # conversion veto cut needs no parameters, so not mentioned + 1 # missingHitsCut + ) + +# Second, define what effective areas to use for pile-up correction +isoInputs = IsolationCutInputs_V2( + # phoIsolationEffAreas + "RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_50ns.txt" +) + + +# +# Set up VID configuration for all cuts and working points +# + +cutBasedElectronID_Spring15_50ns_V1_standalone_veto = configureVIDCutBasedEleID_V2(WP_Veto_EB, WP_Veto_EE, isoInputs) +cutBasedElectronID_Spring15_50ns_V1_standalone_loose = configureVIDCutBasedEleID_V2(WP_Loose_EB, WP_Loose_EE, isoInputs) +cutBasedElectronID_Spring15_50ns_V1_standalone_medium = configureVIDCutBasedEleID_V2(WP_Medium_EB, WP_Medium_EE, isoInputs) +cutBasedElectronID_Spring15_50ns_V1_standalone_tight = configureVIDCutBasedEleID_V2(WP_Tight_EB, WP_Tight_EE, isoInputs) + + +central_id_registry.register(cutBasedElectronID_Spring15_50ns_V1_standalone_veto.idName, + '1430580668994ed3805fc833c314354e') +central_id_registry.register(cutBasedElectronID_Spring15_50ns_V1_standalone_loose.idName, + '73e5cdf3377e99aa208851339f4906b9') +central_id_registry.register(cutBasedElectronID_Spring15_50ns_V1_standalone_medium.idName, + '2623f1f07173be98c3c3d6e6b69434a6') +central_id_registry.register(cutBasedElectronID_Spring15_50ns_V1_standalone_tight.idName, + '19b4ab8a58bda9c2e1b79b0694f6eee7') + + +# for now until we have a database... 
+cutBasedElectronID_Spring15_50ns_V1_standalone_veto.isPOGApproved = cms.untracked.bool(True) +cutBasedElectronID_Spring15_50ns_V1_standalone_loose.isPOGApproved = cms.untracked.bool(True) +cutBasedElectronID_Spring15_50ns_V1_standalone_medium.isPOGApproved = cms.untracked.bool(True) +cutBasedElectronID_Spring15_50ns_V1_standalone_tight.isPOGApproved = cms.untracked.bool(True) diff --git a/RecoEgamma/ElectronIdentification/python/Identification/heepElectronID_HEEPV51_cff.py b/RecoEgamma/ElectronIdentification/python/Identification/heepElectronID_HEEPV51_cff.py index ab8eecd3dea63..8a46fd9c3a254 100644 --- a/RecoEgamma/ElectronIdentification/python/Identification/heepElectronID_HEEPV51_cff.py +++ b/RecoEgamma/ElectronIdentification/python/Identification/heepElectronID_HEEPV51_cff.py @@ -99,4 +99,4 @@ central_id_registry.register(heepElectronID_HEEPV51.idName,"b0e84f87acbc411de145ab2b8187ef1c") -heepElectronID_HEEPV51.isPOGApproved = cms.untracked.bool(True) +heepElectronID_HEEPV51.isPOGApproved = cms.untracked.bool(False) diff --git a/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_PHYS14_PU20bx25_nonTrig_V1_cff.py b/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_PHYS14_PU20bx25_nonTrig_V1_cff.py index e2ade1494784f..fecd3b4a2c1ba 100644 --- a/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_PHYS14_PU20bx25_nonTrig_V1_cff.py +++ b/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_PHYS14_PU20bx25_nonTrig_V1_cff.py @@ -15,6 +15,10 @@ # This MVA implementation class name mvaPhys14NonTrigClassName = "ElectronMVAEstimatorRun2Phys14NonTrig" +# The tag is an extra string attached to the names of the products +# such as ValueMaps that needs to distinguish cases when the same MVA estimator +# class is used with different tuning/weights +mvaTag = "25nsV1" # There are 6 categories in this MVA. 
They have to be configured in this strict order # (cuts and weight files order): @@ -42,8 +46,8 @@ # The names for the maps are ":Values" # and ":Categories" mvaProducerModuleLabel = "electronMVAValueMapProducer" -mvaValueMapName = mvaProducerModuleLabel + ":" + mvaPhys14NonTrigClassName + "Values" -mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaPhys14NonTrigClassName + "Categories" +mvaValueMapName = mvaProducerModuleLabel + ":" + mvaPhys14NonTrigClassName + mvaTag + "Values" +mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaPhys14NonTrigClassName + mvaTag + "Categories" # The working point for this MVA that is expected to have about 80% signal # efficiency on in each category @@ -82,6 +86,7 @@ # Create the PSet that will be fed to the MVA value map producer mvaEleID_PHYS14_PU20bx25_nonTrig_V1_producer_config = cms.PSet( mvaName = cms.string(mvaPhys14NonTrigClassName), + mvaTag = cms.string(mvaTag), weightFileNames = mvaPhys14NonTrigWeightFiles_V1 ) # Create the VPset's for VID cuts @@ -96,9 +101,9 @@ # central_id_registry.register( mvaEleID_PHYS14_PU20bx25_nonTrig_V1_wp80.idName, - '8b587a6315d6808df7af9d3471d22a20') + '768465d41956da069c83bf245398d5e6') central_id_registry.register( mvaEleID_PHYS14_PU20bx25_nonTrig_V1_wp90.idName, - 'a01428d36d3d0e6b1f89ab772aa606a1') + '7d091368510c32f0ab29a53323cae95a') -mvaEleID_PHYS14_PU20bx25_nonTrig_V1_wp80.isPOGApproved = cms.untracked.bool(True) -mvaEleID_PHYS14_PU20bx25_nonTrig_V1_wp90.isPOGApproved = cms.untracked.bool(True) +mvaEleID_PHYS14_PU20bx25_nonTrig_V1_wp80.isPOGApproved = cms.untracked.bool(False) +mvaEleID_PHYS14_PU20bx25_nonTrig_V1_wp90.isPOGApproved = cms.untracked.bool(False) diff --git a/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring15_25ns_nonTrig_V1_cff.py b/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring15_25ns_nonTrig_V1_cff.py new file mode 100644 index 0000000000000..d248c712f8757 --- /dev/null +++ b/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring15_25ns_nonTrig_V1_cff.py @@ -0,0 +1,112 @@ +from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry + +import FWCore.ParameterSet.Config as cms + +# +# In this file we define the locations of the MVA weights, cuts on the MVA values +# for specific working points, and configure those cuts in VID +# + +# +# The following MVA is derived for 25ns Spring15 MC samples for non-triggering electrons. +# See more documentation in this presentation (P.Pigard): +# https://indico.cern.ch/event/370506/contribution/1/attachments/1135340/1624370/20150726_EID_POG_circulating_vAsPresentedC.pdf +# + +# This MVA implementation class name +mvaSpring15NonTrigClassName = "ElectronMVAEstimatorRun2Spring15NonTrig" +# The tag is an extra string attached to the names of the products +# such as ValueMaps that needs to distinguish cases when the same MVA estimator +# class is used with different tuning/weights +mvaTag = "25nsV1" + +# There are 6 categories in this MVA. 
They have to be configured in this strict order +# (cuts and weight files order): +# 0 EB1 (eta<0.8) pt 5-10 GeV +# 1 EB2 (eta>=0.8) pt 5-10 GeV +# 2 EE pt 5-10 GeV +# 3 EB1 (eta<0.8) pt 10-inf GeV +# 4 EB2 (eta>=0.8) pt 10-inf GeV +# 5 EE pt 10-inf GeV + +mvaSpring15NonTrigWeightFiles_V1 = cms.vstring( + "RecoEgamma/ElectronIdentification/data/Spring15/EIDmva_EB1_5_oldNonTrigSpring15_ConvVarCwoBoolean_TMVA412_FullStatLowPt_PairNegWeightsGlobal_BDT.weights.xml", + "RecoEgamma/ElectronIdentification/data/Spring15/EIDmva_EB2_5_oldNonTrigSpring15_ConvVarCwoBoolean_TMVA412_FullStatLowPt_PairNegWeightsGlobal_BDT.weights.xml", + "RecoEgamma/ElectronIdentification/data/Spring15/EIDmva_EE_5_oldNonTrigSpring15_ConvVarCwoBoolean_TMVA412_FullStatLowPt_PairNegWeightsGlobal_BDT.weights.xml", + "RecoEgamma/ElectronIdentification/data/Spring15/EIDmva_EB1_10_oldNonTrigSpring15_ConvVarCwoBoolean_TMVA412_FullStatLowPt_PairNegWeightsGlobal_BDT.weights.xml", + "RecoEgamma/ElectronIdentification/data/Spring15/EIDmva_EB2_10_oldNonTrigSpring15_ConvVarCwoBoolean_TMVA412_FullStatLowPt_PairNegWeightsGlobal_BDT.weights.xml", + "RecoEgamma/ElectronIdentification/data/Spring15/EIDmva_EE_10_oldNonTrigSpring15_ConvVarCwoBoolean_TMVA412_FullStatLowPt_PairNegWeightsGlobal_BDT.weights.xml" + ) + +# Load some common definitions for MVA machinery +from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import * + +# The locatoins of value maps with the actual MVA values and categories +# for all particles. +# The names for the maps are ":Values" +# and ":Categories" +mvaProducerModuleLabel = "electronMVAValueMapProducer" +mvaValueMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Values" +mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Categories" + +# The working point for this MVA that is expected to have about 90% signal +# efficiency in each category +idName90 = "mvaEleID-Spring15-25ns-nonTrig-V1-wp90" +MVA_WP90 = EleMVA_6Categories_WP( + idName = idName90, + mvaValueMapName = mvaValueMapName, # map with MVA values for all particles + mvaCategoriesMapName = mvaCategoriesMapName, # map with category index for all particles + cutCategory0 = -0.083313, # EB1 low pt + cutCategory1 = -0.235222, # EB2 low pt + cutCategory2 = -0.67099, # EE low pt + cutCategory3 = 0.913286, # EB1 + cutCategory4 = 0.805013, # EB2 + cutCategory5 = 0.358969 # EE + ) + +idName80 = "mvaEleID-Spring15-25ns-nonTrig-V1-wp80" +MVA_WP80 = EleMVA_6Categories_WP( + idName = idName80, + mvaValueMapName = mvaValueMapName, # map with MVA values for all particles + mvaCategoriesMapName = mvaCategoriesMapName, # map with category index for all particles + cutCategory0 = 0.287435, # EB1 low pt + cutCategory1 = 0.221846, # EB2 low pt + cutCategory2 = -0.303263, # EE low pt + cutCategory3 = 0.967083, # EB1 + cutCategory4 = 0.929117, # EB2 + cutCategory5 = 0.726311 # EE + ) + +# +# Finally, set up VID configuration for all cuts +# + +# Create the PSet that will be fed to the MVA value map producer +mvaEleID_Spring15_25ns_nonTrig_V1_producer_config = cms.PSet( + mvaName = cms.string(mvaSpring15NonTrigClassName), + mvaTag = cms.string(mvaTag), + # This MVA uses conversion info, so configure several data items on that + beamSpot = cms.InputTag('offlineBeamSpot'), + conversionsAOD = cms.InputTag('allConversions'), + conversionsMiniAOD = cms.InputTag('reducedEgamma:reducedConversions'), + # + weightFileNames = mvaSpring15NonTrigWeightFiles_V1 + ) +# Create the VPset's for VID 
cuts +mvaEleID_Spring15_25ns_nonTrig_V1_wp90 = configureVIDMVAEleID_V1( MVA_WP90 ) +mvaEleID_Spring15_25ns_nonTrig_V1_wp80 = configureVIDMVAEleID_V1( MVA_WP80 ) + +# The MD5 sum numbers below reflect the exact set of cut variables +# and values above. If anything changes, one has to +# 1) comment out the lines below about the registry, +# 2) run "calculateMD5 +# 3) update the MD5 sum strings below and uncomment the lines again. +# + +central_id_registry.register(mvaEleID_Spring15_25ns_nonTrig_V1_wp90.idName, + 'ac4fdc160eefe9eae7338601c02ed4bb') +central_id_registry.register(mvaEleID_Spring15_25ns_nonTrig_V1_wp80.idName, + '113c47ceaea0fa687b8bd6d880eb4957') + +mvaEleID_Spring15_25ns_nonTrig_V1_wp90.isPOGApproved = cms.untracked.bool(True) +mvaEleID_Spring15_25ns_nonTrig_V1_wp80.isPOGApproved = cms.untracked.bool(True) diff --git a/RecoEgamma/ElectronIdentification/python/egmGsfElectronIDs_cff.py b/RecoEgamma/ElectronIdentification/python/egmGsfElectronIDs_cff.py index 2f701dd54d56b..0808477b4b1ef 100644 --- a/RecoEgamma/ElectronIdentification/python/egmGsfElectronIDs_cff.py +++ b/RecoEgamma/ElectronIdentification/python/egmGsfElectronIDs_cff.py @@ -8,5 +8,5 @@ # Load the producer for MVA IDs. Make sure it is also added to the sequence! from RecoEgamma.ElectronIdentification.ElectronMVAValueMapProducer_cfi import * - -egmGsfElectronIDSequence = cms.Sequence( electronMVAValueMapProducer * egmGsfElectronIDs) +from RecoEgamma.ElectronIdentification.ElectronRegressionValueMapProducer_cfi import * +egmGsfElectronIDSequence = cms.Sequence( electronMVAValueMapProducer * egmGsfElectronIDs * electronRegressionValueMapProducer) diff --git a/RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfChargedHadrons_50ns.txt b/RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfChargedHadrons_50ns.txt new file mode 100644 index 0000000000000..04c4101d67c5e --- /dev/null +++ b/RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfChargedHadrons_50ns.txt @@ -0,0 +1,12 @@ +# This file contains Effective Area constants for +# computing pile-up corrections for the photons isolation +# for a photon object. +# +# |eta| min |eta| max effective area +0.0000 1.0000 0.0157 +1.0000 1.4790 0.0143 +1.4790 2.0000 0.0115 +2.0000 2.2000 0.0094 +2.2000 2.3000 0.0095 +2.3000 2.4000 0.0068 +2.4000 5.0000 0.0053 diff --git a/RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfNeutralHadrons_50ns.txt b/RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfNeutralHadrons_50ns.txt new file mode 100644 index 0000000000000..0f50bdee4b4b3 --- /dev/null +++ b/RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfNeutralHadrons_50ns.txt @@ -0,0 +1,12 @@ +# This file contains Effective Area constants for +# computing pile-up corrections for the photons isolation +# for a photon object. 
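As an aside on how effective-area tables of this form are consumed (a minimal editorial sketch, not part of the patch): the cut-based photon ID builds a rho-corrected isolation by subtracting rho times the effective area of the photon's |eta| bin from the raw isolation sum and clamping the result at zero. The helper names, the file-parsing code, and the worked numbers below are illustrative assumptions; only the table rows themselves come from this file.

def load_effective_areas(path):
    # Parse rows of "|eta| min   |eta| max   effective area", skipping "#" comments.
    rows = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            eta_min, eta_max, ea = (float(x) for x in line.split())
            rows.append((eta_min, eta_max, ea))
    return rows

def corrected_iso(raw_iso, rho, abs_eta, ea_rows):
    # Rho-corrected isolation: subtract rho * EA(|eta|), never going below zero.
    ea = next((a for lo, hi, a in ea_rows if lo <= abs_eta < hi), 0.0)
    return max(0.0, raw_iso - rho * ea)

# Example with the rows listed just below: a photon at |eta| = 1.2 with 1.5 GeV of raw
# neutral-hadron isolation and rho = 20 falls in the 1.000-1.479 bin (EA = 0.0210),
# so the corrected isolation is max(0, 1.5 - 20 * 0.0210) = 1.08 GeV.
rows = load_effective_areas("effAreaPhotons_cone03_pfNeutralHadrons_50ns.txt")
print(corrected_iso(1.5, 20.0, 1.2, rows))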
+# +# |eta| min |eta| max effective area +0.0000 1.0000 0.0143 +1.0000 1.4790 0.0210 +1.4790 2.0000 0.0148 +2.0000 2.2000 0.0082 +2.2000 2.3000 0.0124 +2.3000 2.4000 0.0186 +2.4000 5.0000 0.0320 diff --git a/RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfPhotons_50ns.txt b/RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfPhotons_50ns.txt new file mode 100644 index 0000000000000..ea6306ad65fac --- /dev/null +++ b/RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfPhotons_50ns.txt @@ -0,0 +1,12 @@ +# This file contains Effective Area constants for +# computing pile-up corrections for the photons isolation +# for a photon object. +# +# |eta| min |eta| max effective area +0.0000 1.0000 0.0725 +1.0000 1.4790 0.0604 +1.4790 2.0000 0.0320 +2.0000 2.2000 0.0512 +2.2000 2.3000 0.0766 +2.3000 2.4000 0.0949 +2.4000 5.0000 0.1160 diff --git a/RecoEgamma/PhotonIdentification/interface/PhotonMVAEstimatorRun2Phys14NonTrig.h b/RecoEgamma/PhotonIdentification/interface/PhotonMVAEstimatorRun2Phys14NonTrig.h index 40db447e91910..e8a492fa4b8a7 100644 --- a/RecoEgamma/PhotonIdentification/interface/PhotonMVAEstimatorRun2Phys14NonTrig.h +++ b/RecoEgamma/PhotonIdentification/interface/PhotonMVAEstimatorRun2Phys14NonTrig.h @@ -9,6 +9,8 @@ #include "DataFormats/EgammaCandidates/interface/Photon.h" +#include "CondFormats/EgammaObjects/interface/GBRForest.h" + #include #include #include @@ -16,7 +18,7 @@ #include "TMVA/Tools.h" #include "TMVA/Reader.h" -class PhotonMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base{ +class PhotonMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base { public: @@ -30,8 +32,7 @@ class PhotonMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base{ }; // Define the struct that contains all necessary for MVA variables - struct AllVariables { - + struct AllVariables { float varPhi; float varR9; float varSieie; @@ -62,29 +63,32 @@ class PhotonMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base{ ~PhotonMVAEstimatorRun2Phys14NonTrig(); // Calculation of the MVA value - float mvaValue( const edm::Ptr& particle); + float mvaValue(const edm::Ptr& particle, const edm::Event&) const; // Utility functions - TMVA::Reader *createSingleReader(const int iCategory, const edm::FileInPath &weightFile); - - inline int getNCategories(){return nCategories;}; - bool isEndcapCategory( int category ); - const inline std::string getName(){return name_;}; + std::unique_ptr createSingleReader(const int iCategory, const edm::FileInPath &weightFile) ; + virtual int getNCategories() const override final {return nCategories;}; + bool isEndcapCategory( int category ) const; + virtual const std::string& getName() const override final { return _name; } + virtual const std::string& getTag() const override final { return _tag; } + // Functions that should work on both pat and reco electrons // (use the fact that pat::Electron inherits from reco::GsfElectron) - void fillMVAVariables(const edm::Ptr& particle); - int findCategory( const edm::Ptr& particle); + std::vector fillMVAVariables(const edm::Ptr& particle, const edm::Event&) const override; + int findCategory(const edm::Ptr& particle) const; // The function below ensures that the variables passed to MVA are // within reasonable bounds - void constrainMVAVariables(); + void constrainMVAVariables(AllVariables& vars) const; // Call this function once after the constructor to declare // the needed event content pieces to the framework - void 
setConsumes(edm::ConsumesCollector&&) override; + // DEPRECATED + void setConsumes(edm::ConsumesCollector&&) const override; // Call this function once per event to retrieve all needed // event content pices - void getEventContent(const edm::Event& iEvent) override; + // DEPRECATED + // void getEventContent(const edm::Event& iEvent) const override; private: @@ -92,13 +96,17 @@ class PhotonMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base{ // MVA name. This is a unique name for this MVA implementation. // It will be used as part of ValueMap names. // For simplicity, keep it set to the class name. - const std::string name_ = "PhotonMVAEstimatorRun2Phys14NonTrig"; + const std::string _name = "PhotonMVAEstimatorRun2Phys14NonTrig"; + // MVA tag. This is an additional string variable to distinguish + // instances of the estimator of this class configured with different + // weight files. + std::string _tag; // Data members - std::vector< std::unique_ptr > _tmvaReaders; + std::vector > _gbrForests; // All variables needed by this MVA - std::string _MethodName; + const std::string _MethodName; AllVariables _allMVAVars; // This MVA implementation relies on several ValueMap objects @@ -108,37 +116,19 @@ class PhotonMVAEstimatorRun2Phys14NonTrig : public AnyMVAEstimatorRun2Base{ // Declare all tokens that will be needed to retrieve misc // data from the event content required by this MVA // - edm::EDGetTokenT > _full5x5SigmaIEtaIEtaMapToken; - edm::EDGetTokenT > _full5x5SigmaIEtaIPhiMapToken; - edm::EDGetTokenT > _full5x5E1x3MapToken; - edm::EDGetTokenT > _full5x5E2x2MapToken; - edm::EDGetTokenT > _full5x5E2x5MaxMapToken; - edm::EDGetTokenT > _full5x5E5x5MapToken; - edm::EDGetTokenT > _esEffSigmaRRMapToken; - // - edm::EDGetTokenT > _phoChargedIsolationToken; - edm::EDGetTokenT > _phoPhotonIsolationToken; - edm::EDGetTokenT > _phoWorstChargedIsolationToken; - - // - // Declare all value maps corresponding to the above tokens - // - edm::Handle > _full5x5SigmaIEtaIEtaMap; - edm::Handle > _full5x5SigmaIEtaIPhiMap; - edm::Handle > _full5x5E1x3Map; - edm::Handle > _full5x5E2x2Map; - edm::Handle > _full5x5E2x5MaxMap; - edm::Handle > _full5x5E5x5Map; - edm::Handle > _esEffSigmaRRMap; + const edm::InputTag _full5x5SigmaIEtaIEtaMapLabel; + const edm::InputTag _full5x5SigmaIEtaIPhiMapLabel; + const edm::InputTag _full5x5E1x3MapLabel; + const edm::InputTag _full5x5E2x2MapLabel; + const edm::InputTag _full5x5E2x5MaxMapLabel; + const edm::InputTag _full5x5E5x5MapLabel; + const edm::InputTag _esEffSigmaRRMapLabel; // - edm::Handle > _phoChargedIsolationMap; - edm::Handle > _phoPhotonIsolationMap; - edm::Handle > _phoWorstChargedIsolationMap; - - // Rho will be pulled from the event content - edm::EDGetTokenT _rhoToken; - edm::Handle _rho; - + const edm::InputTag _phoChargedIsolationLabel; + const edm::InputTag _phoPhotonIsolationLabel; + const edm::InputTag _phoWorstChargedIsolationLabel; + // token for rho + const edm::InputTag _rhoLabel; }; DEFINE_EDM_PLUGIN(AnyMVAEstimatorRun2Factory, diff --git a/RecoEgamma/PhotonIdentification/interface/PhotonMVAEstimatorRun2Spring15NonTrig.h b/RecoEgamma/PhotonIdentification/interface/PhotonMVAEstimatorRun2Spring15NonTrig.h index 9b175445a0d4f..c6e6b4333690f 100644 --- a/RecoEgamma/PhotonIdentification/interface/PhotonMVAEstimatorRun2Spring15NonTrig.h +++ b/RecoEgamma/PhotonIdentification/interface/PhotonMVAEstimatorRun2Spring15NonTrig.h @@ -9,6 +9,8 @@ #include "DataFormats/EgammaCandidates/interface/Photon.h" +#include 
"CondFormats/EgammaObjects/interface/GBRForest.h" + #include #include #include @@ -54,7 +56,6 @@ class PhotonMVAEstimatorRun2Spring15NonTrig : public AnyMVAEstimatorRun2Base{ // Spectators float varPt; float varEta; - }; // Constructor and destructor @@ -62,29 +63,30 @@ class PhotonMVAEstimatorRun2Spring15NonTrig : public AnyMVAEstimatorRun2Base{ ~PhotonMVAEstimatorRun2Spring15NonTrig(); // Calculation of the MVA value - float mvaValue( const edm::Ptr& particle); + float mvaValue( const edm::Ptr& particle, const edm::Event&) const; // Utility functions - TMVA::Reader *createSingleReader(const int iCategory, const edm::FileInPath &weightFile); - - inline int getNCategories(){return nCategories;}; - bool isEndcapCategory( int category ); - const inline std::string getName(){return name_;}; - + std::unique_ptr createSingleReader(const int iCategory, const edm::FileInPath &weightFile); + + virtual int getNCategories() const { return nCategories; } + bool isEndcapCategory( int category ) const; + virtual const std::string& getName() const override final { return _name; } + virtual const std::string& getTag() const override final { return _tag; } + // Functions that should work on both pat and reco electrons // (use the fact that pat::Electron inherits from reco::GsfElectron) - void fillMVAVariables(const edm::Ptr& particle); - int findCategory( const edm::Ptr& particle); + std::vector fillMVAVariables(const edm::Ptr& particle, const edm::Event& iEvent) const override; + int findCategory( const edm::Ptr& particle ) const override; // The function below ensures that the variables passed to MVA are // within reasonable bounds - void constrainMVAVariables(); + void constrainMVAVariables(AllVariables&) const; // Call this function once after the constructor to declare // the needed event content pieces to the framework - void setConsumes(edm::ConsumesCollector&&) override; + void setConsumes(edm::ConsumesCollector&&) const override; // Call this function once per event to retrieve all needed // event content pices - void getEventContent(const edm::Event& iEvent) override; + //void getEventContent(const edm::Event& iEvent) override; private: @@ -92,13 +94,18 @@ class PhotonMVAEstimatorRun2Spring15NonTrig : public AnyMVAEstimatorRun2Base{ // MVA name. This is a unique name for this MVA implementation. // It will be used as part of ValueMap names. // For simplicity, keep it set to the class name. - const std::string name_ = "PhotonMVAEstimatorRun2Spring15NonTrig"; + const std::string _name = "PhotonMVAEstimatorRun2Spring15NonTrig"; + + // MVA tag. This is an additional string variable to distinguish + // instances of the estimator of this class configured with different + // weight files. 
+ std::string _tag; // Data members - std::vector< std::unique_ptr > _tmvaReaders; + std::vector< std::unique_ptr > _gbrForests; // All variables needed by this MVA - std::string _MethodName; + const std::string _MethodName; AllVariables _allMVAVars; // This MVA implementation relies on several ValueMap objects @@ -108,37 +115,18 @@ class PhotonMVAEstimatorRun2Spring15NonTrig : public AnyMVAEstimatorRun2Base{ // Declare all tokens that will be needed to retrieve misc // data from the event content required by this MVA // - edm::EDGetTokenT > _full5x5SigmaIEtaIEtaMapToken; - edm::EDGetTokenT > _full5x5SigmaIEtaIPhiMapToken; - edm::EDGetTokenT > _full5x5E1x3MapToken; - edm::EDGetTokenT > _full5x5E2x2MapToken; - edm::EDGetTokenT > _full5x5E2x5MaxMapToken; - edm::EDGetTokenT > _full5x5E5x5MapToken; - edm::EDGetTokenT > _esEffSigmaRRMapToken; - // - edm::EDGetTokenT > _phoChargedIsolationToken; - edm::EDGetTokenT > _phoPhotonIsolationToken; - edm::EDGetTokenT > _phoWorstChargedIsolationToken; - - // - // Declare all value maps corresponding to the above tokens - // - edm::Handle > _full5x5SigmaIEtaIEtaMap; - edm::Handle > _full5x5SigmaIEtaIPhiMap; - edm::Handle > _full5x5E1x3Map; - edm::Handle > _full5x5E2x2Map; - edm::Handle > _full5x5E2x5MaxMap; - edm::Handle > _full5x5E5x5Map; - edm::Handle > _esEffSigmaRRMap; + const edm::InputTag _full5x5SigmaIEtaIEtaMapLabel; + const edm::InputTag _full5x5SigmaIEtaIPhiMapLabel; + const edm::InputTag _full5x5E1x3MapLabel; + const edm::InputTag _full5x5E2x2MapLabel; + const edm::InputTag _full5x5E2x5MaxMapLabel; + const edm::InputTag _full5x5E5x5MapLabel; + const edm::InputTag _esEffSigmaRRMapLabel; // - edm::Handle > _phoChargedIsolationMap; - edm::Handle > _phoPhotonIsolationMap; - edm::Handle > _phoWorstChargedIsolationMap; - - // Rho will be pulled from the event content - edm::EDGetTokenT _rhoToken; - edm::Handle _rho; - + const edm::InputTag _phoChargedIsolationLabel; + const edm::InputTag _phoPhotonIsolationLabel; + const edm::InputTag _phoWorstChargedIsolationLabel; + const edm::InputTag _rhoLabel; }; DEFINE_EDM_PLUGIN(AnyMVAEstimatorRun2Factory, diff --git a/RecoEgamma/PhotonIdentification/plugins/BuildFile.xml b/RecoEgamma/PhotonIdentification/plugins/BuildFile.xml index c242f3ac7491a..83d9fb25d94b7 100644 --- a/RecoEgamma/PhotonIdentification/plugins/BuildFile.xml +++ b/RecoEgamma/PhotonIdentification/plugins/BuildFile.xml @@ -6,11 +6,12 @@ - + - + + diff --git a/RecoEgamma/PhotonIdentification/plugins/PhotonMVAEstimatorRun2Phys14NonTrig.cc b/RecoEgamma/PhotonIdentification/plugins/PhotonMVAEstimatorRun2Phys14NonTrig.cc index e3d7296d59e9f..7efc788a4917c 100644 --- a/RecoEgamma/PhotonIdentification/plugins/PhotonMVAEstimatorRun2Phys14NonTrig.cc +++ b/RecoEgamma/PhotonIdentification/plugins/PhotonMVAEstimatorRun2Phys14NonTrig.cc @@ -2,13 +2,31 @@ #include "FWCore/ParameterSet/interface/FileInPath.h" -PhotonMVAEstimatorRun2Phys14NonTrig::PhotonMVAEstimatorRun2Phys14NonTrig(const edm::ParameterSet& conf): - AnyMVAEstimatorRun2Base(conf) -{ +#include "TMVA/MethodBDT.h" +PhotonMVAEstimatorRun2Phys14NonTrig::PhotonMVAEstimatorRun2Phys14NonTrig(const edm::ParameterSet& conf) : + AnyMVAEstimatorRun2Base(conf), + // The method name is just a key to retrieve this method later, it is not + // a control parameter for a reader (the full definition of the MVA type and + // everything else comes from the xml weight files). 
+ _MethodName("BDTG method"), + _full5x5SigmaIEtaIEtaMapLabel(conf.getParameter("full5x5SigmaIEtaIEtaMap")), + _full5x5SigmaIEtaIPhiMapLabel(conf.getParameter("full5x5SigmaIEtaIPhiMap")), + _full5x5E1x3MapLabel(conf.getParameter("full5x5E1x3Map")), + _full5x5E2x2MapLabel(conf.getParameter("full5x5E2x2Map")), + _full5x5E2x5MaxMapLabel(conf.getParameter("full5x5E2x5MaxMap")), + _full5x5E5x5MapLabel(conf.getParameter("full5x5E5x5Map")), + _esEffSigmaRRMapLabel(conf.getParameter("esEffSigmaRRMap")), + _phoChargedIsolationLabel(conf.getParameter("phoChargedIsolation")), + _phoPhotonIsolationLabel(conf.getParameter("phoPhotonIsolation")), + _phoWorstChargedIsolationLabel(conf.getParameter("phoWorstChargedIsolation")), + _rhoLabel(conf.getParameter("rho")) +{ // // Construct the MVA estimators // + _tag = conf.getParameter("mvaTag"); + const std::vector weightFileNames = conf.getParameter >("weightFileNames"); @@ -16,177 +34,90 @@ PhotonMVAEstimatorRun2Phys14NonTrig::PhotonMVAEstimatorRun2Phys14NonTrig(const e throw cms::Exception("MVA config failure: ") << "wrong number of weightfiles" << std::endl; - _tmvaReaders.clear(); - // The method name is just a key to retrieve this method later, it is not - // a control parameter for a reader (the full definition of the MVA type and - // everything else comes from the xml weight files). - _MethodName = "BDTG method"; // Create a TMVA reader object for each category for(int i=0; i - ( createSingleReader(i, weightFile ) ) ); - + _gbrForests.push_back( createSingleReader(i, weightFile ) ); } - } PhotonMVAEstimatorRun2Phys14NonTrig:: -~PhotonMVAEstimatorRun2Phys14NonTrig(){ - - _tmvaReaders.clear(); -} - -void PhotonMVAEstimatorRun2Phys14NonTrig::setConsumes(edm::ConsumesCollector&& cc){ - - // All tokens for event content needed by this MVA - // Cluster shapes - _full5x5SigmaIEtaIEtaMapToken = cc.consumes > - (_conf.getParameter("full5x5SigmaIEtaIEtaMap")); - - _full5x5SigmaIEtaIPhiMapToken = cc.consumes > - (_conf.getParameter("full5x5SigmaIEtaIPhiMap")); - - _full5x5E1x3MapToken = cc.consumes > - (_conf.getParameter("full5x5E1x3Map")); - - _full5x5E2x2MapToken = cc.consumes > - (_conf.getParameter("full5x5E2x2Map")); - - _full5x5E2x5MaxMapToken = cc.consumes > - (_conf.getParameter("full5x5E2x5MaxMap")); - - _full5x5E5x5MapToken = cc.consumes > - (_conf.getParameter("full5x5E5x5Map")); - - _esEffSigmaRRMapToken = cc.consumes > - (_conf.getParameter("esEffSigmaRRMap")); - - // Isolations - _phoChargedIsolationToken = cc.consumes > - (_conf.getParameter("phoChargedIsolation")); - - _phoPhotonIsolationToken = cc.consumes > - (_conf.getParameter("phoPhotonIsolation")); - - _phoWorstChargedIsolationToken = cc.consumes > - (_conf.getParameter("phoWorstChargedIsolation")); - - // Pileup - _rhoToken = cc.consumes (_conf.getParameter("rho")); - -} - -void PhotonMVAEstimatorRun2Phys14NonTrig::getEventContent(const edm::Event& iEvent){ - - // Get the full5x5 and ES maps - iEvent.getByToken(_full5x5SigmaIEtaIEtaMapToken, _full5x5SigmaIEtaIEtaMap); - iEvent.getByToken(_full5x5SigmaIEtaIPhiMapToken, _full5x5SigmaIEtaIPhiMap); - iEvent.getByToken(_full5x5E1x3MapToken, _full5x5E1x3Map); - iEvent.getByToken(_full5x5E2x2MapToken, _full5x5E2x2Map); - iEvent.getByToken(_full5x5E2x5MaxMapToken, _full5x5E2x5MaxMap); - iEvent.getByToken(_full5x5E5x5MapToken, _full5x5E5x5Map); - iEvent.getByToken(_esEffSigmaRRMapToken, _esEffSigmaRRMap); - - // Get the isolation maps - iEvent.getByToken(_phoChargedIsolationToken, _phoChargedIsolationMap); - 
iEvent.getByToken(_phoPhotonIsolationToken, _phoPhotonIsolationMap); - iEvent.getByToken(_phoWorstChargedIsolationToken, _phoWorstChargedIsolationMap); - - // Get rho - iEvent.getByToken(_rhoToken,_rho); - - // Make sure everything is retrieved successfully - if(! (_full5x5SigmaIEtaIEtaMap.isValid() - && _full5x5SigmaIEtaIPhiMap.isValid() - && _full5x5E1x3Map.isValid() - && _full5x5E2x2Map.isValid() - && _full5x5E2x5MaxMap.isValid() - && _full5x5E5x5Map.isValid() - && _esEffSigmaRRMap.isValid() - && _phoChargedIsolationMap.isValid() - && _phoPhotonIsolationMap.isValid() - && _phoWorstChargedIsolationMap.isValid() - && _rho.isValid() ) ) - throw cms::Exception("MVA failure: ") - << "Failed to retrieve event content needed for this MVA" - << std::endl - << "Check python MVA configuration file and make sure all needed" - << std::endl - << "producers are running upstream" << std::endl; +~PhotonMVAEstimatorRun2Phys14NonTrig() { } - float PhotonMVAEstimatorRun2Phys14NonTrig:: -mvaValue( const edm::Ptr& particle){ +mvaValue( const edm::Ptr& particle, const edm::Event& iEvent) const { - int iCategory = findCategory( particle ); - fillMVAVariables( particle ); - constrainMVAVariables(); - float result = _tmvaReaders.at(iCategory)->EvaluateMVA(_MethodName); + const int iCategory = findCategory( particle ); + const std::vector vars = std::move( fillMVAVariables( particle, iEvent ) ); + const float result = _gbrForests.at(iCategory)->GetClassifier(vars.data()); // DEBUG - const bool debug = false; + constexpr bool debug = false; if( debug ){ printf("Printout of the photon variable inputs for MVA:\n"); - printf(" varPhi_ %f\n", _allMVAVars.varPhi ); - printf(" varR9_ %f\n", _allMVAVars.varR9 ); - printf(" varSieie_ %f\n", _allMVAVars.varSieie ); - printf(" varSieip_ %f\n", _allMVAVars.varSieip ); - printf(" varE1x3overE5x5_ %f\n", _allMVAVars.varE1x3overE5x5); - printf(" varE2x2overE5x5_ %f\n", _allMVAVars.varE2x2overE5x5); - printf(" varE2x5overE5x5_ %f\n", _allMVAVars.varE2x5overE5x5); - printf(" varSCEta_ %f\n", _allMVAVars.varSCEta ); - printf(" varRawE_ %f\n", _allMVAVars.varRawE ); - printf(" varSCEtaWidth_ %f\n", _allMVAVars.varSCEtaWidth ); - printf(" varSCPhiWidth_ %f\n", _allMVAVars.varSCPhiWidth ); - printf(" varRho_ %f\n", _allMVAVars.varRho ); - printf(" varPhoIsoRaw_ %f\n", _allMVAVars.varPhoIsoRaw ); - printf(" varChIsoRaw_ %f\n", _allMVAVars.varChIsoRaw ); - printf(" varWorstChRaw_ %f\n", _allMVAVars.varWorstChRaw ); - printf(" varESEnOverRawE_ %f\n", _allMVAVars.varESEnOverRawE); // for endcap MVA only - printf(" varESEffSigmaRR_ %f\n", _allMVAVars.varESEffSigmaRR); // for endcap MVA only - // The spectators - printf(" varPt_ %f\n", _allMVAVars.varPt ); - printf(" varEta_ %f\n", _allMVAVars.varEta ); + printf(" varPhi_ %f\n", vars[0] ); + printf(" varR9_ %f\n", vars[1] ); + printf(" varSieie_ %f\n", vars[2] ); + printf(" varSieip_ %f\n", vars[3] ); + printf(" varE1x3overE5x5_ %f\n", vars[4] ); + printf(" varE2x2overE5x5_ %f\n", vars[5] ); + printf(" varE2x5overE5x5_ %f\n", vars[6] ); + printf(" varSCEta_ %f\n", vars[7] ); + printf(" varRawE_ %f\n", vars[8] ); + printf(" varSCEtaWidth_ %f\n", vars[9] ); + printf(" varSCPhiWidth_ %f\n", vars[10] ); + printf(" varRho_ %f\n", vars[11] ); + printf(" varPhoIsoRaw_ %f\n", vars[12] ); + printf(" varChIsoRaw_ %f\n", vars[13] ); + printf(" varWorstChRaw_ %f\n", vars[14] ); + if( isEndcapCategory( iCategory ) ) { + printf(" varESEnOverRawE_ %f\n", vars[15] ); // for endcap MVA only + printf(" varESEffSigmaRR_ %f\n", vars[16] ); // for endcap MVA 
only + // The spectators + printf(" varPt_ %f\n", vars[17] ); + printf(" varEta_ %f\n", vars[18] ); + } else { + // The spectators + printf(" varPt_ %f\n", vars[15] ); + printf(" varEta_ %f\n", vars[16] ); + } } return result; } -int PhotonMVAEstimatorRun2Phys14NonTrig::findCategory( const edm::Ptr& particle){ +int PhotonMVAEstimatorRun2Phys14NonTrig::findCategory( const edm::Ptr& particle) const { // Try to cast the particle into a reco particle. // This should work for both reco and pat. - const edm::Ptr phoRecoPtr = ( edm::Ptr )particle; + const edm::Ptr phoRecoPtr(particle); if( phoRecoPtr.isNull() ) throw cms::Exception("MVA failure: ") << " given particle is expected to be reco::Photon or pat::Photon," << std::endl << " but appears to be neither" << std::endl; - float eta = phoRecoPtr->superCluster()->eta(); + const float eta = phoRecoPtr->superCluster()->eta(); // // Determine the category // int iCategory = UNDEFINED; - const float ebeeSplit = 1.479; // division between barrel and endcap + constexpr float ebeeSplit = 1.479; // division between barrel and endcap - if ( std::abs(eta) < ebeeSplit) + if( std::abs(eta) < ebeeSplit ) iCategory = CAT_EB; - if (std::abs(eta) >= ebeeSplit) + if( std::abs(eta) >= ebeeSplit ) iCategory = CAT_EE; return iCategory; } bool PhotonMVAEstimatorRun2Phys14NonTrig:: -isEndcapCategory(int category ){ +isEndcapCategory(int category ) const { // For this specific MVA the function is trivial, but kept for possible // future evolution to an MVA with more categories in eta @@ -198,63 +129,116 @@ isEndcapCategory(int category ){ } -TMVA::Reader *PhotonMVAEstimatorRun2Phys14NonTrig:: -createSingleReader(const int iCategory, const edm::FileInPath &weightFile){ +std::unique_ptr PhotonMVAEstimatorRun2Phys14NonTrig:: +createSingleReader(const int iCategory, const edm::FileInPath &weightFile) { // // Create the reader // - TMVA::Reader *tmpTMVAReader = new TMVA::Reader( "!Color:Silent:Error" ); + TMVA::Reader tmpTMVAReader( "!Color:Silent:Error" ); // // Configure all variables and spectators. Note: the order and names // must match what is found in the xml weights file! 
// - - tmpTMVAReader->AddVariable("recoPhi" , &_allMVAVars.varPhi); - tmpTMVAReader->AddVariable("r9" , &_allMVAVars.varR9); - tmpTMVAReader->AddVariable("sieie_2012", &_allMVAVars.varSieie); - tmpTMVAReader->AddVariable("sieip_2012", &_allMVAVars.varSieip); - tmpTMVAReader->AddVariable("e1x3_2012/e5x5_2012" , &_allMVAVars.varE1x3overE5x5); - tmpTMVAReader->AddVariable("e2x2_2012/e5x5_2012" , &_allMVAVars.varE2x2overE5x5); - tmpTMVAReader->AddVariable("e2x5_2012/e5x5_2012" , &_allMVAVars.varE2x5overE5x5); - tmpTMVAReader->AddVariable("recoSCEta" , &_allMVAVars.varSCEta); - tmpTMVAReader->AddVariable("rawE" , &_allMVAVars.varRawE); - tmpTMVAReader->AddVariable("scEtaWidth", &_allMVAVars.varSCEtaWidth); - tmpTMVAReader->AddVariable("scPhiWidth", &_allMVAVars.varSCPhiWidth); + tmpTMVAReader.AddVariable("recoPhi" , &_allMVAVars.varPhi); + tmpTMVAReader.AddVariable("r9" , &_allMVAVars.varR9); + tmpTMVAReader.AddVariable("sieie_2012", &_allMVAVars.varSieie); + tmpTMVAReader.AddVariable("sieip_2012", &_allMVAVars.varSieip); + tmpTMVAReader.AddVariable("e1x3_2012/e5x5_2012" , &_allMVAVars.varE1x3overE5x5); + tmpTMVAReader.AddVariable("e2x2_2012/e5x5_2012" , &_allMVAVars.varE2x2overE5x5); + tmpTMVAReader.AddVariable("e2x5_2012/e5x5_2012" , &_allMVAVars.varE2x5overE5x5); + tmpTMVAReader.AddVariable("recoSCEta" , &_allMVAVars.varSCEta); + tmpTMVAReader.AddVariable("rawE" , &_allMVAVars.varRawE); + tmpTMVAReader.AddVariable("scEtaWidth", &_allMVAVars.varSCEtaWidth); + tmpTMVAReader.AddVariable("scPhiWidth", &_allMVAVars.varSCPhiWidth); // Endcap only variables if( isEndcapCategory(iCategory) ){ - tmpTMVAReader->AddVariable("esEn/rawE" , &_allMVAVars.varESEnOverRawE); - tmpTMVAReader->AddVariable("esRR" , &_allMVAVars.varESEffSigmaRR); + tmpTMVAReader.AddVariable("esEn/rawE" , &_allMVAVars.varESEnOverRawE); + tmpTMVAReader.AddVariable("esRR" , &_allMVAVars.varESEffSigmaRR); } // Pileup - tmpTMVAReader->AddVariable("rho" , &_allMVAVars.varRho); + tmpTMVAReader.AddVariable("rho" , &_allMVAVars.varRho); // Isolations - tmpTMVAReader->AddVariable("phoIsoRaw" , &_allMVAVars.varPhoIsoRaw); - tmpTMVAReader->AddVariable("chIsoRaw" , &_allMVAVars.varChIsoRaw); - tmpTMVAReader->AddVariable("chWorstRaw", &_allMVAVars.varWorstChRaw); + tmpTMVAReader.AddVariable("phoIsoRaw" , &_allMVAVars.varPhoIsoRaw); + tmpTMVAReader.AddVariable("chIsoRaw" , &_allMVAVars.varChIsoRaw); + tmpTMVAReader.AddVariable("chWorstRaw", &_allMVAVars.varWorstChRaw); // Spectators - tmpTMVAReader->AddSpectator("recoPt" , &_allMVAVars.varPt); - tmpTMVAReader->AddSpectator("recoEta", &_allMVAVars.varEta); + tmpTMVAReader.AddSpectator("recoPt" , &_allMVAVars.varPt); + tmpTMVAReader.AddSpectator("recoEta", &_allMVAVars.varEta); // // Book the method and set up the weights file // - tmpTMVAReader->BookMVA(_MethodName , weightFile.fullPath() ); + std::unique_ptr temp( tmpTMVAReader.BookMVA(_MethodName , weightFile.fullPath() ) ); - return tmpTMVAReader; + return std::unique_ptr( new GBRForest( dynamic_cast( tmpTMVAReader.FindMVA(_MethodName) ) ) ); } // A function that should work on both pat and reco objects -void PhotonMVAEstimatorRun2Phys14NonTrig::fillMVAVariables(const edm::Ptr& particle){ +std::vector PhotonMVAEstimatorRun2Phys14NonTrig::fillMVAVariables(const edm::Ptr& particle, const edm::Event& iEvent) const { + // + // Declare all value maps corresponding to the above tokens + // + edm::Handle > full5x5SigmaIEtaIEtaMap; + edm::Handle > full5x5SigmaIEtaIPhiMap; + edm::Handle > full5x5E1x3Map; + edm::Handle > full5x5E2x2Map; + edm::Handle > 
full5x5E2x5MaxMap; + edm::Handle > full5x5E5x5Map; + edm::Handle > esEffSigmaRRMap; + // + edm::Handle > phoChargedIsolationMap; + edm::Handle > phoPhotonIsolationMap; + edm::Handle > phoWorstChargedIsolationMap; + + // Rho will be pulled from the event content + edm::Handle rho; + + // Get the full5x5 and ES maps + iEvent.getByLabel(_full5x5SigmaIEtaIEtaMapLabel, full5x5SigmaIEtaIEtaMap); + iEvent.getByLabel(_full5x5SigmaIEtaIPhiMapLabel, full5x5SigmaIEtaIPhiMap); + iEvent.getByLabel(_full5x5E1x3MapLabel, full5x5E1x3Map); + iEvent.getByLabel(_full5x5E2x2MapLabel, full5x5E2x2Map); + iEvent.getByLabel(_full5x5E2x5MaxMapLabel, full5x5E2x5MaxMap); + iEvent.getByLabel(_full5x5E5x5MapLabel, full5x5E5x5Map); + iEvent.getByLabel(_esEffSigmaRRMapLabel, esEffSigmaRRMap); + + // Get the isolation maps + iEvent.getByLabel(_phoChargedIsolationLabel, phoChargedIsolationMap); + iEvent.getByLabel(_phoPhotonIsolationLabel, phoPhotonIsolationMap); + iEvent.getByLabel(_phoWorstChargedIsolationLabel, phoWorstChargedIsolationMap); + + // Get rho + iEvent.getByLabel(_rhoLabel,rho); + + // Make sure everything is retrieved successfully + if(! (full5x5SigmaIEtaIEtaMap.isValid() + && full5x5SigmaIEtaIPhiMap.isValid() + && full5x5E1x3Map.isValid() + && full5x5E2x2Map.isValid() + && full5x5E2x5MaxMap.isValid() + && full5x5E5x5Map.isValid() + && esEffSigmaRRMap.isValid() + && phoChargedIsolationMap.isValid() + && phoPhotonIsolationMap.isValid() + && phoWorstChargedIsolationMap.isValid() + && rho.isValid() ) ) + throw cms::Exception("MVA failure: ") + << "Failed to retrieve event content needed for this MVA" + << std::endl + << "Check python MVA configuration file and make sure all needed" + << std::endl + << "producers are running upstream" << std::endl; // Try to cast the particle into a reco particle. // This should work for both reco and pat. const edm::Ptr phoRecoPtr = ( edm::Ptr )particle; + AllVariables allMVAVars; if( phoRecoPtr.isNull() ) throw cms::Exception("MVA failure: ") << " given particle is expected to be reco::Photon or pat::Photon," << std::endl @@ -264,35 +248,83 @@ void PhotonMVAEstimatorRun2Phys14NonTrig::fillMVAVariables(const edm::PtrsuperCluster(); // Full 5x5 cluster shapes. We could take some of this directly from // the photon object, but some of these are not available. 
- float e1x3 = (*_full5x5E1x3Map )[ phoRecoPtr ]; - float e2x2 = (*_full5x5E2x2Map )[ phoRecoPtr ]; - float e2x5 = (*_full5x5E2x5MaxMap)[ phoRecoPtr ]; - float e5x5 = (*_full5x5E5x5Map )[ phoRecoPtr ]; - - _allMVAVars.varPhi = phoRecoPtr->phi(); - _allMVAVars.varR9 = phoRecoPtr->r9() ; - _allMVAVars.varSieie = (*_full5x5SigmaIEtaIEtaMap)[ phoRecoPtr ]; // in principle, in the photon object as well - _allMVAVars.varSieip = (*_full5x5SigmaIEtaIPhiMap)[ phoRecoPtr ]; // not in the photon object - _allMVAVars.varE1x3overE5x5 = e1x3/e5x5; - _allMVAVars.varE2x2overE5x5 = e2x2/e5x5; - _allMVAVars.varE2x5overE5x5 = e2x5/e5x5; - _allMVAVars.varSCEta = superCluster->eta(); - _allMVAVars.varRawE = superCluster->rawEnergy(); - _allMVAVars.varSCEtaWidth = superCluster->etaWidth(); - _allMVAVars.varSCPhiWidth = superCluster->phiWidth(); - _allMVAVars.varESEnOverRawE = superCluster->preshowerEnergy() / superCluster->rawEnergy(); - _allMVAVars.varESEffSigmaRR = (*_esEffSigmaRRMap)[ phoRecoPtr ]; - _allMVAVars.varRho = *_rho; - _allMVAVars.varPhoIsoRaw = (*_phoPhotonIsolationMap)[phoRecoPtr]; - _allMVAVars.varChIsoRaw = (*_phoChargedIsolationMap)[phoRecoPtr]; - _allMVAVars.varWorstChRaw = (*_phoWorstChargedIsolationMap)[phoRecoPtr]; + float e1x3 = (*full5x5E1x3Map )[ phoRecoPtr ]; + float e2x2 = (*full5x5E2x2Map )[ phoRecoPtr ]; + float e2x5 = (*full5x5E2x5MaxMap)[ phoRecoPtr ]; + float e5x5 = (*full5x5E5x5Map )[ phoRecoPtr ]; + + allMVAVars.varPhi = phoRecoPtr->phi(); + allMVAVars.varR9 = phoRecoPtr->r9() ; + allMVAVars.varSieie = (*full5x5SigmaIEtaIEtaMap)[ phoRecoPtr ]; // in principle, in the photon object as well + allMVAVars.varSieip = (*full5x5SigmaIEtaIPhiMap)[ phoRecoPtr ]; // not in the photon object + allMVAVars.varE1x3overE5x5 = e1x3/e5x5; + allMVAVars.varE2x2overE5x5 = e2x2/e5x5; + allMVAVars.varE2x5overE5x5 = e2x5/e5x5; + allMVAVars.varSCEta = superCluster->eta(); + allMVAVars.varRawE = superCluster->rawEnergy(); + allMVAVars.varSCEtaWidth = superCluster->etaWidth(); + allMVAVars.varSCPhiWidth = superCluster->phiWidth(); + allMVAVars.varESEnOverRawE = superCluster->preshowerEnergy() / superCluster->rawEnergy(); + allMVAVars.varESEffSigmaRR = (*esEffSigmaRRMap)[ phoRecoPtr ]; + allMVAVars.varRho = *rho; + allMVAVars.varPhoIsoRaw = (*phoPhotonIsolationMap)[phoRecoPtr]; + allMVAVars.varChIsoRaw = (*phoChargedIsolationMap)[phoRecoPtr]; + allMVAVars.varWorstChRaw = (*phoWorstChargedIsolationMap)[phoRecoPtr]; // Declare spectator vars - _allMVAVars.varPt = phoRecoPtr->pt(); - _allMVAVars.varEta = phoRecoPtr->eta(); + allMVAVars.varPt = phoRecoPtr->pt(); + allMVAVars.varEta = phoRecoPtr->eta(); + + constrainMVAVariables(allMVAVars); + + std::vector vars; + if( isEndcapCategory( findCategory( particle ) ) ) { + vars = std::move( packMVAVariables(allMVAVars.varPhi, + allMVAVars.varR9, + allMVAVars.varSieie, + allMVAVars.varSieip, + allMVAVars.varE1x3overE5x5, + allMVAVars.varE2x2overE5x5, + allMVAVars.varE2x5overE5x5, + allMVAVars.varSCEta, + allMVAVars.varRawE, + allMVAVars.varSCEtaWidth, + allMVAVars.varSCPhiWidth, + allMVAVars.varESEnOverRawE, + allMVAVars.varESEffSigmaRR, + allMVAVars.varRho, + allMVAVars.varPhoIsoRaw, + allMVAVars.varChIsoRaw, + allMVAVars.varWorstChRaw, + // Declare spectator vars + allMVAVars.varPt, + allMVAVars.varEta) + ); + } else { + vars = std::move( packMVAVariables(allMVAVars.varPhi, + allMVAVars.varR9, + allMVAVars.varSieie, + allMVAVars.varSieip, + allMVAVars.varE1x3overE5x5, + allMVAVars.varE2x2overE5x5, + allMVAVars.varE2x5overE5x5, + allMVAVars.varSCEta, + 
allMVAVars.varRawE, + allMVAVars.varSCEtaWidth, + allMVAVars.varSCPhiWidth, + allMVAVars.varRho, + allMVAVars.varPhoIsoRaw, + allMVAVars.varChIsoRaw, + allMVAVars.varWorstChRaw, + // Declare spectator vars + allMVAVars.varPt, + allMVAVars.varEta) + ); + } + return vars; } -void PhotonMVAEstimatorRun2Phys14NonTrig::constrainMVAVariables(){ +void PhotonMVAEstimatorRun2Phys14NonTrig::constrainMVAVariables(AllVariables&) const { // Check that variables do not have crazy values @@ -302,3 +334,16 @@ void PhotonMVAEstimatorRun2Phys14NonTrig::constrainMVAVariables(){ } +void PhotonMVAEstimatorRun2Phys14NonTrig::setConsumes(edm::ConsumesCollector&& cc) const { + cc.consumes >(_full5x5SigmaIEtaIEtaMapLabel); + cc.consumes >(_full5x5SigmaIEtaIPhiMapLabel); + cc.consumes >(_full5x5E1x3MapLabel); + cc.consumes >(_full5x5E2x2MapLabel); + cc.consumes >(_full5x5E2x5MaxMapLabel); + cc.consumes >(_full5x5E5x5MapLabel); + cc.consumes >(_esEffSigmaRRMapLabel); + cc.consumes >(_phoChargedIsolationLabel); + cc.consumes >(_phoPhotonIsolationLabel); + cc.consumes >( _phoWorstChargedIsolationLabel); + cc.consumes(_rhoLabel); +} diff --git a/RecoEgamma/PhotonIdentification/plugins/PhotonMVAEstimatorRun2Spring15NonTrig.cc b/RecoEgamma/PhotonIdentification/plugins/PhotonMVAEstimatorRun2Spring15NonTrig.cc index 278a770c756ba..8fd8a36c6fb42 100644 --- a/RecoEgamma/PhotonIdentification/plugins/PhotonMVAEstimatorRun2Spring15NonTrig.cc +++ b/RecoEgamma/PhotonIdentification/plugins/PhotonMVAEstimatorRun2Spring15NonTrig.cc @@ -2,13 +2,29 @@ #include "FWCore/ParameterSet/interface/FileInPath.h" +#include "TMVA/MethodBDT.h" + PhotonMVAEstimatorRun2Spring15NonTrig::PhotonMVAEstimatorRun2Spring15NonTrig(const edm::ParameterSet& conf): - AnyMVAEstimatorRun2Base(conf) + AnyMVAEstimatorRun2Base(conf), + _MethodName("BDTG method"), + _full5x5SigmaIEtaIEtaMapLabel(conf.getParameter("full5x5SigmaIEtaIEtaMap")), + _full5x5SigmaIEtaIPhiMapLabel(conf.getParameter("full5x5SigmaIEtaIPhiMap")), + _full5x5E1x3MapLabel(conf.getParameter("full5x5E1x3Map")), + _full5x5E2x2MapLabel(conf.getParameter("full5x5E2x2Map")), + _full5x5E2x5MaxMapLabel(conf.getParameter("full5x5E2x5MaxMap")), + _full5x5E5x5MapLabel(conf.getParameter("full5x5E5x5Map")), + _esEffSigmaRRMapLabel(conf.getParameter("esEffSigmaRRMap")), + _phoChargedIsolationLabel(conf.getParameter("phoChargedIsolation")), + _phoPhotonIsolationLabel(conf.getParameter("phoPhotonIsolation")), + _phoWorstChargedIsolationLabel(conf.getParameter("phoWorstChargedIsolation")), + _rhoLabel(conf.getParameter("rho")) { // // Construct the MVA estimators // + _tag = conf.getParameter("mvaTag"); + const std::vector weightFileNames = conf.getParameter >("weightFileNames"); @@ -16,11 +32,11 @@ PhotonMVAEstimatorRun2Spring15NonTrig::PhotonMVAEstimatorRun2Spring15NonTrig(con throw cms::Exception("MVA config failure: ") << "wrong number of weightfiles" << std::endl; - _tmvaReaders.clear(); + _gbrForests.clear(); // The method name is just a key to retrieve this method later, it is not // a control parameter for a reader (the full definition of the MVA type and // everything else comes from the xml weight files). 
- _MethodName = "BDTG method"; + // Create a TMVA reader object for each category for(int i=0; i - ( createSingleReader(i, weightFile ) ) ); + _gbrForests.push_back( createSingleReader(i, weightFile ) ); } @@ -37,128 +52,52 @@ PhotonMVAEstimatorRun2Spring15NonTrig::PhotonMVAEstimatorRun2Spring15NonTrig(con PhotonMVAEstimatorRun2Spring15NonTrig:: ~PhotonMVAEstimatorRun2Spring15NonTrig(){ - - _tmvaReaders.clear(); -} - -void PhotonMVAEstimatorRun2Spring15NonTrig::setConsumes(edm::ConsumesCollector&& cc){ - - // All tokens for event content needed by this MVA - // Cluster shapes - _full5x5SigmaIEtaIEtaMapToken = cc.consumes > - (_conf.getParameter("full5x5SigmaIEtaIEtaMap")); - - _full5x5SigmaIEtaIPhiMapToken = cc.consumes > - (_conf.getParameter("full5x5SigmaIEtaIPhiMap")); - - _full5x5E1x3MapToken = cc.consumes > - (_conf.getParameter("full5x5E1x3Map")); - - _full5x5E2x2MapToken = cc.consumes > - (_conf.getParameter("full5x5E2x2Map")); - - _full5x5E2x5MaxMapToken = cc.consumes > - (_conf.getParameter("full5x5E2x5MaxMap")); - - _full5x5E5x5MapToken = cc.consumes > - (_conf.getParameter("full5x5E5x5Map")); - - _esEffSigmaRRMapToken = cc.consumes > - (_conf.getParameter("esEffSigmaRRMap")); - - // Isolations - _phoChargedIsolationToken = cc.consumes > - (_conf.getParameter("phoChargedIsolation")); - - _phoPhotonIsolationToken = cc.consumes > - (_conf.getParameter("phoPhotonIsolation")); - - _phoWorstChargedIsolationToken = cc.consumes > - (_conf.getParameter("phoWorstChargedIsolation")); - - // Pileup - _rhoToken = cc.consumes (_conf.getParameter("rho")); - } -void PhotonMVAEstimatorRun2Spring15NonTrig::getEventContent(const edm::Event& iEvent){ - - // Get the full5x5 and ES maps - iEvent.getByToken(_full5x5SigmaIEtaIEtaMapToken, _full5x5SigmaIEtaIEtaMap); - iEvent.getByToken(_full5x5SigmaIEtaIPhiMapToken, _full5x5SigmaIEtaIPhiMap); - iEvent.getByToken(_full5x5E1x3MapToken, _full5x5E1x3Map); - iEvent.getByToken(_full5x5E2x2MapToken, _full5x5E2x2Map); - iEvent.getByToken(_full5x5E2x5MaxMapToken, _full5x5E2x5MaxMap); - iEvent.getByToken(_full5x5E5x5MapToken, _full5x5E5x5Map); - iEvent.getByToken(_esEffSigmaRRMapToken, _esEffSigmaRRMap); - - // Get the isolation maps - iEvent.getByToken(_phoChargedIsolationToken, _phoChargedIsolationMap); - iEvent.getByToken(_phoPhotonIsolationToken, _phoPhotonIsolationMap); - iEvent.getByToken(_phoWorstChargedIsolationToken, _phoWorstChargedIsolationMap); - - // Get rho - iEvent.getByToken(_rhoToken,_rho); - - // Make sure everything is retrieved successfully - if(! 
(_full5x5SigmaIEtaIEtaMap.isValid() - && _full5x5SigmaIEtaIPhiMap.isValid() - && _full5x5E1x3Map.isValid() - && _full5x5E2x2Map.isValid() - && _full5x5E2x5MaxMap.isValid() - && _full5x5E5x5Map.isValid() - && _esEffSigmaRRMap.isValid() - && _phoChargedIsolationMap.isValid() - && _phoPhotonIsolationMap.isValid() - && _phoWorstChargedIsolationMap.isValid() - && _rho.isValid() ) ) - throw cms::Exception("MVA failure: ") - << "Failed to retrieve event content needed for this MVA" - << std::endl - << "Check python MVA configuration file and make sure all needed" - << std::endl - << "producers are running upstream" << std::endl; -} - - float PhotonMVAEstimatorRun2Spring15NonTrig:: -mvaValue( const edm::Ptr& particle){ +mvaValue(const edm::Ptr& particle, const edm::Event& iEvent) const { + + const int iCategory = findCategory( particle ); + const std::vector vars = std::move( fillMVAVariables( particle, iEvent ) ); - int iCategory = findCategory( particle ); - fillMVAVariables( particle ); - constrainMVAVariables(); - float result = _tmvaReaders.at(iCategory)->EvaluateMVA(_MethodName); + const float result = _gbrForests.at(iCategory)->GetClassifier(vars.data()); // DEBUG const bool debug = false; if( debug ){ printf("Printout of the photon variable inputs for MVA:\n"); - printf(" varPhi_ %f\n", _allMVAVars.varPhi ); - printf(" varR9_ %f\n", _allMVAVars.varR9 ); - printf(" varSieie_ %f\n", _allMVAVars.varSieie ); - printf(" varSieip_ %f\n", _allMVAVars.varSieip ); - printf(" varE1x3overE5x5_ %f\n", _allMVAVars.varE1x3overE5x5); - printf(" varE2x2overE5x5_ %f\n", _allMVAVars.varE2x2overE5x5); - printf(" varE2x5overE5x5_ %f\n", _allMVAVars.varE2x5overE5x5); - printf(" varSCEta_ %f\n", _allMVAVars.varSCEta ); - printf(" varRawE_ %f\n", _allMVAVars.varRawE ); - printf(" varSCEtaWidth_ %f\n", _allMVAVars.varSCEtaWidth ); - printf(" varSCPhiWidth_ %f\n", _allMVAVars.varSCPhiWidth ); - printf(" varRho_ %f\n", _allMVAVars.varRho ); - printf(" varPhoIsoRaw_ %f\n", _allMVAVars.varPhoIsoRaw ); - printf(" varChIsoRaw_ %f\n", _allMVAVars.varChIsoRaw ); - printf(" varWorstChRaw_ %f\n", _allMVAVars.varWorstChRaw ); - printf(" varESEnOverRawE_ %f\n", _allMVAVars.varESEnOverRawE); // for endcap MVA only - printf(" varESEffSigmaRR_ %f\n", _allMVAVars.varESEffSigmaRR); // for endcap MVA only - // The spectators - printf(" varPt_ %f\n", _allMVAVars.varPt ); - printf(" varEta_ %f\n", _allMVAVars.varEta ); + printf(" varPhi_ %f\n", vars[0] ); + printf(" varR9_ %f\n", vars[1] ); + printf(" varSieie_ %f\n", vars[2] ); + printf(" varSieip_ %f\n", vars[3] ); + printf(" varE1x3overE5x5_ %f\n", vars[4] ); + printf(" varE2x2overE5x5_ %f\n", vars[5] ); + printf(" varE2x5overE5x5_ %f\n", vars[6] ); + printf(" varSCEta_ %f\n", vars[7] ); + printf(" varRawE_ %f\n", vars[8] ); + printf(" varSCEtaWidth_ %f\n", vars[9] ); + printf(" varSCPhiWidth_ %f\n", vars[10] ); + printf(" varRho_ %f\n", vars[11] ); + printf(" varPhoIsoRaw_ %f\n", vars[12] ); + printf(" varChIsoRaw_ %f\n", vars[13] ); + printf(" varWorstChRaw_ %f\n", vars[14] ); + if( isEndcapCategory( iCategory ) ) { + printf(" varESEnOverRawE_ %f\n", vars[15] ); // for endcap MVA only + printf(" varESEffSigmaRR_ %f\n", vars[16] ); // for endcap MVA only + // The spectators + printf(" varPt_ %f\n", vars[17] ); + printf(" varEta_ %f\n", vars[18] ); + } else { + // The spectators + printf(" varPt_ %f\n", vars[15] ); + printf(" varEta_ %f\n", vars[16] ); + } } - + return result; } -int PhotonMVAEstimatorRun2Spring15NonTrig::findCategory( const edm::Ptr& particle){ +int 
PhotonMVAEstimatorRun2Spring15NonTrig::findCategory( const edm::Ptr& particle) const { // Try to cast the particle into a reco particle. // This should work for both reco and pat. @@ -186,7 +125,7 @@ int PhotonMVAEstimatorRun2Spring15NonTrig::findCategory( const edm::Ptr PhotonMVAEstimatorRun2Spring15NonTrig:: createSingleReader(const int iCategory, const edm::FileInPath &weightFile){ // // Create the reader // - TMVA::Reader *tmpTMVAReader = new TMVA::Reader( "!Color:Silent:Error" ); + TMVA::Reader tmpTMVAReader( "!Color:Silent:Error" ); // // Configure all variables and spectators. Note: the order and names // must match what is found in the xml weights file! // - tmpTMVAReader->AddVariable("recoPhi" , &_allMVAVars.varPhi); - tmpTMVAReader->AddVariable("r9" , &_allMVAVars.varR9); - tmpTMVAReader->AddVariable("sieieFull5x5", &_allMVAVars.varSieie); - tmpTMVAReader->AddVariable("sieipFull5x5", &_allMVAVars.varSieip); - tmpTMVAReader->AddVariable("e1x3Full5x5/e5x5Full5x5" , &_allMVAVars.varE1x3overE5x5); - tmpTMVAReader->AddVariable("e2x2Full5x5/e5x5Full5x5" , &_allMVAVars.varE2x2overE5x5); - tmpTMVAReader->AddVariable("e2x5Full5x5/e5x5Full5x5" , &_allMVAVars.varE2x5overE5x5); - tmpTMVAReader->AddVariable("recoSCEta" , &_allMVAVars.varSCEta); - tmpTMVAReader->AddVariable("rawE" , &_allMVAVars.varRawE); - tmpTMVAReader->AddVariable("scEtaWidth", &_allMVAVars.varSCEtaWidth); - tmpTMVAReader->AddVariable("scPhiWidth", &_allMVAVars.varSCPhiWidth); + tmpTMVAReader.AddVariable("recoPhi" , &_allMVAVars.varPhi); + tmpTMVAReader.AddVariable("r9" , &_allMVAVars.varR9); + tmpTMVAReader.AddVariable("sieieFull5x5", &_allMVAVars.varSieie); + tmpTMVAReader.AddVariable("sieipFull5x5", &_allMVAVars.varSieip); + tmpTMVAReader.AddVariable("e1x3Full5x5/e5x5Full5x5", &_allMVAVars.varE1x3overE5x5); + tmpTMVAReader.AddVariable("e2x2Full5x5/e5x5Full5x5", &_allMVAVars.varE2x2overE5x5); + tmpTMVAReader.AddVariable("e2x5Full5x5/e5x5Full5x5", &_allMVAVars.varE2x5overE5x5); + tmpTMVAReader.AddVariable("recoSCEta" , &_allMVAVars.varSCEta); + tmpTMVAReader.AddVariable("rawE" , &_allMVAVars.varRawE); + tmpTMVAReader.AddVariable("scEtaWidth", &_allMVAVars.varSCEtaWidth); + tmpTMVAReader.AddVariable("scPhiWidth", &_allMVAVars.varSCPhiWidth); // Endcap only variables if( isEndcapCategory(iCategory) ){ - tmpTMVAReader->AddVariable("esEn/rawE" , &_allMVAVars.varESEnOverRawE); - tmpTMVAReader->AddVariable("esRR" , &_allMVAVars.varESEffSigmaRR); + tmpTMVAReader.AddVariable("esEn/rawE" , &_allMVAVars.varESEnOverRawE); + tmpTMVAReader.AddVariable("esRR" , &_allMVAVars.varESEffSigmaRR); } // Pileup - tmpTMVAReader->AddVariable("rho" , &_allMVAVars.varRho); + tmpTMVAReader.AddVariable("rho" , &_allMVAVars.varRho); // Isolations - tmpTMVAReader->AddVariable("phoIsoRaw" , &_allMVAVars.varPhoIsoRaw); - tmpTMVAReader->AddVariable("chIsoRaw" , &_allMVAVars.varChIsoRaw); - tmpTMVAReader->AddVariable("chWorstRaw", &_allMVAVars.varWorstChRaw); + tmpTMVAReader.AddVariable("phoIsoRaw" , &_allMVAVars.varPhoIsoRaw); + tmpTMVAReader.AddVariable("chIsoRaw" , &_allMVAVars.varChIsoRaw); + tmpTMVAReader.AddVariable("chWorstRaw", &_allMVAVars.varWorstChRaw); // Spectators - tmpTMVAReader->AddSpectator("recoPt" , &_allMVAVars.varPt); - tmpTMVAReader->AddSpectator("recoEta", &_allMVAVars.varEta); + tmpTMVAReader.AddSpectator("recoPt" , &_allMVAVars.varPt); + tmpTMVAReader.AddSpectator("recoEta", &_allMVAVars.varEta); // // Book the method and set up the weights file // - tmpTMVAReader->BookMVA(_MethodName , weightFile.fullPath() ); + std::unique_ptr 
temp( tmpTMVAReader.BookMVA(_MethodName , weightFile.fullPath() ) ); + + return std::unique_ptr( new GBRForest( dynamic_cast( tmpTMVAReader.FindMVA(_MethodName) ) ) ); - return tmpTMVAReader; } // A function that should work on both pat and reco objects -void PhotonMVAEstimatorRun2Spring15NonTrig::fillMVAVariables(const edm::Ptr& particle){ +std::vector PhotonMVAEstimatorRun2Spring15NonTrig::fillMVAVariables(const edm::Ptr& particle, const edm::Event& iEvent) const { + + // + // Declare all value maps corresponding to the above tokens + // + edm::Handle > full5x5SigmaIEtaIEtaMap; + edm::Handle > full5x5SigmaIEtaIPhiMap; + edm::Handle > full5x5E1x3Map; + edm::Handle > full5x5E2x2Map; + edm::Handle > full5x5E2x5MaxMap; + edm::Handle > full5x5E5x5Map; + edm::Handle > esEffSigmaRRMap; + // + edm::Handle > phoChargedIsolationMap; + edm::Handle > phoPhotonIsolationMap; + edm::Handle > phoWorstChargedIsolationMap; + + // Rho will be pulled from the event content + edm::Handle rho; + + // Get the full5x5 and ES maps + iEvent.getByLabel(_full5x5SigmaIEtaIEtaMapLabel, full5x5SigmaIEtaIEtaMap); + iEvent.getByLabel(_full5x5SigmaIEtaIPhiMapLabel, full5x5SigmaIEtaIPhiMap); + iEvent.getByLabel(_full5x5E1x3MapLabel, full5x5E1x3Map); + iEvent.getByLabel(_full5x5E2x2MapLabel, full5x5E2x2Map); + iEvent.getByLabel(_full5x5E2x5MaxMapLabel, full5x5E2x5MaxMap); + iEvent.getByLabel(_full5x5E5x5MapLabel, full5x5E5x5Map); + iEvent.getByLabel(_esEffSigmaRRMapLabel, esEffSigmaRRMap); + + // Get the isolation maps + iEvent.getByLabel(_phoChargedIsolationLabel, phoChargedIsolationMap); + iEvent.getByLabel(_phoPhotonIsolationLabel, phoPhotonIsolationMap); + iEvent.getByLabel(_phoWorstChargedIsolationLabel, phoWorstChargedIsolationMap); + + // Get rho + iEvent.getByLabel(_rhoLabel,rho); + + // Make sure everything is retrieved successfully + if(! (full5x5SigmaIEtaIEtaMap.isValid() + && full5x5SigmaIEtaIPhiMap.isValid() + && full5x5E1x3Map.isValid() + && full5x5E2x2Map.isValid() + && full5x5E2x5MaxMap.isValid() + && full5x5E5x5Map.isValid() + && esEffSigmaRRMap.isValid() + && phoChargedIsolationMap.isValid() + && phoPhotonIsolationMap.isValid() + && phoWorstChargedIsolationMap.isValid() + && rho.isValid() ) ) + throw cms::Exception("MVA failure: ") + << "Failed to retrieve event content needed for this MVA" + << std::endl + << "Check python MVA configuration file and make sure all needed" + << std::endl + << "producers are running upstream" << std::endl; // Try to cast the particle into a reco particle. // This should work for both reco and pat. - const edm::Ptr phoRecoPtr = ( edm::Ptr )particle; + const edm::Ptr phoRecoPtr(particle); if( phoRecoPtr.isNull() ) throw cms::Exception("MVA failure: ") << " given particle is expected to be reco::Photon or pat::Photon," << std::endl @@ -264,35 +258,84 @@ void PhotonMVAEstimatorRun2Spring15NonTrig::fillMVAVariables(const edm::PtrsuperCluster(); // Full 5x5 cluster shapes. We could take some of this directly from // the photon object, but some of these are not available. 
- float e1x3 = (*_full5x5E1x3Map )[ phoRecoPtr ]; - float e2x2 = (*_full5x5E2x2Map )[ phoRecoPtr ]; - float e2x5 = (*_full5x5E2x5MaxMap)[ phoRecoPtr ]; - float e5x5 = (*_full5x5E5x5Map )[ phoRecoPtr ]; - - _allMVAVars.varPhi = phoRecoPtr->phi(); - _allMVAVars.varR9 = phoRecoPtr->r9() ; - _allMVAVars.varSieie = (*_full5x5SigmaIEtaIEtaMap)[ phoRecoPtr ]; // in principle, in the photon object as well - _allMVAVars.varSieip = (*_full5x5SigmaIEtaIPhiMap)[ phoRecoPtr ]; // not in the photon object - _allMVAVars.varE1x3overE5x5 = e1x3/e5x5; - _allMVAVars.varE2x2overE5x5 = e2x2/e5x5; - _allMVAVars.varE2x5overE5x5 = e2x5/e5x5; - _allMVAVars.varSCEta = superCluster->eta(); - _allMVAVars.varRawE = superCluster->rawEnergy(); - _allMVAVars.varSCEtaWidth = superCluster->etaWidth(); - _allMVAVars.varSCPhiWidth = superCluster->phiWidth(); - _allMVAVars.varESEnOverRawE = superCluster->preshowerEnergy() / superCluster->rawEnergy(); - _allMVAVars.varESEffSigmaRR = (*_esEffSigmaRRMap)[ phoRecoPtr ]; - _allMVAVars.varRho = *_rho; - _allMVAVars.varPhoIsoRaw = (*_phoPhotonIsolationMap)[phoRecoPtr]; - _allMVAVars.varChIsoRaw = (*_phoChargedIsolationMap)[phoRecoPtr]; - _allMVAVars.varWorstChRaw = (*_phoWorstChargedIsolationMap)[phoRecoPtr]; + const float e1x3 = (*full5x5E1x3Map )[ phoRecoPtr ]; + const float e2x2 = (*full5x5E2x2Map )[ phoRecoPtr ]; + const float e2x5 = (*full5x5E2x5MaxMap)[ phoRecoPtr ]; + const float e5x5 = (*full5x5E5x5Map )[ phoRecoPtr ]; + + AllVariables allMVAVars; + + allMVAVars.varPhi = phoRecoPtr->phi(); + allMVAVars.varR9 = phoRecoPtr->r9() ; + allMVAVars.varSieie = (*full5x5SigmaIEtaIEtaMap)[ phoRecoPtr ]; // in principle, in the photon object as well + allMVAVars.varSieip = (*full5x5SigmaIEtaIPhiMap)[ phoRecoPtr ]; // not in the photon object + allMVAVars.varE1x3overE5x5 = e1x3/e5x5; + allMVAVars.varE2x2overE5x5 = e2x2/e5x5; + allMVAVars.varE2x5overE5x5 = e2x5/e5x5; + allMVAVars.varSCEta = superCluster->eta(); + allMVAVars.varRawE = superCluster->rawEnergy(); + allMVAVars.varSCEtaWidth = superCluster->etaWidth(); + allMVAVars.varSCPhiWidth = superCluster->phiWidth(); + allMVAVars.varESEnOverRawE = superCluster->preshowerEnergy() / superCluster->rawEnergy(); + allMVAVars.varESEffSigmaRR = (*esEffSigmaRRMap)[ phoRecoPtr ]; + allMVAVars.varRho = *rho; + allMVAVars.varPhoIsoRaw = (*phoPhotonIsolationMap)[phoRecoPtr]; + allMVAVars.varChIsoRaw = (*phoChargedIsolationMap)[phoRecoPtr]; + allMVAVars.varWorstChRaw = (*phoWorstChargedIsolationMap)[phoRecoPtr]; // Declare spectator vars - _allMVAVars.varPt = phoRecoPtr->pt(); - _allMVAVars.varEta = phoRecoPtr->eta(); - + allMVAVars.varPt = phoRecoPtr->pt(); + allMVAVars.varEta = phoRecoPtr->eta(); + + constrainMVAVariables(allMVAVars); + + std::vector vars; + if( isEndcapCategory( findCategory( particle ) ) ) { + vars = std::move( packMVAVariables(allMVAVars.varPhi, + allMVAVars.varR9, + allMVAVars.varSieie, + allMVAVars.varSieip, + allMVAVars.varE1x3overE5x5, + allMVAVars.varE2x2overE5x5, + allMVAVars.varE2x5overE5x5, + allMVAVars.varSCEta, + allMVAVars.varRawE, + allMVAVars.varSCEtaWidth, + allMVAVars.varSCPhiWidth, + allMVAVars.varESEnOverRawE, + allMVAVars.varESEffSigmaRR, + allMVAVars.varRho, + allMVAVars.varPhoIsoRaw, + allMVAVars.varChIsoRaw, + allMVAVars.varWorstChRaw, + // Declare spectator vars + allMVAVars.varPt, + allMVAVars.varEta) + ); + } else { + vars = std::move( packMVAVariables(allMVAVars.varPhi, + allMVAVars.varR9, + allMVAVars.varSieie, + allMVAVars.varSieip, + allMVAVars.varE1x3overE5x5, + allMVAVars.varE2x2overE5x5, + 
allMVAVars.varE2x5overE5x5, + allMVAVars.varSCEta, + allMVAVars.varRawE, + allMVAVars.varSCEtaWidth, + allMVAVars.varSCPhiWidth, + allMVAVars.varRho, + allMVAVars.varPhoIsoRaw, + allMVAVars.varChIsoRaw, + allMVAVars.varWorstChRaw, + // Declare spectator vars + allMVAVars.varPt, + allMVAVars.varEta) + ); + } + return vars; } -void PhotonMVAEstimatorRun2Spring15NonTrig::constrainMVAVariables(){ +void PhotonMVAEstimatorRun2Spring15NonTrig::constrainMVAVariables(AllVariables&)const { // Check that variables do not have crazy values @@ -302,3 +345,17 @@ void PhotonMVAEstimatorRun2Spring15NonTrig::constrainMVAVariables(){ } +void PhotonMVAEstimatorRun2Spring15NonTrig::setConsumes(edm::ConsumesCollector&& cc) const { + cc.consumes >(_full5x5SigmaIEtaIEtaMapLabel); + cc.consumes >(_full5x5SigmaIEtaIPhiMapLabel); + cc.consumes >(_full5x5E1x3MapLabel); + cc.consumes >(_full5x5E2x2MapLabel); + cc.consumes >(_full5x5E2x5MaxMapLabel); + cc.consumes >(_full5x5E5x5MapLabel); + cc.consumes >(_esEffSigmaRRMapLabel); + cc.consumes >(_phoChargedIsolationLabel); + cc.consumes >(_phoPhotonIsolationLabel); + cc.consumes >( _phoWorstChargedIsolationLabel); + cc.consumes(_rhoLabel); +} + diff --git a/RecoEgamma/PhotonIdentification/plugins/PhotonRegressionValueMapProducer.cc b/RecoEgamma/PhotonIdentification/plugins/PhotonRegressionValueMapProducer.cc new file mode 100644 index 0000000000000..2930b1219daa3 --- /dev/null +++ b/RecoEgamma/PhotonIdentification/plugins/PhotonRegressionValueMapProducer.cc @@ -0,0 +1,257 @@ +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" + +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" + +#include "FWCore/ParameterSet/interface/ParameterSet.h" + +#include "DataFormats/Common/interface/ValueMap.h" +#include "DataFormats/Common/interface/View.h" + +#include "DataFormats/EgammaCandidates/interface/Photon.h" +#include "DataFormats/PatCandidates/interface/Photon.h" + +#include "DataFormats/PatCandidates/interface/PackedCandidate.h" + +#include "RecoEcal/EgammaCoreTools/interface/EcalClusterLazyTools.h" + +#include +#include + +namespace { + // Cluster shapes + constexpr char sigmaIPhiIPhi_[] = "sigmaIPhiIPhi"; + constexpr char sigmaIEtaIPhi_[] = "sigmaIEtaIPhi"; + constexpr char e2x5Max_[] = "e2x5Max"; + constexpr char e2x5Left_[] = "e2x5Left"; + constexpr char e2x5Right_[] = "e2x5Right"; + constexpr char e2x5Top_[] = "e2x5Top"; + constexpr char e2x5Bottom_[] = "e2x5Bottom"; +} + +class PhotonRegressionValueMapProducer : public edm::stream::EDProducer<> { + + public: + + explicit PhotonRegressionValueMapProducer(const edm::ParameterSet&); + ~PhotonRegressionValueMapProducer(); + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + private: + + virtual void produce(edm::Event&, const edm::EventSetup&) override; + + void writeValueMap(edm::Event &iEvent, + const edm::Handle > & handle, + const std::vector & values, + const std::string & label) const ; + + // The object that will compute 5x5 quantities + std::unique_ptr lazyTools; + + // for AOD case + edm::EDGetTokenT ebReducedRecHitCollection_; + edm::EDGetTokenT eeReducedRecHitCollection_; + edm::EDGetTokenT esReducedRecHitCollection_; + edm::EDGetToken src_; + + // for miniAOD case + edm::EDGetTokenT ebReducedRecHitCollectionMiniAOD_; + edm::EDGetTokenT eeReducedRecHitCollectionMiniAOD_; + edm::EDGetTokenT esReducedRecHitCollectionMiniAOD_; + edm::EDGetToken srcMiniAOD_; + + const bool 
use_full5x5_; +}; + +PhotonRegressionValueMapProducer::PhotonRegressionValueMapProducer(const edm::ParameterSet& iConfig) : + use_full5x5_(iConfig.getParameter("useFull5x5")) { + + // + // Declare consummables, handle both AOD and miniAOD case + // + ebReducedRecHitCollection_ = mayConsume(iConfig.getParameter + ("ebReducedRecHitCollection")); + ebReducedRecHitCollectionMiniAOD_ = mayConsume(iConfig.getParameter + ("ebReducedRecHitCollectionMiniAOD")); + + eeReducedRecHitCollection_ = mayConsume(iConfig.getParameter + ("eeReducedRecHitCollection")); + eeReducedRecHitCollectionMiniAOD_ = mayConsume(iConfig.getParameter + ("eeReducedRecHitCollectionMiniAOD")); + + esReducedRecHitCollection_ = mayConsume(iConfig.getParameter + ("esReducedRecHitCollection")); + esReducedRecHitCollectionMiniAOD_ = mayConsume(iConfig.getParameter + ("esReducedRecHitCollectionMiniAOD")); + + // reco photons are castable into pat photons, so no need to handle reco/pat seprately + src_ = mayConsume >(iConfig.getParameter("src")); + srcMiniAOD_ = mayConsume >(iConfig.getParameter("srcMiniAOD")); + + // + // Declare producibles + // + // Cluster shapes + produces >(sigmaIPhiIPhi_); + produces >(sigmaIEtaIPhi_); + produces >(e2x5Max_); + produces >(e2x5Left_); + produces >(e2x5Right_); + produces >(e2x5Top_); + produces >(e2x5Bottom_); +} + +PhotonRegressionValueMapProducer::~PhotonRegressionValueMapProducer() +{} + +template +inline void calculateValues(EcalClusterLazyToolsBase* tools_tocast, + const SeedType& the_seed, + std::vector& sigmaIPhiIPhi, + std::vector& sigmaIEtaIPhi, + std::vector& e2x5Max, + std::vector& e2x5Left, + std::vector& e2x5Right, + std::vector& e2x5Top, + std::vector& e2x5Bottom) { + LazyTools* tools = static_cast(tools_tocast); + + float spp = -999; + std::vector vCov = tools->localCovariances( the_seed ); + spp = (isnan(vCov[2]) ? 0. 
: sqrt(vCov[2])); + float sep = vCov[1]; + sigmaIPhiIPhi.push_back(spp); + sigmaIEtaIPhi.push_back(sep); + e2x5Max .push_back(tools->e2x5Max(the_seed) ); + e2x5Left .push_back(tools->e2x5Left(the_seed) ); + e2x5Right .push_back(tools->e2x5Right(the_seed) ); + e2x5Top .push_back(tools->e2x5Top(the_seed) ); + e2x5Bottom.push_back(tools->e2x5Bottom(the_seed) ); +} + +void PhotonRegressionValueMapProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { + + using namespace edm; + + edm::Handle > src; + + bool isAOD = true; + iEvent.getByToken(src_, src); + if(!src.isValid() ){ + isAOD = false; + iEvent.getByToken(srcMiniAOD_, src); + } + + if( !src.isValid() ) { + throw cms::Exception("IllDefinedDataTier") + << "DataFormat does not contain a photon source!"; + } + + // configure lazy tools + edm::EDGetTokenT ebrh, eerh, esrh; + + if( isAOD ) { + ebrh = ebReducedRecHitCollection_; + eerh = eeReducedRecHitCollection_; + esrh = esReducedRecHitCollection_; + } else { + ebrh = ebReducedRecHitCollectionMiniAOD_; + eerh = eeReducedRecHitCollectionMiniAOD_; + esrh = esReducedRecHitCollectionMiniAOD_; + } + + if( use_full5x5_ ) { + lazyTools.reset( new noZS::EcalClusterLazyTools(iEvent, iSetup, + ebrh, eerh, esrh ) ); + } else { + lazyTools.reset( new EcalClusterLazyTools(iEvent, iSetup, + ebrh, eerh, esrh ) ); + } + + if( !isAOD && src->size() ) { + edm::Ptr test(src->ptrAt(0)); + if( test.isNull() || !test.isAvailable() ) { + throw cms::Exception("InvalidConfiguration") + <<"DataFormat is detected as miniAOD but cannot cast to pat::Photon!"; + } + } + + // Cluster shapes + std::vector sigmaIPhiIPhi; + std::vector sigmaIEtaIPhi; + std::vector e2x5Max; + std::vector e2x5Left; + std::vector e2x5Right; + std::vector e2x5Top; + std::vector e2x5Bottom; + + // reco::Photon::superCluster() is virtual so we can exploit polymorphism + for (unsigned idxpho = 0; idxpho < src->size(); ++idxpho) { + const auto& iPho = src->ptrAt(idxpho); + + // + // Compute full 5x5 quantities + // + const auto& theseed = *(iPho->superCluster()->seed()); + + if( use_full5x5_ ) { + calculateValues(lazyTools.get(), + theseed, + sigmaIPhiIPhi, + sigmaIEtaIPhi, + e2x5Max, + e2x5Left, + e2x5Right, + e2x5Top, + e2x5Bottom); + } else { + calculateValues(lazyTools.get(), + theseed, + sigmaIPhiIPhi, + sigmaIEtaIPhi, + e2x5Max, + e2x5Left, + e2x5Right, + e2x5Top, + e2x5Bottom); + } + } + + // Cluster shapes + writeValueMap(iEvent, src, sigmaIPhiIPhi, sigmaIPhiIPhi_); + writeValueMap(iEvent, src, sigmaIEtaIPhi, sigmaIEtaIPhi_); + writeValueMap(iEvent, src, e2x5Max, e2x5Max_); + writeValueMap(iEvent, src, e2x5Left, e2x5Left_); + writeValueMap(iEvent, src, e2x5Right, e2x5Right_); + writeValueMap(iEvent, src, e2x5Top, e2x5Top_); + writeValueMap(iEvent, src, e2x5Bottom, e2x5Bottom_); + + lazyTools.reset(nullptr); +} + +void PhotonRegressionValueMapProducer::writeValueMap(edm::Event &iEvent, + const edm::Handle > & handle, + const std::vector & values, + const std::string & label) const +{ + using namespace edm; + using namespace std; + auto_ptr > valMap(new ValueMap()); + edm::ValueMap::Filler filler(*valMap); + filler.insert(handle, values.begin(), values.end()); + filler.fill(); + iEvent.put(valMap, label); +} + +void PhotonRegressionValueMapProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + //The following says we do not know what parameters are allowed so do no validation + // Please change this to state exactly what you do use, even if it is no parameters + edm::ParameterSetDescription desc; + 
desc.setUnknown(); + descriptions.addDefault(desc); +} + +DEFINE_FWK_MODULE(PhotonRegressionValueMapProducer); diff --git a/RecoEgamma/PhotonIdentification/python/Identification/cutBasedPhotonID_Spring15_50ns_V1_cff.py b/RecoEgamma/PhotonIdentification/python/Identification/cutBasedPhotonID_Spring15_50ns_V1_cff.py new file mode 100644 index 0000000000000..a70d75f428c28 --- /dev/null +++ b/RecoEgamma/PhotonIdentification/python/Identification/cutBasedPhotonID_Spring15_50ns_V1_cff.py @@ -0,0 +1,152 @@ + +from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry + +# Common functions and classes for ID definition are imported here: +from RecoEgamma.PhotonIdentification.Identification.cutBasedPhotonID_tools import * + +# +# This is the first version of Spring15 cuts, optimized on Spring15 50ns samples. +# +# The ID cuts below are optimized IDs for Spring 50ns Scenario +# The cut values are taken from the twiki: +# https://twiki.cern.ch/twiki/bin/viewauth/CMS/CutBasedPhotonIdentificationRun2 +# (where they may not stay, if a newer version of cuts becomes available for these +# conditions) +# See also the presentation explaining these working points (this will not change): +# https://indico.cern.ch/event/369239/contribution/2/attachments/1134693/1623149/spring15_pcb.pdf + +# +# First, define cut values +# + +# Loose working point Barrel and Endcap +idName = "cutBasedPhotonID-Spring15-50ns-V1-standalone-loose" +WP_Loose_EB = WorkingPoint_V2( + idName , # idName + 0.05 , # hOverECut + 0.0103 , # full5x5_SigmaIEtaIEtaCut +# Isolation cuts are generally absIso < C1 + pt*C2, except for NeuHad is < C1 + exp(pt*C2+C3) + 2.44 , # absPFChaHadIsoWithEACut_C1 + 0 , # absPFChaHadIsoWithEACut_C2 + 2.57 , # absPFNeuHadIsoWithEACut_C1 + 0.0044 , # absPFNeuHadIsoWithEACut_C2 + 0.5809 , # absPFNeuHadIsoWithEACut_C3 + 1.92 , # absPFPhoIsoWithEACut_C1 + 0.0043 # absPFPhoIsoWithEACut_C2 + ) +WP_Loose_EE = WorkingPoint_V2( + idName , #idName + 0.05 , # hOverECut + 0.0277 , # full5x5_SigmaIEtaIEtaCut +# Isolation cuts are generally absIso < C1 + pt*C2, except for NeuHad is < C1 + exp(pt*C2+C3) + 1.84 , # absPFChaHadIsoWithEACut_C1 + 0.00 , # absPFChaHadIsoWithEACut_C2 + 4.00 , # absPFNeuHadIsoWithEACut_C1 + 0.0040 , # absPFNeuHadIsoWithEACut_C2 + 0.9402 , # absPFNeuHadIsoWithEACut_C2 + 2.15 , # absPFPhoIsoWithEACut_C1 + 0.0041 # absPFPhoIsoWithEACut_C2 + ) + +# Medium working point Barrel and Endcap +idName = "cutBasedPhotonID-Spring15-50ns-V1-standalone-medium" +WP_Medium_EB = WorkingPoint_V2( + idName , # idName + 0.05 , # hOverECut + 0.0100 , # full5x5_SigmaIEtaIEtaCut +# Isolation cuts are generally absIso < C1 + pt*C2, except for NeuHad is < C1 + exp(pt*C2+C3) + 1.31 , # absPFChaHadIsoWithEACut_C1 + 0.00 , # absPFChaHadIsoWithEACut_C2 + 0.60 , # absPFNeuHadIsoWithEACut_C1 + 0.0044 , # absPFNeuHadIsoWithEACut_C2 + 0.5809 , # absPFNeuHadIsowithEACut_C3 + 1.33 , # absPFPhoIsoWithEACut_C1 + 0.0043 # absPFPhoIsoWithEACut_C2 + ) + +WP_Medium_EE = WorkingPoint_V2( + idName , #idName + 0.05 , # hOverECut + 0.0267 , # full5x5_SigmaIEtaIEtaCut +# Isolation cuts are generally absIso < C1 + pt*C2, except for NeuHad is < C1 + exp(pt*C2+C3) + 1.25 , # absPFChaHadIsoWithEACut_C1 + 0.00 , # absPFChaHadIsoWithEACut_C2 + 1.65 , # absPFNeuHadIsoWithEACut_C1 + 0.0040 , # absPFNeuHadIsoWithEACut_C2 + 0.9402 , # absPFNeuHadIsowithEACut_C3 + 1.02 , # absPFPhoIsoWithEACut_C1 + 0.0041 # absPFPhoIsoWithEACut_C2 + ) + +# Tight working point Barrel and Endcap +idName = "cutBasedPhotonID-Spring15-50ns-V1-standalone-tight" 
+WP_Tight_EB = WorkingPoint_V2( + idName , # idName + 0.05 , # hOverECut + 0.0100 , # full5x5_SigmaIEtaIEtaCut +# Isolation cuts are generally absIso < C1 + pt*C2, except for NeuHad is < C1 + exp(pt*C2+C3) + 0.91 , # absPFChaHadIsoWithEACut_C1 + 0.00 , # absPFChaHadIsoWithEACut_C2 + 0.33 , # absPFNeuHadIsoWithEACut_C1 + 0.0044 , # absPFNeuHadIsoWithEACut_C2 + 0.5809 , # absPFNeuHadIsowithEACut_C3 + 0.61 , # absPFPhoIsoWithEACut_C1 + 0.0043 # absPFPhoIsoWithEACut_C2 + ) + +WP_Tight_EE = WorkingPoint_V2( + idName , #idName + 0.05 , # hOverECut + 0.0267 , # full5x5_SigmaIEtaIEtaCut +# Isolation cuts are generally absIso < C1 + pt*C2, except for NeuHad is < C1 + exp(pt*C2+C3) + 0.65 , # absPFChaHadIsoWithEACut_C1 + 0.00 , # absPFChaHadIsoWithEACut_C2 + 0.93 , # absPFNeuHadIsoWithEACut_C1 + 0.0040 , # absPFNeuHadIsoWithEACut_C2 + 0.9402 , # absPFNeuHadIsowithEACut_C3 + 0.54 , # absPFPhoIsoWithEACut_C1 + 0.0041 # absPFPhoIsoWithEACut_C2 + ) + + +# Second, define where to find the precomputed isolations and what effective +# areas to use for pile-up correction +isoInputs = IsolationCutInputs( + # chHadIsolationMapName + 'photonIDValueMapProducer:phoChargedIsolation' , + # chHadIsolationEffAreas + "RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfChargedHadrons_50ns.txt" , + # neuHadIsolationMapName + 'photonIDValueMapProducer:phoNeutralHadronIsolation' , + # neuHadIsolationEffAreas + "RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfNeutralHadrons_50ns.txt" , + # phoIsolationMapName + "photonIDValueMapProducer:phoPhotonIsolation" , + # phoIsolationEffAreas + "RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfPhotons_50ns.txt" +) + +# +# Finally, set up VID configuration for all cuts +# +cutBasedPhotonID_Spring15_50ns_V1_standalone_loose = configureVIDCutBasedPhoID_V3 ( WP_Loose_EB, WP_Loose_EE, isoInputs) +cutBasedPhotonID_Spring15_50ns_V1_standalone_medium = configureVIDCutBasedPhoID_V3 ( WP_Medium_EB, WP_Medium_EE, isoInputs) +cutBasedPhotonID_Spring15_50ns_V1_standalone_tight = configureVIDCutBasedPhoID_V3 ( WP_Tight_EB, WP_Tight_EE, isoInputs) + +## The MD5 sum numbers below reflect the exact set of cut variables +# and values above. If anything changes, one has to +# 1) comment out the lines below about the registry, +# 2) run "calculateMD5 +# 3) update the MD5 sum strings below and uncomment the lines again. 
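# An illustrative sketch of what the Loose barrel constants above mean in
# practice: per the comments in WorkingPoint_V2, the charged-hadron and photon
# isolation cuts scale as C1 + pt*C2, while the neutral-hadron cut scales as
# C1 + exp(pt*C2 + C3). The helper below is a hypothetical standalone sketch;
# the real selection is applied by the VID cut modules set up through
# configureVIDCutBasedPhoID_V3.
import math

def loose_barrel_iso_cuts(pt):
    """Effective-area-corrected isolation thresholds for the Loose EB point above."""
    ch_cut  = 2.44 + 0.0    * pt                      # absPFChaHadIsoWithEA
    nh_cut  = 2.57 + math.exp(0.0044 * pt + 0.5809)   # absPFNeuHadIsoWithEA
    pho_cut = 1.92 + 0.0043 * pt                      # absPFPhoIsoWithEA
    return ch_cut, nh_cut, pho_cut

# For a 50 GeV barrel photon this gives roughly chIso < 2.44,
# neuHadIso < 4.80 and phoIso < 2.14.
print(loose_barrel_iso_cuts(50.0))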
+# + +central_id_registry.register(cutBasedPhotonID_Spring15_50ns_V1_standalone_loose.idName, + '3d50a36a9fe1a807fefffe0e6712210a') +central_id_registry.register(cutBasedPhotonID_Spring15_50ns_V1_standalone_medium.idName, + '63a4ab695fabdae62764db5c55f57b10') +central_id_registry.register(cutBasedPhotonID_Spring15_50ns_V1_standalone_tight.idName, + 'cb046b1400392c9f5db251b5316a87cb') + +cutBasedPhotonID_Spring15_50ns_V1_standalone_loose.isPOGApproved = cms.untracked.bool(True) +cutBasedPhotonID_Spring15_50ns_V1_standalone_medium.isPOGApproved = cms.untracked.bool(True) +cutBasedPhotonID_Spring15_50ns_V1_standalone_tight.isPOGApproved = cms.untracked.bool(True) diff --git a/RecoEgamma/PhotonIdentification/python/Identification/cutBasedPhotonID_tools.py b/RecoEgamma/PhotonIdentification/python/Identification/cutBasedPhotonID_tools.py index b4197cc22b658..afd09f620dfc7 100644 --- a/RecoEgamma/PhotonIdentification/python/Identification/cutBasedPhotonID_tools.py +++ b/RecoEgamma/PhotonIdentification/python/Identification/cutBasedPhotonID_tools.py @@ -159,7 +159,7 @@ def psetPhoFull5x5SigmaIEtaIEtaCut(wpEB, wpEE): cutValueEE = cms.double( wpEE.full5x5_sigmaIEtaIEtaCut ), full5x5SigmaIEtaIEtaMap = cms.InputTag('photonIDValueMapProducer:phoFull5x5SigmaIEtaIEta'), barrelCutOff = cms.double(ebCutOff), - needsAdditionalProducts = cms.bool(True), + needsAdditionalProducts = cms.bool(False), isIgnored = cms.bool(False) ) diff --git a/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_PHYS14_PU20bx25_nonTrig_V1_cff.py b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_PHYS14_PU20bx25_nonTrig_V1_cff.py index 5b5b858f288bc..cfbddd3cae7f8 100644 --- a/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_PHYS14_PU20bx25_nonTrig_V1_cff.py +++ b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_PHYS14_PU20bx25_nonTrig_V1_cff.py @@ -15,6 +15,10 @@ # This MVA implementation class name mvaPhys14NonTrigClassName = "PhotonMVAEstimatorRun2Phys14NonTrig" +# The tag is an extra string attached to the names of the products +# such as ValueMaps that needs to distinguish cases when the same MVA estimator +# class is used with different tuning/weights +mvaTag = "25nsV1" # There are 2 categories in this MVA. 
They have to be configured in this strict order # (cuts and weight files order): @@ -34,8 +38,8 @@ # The names for the maps are ":Values" # and ":Categories" mvaProducerModuleLabel = "photonMVAValueMapProducer" -mvaValueMapName = mvaProducerModuleLabel + ":" + mvaPhys14NonTrigClassName + "Values" -mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaPhys14NonTrigClassName + "Categories" +mvaValueMapName = mvaProducerModuleLabel + ":" + mvaPhys14NonTrigClassName + mvaTag + "Values" +mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaPhys14NonTrigClassName + mvaTag + "Categories" # The working point for this MVA that is expected to have about 90% signal # efficiency in each category for photons with pt>30 GeV (somewhat lower @@ -56,6 +60,7 @@ # Create the PSet that will be fed to the MVA value map producer mvaPhoID_PHYS14_PU20bx25_nonTrig_V1_producer_config = cms.PSet( mvaName = cms.string(mvaPhys14NonTrigClassName), + mvaTag = cms.string(mvaTag), weightFileNames = mvaPhys14NonTrigWeightFiles_V1, # # All the event content needed for this MVA implementation follows @@ -89,6 +94,6 @@ # central_id_registry.register( mvaPhoID_PHYS14_PU20bx25_nonTrig_V1_wp90.idName, - '6919edf9f82a78f675d9dec796fd5fab') + 'f3ff6ade4680f277c31deb921aa370e4') mvaPhoID_PHYS14_PU20bx25_nonTrig_V1_wp90.isPOGApproved = cms.untracked.bool(True) diff --git a/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_25ns_nonTrig_V0_cff.py b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_25ns_nonTrig_V0_cff.py new file mode 100644 index 0000000000000..a1fef17683f01 --- /dev/null +++ b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_25ns_nonTrig_V0_cff.py @@ -0,0 +1,99 @@ +from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry + +import FWCore.ParameterSet.Config as cms + +# +# In this file we define the locations of the MVA weights, cuts on the MVA values +# for specific working points, and configure those cuts in VID +# + +# +# The following MVA is derived for Spring15 MC samples for non-triggering photons. +# See more documentation in this presentation: +# https://indico.cern.ch/event/369237/contribution/2/attachments/1128009/1611753/egamma-July17-2015.pdf +# + +# This MVA implementation class name +mvaSpring15NonTrigClassName = "PhotonMVAEstimatorRun2Spring15NonTrig" +# The tag is an extra string attached to the names of the products +# such as ValueMaps that needs to distinguish cases when the same MVA estimator +# class is used with different tuning/weights +mvaTag = "25nsV0" + +# There are 2 categories in this MVA. They have to be configured in this strict order +# (cuts and weight files order): +# 0 barrel photons +# 1 endcap photons + +mvaSpring15NonTrigWeightFiles_V0 = cms.vstring( + "RecoEgamma/PhotonIdentification/data/Spring15/photon_general_MVA_Spring15_25ns_EB_V0.weights.xml", + "RecoEgamma/PhotonIdentification/data/Spring15/photon_general_MVA_Spring15_25ns_EE_V0.weights.xml" + ) + +# Load some common definitions for MVA machinery +from RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_tools import * + +# The locatoins of value maps with the actual MVA values and categories +# for all particles. 
+# The names for the maps are ":Values" +# and ":Categories" +mvaProducerModuleLabel = "photonMVAValueMapProducer" +mvaValueMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Values" +mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Categories" + +# The working point for this MVA that is expected to have about 90% signal +# efficiency in each category for photons with pt>30 GeV (somewhat lower +# for lower pt photons). +idName = "mvaPhoID-Spring15-25ns-nonTrig-V0-wp90" +MVA_WP90 = PhoMVA_2Categories_WP( + idName = idName, + mvaValueMapName = mvaValueMapName, # map with MVA values for all particles + mvaCategoriesMapName = mvaCategoriesMapName, # map with category index for all particles + cutCategory0 = 0.302, # EB + cutCategory1 = 0.307 # EE + ) + +# +# Finally, set up VID configuration for all cuts +# + +# Create the PSet that will be fed to the MVA value map producer +mvaPhoID_Spring15_25ns_nonTrig_V0_producer_config = cms.PSet( + mvaName = cms.string(mvaSpring15NonTrigClassName), + mvaTag = cms.string(mvaTag), + weightFileNames = mvaSpring15NonTrigWeightFiles_V0, + # + # All the event content needed for this MVA implementation follows + # + # All the value maps: these are expected to be produced by the + # PhotonIDValueMapProducer running upstream + # + full5x5SigmaIEtaIEtaMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5SigmaIEtaIEta"), + full5x5SigmaIEtaIPhiMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5SigmaIEtaIPhi"), + full5x5E1x3Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E1x3"), + full5x5E2x2Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E2x2"), + full5x5E2x5MaxMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5E2x5Max"), + full5x5E5x5Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E5x5"), + esEffSigmaRRMap = cms.InputTag("photonIDValueMapProducer:phoESEffSigmaRR"), + phoChargedIsolation = cms.InputTag("photonIDValueMapProducer:phoChargedIsolation"), + phoPhotonIsolation = cms.InputTag("photonIDValueMapProducer:phoPhotonIsolation"), + phoWorstChargedIsolation = cms.InputTag("photonIDValueMapProducer:phoWorstChargedIsolation"), + # + # Original event content: pileup in this case + # + rho = cms.InputTag("fixedGridRhoFastjetAll") + ) +# Create the VPset's for VID cuts +mvaPhoID_Spring15_25ns_nonTrig_V0_wp90 = configureVIDMVAPhoID_V1( MVA_WP90 ) + +# The MD5 sum numbers below reflect the exact set of cut variables +# and values above. If anything changes, one has to +# 1) comment out the lines below about the registry, +# 2) run "calculateMD5 +# 3) update the MD5 sum strings below and uncomment the lines again. 
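# A sketch of how the mvaTag machinery above shows up downstream: for this
# tuning the producer publishes the score and category maps as
#   photonMVAValueMapProducer:PhotonMVAEstimatorRun2Spring15NonTrig25nsV0Values
#   photonMVAValueMapProducer:PhotonMVAEstimatorRun2Spring15NonTrig25nsV0Categories
# so two tunings of the same estimator class no longer collide. A consumer
# picks them up by InputTag; "MyPhotonNtupler" below is a hypothetical analyzer
# used only to show the pattern, reusing the name strings built above.
import FWCore.ParameterSet.Config as cms

myPhotonNtupler = cms.EDAnalyzer("MyPhotonNtupler",             # hypothetical module
    photons       = cms.InputTag("slimmedPhotons"),
    mvaValues     = cms.InputTag(mvaValueMapName),               # string built above
    mvaCategories = cms.InputTag(mvaCategoriesMapName)
)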
+# + +central_id_registry.register( mvaPhoID_Spring15_25ns_nonTrig_V0_wp90.idName, + '0db3574f257ba9082ffc4874ba6cee99') + +mvaPhoID_Spring15_25ns_nonTrig_V0_wp90.isPOGApproved = cms.untracked.bool(False) diff --git a/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_25ns_nonTrig_V2_cff.py b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_25ns_nonTrig_V2_cff.py new file mode 100644 index 0000000000000..b02fa40e446de --- /dev/null +++ b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_25ns_nonTrig_V2_cff.py @@ -0,0 +1,100 @@ +from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry + +import FWCore.ParameterSet.Config as cms + +# +# In this file we define the locations of the MVA weights, cuts on the MVA values +# for specific working points, and configure those cuts in VID +# + +# +# The following MVA is derived for Spring15 MC samples for non-triggering photons. +# See more documentation in this presentation: +# +# https://indico.cern.ch/event/369241/contribution/1/attachments/1140148/1632879/egamma-Aug14-2015.pdf +# + +# This MVA implementation class name +mvaSpring15NonTrigClassName = "PhotonMVAEstimatorRun2Spring15NonTrig" +# The tag is an extra string attached to the names of the products +# such as ValueMaps that needs to distinguish cases when the same MVA estimator +# class is used with different tuning/weights +mvaTag = "25nsV2" + +# There are 2 categories in this MVA. They have to be configured in this strict order +# (cuts and weight files order): +# 0 barrel photons +# 1 endcap photons + +mvaSpring15NonTrigWeightFiles_V2 = cms.vstring( + "RecoEgamma/PhotonIdentification/data/Spring15/photon_general_MVA_Spring15_25ns_EB_V2.weights.xml", + "RecoEgamma/PhotonIdentification/data/Spring15/photon_general_MVA_Spring15_25ns_EE_V2.weights.xml" + ) + +# Load some common definitions for MVA machinery +from RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_tools import * + +# The locatoins of value maps with the actual MVA values and categories +# for all particles. +# The names for the maps are ":Values" +# and ":Categories" +mvaProducerModuleLabel = "photonMVAValueMapProducer" +mvaValueMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Values" +mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Categories" + +# The working point for this MVA that is expected to have about 90% signal +# efficiency in each category for photons with pt>30 GeV (somewhat lower +# for lower pt photons). 
+idName = "mvaPhoID-Spring15-25ns-nonTrig-V2-wp90" +MVA_WP90 = PhoMVA_2Categories_WP( + idName = idName, + mvaValueMapName = mvaValueMapName, # map with MVA values for all particles + mvaCategoriesMapName = mvaCategoriesMapName, # map with category index for all particles + cutCategory0 = 0.374, # EB + cutCategory1 = 0.336 # EE + ) + +# +# Finally, set up VID configuration for all cuts +# + +# Create the PSet that will be fed to the MVA value map producer +mvaPhoID_Spring15_25ns_nonTrig_V2_producer_config = cms.PSet( + mvaName = cms.string(mvaSpring15NonTrigClassName), + mvaTag = cms.string(mvaTag), + weightFileNames = mvaSpring15NonTrigWeightFiles_V2, + # + # All the event content needed for this MVA implementation follows + # + # All the value maps: these are expected to be produced by the + # PhotonIDValueMapProducer running upstream + # + full5x5SigmaIEtaIEtaMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5SigmaIEtaIEta"), + full5x5SigmaIEtaIPhiMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5SigmaIEtaIPhi"), + full5x5E1x3Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E1x3"), + full5x5E2x2Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E2x2"), + full5x5E2x5MaxMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5E2x5Max"), + full5x5E5x5Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E5x5"), + esEffSigmaRRMap = cms.InputTag("photonIDValueMapProducer:phoESEffSigmaRR"), + phoChargedIsolation = cms.InputTag("photonIDValueMapProducer:phoChargedIsolation"), + phoPhotonIsolation = cms.InputTag("photonIDValueMapProducer:phoPhotonIsolation"), + phoWorstChargedIsolation = cms.InputTag("photonIDValueMapProducer:phoWorstChargedIsolation"), + # + # Original event content: pileup in this case + # + rho = cms.InputTag("fixedGridRhoFastjetAll") + ) +# Create the VPset's for VID cuts +mvaPhoID_Spring15_25ns_nonTrig_V2_wp90 = configureVIDMVAPhoID_V1( MVA_WP90 ) + +# The MD5 sum numbers below reflect the exact set of cut variables +# and values above. If anything changes, one has to +# 1) comment out the lines below about the registry, +# 2) run "calculateMD5 +# 3) update the MD5 sum strings below and uncomment the lines again. +# + +central_id_registry.register( mvaPhoID_Spring15_25ns_nonTrig_V2_wp90.idName, + '8a6870b7182e5aeee51b71cdba3c3fce') + +mvaPhoID_Spring15_25ns_nonTrig_V2_wp90.isPOGApproved = cms.untracked.bool(True) diff --git a/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V0_cff.py b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V0_cff.py index 0875c6f89ee6b..366ab6b8170eb 100644 --- a/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V0_cff.py +++ b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V0_cff.py @@ -15,6 +15,10 @@ # This MVA implementation class name mvaSpring15NonTrigClassName = "PhotonMVAEstimatorRun2Spring15NonTrig" +# The tag is an extra string attached to the names of the products +# such as ValueMaps that needs to distinguish cases when the same MVA estimator +# class is used with different tuning/weights +mvaTag = "50nsV0" # There are 2 categories in this MVA. 
They have to be configured in this strict order # (cuts and weight files order): @@ -34,8 +38,8 @@ # The names for the maps are ":Values" # and ":Categories" mvaProducerModuleLabel = "photonMVAValueMapProducer" -mvaValueMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + "Values" -mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + "Categories" +mvaValueMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Values" +mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Categories" # The working point for this MVA that is expected to have about 90% signal # efficiency in each category for photons with pt>30 GeV (somewhat lower @@ -56,6 +60,7 @@ # Create the PSet that will be fed to the MVA value map producer mvaPhoID_Spring15_50ns_nonTrig_V0_producer_config = cms.PSet( mvaName = cms.string(mvaSpring15NonTrigClassName), + mvaTag = cms.string(mvaTag), weightFileNames = mvaSpring15NonTrigWeightFiles_V0, # # All the event content needed for this MVA implementation follows @@ -89,6 +94,6 @@ # central_id_registry.register( mvaPhoID_Spring15_50ns_nonTrig_V0_wp90.idName, - '70d691efd445926bdccda729bb3f8542') + 'f7632ecc85a3b775335fd9bf78f468df') -mvaPhoID_Spring15_50ns_nonTrig_V0_wp90.isPOGApproved = cms.untracked.bool(True) +mvaPhoID_Spring15_50ns_nonTrig_V0_wp90.isPOGApproved = cms.untracked.bool(False) diff --git a/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V1_cff.py b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V1_cff.py new file mode 100644 index 0000000000000..3666e44a4080b --- /dev/null +++ b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V1_cff.py @@ -0,0 +1,99 @@ +from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry + +import FWCore.ParameterSet.Config as cms + +# +# In this file we define the locations of the MVA weights, cuts on the MVA values +# for specific working points, and configure those cuts in VID +# + +# +# The following MVA is derived for Spring15 MC samples for non-triggering photons. +# See more documentation in this presentation: +# https://indico.cern.ch/event/369237/contribution/2/attachments/1128009/1611753/egamma-July17-2015.pdf +# + +# This MVA implementation class name +mvaSpring15NonTrigClassName = "PhotonMVAEstimatorRun2Spring15NonTrig" +# The tag is an extra string attached to the names of the products +# such as ValueMaps that needs to distinguish cases when the same MVA estimator +# class is used with different tuning/weights +mvaTag = "50nsV1" + +# There are 2 categories in this MVA. They have to be configured in this strict order +# (cuts and weight files order): +# 0 barrel photons +# 1 endcap photons + +mvaSpring15NonTrigWeightFiles_V1 = cms.vstring( + "RecoEgamma/PhotonIdentification/data/Spring15/photon_general_MVA_Spring15_50ns_EB_V1.weights.xml", + "RecoEgamma/PhotonIdentification/data/Spring15/photon_general_MVA_Spring15_50ns_EE_V1.weights.xml" + ) + +# Load some common definitions for MVA machinery +from RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_tools import * + +# The locatoins of value maps with the actual MVA values and categories +# for all particles. 
+# The names for the maps are ":Values" +# and ":Categories" +mvaProducerModuleLabel = "photonMVAValueMapProducer" +mvaValueMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Values" +mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Categories" + +# The working point for this MVA that is expected to have about 90% signal +# efficiency in each category for photons with pt>30 GeV (somewhat lower +# for lower pt photons). +idName = "mvaPhoID-Spring15-50ns-nonTrig-V1-wp90" +MVA_WP90 = PhoMVA_2Categories_WP( + idName = idName, + mvaValueMapName = mvaValueMapName, # map with MVA values for all particles + mvaCategoriesMapName = mvaCategoriesMapName, # map with category index for all particles + cutCategory0 = 0.284, # EB + cutCategory1 = 0.432 # EE + ) + +# +# Finally, set up VID configuration for all cuts +# + +# Create the PSet that will be fed to the MVA value map producer +mvaPhoID_Spring15_50ns_nonTrig_V1_producer_config = cms.PSet( + mvaName = cms.string(mvaSpring15NonTrigClassName), + mvaTag = cms.string(mvaTag), + weightFileNames = mvaSpring15NonTrigWeightFiles_V1, + # + # All the event content needed for this MVA implementation follows + # + # All the value maps: these are expected to be produced by the + # PhotonIDValueMapProducer running upstream + # + full5x5SigmaIEtaIEtaMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5SigmaIEtaIEta"), + full5x5SigmaIEtaIPhiMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5SigmaIEtaIPhi"), + full5x5E1x3Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E1x3"), + full5x5E2x2Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E2x2"), + full5x5E2x5MaxMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5E2x5Max"), + full5x5E5x5Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E5x5"), + esEffSigmaRRMap = cms.InputTag("photonIDValueMapProducer:phoESEffSigmaRR"), + phoChargedIsolation = cms.InputTag("photonIDValueMapProducer:phoChargedIsolation"), + phoPhotonIsolation = cms.InputTag("photonIDValueMapProducer:phoPhotonIsolation"), + phoWorstChargedIsolation = cms.InputTag("photonIDValueMapProducer:phoWorstChargedIsolation"), + # + # Original event content: pileup in this case + # + rho = cms.InputTag("fixedGridRhoFastjetAll") + ) +# Create the VPset's for VID cuts +mvaPhoID_Spring15_50ns_nonTrig_V1_wp90 = configureVIDMVAPhoID_V1( MVA_WP90 ) + +# The MD5 sum numbers below reflect the exact set of cut variables +# and values above. If anything changes, one has to +# 1) comment out the lines below about the registry, +# 2) run "calculateMD5 +# 3) update the MD5 sum strings below and uncomment the lines again. 
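# A minimal sketch of the decision the wp90 point defined below encodes: the
# category map assigns 0 to barrel and 1 to endcap photons (the "strict order"
# noted earlier), and the ID passes when the MVA score exceeds the per-category
# cut. In production this is done by the VID machinery configured through
# configureVIDMVAPhoID_V1; the function below only restates that logic.
WP90_CUTS_50NS_V1 = {0: 0.284, 1: 0.432}   # category -> minimum MVA score

def passes_wp90(mva_value, category):
    return mva_value > WP90_CUTS_50NS_V1[category]

# A barrel photon (category 0) with a score of 0.31 passes,
# while an endcap photon (category 1) with the same score does not.
assert passes_wp90(0.31, 0) and not passes_wp90(0.31, 1)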
+# + +central_id_registry.register( mvaPhoID_Spring15_50ns_nonTrig_V1_wp90.idName, + '48415c18fe032ac6838a5eb1650574b0') + +mvaPhoID_Spring15_50ns_nonTrig_V1_wp90.isPOGApproved = cms.untracked.bool(False) diff --git a/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V2_cff.py b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V2_cff.py new file mode 100644 index 0000000000000..c92baaaa2dc20 --- /dev/null +++ b/RecoEgamma/PhotonIdentification/python/Identification/mvaPhotonID_Spring15_50ns_nonTrig_V2_cff.py @@ -0,0 +1,101 @@ +from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry + +import FWCore.ParameterSet.Config as cms + +# +# In this file we define the locations of the MVA weights, cuts on the MVA values +# for specific working points, and configure those cuts in VID +# + +# +# The following MVA is derived for Spring15 MC samples for non-triggering photons. +# See more documentation in this presentation: +# https://indico.cern.ch/event/369237/contribution/2/attachments/1128009/1611753/egamma-July17-2015.pdf +# this also contains a minor update from email exchanges (thus move to V2) +# Specific docs for V2 are in this final presentation: +# https://indico.cern.ch/event/369241/contribution/1/attachments/1140148/1632879/egamma-Aug14-2015.pdf + +# This MVA implementation class name +mvaSpring15NonTrigClassName = "PhotonMVAEstimatorRun2Spring15NonTrig" +# The tag is an extra string attached to the names of the products +# such as ValueMaps that needs to distinguish cases when the same MVA estimator +# class is used with different tuning/weights +mvaTag = "50nsV2" + +# There are 2 categories in this MVA. They have to be configured in this strict order +# (cuts and weight files order): +# 0 barrel photons +# 1 endcap photons + +mvaSpring15NonTrigWeightFiles_V2 = cms.vstring( + "RecoEgamma/PhotonIdentification/data/Spring15/photon_general_MVA_Spring15_50ns_EB_V2.weights.xml", + "RecoEgamma/PhotonIdentification/data/Spring15/photon_general_MVA_Spring15_50ns_EE_V2.weights.xml" + ) + +# Load some common definitions for MVA machinery +from RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_tools import * + +# The locatoins of value maps with the actual MVA values and categories +# for all particles. +# The names for the maps are ":Values" +# and ":Categories" +mvaProducerModuleLabel = "photonMVAValueMapProducer" +mvaValueMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Values" +mvaCategoriesMapName = mvaProducerModuleLabel + ":" + mvaSpring15NonTrigClassName + mvaTag + "Categories" + +# The working point for this MVA that is expected to have about 90% signal +# efficiency in each category for photons with pt>30 GeV (somewhat lower +# for lower pt photons). 
+idName = "mvaPhoID-Spring15-50ns-nonTrig-V2-wp90" +MVA_WP90 = PhoMVA_2Categories_WP( + idName = idName, + mvaValueMapName = mvaValueMapName, # map with MVA values for all particles + mvaCategoriesMapName = mvaCategoriesMapName, # map with category index for all particles + cutCategory0 = 0.29538, # EB + cutCategory1 = 0.45837 # EE + ) + +# +# Finally, set up VID configuration for all cuts +# + +# Create the PSet that will be fed to the MVA value map producer +mvaPhoID_Spring15_50ns_nonTrig_V2_producer_config = cms.PSet( + mvaName = cms.string(mvaSpring15NonTrigClassName), + mvaTag = cms.string(mvaTag), + weightFileNames = mvaSpring15NonTrigWeightFiles_V2, + # + # All the event content needed for this MVA implementation follows + # + # All the value maps: these are expected to be produced by the + # PhotonIDValueMapProducer running upstream + # + full5x5SigmaIEtaIEtaMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5SigmaIEtaIEta"), + full5x5SigmaIEtaIPhiMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5SigmaIEtaIPhi"), + full5x5E1x3Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E1x3"), + full5x5E2x2Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E2x2"), + full5x5E2x5MaxMap = cms.InputTag("photonIDValueMapProducer:phoFull5x5E2x5Max"), + full5x5E5x5Map = cms.InputTag("photonIDValueMapProducer:phoFull5x5E5x5"), + esEffSigmaRRMap = cms.InputTag("photonIDValueMapProducer:phoESEffSigmaRR"), + phoChargedIsolation = cms.InputTag("photonIDValueMapProducer:phoChargedIsolation"), + phoPhotonIsolation = cms.InputTag("photonIDValueMapProducer:phoPhotonIsolation"), + phoWorstChargedIsolation = cms.InputTag("photonIDValueMapProducer:phoWorstChargedIsolation"), + # + # Original event content: pileup in this case + # + rho = cms.InputTag("fixedGridRhoFastjetAll") + ) +# Create the VPset's for VID cuts +mvaPhoID_Spring15_50ns_nonTrig_V2_wp90 = configureVIDMVAPhoID_V1( MVA_WP90 ) + +# The MD5 sum numbers below reflect the exact set of cut variables +# and values above. If anything changes, one has to +# 1) comment out the lines below about the registry, +# 2) run "calculateMD5 +# 3) update the MD5 sum strings below and uncomment the lines again. 
+# + +central_id_registry.register( mvaPhoID_Spring15_50ns_nonTrig_V2_wp90.idName, + '541536b38c6a01f2ceded658477ff570') + +mvaPhoID_Spring15_50ns_nonTrig_V2_wp90.isPOGApproved = cms.untracked.bool(True) diff --git a/RecoEgamma/PhotonIdentification/python/PhotonMVAValueMapProducer_cfi.py b/RecoEgamma/PhotonIdentification/python/PhotonMVAValueMapProducer_cfi.py index 6c6113ab35134..870ea91319e35 100644 --- a/RecoEgamma/PhotonIdentification/python/PhotonMVAValueMapProducer_cfi.py +++ b/RecoEgamma/PhotonIdentification/python/PhotonMVAValueMapProducer_cfi.py @@ -7,8 +7,14 @@ from RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_PHYS14_PU20bx25_nonTrig_V1_cff import * mvaConfigsForPhoProducer.append( mvaPhoID_PHYS14_PU20bx25_nonTrig_V1_producer_config ) -from RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring15_50ns_nonTrig_V0_cff import * -mvaConfigsForPhoProducer.append( mvaPhoID_Spring15_50ns_nonTrig_V0_producer_config ) +from RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring15_50ns_nonTrig_V1_cff import * +mvaConfigsForPhoProducer.append( mvaPhoID_Spring15_50ns_nonTrig_V1_producer_config ) + +from RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring15_50ns_nonTrig_V2_cff import * +mvaConfigsForPhoProducer.append( mvaPhoID_Spring15_50ns_nonTrig_V2_producer_config ) + +from RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring15_25ns_nonTrig_V2_cff import * +mvaConfigsForPhoProducer.append( mvaPhoID_Spring15_25ns_nonTrig_V2_producer_config ) photonMVAValueMapProducer = cms.EDProducer('PhotonMVAValueMapProducer', # The module automatically detects AOD vs miniAOD, so we configure both diff --git a/RecoEgamma/PhotonIdentification/python/PhotonRegressionValueMapProducer_cfi.py b/RecoEgamma/PhotonIdentification/python/PhotonRegressionValueMapProducer_cfi.py new file mode 100644 index 0000000000000..d741db108d5a6 --- /dev/null +++ b/RecoEgamma/PhotonIdentification/python/PhotonRegressionValueMapProducer_cfi.py @@ -0,0 +1,22 @@ +import FWCore.ParameterSet.Config as cms + +photonRegressionValueMapProducer = cms.EDProducer('PhotonRegressionValueMapProducer', + #presently the photon regressions use the fraction-ized (PF clustering) shower shapes + useFull5x5 = cms.bool(False), + # The module automatically detects AOD vs miniAOD, so we configure both + # + # AOD case + # + ebReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsEB"), + eeReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsEE"), + esReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsES"), + src = cms.InputTag('gedPhotons'), + # + # miniAOD case + # + ebReducedRecHitCollectionMiniAOD = cms.InputTag("reducedEgamma:reducedEBRecHits"), + eeReducedRecHitCollectionMiniAOD = cms.InputTag("reducedEgamma:reducedEERecHits"), + esReducedRecHitCollectionMiniAOD = cms.InputTag("reducedEgamma:reducedESRecHits"), + srcMiniAOD = cms.InputTag('slimmedPhotons', + processName=cms.InputTag.skipCurrentProcess()), + ) diff --git a/RecoEgamma/PhotonIdentification/python/egmPhotonIDs_cff.py b/RecoEgamma/PhotonIdentification/python/egmPhotonIDs_cff.py index 1cf2c845411ba..6fc7b92c39b37 100644 --- a/RecoEgamma/PhotonIdentification/python/egmPhotonIDs_cff.py +++ b/RecoEgamma/PhotonIdentification/python/egmPhotonIDs_cff.py @@ -8,8 +8,9 @@ # Load the producer for MVA IDs. Make sure it is also added to the sequence! 
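# A sketch of how the regression value-map producer configured above is
# consumed: it stores one ValueMap<float> per shower-shape variable, keyed by
# the photon collection it ran on, under the instance labels defined in the
# C++ plugin (sigmaIPhiIPhi, sigmaIEtaIPhi, e2x5Max, e2x5Left, e2x5Right,
# e2x5Top, e2x5Bottom). Downstream code refers to them as "module:instance"
# tags; "myRegressionInputs" is a hypothetical PSet name used only to show the
# pattern. The maps are only filled if the module actually runs, hence its
# addition to egmPhotonIDSequence in the hunk below.
import FWCore.ParameterSet.Config as cms

myRegressionInputs = cms.PSet(
    sigmaIPhiIPhi = cms.InputTag("photonRegressionValueMapProducer:sigmaIPhiIPhi"),
    sigmaIEtaIPhi = cms.InputTag("photonRegressionValueMapProducer:sigmaIEtaIPhi"),
    e2x5Max       = cms.InputTag("photonRegressionValueMapProducer:e2x5Max")
)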
from RecoEgamma.PhotonIdentification.PhotonMVAValueMapProducer_cfi import * +from RecoEgamma.PhotonIdentification.PhotonRegressionValueMapProducer_cfi import * # The sequence below is important. The MVA ValueMapProducer # needs to be downstream from the ID ValueMapProducer because it relies # on some of its products -egmPhotonIDSequence = cms.Sequence(photonIDValueMapProducer * photonMVAValueMapProducer * egmPhotonIDs) +egmPhotonIDSequence = cms.Sequence(photonIDValueMapProducer * photonMVAValueMapProducer * egmPhotonIDs * photonRegressionValueMapProducer ) diff --git a/RecoJets/JetProducers/python/ak4PFJetsPuppi_cfi.py b/RecoJets/JetProducers/python/ak4PFJetsPuppi_cfi.py index ff9fa3d6c5049..09612938453d0 100644 --- a/RecoJets/JetProducers/python/ak4PFJetsPuppi_cfi.py +++ b/RecoJets/JetProducers/python/ak4PFJetsPuppi_cfi.py @@ -2,9 +2,9 @@ from RecoJets.JetProducers.PFJetParameters_cfi import * from RecoJets.JetProducers.AnomalousCellParameters_cfi import * -from RecoJets.JetProducers.ak4PFJets_cfi import ak4PFJets +import RecoJets.JetProducers.ak4PFJets_cfi -ak4PFJetsPuppi = ak4PFJets.clone( +ak4PFJetsPuppi = RecoJets.JetProducers.ak4PFJets_cfi.ak4PFJets.clone( src = cms.InputTag("puppi") ) diff --git a/RecoMET/METFilters/python/EcalDeadCellBoundaryEnergyFilter_cfi.py b/RecoMET/METFilters/python/EcalDeadCellBoundaryEnergyFilter_cfi.py index e9d8df1969abe..c3c0ffdff0caf 100644 --- a/RecoMET/METFilters/python/EcalDeadCellBoundaryEnergyFilter_cfi.py +++ b/RecoMET/METFilters/python/EcalDeadCellBoundaryEnergyFilter_cfi.py @@ -38,8 +38,8 @@ #### limitDeadCellToChannelStatusEB=cms.vint32(12,14) #### limitDeadCellToChannelStatusEE=cms.vint32() #### for negative values all status>=abs(given value) are used (e.g. limitDeadCellToChannelStatusEE=cms.vint32(-13)--->limitDeadCellToChannelStatusEE=cms.vint32(13,14,15,16,17,...)) - limitDeadCellToChannelStatusEB=cms.vint32(12, 14), - limitDeadCellToChannelStatusEE=cms.vint32(12, 14), + limitDeadCellToChannelStatusEB=cms.vint32(12, 13, 14), + limitDeadCellToChannelStatusEE=cms.vint32(12, 13, 14), #### enable calculation of energy deposits next to cracks/gaps enableGap=cms.untracked.bool(False), taggingMode = cms.bool(False), diff --git a/RecoMET/METFilters/python/EcalDeadCellDeltaRFilter_cfi.py b/RecoMET/METFilters/python/EcalDeadCellDeltaRFilter_cfi.py deleted file mode 100644 index 89fe6e3673d27..0000000000000 --- a/RecoMET/METFilters/python/EcalDeadCellDeltaRFilter_cfi.py +++ /dev/null @@ -1,50 +0,0 @@ -import FWCore.ParameterSet.Config as cms - -simpleDRfilter = cms.EDFilter('simpleDRfilter', - -# In debug mode, there are print-out if the MET is due to dead cell or cracks - debug = cms.untracked.bool( False ), -# No usage now - printSkimInfo = cms.untracked.bool( False ), - - taggingMode = cms.bool(False), - -# It's written in general that one can put pf, calo and tracking jets - jetInputTag = cms.InputTag('ak4PFJets'), -# The pt and eta cuts applied, for instance, pt>30 && |eta|<9999 - jetSelCuts = cms.vdouble(30, 9999), # pt, eta - -# This is also in general that one can put pf, tc and calo met - metInputTag = cms.InputTag('pfMet'), - -# If enabled, a root file will be produced with name give in profileRootName. -# One can produce histograms. Currently, no histograms are produced. - makeProfileRoot = cms.untracked.bool( False ), - profileRootName = cms.untracked.string( "simpleDRfilter.root" ), - -# The status of masked cells we want to pick from global tag, for instance here, >=1 -# Don't need to change ususally. 
- maskedEcalChannelStatusThreshold = cms.int32( 1 ), -# The channels status we want to evaluate -# positive numbers, e.g., 12, means only channels with status 12 are considered -# negative numbers, e.g., -12, means channels with status >=12 are all considered - chnStatusToBeEvaluated = cms.int32(-12), - -# No usage now - isProd = cms.untracked.bool( False ), - -# If enabled, also check if MET is due to cracks or not. If found, events are filtered -# (if doFilter is enabled) - doCracks = cms.untracked.bool( False ), - -# No usage now - verbose = cms.int32( 0 ), - -# Simple DR filter 0 : dphi cut of jet to MET 1 : dR cut of jets to masked channles - simpleDRfilterInput = cms.vdouble(0.5, 0.3), # 0.5, 0.3 are what RA1 use - -# Definition of cracks for HB/HE and HE/HF - cracksHBHEdef = cms.vdouble(1.3, 1.7), # crab between 1.3 and 1.7 - cracksHEHFdef = cms.vdouble(2.8, 3.2), # crab between 2.8 and 3.2 - -) diff --git a/RecoMET/METFilters/python/EcalDeadCellTriggerPrimitiveFilter_cfi.py b/RecoMET/METFilters/python/EcalDeadCellTriggerPrimitiveFilter_cfi.py index 1a392975fa399..188d93496d0b3 100644 --- a/RecoMET/METFilters/python/EcalDeadCellTriggerPrimitiveFilter_cfi.py +++ b/RecoMET/METFilters/python/EcalDeadCellTriggerPrimitiveFilter_cfi.py @@ -12,7 +12,7 @@ verbose = cms.int32( 1 ), tpDigiCollection = cms.InputTag("ecalTPSkimNA"), - etValToBeFlagged = cms.double(63.75), + etValToBeFlagged = cms.double(127.5), ebReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsEB"), eeReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsEE"), diff --git a/RecoMET/METFilters/python/metFilters_cff.py b/RecoMET/METFilters/python/metFilters_cff.py index 51baff0e72f9d..b8c621f81e60f 100644 --- a/RecoMET/METFilters/python/metFilters_cff.py +++ b/RecoMET/METFilters/python/metFilters_cff.py @@ -13,6 +13,9 @@ ## The ECAL dead cell trigger primitive filter _______________________________|| from RecoMET.METFilters.EcalDeadCellTriggerPrimitiveFilter_cfi import * +## The ECAL dead cell trigger primitive filter _______________________________|| +from RecoMET.METFilters.EcalDeadCellBoundaryEnergyFilter_cfi import * + ## The EE bad SuperCrystal filter ____________________________________________|| from RecoMET.METFilters.eeBadScFilter_cfi import * @@ -22,7 +25,7 @@ ## The Good vertices collection needed by the tracking failure filter ________|| goodVertices = cms.EDFilter( "VertexSelector", - filter = cms.bool(False), + filter = cms.bool(True), src = cms.InputTag("offlinePrimaryVertices"), cut = cms.string("!isFake && ndof > 4 && abs(z) <= 24 && position.rho < 2") ) @@ -30,6 +33,12 @@ ## The tracking failure filter _______________________________________________|| from RecoMET.METFilters.trackingFailureFilter_cfi import * +##noscraping (outdated)_______________________________________________________|| +from RecoMET.METFilters.scrapingFilter_cfi import * + +## The primary vertex filter__ _______________________________________________|| +from RecoMET.METFilters.primaryVertexFilter_cfi import * + ## The tracking POG filters __________________________________________________|| from RecoMET.METFilters.trackingPOGFilters_cff import * ## NOTE: to make tagging mode of the tracking POG filters (three of them), please do: @@ -46,11 +55,13 @@ metFilters = cms.Sequence( HBHENoiseFilterResultProducer * HBHENoiseFilter * + primaryVertexFilter* +# HBHENoiseIsoFilter* CSCTightHaloFilter * - hcalLaserEventFilter * - EcalDeadCellTriggerPrimitiveFilter * - goodVertices * trackingFailureFilter * - eeBadScFilter * - 
ecalLaserCorrFilter * - trkPOGFilters +# hcalLaserEventFilter * + EcalDeadCellTriggerPrimitiveFilter* +# *goodVertices * trackingFailureFilter * + eeBadScFilter +# ecalLaserCorrFilter * +# trkPOGFilters ) diff --git a/RecoMET/METFilters/python/primaryVertexFilter_cfi.py b/RecoMET/METFilters/python/primaryVertexFilter_cfi.py new file mode 100644 index 0000000000000..ce3f9afad0625 --- /dev/null +++ b/RecoMET/METFilters/python/primaryVertexFilter_cfi.py @@ -0,0 +1,9 @@ + +import FWCore.ParameterSet.Config as cms + +primaryVertexFilter = cms.EDFilter("GoodVertexFilter", + vertexCollection = cms.InputTag('offlinePrimaryVertices'), + minimumNDOF = cms.uint32(4) , + maxAbsZ = cms.double(24), + maxd0 = cms.double(2) + ) diff --git a/RecoMET/METFilters/python/scrapingFilter_cfi.py b/RecoMET/METFilters/python/scrapingFilter_cfi.py new file mode 100644 index 0000000000000..66a914c81d6ed --- /dev/null +++ b/RecoMET/METFilters/python/scrapingFilter_cfi.py @@ -0,0 +1,7 @@ +import FWCore.ParameterSet.Config as cms +noscraping = cms.EDFilter("FilterOutScraping", + applyfilter = cms.untracked.bool(True), + debugOn = cms.untracked.bool(False), + numtrack = cms.untracked.uint32(10), + thresh = cms.untracked.double(0.25) +) diff --git a/RecoTauTag/Configuration/python/HPSPFTaus_cff.py b/RecoTauTag/Configuration/python/HPSPFTaus_cff.py index 62c2dce2c1c6e..5718fc487c720 100644 --- a/RecoTauTag/Configuration/python/HPSPFTaus_cff.py +++ b/RecoTauTag/Configuration/python/HPSPFTaus_cff.py @@ -25,8 +25,8 @@ # Load PFjet input parameters from RecoTauTag.RecoTau.PFRecoTauPFJetInputs_cfi import PFRecoTauPFJetInputs -# deltaBeta correction factor calculated for taus from ak5PFJets (Run I) -ak5dBetaCorrection=0.0772/0.1687 +# deltaBeta correction factor +ak4dBetaCorrection=0.20 # Load MVAs from SQLlite file/prep. 
DB from RecoTauTag.Configuration.loadRecoTauTagMVAsFromPrepDB_cfi import * @@ -108,7 +108,7 @@ applyOccupancyCut = False, applySumPtCut = True, ) -hpsPFTauDiscriminationByVLooseIsolationDBSumPtCorr.maximumSumPtCut=hpsPFTauDiscriminationByVLooseIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minGammaEt +hpsPFTauDiscriminationByVLooseIsolationDBSumPtCorr.maximumSumPtCut = hpsPFTauDiscriminationByVLooseIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minGammaEt hpsPFTauDiscriminationByLooseIsolationDBSumPtCorr = hpsPFTauDiscriminationByLooseIsolation.clone( deltaBetaPUTrackPtCutOverride = cms.double(0.5), @@ -118,7 +118,7 @@ applyOccupancyCut = False, applySumPtCut = True, ) -hpsPFTauDiscriminationByLooseIsolationDBSumPtCorr.maximumSumPtCut=hpsPFTauDiscriminationByLooseIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minGammaEt +hpsPFTauDiscriminationByLooseIsolationDBSumPtCorr.maximumSumPtCut = hpsPFTauDiscriminationByLooseIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minGammaEt hpsPFTauDiscriminationByMediumIsolationDBSumPtCorr = hpsPFTauDiscriminationByMediumIsolation.clone( deltaBetaPUTrackPtCutOverride = cms.double(0.5), @@ -128,17 +128,17 @@ applyOccupancyCut = False, applySumPtCut = True, ) -hpsPFTauDiscriminationByMediumIsolationDBSumPtCorr.maximumSumPtCut=hpsPFTauDiscriminationByMediumIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minGammaEt +hpsPFTauDiscriminationByMediumIsolationDBSumPtCorr.maximumSumPtCut = hpsPFTauDiscriminationByMediumIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minGammaEt hpsPFTauDiscriminationByTightIsolationDBSumPtCorr = hpsPFTauDiscriminationByTightIsolation.clone( deltaBetaPUTrackPtCutOverride = cms.double(0.5), applyDeltaBetaCorrection = True, isoConeSizeForDeltaBeta = 0.8, - deltaBetaFactor = "%0.4f"%(ak5dBetaCorrection), + deltaBetaFactor = "%0.4f"%(ak4dBetaCorrection), applyOccupancyCut = False, applySumPtCut = True, ) -hpsPFTauDiscriminationByTightIsolationDBSumPtCorr.maximumSumPtCut=hpsPFTauDiscriminationByTightIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minGammaEt +hpsPFTauDiscriminationByTightIsolationDBSumPtCorr.maximumSumPtCut = hpsPFTauDiscriminationByTightIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minGammaEt hpsPFTauDiscriminationByIsolationSeqDBSumPtCorr = cms.Sequence( hpsPFTauDiscriminationByVLooseIsolationDBSumPtCorr* @@ -150,10 +150,10 @@ hpsPFTauDiscriminationByVLooseCombinedIsolationDBSumPtCorr = hpsPFTauDiscriminationByVLooseIsolationDBSumPtCorr.clone( ApplyDiscriminationByTrackerIsolation = True, ApplyDiscriminationByECALIsolation = True, - deltaBetaFactor = "%0.4f"%((0.09/0.25)*(ak5dBetaCorrection)), + deltaBetaFactor = "%0.4f"%((0.09/0.25)*(ak4dBetaCorrection)), applyOccupancyCut = False, applySumPtCut = True, - maximumSumPtCut = 3.0, + maximumSumPtCut = 3.5, Prediscriminants = requireDecayMode.clone() ) hpsPFTauDiscriminationByVLooseCombinedIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minTrackPt = 0.5 @@ -162,10 +162,10 @@ hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr = hpsPFTauDiscriminationByLooseIsolationDBSumPtCorr.clone( ApplyDiscriminationByTrackerIsolation = True, ApplyDiscriminationByECALIsolation = True, - deltaBetaFactor = "%0.4f"%(ak5dBetaCorrection), + deltaBetaFactor = "%0.4f"%(ak4dBetaCorrection), applyOccupancyCut = False, applySumPtCut = True, - maximumSumPtCut = 2.0, + maximumSumPtCut = 2.5, Prediscriminants = requireDecayMode.clone() ) hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minTrackPt 
= 0.5 @@ -191,10 +191,10 @@ hpsPFTauDiscriminationByMediumCombinedIsolationDBSumPtCorr = hpsPFTauDiscriminationByMediumIsolationDBSumPtCorr.clone( ApplyDiscriminationByTrackerIsolation = True, ApplyDiscriminationByECALIsolation = True, - deltaBetaFactor = "%0.4f"%(ak5dBetaCorrection), + deltaBetaFactor = "%0.4f"%(ak4dBetaCorrection), applyOccupancyCut = False, applySumPtCut = True, - maximumSumPtCut = 1.0, + maximumSumPtCut = 1.5, Prediscriminants = requireDecayMode.clone() ) hpsPFTauDiscriminationByMediumCombinedIsolationDBSumPtCorr.qualityCuts.isolationQualityCuts.minTrackPt = 0.5 @@ -203,7 +203,7 @@ hpsPFTauDiscriminationByTightCombinedIsolationDBSumPtCorr = hpsPFTauDiscriminationByTightIsolationDBSumPtCorr.clone( ApplyDiscriminationByTrackerIsolation = True, ApplyDiscriminationByECALIsolation = True, - deltaBetaFactor = "%0.4f"%(ak5dBetaCorrection), + deltaBetaFactor = "%0.4f"%(ak4dBetaCorrection), applyOccupancyCut = False, applySumPtCut = True, maximumSumPtCut = 0.8, @@ -534,8 +534,11 @@ hpsPFTauDiscriminationByTightCombinedIsolationDBSumPtCorr3Hits = hpsPFTauDiscriminationByTightCombinedIsolationDBSumPtCorr.clone() hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits.qualityCuts.isolationQualityCuts.minTrackHits = cms.uint32(3) +hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits.applyPhotonPtSumOutsideSignalConeCut = cms.bool(True) hpsPFTauDiscriminationByMediumCombinedIsolationDBSumPtCorr3Hits.qualityCuts.isolationQualityCuts.minTrackHits = cms.uint32(3) +hpsPFTauDiscriminationByMediumCombinedIsolationDBSumPtCorr3Hits.applyPhotonPtSumOutsideSignalConeCut = cms.bool(True) hpsPFTauDiscriminationByTightCombinedIsolationDBSumPtCorr3Hits.qualityCuts.isolationQualityCuts.minTrackHits = cms.uint32(3) +hpsPFTauDiscriminationByTightCombinedIsolationDBSumPtCorr3Hits.applyPhotonPtSumOutsideSignalConeCut = cms.bool(True) hpsPFTauDiscriminationByRawCombinedIsolationDBSumPtCorr3Hits = hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits.clone( applySumPtCut = False, @@ -549,18 +552,59 @@ hpsPFTauDiscriminationByRawCombinedIsolationDBSumPtCorr3Hits ) +hpsPFTauDiscriminationByLoosePileupWeightedIsolation3Hits = hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits.clone( + ApplyDiscriminationByECALIsolation = cms.bool(False), + applyDeltaBetaCorrection = cms.bool(False), + ApplyDiscriminationByWeightedECALIsolation = cms.bool(True), + UseAllPFCandsForWeights = cms.bool(True), + applyFootprintCorrection = cms.bool(True), + applyPhotonPtSumOutsideSignalConeCut = cms.bool(True) +) + +hpsPFTauDiscriminationByMediumPileupWeightedIsolation3Hits = hpsPFTauDiscriminationByLoosePileupWeightedIsolation3Hits.clone( + maximumSumPtCut = hpsPFTauDiscriminationByMediumCombinedIsolationDBSumPtCorr3Hits.maximumSumPtCut +) + +hpsPFTauDiscriminationByTightPileupWeightedIsolation3Hits = hpsPFTauDiscriminationByLoosePileupWeightedIsolation3Hits.clone( + maximumSumPtCut = hpsPFTauDiscriminationByTightCombinedIsolationDBSumPtCorr3Hits.maximumSumPtCut +) + +hpsPFTauDiscriminationByPhotonPtSumOutsideSignalCone = hpsPFTauDiscriminationByLoosePileupWeightedIsolation3Hits.clone( + applySumPtCut = cms.bool(False) +) + +hpsPFTauDiscriminationByRawPileupWeightedIsolation3Hits = hpsPFTauDiscriminationByLoosePileupWeightedIsolation3Hits.clone( + Prediscriminants = cms.PSet( + BooleanOperator = cms.string("and"), + decayMode = cms.PSet( + Producer = cms.InputTag('hpsPFTauDiscriminationByPhotonPtSumOutsideSignalCone'), + cut = cms.double(0.5) + ) + ), + applySumPtCut = cms.bool(False), + 
storeRawSumPt = cms.bool(True) +) + +hpsPFTauDiscriminationByPileupWeightedIsolationSeq3Hits = cms.Sequence( + hpsPFTauDiscriminationByLoosePileupWeightedIsolation3Hits* + hpsPFTauDiscriminationByMediumPileupWeightedIsolation3Hits* + hpsPFTauDiscriminationByTightPileupWeightedIsolation3Hits* + hpsPFTauDiscriminationByPhotonPtSumOutsideSignalCone* + hpsPFTauDiscriminationByRawPileupWeightedIsolation3Hits +) + # Define the HPS selection discriminator used in cleaning hpsSelectionDiscriminator.PFTauProducer = cms.InputTag("combinatoricRecoTaus") from RecoTauTag.RecoTau.RecoTauCleaner_cfi import RecoTauCleaner -hpsPFTauProducerSansRefs=RecoTauCleaner.clone( - src=cms.InputTag("combinatoricRecoTaus") +hpsPFTauProducerSansRefs = RecoTauCleaner.clone( + src = cms.InputTag("combinatoricRecoTaus") ) from RecoTauTag.RecoTau.RecoTauPiZeroUnembedder_cfi import RecoTauPiZeroUnembedder -hpsPFTauProducer=RecoTauPiZeroUnembedder.clone( - src = cms.InputTag("hpsPFTauProducerSansRefs") +hpsPFTauProducer = RecoTauPiZeroUnembedder.clone( + src = cms.InputTag("hpsPFTauProducerSansRefs") ) @@ -604,7 +648,7 @@ ) from RecoTauTag.RecoTau.PFRecoTauDiscriminationByMVAIsolation2_cff import * -hpsPFTauMVA3IsolationChargedIsoPtSum = hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits.clone( +hpsPFTauChargedIsoPtSum = hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits.clone( PFTauProducer = cms.InputTag('hpsPFTauProducer'), Prediscriminants = requireDecayMode.clone(), ApplyDiscriminationByECALIsolation = cms.bool(False), @@ -617,12 +661,12 @@ isoConeSizeForDeltaBeta = cms.double(0.8), verbosity = cms.int32(0) ) -hpsPFTauMVA3IsolationNeutralIsoPtSum = hpsPFTauMVA3IsolationChargedIsoPtSum.clone( +hpsPFTauNeutralIsoPtSum = hpsPFTauChargedIsoPtSum.clone( ApplyDiscriminationByECALIsolation = cms.bool(True), ApplyDiscriminationByTrackerIsolation = cms.bool(False), verbosity = cms.int32(0) ) -hpsPFTauMVA3IsolationPUcorrPtSum = hpsPFTauMVA3IsolationChargedIsoPtSum.clone( +hpsPFTauPUcorrPtSum = hpsPFTauChargedIsoPtSum.clone( ApplyDiscriminationByECALIsolation = cms.bool(False), ApplyDiscriminationByTrackerIsolation = cms.bool(False), applyDeltaBetaCorrection = cms.bool(True), @@ -630,12 +674,24 @@ storeRawPUsumPt = cms.bool(True), verbosity = cms.int32(0) ) -hpsPFTauMVA3IsolationNeutralIsoPtSumWeight = hpsPFTauMVA3IsolationChargedIsoPtSum.clone( +hpsPFTauNeutralIsoPtSumWeight = hpsPFTauChargedIsoPtSum.clone( ApplyDiscriminationByWeightedECALIsolation = cms.bool(True), ApplyDiscriminationByTrackerIsolation = cms.bool(False), UseAllPFCandsForWeights = cms.bool(True), verbosity = cms.int32(0) ) +hpsPFTauFootprintCorrection = hpsPFTauChargedIsoPtSum.clone( + ApplyDiscriminationByTrackerIsolation = cms.bool(False), + storeRawSumPt = cms.bool(False), + storeRawFootprintCorrection = cms.bool(True), + verbosity = cms.int32(0) +) +hpsPFTauPhotonPtSumOutsideSignalCone = hpsPFTauChargedIsoPtSum.clone( + ApplyDiscriminationByTrackerIsolation = cms.bool(False), + storeRawSumPt = cms.bool(False), + storeRawPhotonSumPt_outsideSignalCone = cms.bool(True), + verbosity = cms.int32(0) +) hpsPFTauDiscriminationByIsolationMVA3oldDMwoLTraw = discriminationByIsolationMVA2raw.clone( PFTauProducer = cms.InputTag('hpsPFTauProducer'), @@ -644,9 +700,9 @@ mvaName = cms.string("RecoTauTag_tauIdMVAoldDMwoLTv1"), mvaOpt = cms.string("oldDMwoLT"), srcTauTransverseImpactParameters = cms.InputTag('hpsPFTauTransverseImpactParameters'), - srcChargedIsoPtSum = cms.InputTag('hpsPFTauMVA3IsolationChargedIsoPtSum'), - srcNeutralIsoPtSum = 
cms.InputTag('hpsPFTauMVA3IsolationNeutralIsoPtSum'), - srcPUcorrPtSum = cms.InputTag('hpsPFTauMVA3IsolationPUcorrPtSum'), + srcChargedIsoPtSum = cms.InputTag('hpsPFTauChargedIsoPtSum'), + srcNeutralIsoPtSum = cms.InputTag('hpsPFTauNeutralIsoPtSum'), + srcPUcorrPtSum = cms.InputTag('hpsPFTauPUcorrPtSum'), verbosity = cms.int32(0) ) hpsPFTauDiscriminationByVLooseIsolationMVA3oldDMwoLT = discriminationByIsolationMVA2VLoose.clone( @@ -758,11 +814,13 @@ hpsPFTauDiscriminationByVVTightIsolationMVA3newDMwLT = hpsPFTauDiscriminationByVLooseIsolationMVA3newDMwLT.clone() hpsPFTauDiscriminationByVVTightIsolationMVA3newDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAnewDMwLTv1_WPEff40") - hpsPFTauMVAIsolation2Seq = cms.Sequence( - hpsPFTauMVA3IsolationChargedIsoPtSum - + hpsPFTauMVA3IsolationNeutralIsoPtSum - + hpsPFTauMVA3IsolationPUcorrPtSum + hpsPFTauChargedIsoPtSum + + hpsPFTauNeutralIsoPtSum + + hpsPFTauPUcorrPtSum + + hpsPFTauNeutralIsoPtSumWeight + + hpsPFTauFootprintCorrection + + hpsPFTauPhotonPtSumOutsideSignalCone + hpsPFTauDiscriminationByIsolationMVA3oldDMwoLTraw + hpsPFTauDiscriminationByVLooseIsolationMVA3oldDMwoLT + hpsPFTauDiscriminationByLooseIsolationMVA3oldDMwoLT @@ -813,13 +871,14 @@ #hpsPFTauDiscriminationByIsolationSeqRhoCorr* #hpsPFTauDiscriminationByIsolationSeqCustomRhoCorr* hpsPFTauDiscriminationByIsolationSeqDBSumPtCorr* - + hpsPFTauDiscriminationByRawCombinedIsolationDBSumPtCorr* hpsPFTauDiscriminationByRawChargedIsolationDBSumPtCorr* hpsPFTauDiscriminationByRawGammaIsolationDBSumPtCorr* hpsPFTauDiscriminationByCombinedIsolationSeqDBSumPtCorr* hpsPFTauDiscriminationByCombinedIsolationSeqDBSumPtCorr3Hits* + hpsPFTauDiscriminationByPileupWeightedIsolationSeq3Hits* hpsPFTauDiscriminationByLooseElectronRejection* hpsPFTauDiscriminationByMediumElectronRejection* diff --git a/RecoTauTag/Configuration/python/updateHPSPFTaus_cff.py b/RecoTauTag/Configuration/python/updateHPSPFTaus_cff.py index 49c090d733c84..d6c2be075e4df 100644 --- a/RecoTauTag/Configuration/python/updateHPSPFTaus_cff.py +++ b/RecoTauTag/Configuration/python/updateHPSPFTaus_cff.py @@ -11,4 +11,30 @@ ''' + +from RecoTauTag.RecoTau.PFRecoTauDiscriminationByMVAIsolation2_cff import * +from RecoTauTag.RecoTau.PFRecoTauDiscriminationByIsolation_cfi import * +from RecoTauTag.RecoTau.PFRecoTauDiscriminationByLeadingTrackFinding_cfi import * +from RecoTauTag.RecoTau.PFRecoTauDiscriminationAgainstElectron_cfi import * +from RecoTauTag.RecoTau.PFRecoTauDiscriminationAgainstElectronMVA5_cfi import * +from RecoTauTag.RecoTau.PFRecoTauDiscriminationAgainstElectronDeadECAL_cfi import * +from RecoTauTag.RecoTau.PFRecoTauDiscriminationAgainstMuon_cfi import * +from RecoTauTag.RecoTau.PFRecoTauDiscriminationAgainstMuon2_cfi import * +from RecoTauTag.RecoTau.PFRecoTauDiscriminationAgainstMuonMVA_cfi import * + +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauChargedIsoPtSum +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauNeutralIsoPtSum +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauPUcorrPtSum +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauNeutralIsoPtSumWeight +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauFootprintCorrection +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauPhotonPtSumOutsideSignalCone +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauDiscriminationByRawCombinedIsolationDBSumPtCorr3Hits +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauDiscriminationByLoosePileupWeightedIsolation3Hits +from 
RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauDiscriminationByMediumPileupWeightedIsolation3Hits +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauDiscriminationByTightPileupWeightedIsolation3Hits +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauDiscriminationByPhotonPtSumOutsideSignalCone +from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauDiscriminationByRawPileupWeightedIsolation3Hits + + updateHPSPFTaus = cms.Sequence() + diff --git a/RecoTauTag/RecoTau/interface/RecoTauConstructor.h b/RecoTauTag/RecoTau/interface/RecoTauConstructor.h index 7fa1345a471a0..1f68ca8a18822 100644 --- a/RecoTauTag/RecoTau/interface/RecoTauConstructor.h +++ b/RecoTauTag/RecoTau/interface/RecoTauConstructor.h @@ -30,6 +30,7 @@ #include "DataFormats/JetReco/interface/PFJetCollection.h" #include "DataFormats/TauReco/interface/PFRecoTauChargedHadron.h" #include "DataFormats/TauReco/interface/RecoTauPiZero.h" +#include "CommonTools/Utils/interface/StringObjectFunction.h" #include "boost/shared_ptr.hpp" #include @@ -53,7 +54,10 @@ class RecoTauConstructor { /// Constructor with PFCandidate Handle RecoTauConstructor(const PFJetRef& jetRef, const edm::Handle& pfCands, - bool copyGammasFromPiZeros=false); + bool copyGammasFromPiZeros = false, + const StringObjectFunction* signalConeSize = 0, + double minAbsPhotonSumPt_insideSignalCone = 2.5, double minRelPhotonSumPt_insideSignalCone = 0., + double minAbsPhotonSumPt_outsideSignalCone = 1.e+9, double minRelPhotonSumPt_outsideSignalCone = 1.e+9); /* * Code to set leading candidates. These are just wrappers about @@ -133,6 +137,13 @@ class RecoTauConstructor { typedef std::map SortedCollectionMap; bool copyGammas_; + + const StringObjectFunction* signalConeSize_; + double minAbsPhotonSumPt_insideSignalCone_; + double minRelPhotonSumPt_insideSignalCone_; + double minAbsPhotonSumPt_outsideSignalCone_; + double minRelPhotonSumPt_outsideSignalCone_; + // Retrieve collection associated to signal/iso and type std::vector* getCollection(Region region, ParticleType type); SortedListPtr getSortedCollection(Region region, ParticleType type); diff --git a/RecoTauTag/RecoTau/interface/TauDiscriminationProducerBase.h b/RecoTauTag/RecoTau/interface/TauDiscriminationProducerBase.h index f5e17c02eba2e..c933264b2b0c2 100644 --- a/RecoTauTag/RecoTau/interface/TauDiscriminationProducerBase.h +++ b/RecoTauTag/RecoTau/interface/TauDiscriminationProducerBase.h @@ -85,6 +85,7 @@ class TauDiscriminationProducerBase : public edm::stream::EDProducer<> { evt.getByToken(disc_token, handle); }; }; + protected: //value given to taus that fail prediscriminants diff --git a/RecoTauTag/RecoTau/plugins/PFRecoTauDiscriminationByHPSSelection.cc b/RecoTauTag/RecoTau/plugins/PFRecoTauDiscriminationByHPSSelection.cc index 7b147717f4d67..24322c7606dc3 100644 --- a/RecoTauTag/RecoTau/plugins/PFRecoTauDiscriminationByHPSSelection.cc +++ b/RecoTauTag/RecoTau/plugins/PFRecoTauDiscriminationByHPSSelection.cc @@ -47,19 +47,19 @@ class PFRecoTauDiscriminationByHPSSelection : public PFTauDiscriminationProducer typedef std::pair DoublePair; typedef std::map DecayModeCutMap; - TauFunc signalConeFun_; DecayModeCutMap decayModeCuts_; double matchingCone_; double minPt_; bool requireTauChargedHadronsToBeChargedPFCands_; + int minPixelHits_; + int verbosity_; }; PFRecoTauDiscriminationByHPSSelection::PFRecoTauDiscriminationByHPSSelection(const edm::ParameterSet& pset) - : PFTauDiscriminationProducerBase(pset), - signalConeFun_(pset.getParameter("coneSizeFormula")) + : 
PFTauDiscriminationProducerBase(pset) { // Get the matchign cut matchingCone_ = pset.getParameter("matchingCone"); @@ -103,8 +103,11 @@ PFRecoTauDiscriminationByHPSSelection::PFRecoTauDiscriminationByHPSSelection(con )); } requireTauChargedHadronsToBeChargedPFCands_ = pset.getParameter("requireTauChargedHadronsToBeChargedPFCands"); + minPixelHits_ = pset.getParameter("minPixelHits"); verbosity_ = pset.exists("verbosity") ? pset.getParameter("verbosity") : 0; + + } PFRecoTauDiscriminationByHPSSelection::~PFRecoTauDiscriminationByHPSSelection() @@ -214,7 +217,7 @@ PFRecoTauDiscriminationByHPSSelection::discriminate(const reco::PFTauRef& tau) c // Check if tau fails mass cut double maxMass_value = (*massWindow.maxMass_)(*tau); - if ( tauP4.M() > maxMass_value || tauP4.M() < massWindow.minMass_ ) { + if ( !((tauP4.M() - tau->bendCorrMass()) < maxMass_value && (tauP4.M() + tau->bendCorrMass()) > massWindow.minMass_) ) { if ( verbosity_ ) { edm::LogPrint("PFTauByHPSSelect") << " fails tau mass-window cut." ; } @@ -240,9 +243,9 @@ PFRecoTauDiscriminationByHPSSelection::discriminate(const reco::PFTauRef& tau) c } // Check if tau passes cone cut - double cone_size = signalConeFun_(*tau); + double cone_size = tau->signalConeSize(); // Check if any charged objects fail the signal cone cut - BOOST_FOREACH(const reco::PFRecoTauChargedHadron& cand, tau->signalTauChargedHadronCandidates()) { + for (auto const& cand : tau->signalTauChargedHadronCandidates()) { if ( verbosity_ ) { edm::LogPrint("PFTauByHPSSelect") << "dR(tau, signalPFChargedHadr) = " << deltaR(cand.p4(), tauP4) ; } @@ -254,11 +257,14 @@ PFRecoTauDiscriminationByHPSSelection::discriminate(const reco::PFTauRef& tau) c } } // Now check the pizeros - BOOST_FOREACH(const reco::RecoTauPiZero& cand, tau->signalPiZeroCandidates()) { + for (auto const& cand : tau->signalPiZeroCandidates()) { + double dEta = std::max(0., fabs(cand.eta() - tauP4.eta()) - cand.bendCorrEta()); + double dPhi = std::max(0., std::abs(reco::deltaPhi(cand.phi(), tauP4.phi())) - cand.bendCorrPhi()); + double dR2 = dEta*dEta + dPhi*dPhi; if ( verbosity_ ) { - edm::LogPrint("PFTauByHPSSelect") << "dR(tau, signalPiZero) = " << deltaR(cand.p4(), tauP4) ; + edm::LogPrint("PFTauByHPSSelect") << "dR2(tau, signalPiZero) = " << dR2 ; } - if ( deltaR(cand.p4(), tauP4) > cone_size ) { + if ( dR2 > cone_size*cone_size ) { if ( verbosity_ ) { edm::LogPrint("PFTauByHPSSelect") << " fails signal-cone cut for strip(s)." ; } @@ -284,6 +290,27 @@ PFRecoTauDiscriminationByHPSSelection::discriminate(const reco::PFTauRef& tau) c } } } + + if ( minPixelHits_ > 0 ) { + int numPixelHits = 0; + const std::vector& chargedHadrCands = tau->signalPFChargedHadrCands(); + for ( std::vector::const_iterator chargedHadrCand = chargedHadrCands.begin(); + chargedHadrCand != chargedHadrCands.end(); ++chargedHadrCand ) { + const reco::Track* track = 0; + if ( (*chargedHadrCand)->trackRef().isNonnull() ) track = (*chargedHadrCand)->trackRef().get(); + else if ( (*chargedHadrCand)->gsfTrackRef().isNonnull() ) track = (*chargedHadrCand)->gsfTrackRef().get(); + if ( track ) { + numPixelHits += track->hitPattern().numberOfValidPixelHits(); + } + } + if ( !(numPixelHits >= minPixelHits_) ) { + if ( verbosity_ ) { + edm::LogPrint("PFTauByHPSSelect") << " fails cut on sum of pixel hits." ; + } + return 0.0; + } + } + // Otherwise, we pass! 
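
For reference, the pixel-hit requirement introduced above is steered by a single integer read from the discriminator configuration; a value of zero (or negative) leaves the cut disabled, matching the `minPixelHits_ > 0` guard in the code. A minimal configuration sketch follows; the module label matches the one used elsewhere in HPSPFTaus_cff, but the concrete value and the omitted parameters are illustrative assumptions, not taken from this patch.

    import FWCore.ParameterSet.Config as cms

    # Sketch only: the HPS selection discriminator gains a minPixelHits parameter.
    # Pre-existing parameters (decayModes, matchingCone, minTauPt, qualityCuts, ...) are omitted;
    # 0 disables the new cut.
    hpsSelectionDiscriminator = cms.EDProducer("PFRecoTauDiscriminationByHPSSelection",
        minPixelHits = cms.int32(0)
    )
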
if ( verbosity_ ) { diff --git a/RecoTauTag/RecoTau/plugins/PFRecoTauDiscriminationByIsolation.cc b/RecoTauTag/RecoTau/plugins/PFRecoTauDiscriminationByIsolation.cc index 8738f1dbb6cb7..6599f756456af 100644 --- a/RecoTauTag/RecoTau/plugins/PFRecoTauDiscriminationByIsolation.cc +++ b/RecoTauTag/RecoTau/plugins/PFRecoTauDiscriminationByIsolation.cc @@ -4,6 +4,8 @@ #include "RecoTauTag/RecoTau/interface/RecoTauQualityCuts.h" #include "RecoTauTag/RecoTau/interface/RecoTauVertexAssociator.h" #include "RecoTauTag/RecoTau/interface/ConeTools.h" +#include "CommonTools/Utils/interface/StringCutObjectSelector.h" +#include "CommonTools/Utils/interface/StringObjectFunction.h" #include "DataFormats/VertexReco/interface/Vertex.h" #include "TMath.h" @@ -56,6 +58,10 @@ class PFRecoTauDiscriminationByIsolation : public PFTauDiscriminationProducerBas pset.getParameter("storeRawSumPt") : false; storeRawPUsumPt_ = pset.exists("storeRawPUsumPt") ? pset.getParameter("storeRawPUsumPt") : false; + storeRawFootprintCorrection_ = pset.exists("storeRawFootprintCorrection") ? + pset.getParameter("storeRawFootprintCorrection") : false; + storeRawPhotonSumPt_outsideSignalCone_ = pset.exists("storeRawPhotonSumPt_outsideSignalCone") ? + pset.getParameter("storeRawPhotonSumPt_outsideSignalCone") : false; // Sanity check on requested options. We can't apply cuts and store the // raw output at the same time @@ -68,21 +74,22 @@ class PFRecoTauDiscriminationByIsolation : public PFTauDiscriminationProducerBas } // sanity check2 - can't use weighted and unweighted iso at the same time - if(includeGammas_ && calculateWeights_) - { - throw cms::Exception("BasIsoConfig") - << "Both 'ApplyDiscriminationByECALIsolation' and 'ApplyDiscriminationByWeightedECALIsolation' " - << "have been set to true. These options are mutually exclusive."; - } + if ( includeGammas_ && calculateWeights_ ) { + throw cms::Exception("BasIsoConfig") + << "Both 'ApplyDiscriminationByECALIsolation' and 'ApplyDiscriminationByWeightedECALIsolation' " + << "have been set to true. These options are mutually exclusive."; + } // Can only store one type int numStoreOptions = 0; - if ( storeRawSumPt_ ) ++numStoreOptions; - if ( storeRawOccupancy_ ) ++numStoreOptions; - if ( storeRawPUsumPt_ ) ++numStoreOptions; + if ( storeRawSumPt_ ) ++numStoreOptions; + if ( storeRawOccupancy_ ) ++numStoreOptions; + if ( storeRawPUsumPt_ ) ++numStoreOptions; + if ( storeRawFootprintCorrection_ ) ++numStoreOptions; + if ( storeRawPhotonSumPt_outsideSignalCone_ ) ++numStoreOptions; if ( numStoreOptions > 1 ) { throw cms::Exception("BadIsoConfig") - << "Both 'store sum pt' and 'store occupancy' options are set." + << "Multiple 'store sum pt' and/or 'store occupancy' options are set." << " These options are mutually exclusive."; } @@ -92,6 +99,26 @@ class PFRecoTauDiscriminationByIsolation : public PFTauDiscriminationProducerBas customIsoCone_ = -1; } + applyPhotonPtSumOutsideSignalConeCut_ = ( pset.exists("applyPhotonPtSumOutsideSignalConeCut") ) ? + pset.getParameter("applyPhotonPtSumOutsideSignalConeCut") : false; + if ( applyPhotonPtSumOutsideSignalConeCut_ ) { + maxAbsPhotonSumPt_outsideSignalCone_ = pset.getParameter("maxAbsPhotonSumPt_outsideSignalCone"); + maxRelPhotonSumPt_outsideSignalCone_ = pset.getParameter("maxRelPhotonSumPt_outsideSignalCone"); + } + + applyFootprintCorrection_ = ( pset.exists("applyFootprintCorrection") ) ? 
+ pset.getParameter("applyFootprintCorrection") : false; + if ( applyFootprintCorrection_ || storeRawFootprintCorrection_ ) { + edm::VParameterSet cfgFootprintCorrections = pset.getParameter("footprintCorrections"); + for ( edm::VParameterSet::const_iterator cfgFootprintCorrection = cfgFootprintCorrections.begin(); + cfgFootprintCorrection != cfgFootprintCorrections.end(); ++cfgFootprintCorrection ) { + std::string selection = cfgFootprintCorrection->getParameter("selection"); + std::string offset = cfgFootprintCorrection->getParameter("offset"); + std::unique_ptr footprintCorrection(new FootprintCorrection(selection, offset)); + footprintCorrections_.push_back(std::move(footprintCorrection)); + } + } + // Get the quality cuts specific to the isolation region edm::ParameterSet isolationQCuts = qualityCutsPSet_.getParameterSet( "isolationQualityCuts"); @@ -99,7 +126,7 @@ class PFRecoTauDiscriminationByIsolation : public PFTauDiscriminationProducerBas qcuts_.reset(new tau::RecoTauQualityCuts(isolationQCuts)); vertexAssociator_.reset( - new tau::RecoTauVertexAssociator(qualityCutsPSet_,consumesCollector())); + new tau::RecoTauVertexAssociator(qualityCutsPSet_,consumesCollector())); applyDeltaBeta_ = pset.exists("applyDeltaBetaCorrection") ? pset.getParameter("applyDeltaBetaCorrection") : false; @@ -130,9 +157,9 @@ class PFRecoTauDiscriminationByIsolation : public PFTauDiscriminationProducerBas puFactorizedIsoQCuts.second)); pfCandSrc_ = pset.getParameter("particleFlowSrc"); - pfCand_token=consumes(pfCandSrc_); + pfCand_token = consumes(pfCandSrc_); vertexSrc_ = pset.getParameter("vertexSrc"); - vertex_token=consumes(vertexSrc_); + vertex_token = consumes(vertexSrc_); deltaBetaCollectionCone_ = pset.getParameter( "isoConeSizeForDeltaBeta"); std::string deltaBetaFactorFormula = @@ -157,7 +184,9 @@ class PFRecoTauDiscriminationByIsolation : public PFTauDiscriminationProducerBas pset.getParameter("verbosity") : 0; } - ~PFRecoTauDiscriminationByIsolation(){} + ~PFRecoTauDiscriminationByIsolation() + { + } void beginEvent(const edm::Event& evt, const edm::EventSetup& evtSetup) override; double discriminate(const PFTauRef& pfTau) const override; @@ -195,10 +224,29 @@ class PFRecoTauDiscriminationByIsolation : public PFTauDiscriminationProducerBas double offsetRelativeSumPt_; double customIsoCone_; + bool applyPhotonPtSumOutsideSignalConeCut_; + double maxAbsPhotonSumPt_outsideSignalCone_; + double maxRelPhotonSumPt_outsideSignalCone_; + + bool applyFootprintCorrection_; + struct FootprintCorrection + { + FootprintCorrection(const std::string& selection, const std::string& offset) + : selection_(selection), + offset_(offset) + {} + ~FootprintCorrection() {} + StringCutObjectSelector selection_; + StringObjectFunction offset_; + }; + std::vector > footprintCorrections_; + // Options to store the raw value in the discriminator instead of boolean pass/fail flag bool storeRawOccupancy_; bool storeRawSumPt_; bool storeRawPUsumPt_; + bool storeRawFootprintCorrection_; + bool storeRawPhotonSumPt_outsideSignalCone_; /* ********************************************************************** **** Pileup Subtraction Parameters *********************************** @@ -360,54 +408,51 @@ PFRecoTauDiscriminationByIsolation::discriminate(const PFTauRef& pfTau) const LogTrace("discriminate") << "After track cuts: " << allPU.size() ; // Now apply the rest of the cuts, like pt, and TIP, tracker hits, etc - if(!useAllPFCands_){ - std::vector cleanPU = - pileupQcutsGeneralQCuts_->filterCandRefs(allPU); + if ( 
!useAllPFCands_ ) { + std::vector cleanPU = + pileupQcutsGeneralQCuts_->filterCandRefs(allPU); std::vector cleanNPU = pileupQcutsGeneralQCuts_->filterCandRefs(allNPU); - LogTrace("discriminate") << "After cleaning cuts: " << cleanPU.size() ; - - // Only select PU tracks inside the isolation cone. - DRFilter deltaBetaFilter(pfTau->p4(), 0, deltaBetaCollectionCone_); - for(auto const & cand : cleanPU) { - if ( deltaBetaFilter(cand) ) isoPU_.push_back(cand); - } - - for(auto const & cand : cleanNPU) { - if ( deltaBetaFilter(cand) ) chPV_.push_back(cand); + // Only select PU tracks inside the isolation cone. + DRFilter deltaBetaFilter(pfTau->p4(), 0, deltaBetaCollectionCone_); + for ( auto const & cand : cleanPU ) { + if ( deltaBetaFilter(cand) ) isoPU_.push_back(cand); + } + + for ( auto const & cand : cleanNPU ) { + if ( deltaBetaFilter(cand) ) chPV_.push_back(cand); } LogTrace("discriminate") << "After cone cuts: " << isoPU_.size() << " " << chPV_.size() ; - }else{ - isoPU_=allPU; - chPV_= allNPU; + } else { + isoPU_ = allPU; + chPV_ = allNPU; } } - if (calculateWeights_) - { - for( auto const & isoObject : isoNeutral_ ) { - if(isoObject->charge() !=0){ - // weight only neutral objects - isoNeutralWeight_.push_back(*isoObject); - continue; - } - - double eta=isoObject->eta(); - double phi=isoObject->phi(); - double sumNPU = 0.5*log(weightedSum(chPV_,eta,phi)); - - double sumPU = 0.5*log(weightedSum(isoPU_,eta,phi)); - PFCandidate neutral = *isoObject; - if (sumNPU+sumPU>0) neutral.setP4(((sumNPU)/(sumNPU+sumPU))*neutral.p4()); - - isoNeutralWeight_.push_back(neutral); - } + if ( calculateWeights_ ) { + for ( auto const & isoObject : isoNeutral_ ) { + if ( isoObject->charge() != 0 ) { + // weight only neutral objects + isoNeutralWeight_.push_back(*isoObject); + continue; } + double eta = isoObject->eta(); + double phi = isoObject->phi(); + double sumNPU = 0.5*log(weightedSum(chPV_, eta, phi)); + + double sumPU = 0.5*log(weightedSum(isoPU_, eta, phi)); + PFCandidate neutral = (*isoObject); + if ( (sumNPU + sumPU) > 0 ) neutral.setP4(((sumNPU)/(sumNPU + sumPU))*neutral.p4()); + + isoNeutralWeight_.push_back(neutral); + } + } + // Check if we want a custom iso cone if ( customIsoCone_ >= 0. 
) { DRFilter filter(pfTau->p4(), 0, customIsoCone_); @@ -451,46 +496,61 @@ PFRecoTauDiscriminationByIsolation::discriminate(const PFTauRef& pfTau) const failsOccupancyCut = ( nOccupants > maximumOccupancy_ ); - double totalPt = 0.0; - double puPt = 0.0; + double footprintCorrection_value = 0.; + if ( applyFootprintCorrection_ || storeRawFootprintCorrection_ ) { + for ( std::vector >::const_iterator footprintCorrection = footprintCorrections_.begin(); + footprintCorrection != footprintCorrections_.end(); ++footprintCorrection ) { + if ( (*footprintCorrection)->selection_(*pfTau) ) { + footprintCorrection_value = (*footprintCorrection)->offset_(*pfTau); + } + } + } + + double totalPt = 0.; + double puPt = 0.; //--- Sum PT requirement if ( applySumPtCut_ || applyRelativeSumPtCut_ || storeRawSumPt_ || storeRawPUsumPt_ ) { - double chargedPt = 0.0; - double neutralPt = 0.0; - double weightedNeutralPt = 0.0; - for( auto const & isoObject : isoCharged_ ) { + double chargedPt = 0.; + double neutralPt = 0.; + double weightedNeutralPt = 0.; + for ( auto const & isoObject : isoCharged_ ) { chargedPt += isoObject->pt(); } - if(!calculateWeights_){ - for( auto const & isoObject : isoNeutral_ ) { + if ( !calculateWeights_ ) { + for ( auto const & isoObject : isoNeutral_ ) { neutralPt += isoObject->pt(); } - }else{ - for( auto const & isoObject : isoNeutralWeight_){ - weightedNeutralPt+=isoObject.pt(); + } else { + for ( auto const & isoObject : isoNeutralWeight_ ) { + weightedNeutralPt += isoObject.pt(); } } - for( auto const & isoObject : isoPU_ ) { + for ( auto const & isoObject : isoPU_ ) { puPt += isoObject->pt(); } - LogTrace("discriminate") << "chargedPt = " << chargedPt ; - LogTrace("discriminate") << "neutralPt = " << neutralPt ; - LogTrace("discriminate") << "weighted neutral Pt = " << weightedNeutralPt ; - LogTrace("discriminate") << "puPt = " << puPt << " (delta-beta corr. = " << (deltaBetaFactorThisEvent_*puPt) << ")" ; - if( calculateWeights_) { + LogTrace("discriminate") << "chargedPt = " << chargedPt ; + LogTrace("discriminate") << "neutralPt = " << neutralPt ; + LogTrace("discriminate") << "weighted neutral Pt = " << weightedNeutralPt ; + LogTrace("discriminate") << "puPt = " << puPt << " (delta-beta corr. = " << (deltaBetaFactorThisEvent_*puPt) << ")" ; + + if ( calculateWeights_ ) { neutralPt = weightedNeutralPt; } if ( applyDeltaBeta_ ) { - neutralPt -= deltaBetaFactorThisEvent_*puPt; + neutralPt -= (deltaBetaFactorThisEvent_*puPt); } - + + if ( applyFootprintCorrection_ ) { + neutralPt -= footprintCorrection_value; + } + if ( applyRhoCorrection_ ) { neutralPt -= rhoThisEvent_; } - if ( neutralPt < 0.0 ) { - neutralPt = 0.0; + if ( neutralPt < 0. 
) { + neutralPt = 0.; } totalPt = chargedPt + neutralPt; @@ -501,10 +561,25 @@ PFRecoTauDiscriminationByIsolation::discriminate(const PFTauRef& pfTau) const //--- Relative Sum PT requirement failsRelativeSumPtCut = (totalPt > ((pfTau->pt() - offsetRelativeSumPt_)*maximumRelativeSumPt_)); } + + bool failsPhotonPtSumOutsideSignalConeCut = false; + double photonSumPt_outsideSignalCone = 0.; + if ( applyPhotonPtSumOutsideSignalConeCut_ || storeRawPhotonSumPt_outsideSignalCone_ ) { + const std::vector& signalPFGammas = pfTau->signalPFGammaCands(); + for ( std::vector::const_iterator signalPFGamma = signalPFGammas.begin(); + signalPFGamma != signalPFGammas.end(); ++signalPFGamma ) { + double dR = deltaR(pfTau->eta(), pfTau->phi(), (*signalPFGamma)->eta(), (*signalPFGamma)->phi()); + if ( dR > pfTau->signalConeSize() ) photonSumPt_outsideSignalCone += (*signalPFGamma)->pt(); + } + if ( photonSumPt_outsideSignalCone > maxAbsPhotonSumPt_outsideSignalCone_ || photonSumPt_outsideSignalCone > (maxRelPhotonSumPt_outsideSignalCone_*pfTau->pt()) ) { + failsPhotonPtSumOutsideSignalConeCut = true; + } + } bool fails = (applyOccupancyCut_ && failsOccupancyCut) || (applySumPtCut_ && failsSumPtCut) || - (applyRelativeSumPtCut_ && failsRelativeSumPtCut); + (applyRelativeSumPtCut_ && failsRelativeSumPtCut) || + (applyPhotonPtSumOutsideSignalConeCut_ && failsPhotonPtSumOutsideSignalConeCut); // We did error checking in the constructor, so this is safe. if ( storeRawSumPt_ ) { @@ -515,6 +590,10 @@ PFRecoTauDiscriminationByIsolation::discriminate(const PFTauRef& pfTau) const else return 0.; } else if ( storeRawOccupancy_ ) { return nOccupants; + } else if ( storeRawFootprintCorrection_ ) { + return footprintCorrection_value; + } else if ( storeRawPhotonSumPt_outsideSignalCone_ ) { + return photonSumPt_outsideSignalCone; } else { return (fails ? 0. : 1.); } diff --git a/RecoTauTag/RecoTau/plugins/PFTauPrimaryVertexProducer.cc b/RecoTauTag/RecoTau/plugins/PFTauPrimaryVertexProducer.cc index 156e4ac737f42..f22554a583f32 100644 --- a/RecoTauTag/RecoTau/plugins/PFTauPrimaryVertexProducer.cc +++ b/RecoTauTag/RecoTau/plugins/PFTauPrimaryVertexProducer.cc @@ -252,11 +252,13 @@ void PFTauPrimaryVertexProducer::produce(edm::Event& iEvent,const edm::EventSetu // } for(std::vector::const_iterator vtxTrkRef=thePV.tracks_begin();vtxTrkRefbuild(*iter)); } bool FitOk(true); - if ( transTracks.size() >= 3 ) { + if ( transTracks.size() >= 2 ) { AdaptiveVertexFitter avf; avf.setWeightThreshold(0.1); //weight per track. 
allow almost every fit, else --> exception try { diff --git a/RecoTauTag/RecoTau/plugins/PFTauSecondaryVertexProducer.cc b/RecoTauTag/RecoTau/plugins/PFTauSecondaryVertexProducer.cc index cab1ab4e29f6d..f458bc2c6b246 100644 --- a/RecoTauTag/RecoTau/plugins/PFTauSecondaryVertexProducer.cc +++ b/RecoTauTag/RecoTau/plugins/PFTauSecondaryVertexProducer.cc @@ -88,7 +88,7 @@ void PFTauSecondaryVertexProducer::produce(edm::Event& iEvent,const edm::EventSe for(reco::PFTauCollection::size_type iPFTau = 0; iPFTau < Tau->size(); iPFTau++) { reco::PFTauRef RefPFTau(Tau, iPFTau); std::vector SV; - if(RefPFTau->decayMode()==10){ + if(RefPFTau->decayMode()>=5){ /////////////////////////////////////////////////////////////////////////////////////////////// // Get tracks form PFTau daugthers std::vector transTrk; diff --git a/RecoTauTag/RecoTau/plugins/RecoTauBuilderCombinatoricPlugin.cc b/RecoTauTag/RecoTau/plugins/RecoTauBuilderCombinatoricPlugin.cc index 96edf7b7fc677..521d2e4794481 100644 --- a/RecoTauTag/RecoTau/plugins/RecoTauBuilderCombinatoricPlugin.cc +++ b/RecoTauTag/RecoTau/plugins/RecoTauBuilderCombinatoricPlugin.cc @@ -7,6 +7,8 @@ #include "RecoTauTag/RecoTau/interface/RecoTauCrossCleaning.h" #include "RecoTauTag/RecoTau/interface/ConeTools.h" +#include "CommonTools/Utils/interface/StringObjectFunction.h" + #include "DataFormats/TauReco/interface/PFTau.h" #include "DataFormats/TauReco/interface/PFRecoTauChargedHadron.h" #include "DataFormats/TauReco/interface/RecoTauPiZero.h" @@ -14,6 +16,8 @@ #include "RecoTauTag/RecoTau/interface/RecoTauConstructor.h" #include "RecoTauTag/RecoTau/interface/RecoTauQualityCuts.h" +#include + namespace reco { namespace tau { typedef std::vector ChargedHadronList; @@ -47,13 +51,24 @@ class RecoTauBuilderCombinatoricPlugin : public RecoTauBuilderPlugin }; std::vector decayModesToBuild_; + StringObjectFunction signalConeSize_; + double minAbsPhotonSumPt_insideSignalCone_; + double minRelPhotonSumPt_insideSignalCone_; + double minAbsPhotonSumPt_outsideSignalCone_; + double minRelPhotonSumPt_outsideSignalCone_; + int verbosity_; }; RecoTauBuilderCombinatoricPlugin::RecoTauBuilderCombinatoricPlugin(const edm::ParameterSet& pset, edm::ConsumesCollector && iC) : RecoTauBuilderPlugin(pset, std::move(iC)), qcuts_(pset.getParameterSet("qualityCuts").getParameterSet("signalQualityCuts")), - isolationConeSize_(pset.getParameter("isolationConeSize")) + isolationConeSize_(pset.getParameter("isolationConeSize")), + signalConeSize_(pset.getParameter("signalConeSize")), + minAbsPhotonSumPt_insideSignalCone_(pset.getParameter("minAbsPhotonSumPt_insideSignalCone")), + minRelPhotonSumPt_insideSignalCone_(pset.getParameter("minRelPhotonSumPt_insideSignalCone")), + minAbsPhotonSumPt_outsideSignalCone_(pset.getParameter("minAbsPhotonSumPt_outsideSignalCone")), + minRelPhotonSumPt_outsideSignalCone_(pset.getParameter("minRelPhotonSumPt_outsideSignalCone")) { typedef std::vector VPSet; const VPSet& decayModes = pset.getParameter("decayModes"); @@ -66,7 +81,7 @@ RecoTauBuilderCombinatoricPlugin::RecoTauBuilderCombinatoricPlugin(const edm::Pa info.maxPiZeros_ = decayMode->getParameter("maxPiZeros"); decayModesToBuild_.push_back(info); } - + verbosity_ = ( pset.exists("verbosity") ) ? 
pset.getParameter("verbosity") : 0; } @@ -144,17 +159,9 @@ namespace } }; - std::string getPFCandidateType(reco::PFCandidate::ParticleType pfCandidateType) + double square(double x) { - if ( pfCandidateType == reco::PFCandidate::X ) return "undefined"; - else if ( pfCandidateType == reco::PFCandidate::h ) return "PFChargedHadron"; - else if ( pfCandidateType == reco::PFCandidate::e ) return "PFElectron"; - else if ( pfCandidateType == reco::PFCandidate::mu ) return "PFMuon"; - else if ( pfCandidateType == reco::PFCandidate::gamma ) return "PFGamma"; - else if ( pfCandidateType == reco::PFCandidate::h0 ) return "PFNeutralHadron"; - else if ( pfCandidateType == reco::PFCandidate::h_HF ) return "HF_had"; - else if ( pfCandidateType == reco::PFCandidate::egamma_HF ) return "HF_em"; - else assert(0); + return x*x; } } @@ -195,20 +202,16 @@ RecoTauBuilderCombinatoricPlugin::operator()( idx = 0; for ( PiZeroList::const_iterator piZero = piZeros.begin(); piZero != piZeros.end(); ++piZero ) { - std::cout << "piZero #" << idx << ": Pt = " << piZero->pt() << ", eta = " << piZero->eta() << ", phi = " << piZero->phi() << std::endl; - size_t numDaughters = piZero->numberOfDaughters(); - for ( size_t iDaughter = 0; iDaughter < numDaughters; ++iDaughter ) { - const reco::PFCandidate* daughter = dynamic_cast(piZero->daughterPtr(iDaughter).get()); - std::cout << " daughter #" << iDaughter << " (" << getPFCandidateType(daughter->particleId()) << "):" - << " Pt = " << daughter->pt() << ", eta = " << daughter->eta() << ", phi = " << daughter->phi() << std::endl; - } + std::cout << "piZero #" << idx << ":" << std::endl; + piZero->print(std::cout); ++idx; } } PFCandPtrs pfchs = qcuts_.filterCandRefs(pfChargedCands(*jet)); PFCandPtrs pfnhs = qcuts_.filterCandRefs(pfCandidates(*jet, reco::PFCandidate::h0)); - + PFCandPtrs pfgammas = qcuts_.filterCandRefs(pfCandidates(*jet, reco::PFCandidate::gamma)); + /// Apply quality cuts to the regional junk around the jet. Note that the /// particle contents of the junk is exclusive to the jet content. 
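
Because the combinatoric builder constructor above now reads the signal-cone formula and the photon-sum thresholds from its parameter set, the corresponding builder PSet has to provide them. A rough sketch, assuming the usual name/plugin keys of a builder PSet; the threshold values are copied from the defaults quoted in RecoTauConstructor.h, and the cone formula is purely illustrative (the actual cfi values are not part of this excerpt).

    import FWCore.ParameterSet.Config as cms

    # Sketch of the extra parameters a 'combinatoric' builder PSet would carry.
    # qualityCuts and decayModes are omitted; signalConeSize is a string formula
    # evaluated per tau via StringObjectFunction, e.g. a function of the tau pt.
    combinatoricBuilder = cms.PSet(
        name = cms.string('combinatoric'),
        plugin = cms.string('RecoTauBuilderCombinatoricPlugin'),
        isolationConeSize = cms.double(0.5),
        signalConeSize = cms.string('max(min(0.1, 3.0/pt()), 0.05)'),  # illustrative formula
        minAbsPhotonSumPt_insideSignalCone = cms.double(2.5),
        minRelPhotonSumPt_insideSignalCone = cms.double(0.0),
        minAbsPhotonSumPt_outsideSignalCone = cms.double(1.e+9),
        minRelPhotonSumPt_outsideSignalCone = cms.double(1.e+9)
    )
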
PFCandPtrs regionalJunk = qcuts_.filterCandRefs(regionalExtras); @@ -222,8 +225,9 @@ RecoTauBuilderCombinatoricPlugin::operator()( size_t tracksToBuild = decayMode->nCharged_; if ( verbosity_ ) { std::cout << "piZerosToBuild = " << piZerosToBuild << std::endl; + std::cout << "#piZeros = " << piZeros.size() << std::endl; std::cout << "tracksToBuild = " << tracksToBuild << std::endl; - std::cout << "#chargedHadrons = " << chargedHadrons.size() << std::endl; + std::cout << "#chargedHadrons = " << chargedHadrons.size() << std::endl; } // Skip decay mode if jet doesn't have the multiplicity to support it @@ -271,7 +275,10 @@ RecoTauBuilderCombinatoricPlugin::operator()( for ( PiZeroCombo::iterator piZeroCombo = piZeroCombos.begin(); piZeroCombo != piZeroCombos.end(); ++piZeroCombo ) { // Output tau - RecoTauConstructor tau(jet, getPFCands(), true); + RecoTauConstructor tau( + jet, getPFCands(), true, + &signalConeSize_, + minAbsPhotonSumPt_insideSignalCone_, minRelPhotonSumPt_insideSignalCone_, minAbsPhotonSumPt_outsideSignalCone_, minRelPhotonSumPt_outsideSignalCone_); // Reserve space in our collections tau.reserve( RecoTauConstructor::kSignal, @@ -301,6 +308,16 @@ RecoTauBuilderCombinatoricPlugin::operator()( cleanIsolationPiZeros.push_back(precleanedPiZero); } } + if ( verbosity_ ) { + std::cout << "#cleanIsolationPiZeros = " << cleanIsolationPiZeros.size() << std::endl; + int idx = 0; + for ( PiZeroList::const_iterator piZero = cleanIsolationPiZeros.begin(); + piZero != cleanIsolationPiZeros.end(); ++piZero ) { + std::cout << "piZero #" << idx << ":" << std::endl; + piZero->print(std::cout); + ++idx; + } + } // FIXME - are all these reserves okay? will they get propagated to the // dataformat size if they are wrong? @@ -345,7 +362,7 @@ RecoTauBuilderCombinatoricPlugin::operator()( xclean::PredicateAND pfCandFilter_comboChargedHadrons(isolationConeFilter, pfChargedHadronXCleaner_comboChargedHadrons); // 2.) 
to select neutral PFCandidates within jet xclean::CrossCleanPtrs pfChargedHadronXCleaner_allChargedHadrons(chargedHadrons.begin(), chargedHadrons.end()); - xclean::CrossCleanPtrs piZeroXCleaner(cleanIsolationPiZeros.begin(), cleanIsolationPiZeros.end()); + xclean::CrossCleanPtrs piZeroXCleaner(piZeros.begin(), piZeros.end()); typedef xclean::PredicateAND, xclean::CrossCleanPtrs > pfCandXCleanerType; pfCandXCleanerType pfCandXCleaner_allChargedHadrons(pfChargedHadronXCleaner_allChargedHadrons, piZeroXCleaner); // And this cleaning filter predicate with our Iso cone filter @@ -429,6 +446,18 @@ RecoTauBuilderCombinatoricPlugin::operator()( boost::make_filter_iterator( pfChargedJunk, regionalJunk.end(), regionalJunk.end())); + // Add all PFGamma constituents of the jet that are not part of a PiZero + if ( verbosity_ >= 2 ) { + std::cout << "adding isolation PFGammas not considered in PiZeros:" << std::endl; + } + tau.addPFCands( + RecoTauConstructor::kIsolation, RecoTauConstructor::kGamma, + boost::make_filter_iterator( + pfCandFilter_allChargedHadrons, + pfgammas.begin(), pfgammas.end()), + boost::make_filter_iterator( + pfCandFilter_allChargedHadrons, + pfgammas.end(), pfgammas.end())); // Add all gammas that are in the iso cone but weren't in the // orginal PFJet tau.addPFCands( @@ -458,7 +487,30 @@ RecoTauBuilderCombinatoricPlugin::operator()( std::auto_ptr tauPtr = tau.get(true); - if ( primaryVertexRef.isNonnull() ) tauPtr->setVertex(primaryVertexRef->position()); + if ( primaryVertexRef.isNonnull() ) { + tauPtr->setVertex(primaryVertexRef->position()); + } + + double tauEn = tauPtr->energy(); + double tauPz = tauPtr->pz(); + const double chargedPionMass = 0.13957; // GeV + double tauMass = std::max(tauPtr->mass(), chargedPionMass); + double bendCorrMass2 = 0.; + const std::vector& piZeros = tauPtr->signalPiZeroCandidates(); + for (auto const& piZero : piZeros ) { + double piZeroEn = piZero.energy(); + double piZeroPx = piZero.px(); + double piZeroPy = piZero.py(); + double piZeroPz = piZero.pz(); + double tau_wo_piZeroPx = tauPtr->px() - piZeroPx; + double tau_wo_piZeroPy = tauPtr->py() - piZeroPy; + // CV: Compute effect of varying strip four-vector by eta and phi correction on tau mass + // (derrivative of tau mass by strip eta, phi has been computed using Mathematica) + bendCorrMass2 += square(((piZeroPz*tauEn - piZeroEn*tauPz)/tauMass)*piZero.bendCorrEta()); + bendCorrMass2 += square(((piZeroPy*tau_wo_piZeroPx - piZeroPx*tau_wo_piZeroPy)/tauMass)*piZero.bendCorrPhi()); + } + //edm::LogPrint("RecoTauBuilderCombinatoricPlugin") << "bendCorrMass2 = " << sqrt(bendCorrMass2) << std::endl; + tauPtr->setBendCorrMass(sqrt(bendCorrMass2)); output.push_back(tauPtr); } diff --git a/RecoTauTag/RecoTau/plugins/RecoTauChargeCleanerPlugin.cc b/RecoTauTag/RecoTau/plugins/RecoTauChargeCleanerPlugin.cc new file mode 100644 index 0000000000000..5125c4c88721c --- /dev/null +++ b/RecoTauTag/RecoTau/plugins/RecoTauChargeCleanerPlugin.cc @@ -0,0 +1,59 @@ +/* + * Original author: Alexander Nehrkorn (RWTH Aachen) + * + * Description: + * This module rejects tau candidates that do not have unit charge. + * It takes the fact into account that taus do not necessarily need + * to be created from PF charged hadrons only but can be created + * from a combination of PF charged hadrons and tracks. 
+ * + */ + +#include "RecoTauTag/RecoTau/interface/RecoTauBuilderPlugins.h" +#include "DataFormats/TauReco/interface/PFTau.h" +#include "DataFormats/TauReco/interface/PFTauFwd.h" + +namespace reco { namespace tau { + +class RecoTauChargeCleanerPlugin : public RecoTauCleanerPlugin +{ +public: + explicit RecoTauChargeCleanerPlugin(const edm::ParameterSet&, edm::ConsumesCollector &&iC); + ~RecoTauChargeCleanerPlugin() {} + double operator()(const PFTauRef& tau) const override; + +private: + std::vector nprongs_; + double failResult_; + int charge_; +}; + +RecoTauChargeCleanerPlugin::RecoTauChargeCleanerPlugin(const edm::ParameterSet& pset, edm::ConsumesCollector &&iC) + : RecoTauCleanerPlugin(pset,std::move(iC)), + nprongs_(pset.getParameter >("nprongs")), + failResult_(pset.getParameter("selectionFailValue")), + charge_(pset.getParameter("passForCharge")) +{} + +double RecoTauChargeCleanerPlugin::operator()(const PFTauRef& cand) const +{ + int charge = 0; + unsigned nChargedPFCandidate(0), nTrack(0); + for(auto const& tauCand : cand->signalTauChargedHadronCandidates()){ + charge += tauCand.charge(); + if(tauCand.algoIs(reco::PFRecoTauChargedHadron::kChargedPFCandidate)) nChargedPFCandidate++; + else if(tauCand.algoIs(reco::PFRecoTauChargedHadron::kTrack)) nTrack++; + } + + for(auto nprong : nprongs_){ + if(nChargedPFCandidate+nTrack == nprong) return abs(charge)-charge_; + } + + return failResult_; +} + +}} + +#include "FWCore/Framework/interface/MakerMacros.h" + +DEFINE_EDM_PLUGIN(RecoTauCleanerPluginFactory, reco::tau::RecoTauChargeCleanerPlugin, "RecoTauChargeCleanerPlugin"); diff --git a/RecoTauTag/RecoTau/plugins/RecoTauChargedHadronMultiplicityCleanerPlugin.cc b/RecoTauTag/RecoTau/plugins/RecoTauChargedHadronMultiplicityCleanerPlugin.cc new file mode 100644 index 0000000000000..b57cdb9a36202 --- /dev/null +++ b/RecoTauTag/RecoTau/plugins/RecoTauChargedHadronMultiplicityCleanerPlugin.cc @@ -0,0 +1,49 @@ +/* + * RecoTauChargedHadronMultiplicityCleanerPlugin + * + * Author: Christian Veelken, NICPB Tallinn + * + * A reco tau cleaner plugin that ranks the PFTaus by the number of charged hadrons. + */ + +#include "RecoTauTag/RecoTau/interface/RecoTauBuilderPlugins.h" +#include "DataFormats/TauReco/interface/PFTauDiscriminator.h" + +namespace reco { namespace tau { + +class RecoTauChargedHadronMultiplicityCleanerPlugin : public RecoTauCleanerPlugin +{ + public: + RecoTauChargedHadronMultiplicityCleanerPlugin(const edm::ParameterSet& pset, edm::ConsumesCollector&& iC); + + // Get ranking value for a given tau Ref + double operator()(const reco::PFTauRef&) const override; +}; + +RecoTauChargedHadronMultiplicityCleanerPlugin::RecoTauChargedHadronMultiplicityCleanerPlugin(const edm::ParameterSet& pset, edm::ConsumesCollector&& iC) + : RecoTauCleanerPlugin(pset,std::move(iC)) +{} + +double RecoTauChargedHadronMultiplicityCleanerPlugin::operator()(const reco::PFTauRef& tau) const +{ + // Get the ranking value for this tau. + // N.B. lower value means more "tau like"! 
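
The two cleaner plugins introduced in the new files here (charge-based and charged-hadron-multiplicity-based) are meant to be listed among the RecoTauCleaner ranking plugins. A sketch of what such entries could look like; the parameter names mirror the getParameter calls in RecoTauChargeCleanerPlugin, while the PSet keys, types and values are illustrative assumptions rather than configuration taken from this patch.

    import FWCore.ParameterSet.Config as cms

    # Sketch of two entries for a RecoTauCleaner 'cleaners' VPSet (keys and values illustrative).
    # The multiplicity cleaner needs no extra parameters beyond name/plugin.
    chargeCleaner = cms.PSet(
        name = cms.string('Charge'),
        plugin = cms.string('RecoTauChargeCleanerPlugin'),
        passForCharge = cms.int32(1),        # prefer candidates with |sum of charges| == 1
        selectionFailValue = cms.double(0),
        nprongs = cms.vuint32(1, 3)          # evaluate 1-prong and 3-prong hypotheses
    )
    chargedHadronMultiplicityCleaner = cms.PSet(
        name = cms.string('ChargedHadronMultiplicity'),
        plugin = cms.string('RecoTauChargedHadronMultiplicityCleanerPlugin')
    )
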
+ double result = 0.; + const std::vector& chargedHadrons = tau->signalTauChargedHadronCandidates(); + for ( std::vector::const_iterator chargedHadron = chargedHadrons.begin(); + chargedHadron != chargedHadrons.end(); ++chargedHadron ) { + if ( chargedHadron->algo() == PFRecoTauChargedHadron::kChargedPFCandidate ) result -= 8.; + else if ( chargedHadron->algo() == PFRecoTauChargedHadron::kTrack ) result -= 4.; + else if ( chargedHadron->algo() == PFRecoTauChargedHadron::kPFNeutralHadron ) result -= 2.; + else result -= 1.; + } + return result; +} + +}} // end namespace reco::tau + +// Register our plugin +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_EDM_PLUGIN(RecoTauCleanerPluginFactory, + reco::tau::RecoTauChargedHadronMultiplicityCleanerPlugin, + "RecoTauChargedHadronMultiplicityCleanerPlugin"); diff --git a/RecoTauTag/RecoTau/plugins/RecoTauPiZeroStripPlugin2.cc b/RecoTauTag/RecoTau/plugins/RecoTauPiZeroStripPlugin2.cc index 84bb16d88a94c..df052e5929f9c 100644 --- a/RecoTauTag/RecoTau/plugins/RecoTauPiZeroStripPlugin2.cc +++ b/RecoTauTag/RecoTau/plugins/RecoTauPiZeroStripPlugin2.cc @@ -170,15 +170,16 @@ void RecoTauPiZeroStripPlugin2::addCandsToStrip(RecoTauPiZero& strip, PFCandPtrs } } -void markCandsInStrip(std::vector& candFlags, const std::set& candIds) +namespace { - for ( std::set::const_iterator candId = candIds.begin(); - candId != candIds.end(); ++candId ) { - candFlags[*candId] = true; + void markCandsInStrip(std::vector& candFlags, const std::set& candIds) + { + for ( std::set::const_iterator candId = candIds.begin(); + candId != candIds.end(); ++candId ) { + candFlags[*candId] = true; + } } -} - -namespace { + inline const reco::TrackBaseRef getTrack(const PFCandidate& cand) { if ( cand.trackRef().isNonnull() ) return reco::TrackBaseRef(cand.trackRef()); diff --git a/RecoTauTag/RecoTau/plugins/RecoTauPiZeroStripPlugin3.cc b/RecoTauTag/RecoTau/plugins/RecoTauPiZeroStripPlugin3.cc new file mode 100644 index 0000000000000..1c72af7a69104 --- /dev/null +++ b/RecoTauTag/RecoTau/plugins/RecoTauPiZeroStripPlugin3.cc @@ -0,0 +1,393 @@ +/* + * RecoTauPiZeroStripPlugin3 + * + * Merges PFGammas in a PFJet into Candidate piZeros defined as + * strips in eta-phi. 
+ * + * Author: Michail Bachtis (University of Wisconsin) + * + * Code modifications: Evan Friis (UC Davis), + * Christian Veelken (LLR) + * + */ + +#include +#include + +#include "boost/bind.hpp" + +#include "RecoTauTag/RecoTau/interface/RecoTauPiZeroPlugins.h" + +#include "DataFormats/ParticleFlowCandidate/interface/PFCandidateFwd.h" +#include "DataFormats/ParticleFlowCandidate/interface/PFCandidate.h" +#include "DataFormats/VertexReco/interface/Vertex.h" +#include "DataFormats/VertexReco/interface/VertexFwd.h" +#include "DataFormats/TauReco/interface/RecoTauPiZero.h" +#include "DataFormats/JetReco/interface/PFJet.h" +#include "CommonTools/CandUtils/interface/AddFourMomenta.h" +#include "DataFormats/Math/interface/deltaPhi.h" + +#include "RecoTauTag/RecoTau/interface/RecoTauCommonUtilities.h" +#include "RecoTauTag/RecoTau/interface/RecoTauQualityCuts.h" +#include "RecoTauTag/RecoTau/interface/RecoTauVertexAssociator.h" +#include "RecoTauTag/RecoTau/interface/CombinatoricGenerator.h" + +//------------------------------------------------------------------------------- +// CV: the following headers are needed only for debug print-out +#include "DataFormats/GsfTrackReco/interface/GsfTrack.h" +#include "DataFormats/TrackReco/interface/Track.h" +//------------------------------------------------------------------------------- + +#include "TString.h" +#include "TFormula.h" + +namespace reco { namespace tau { + +namespace { + // Apply a hypothesis on the mass of the strips. + math::XYZTLorentzVector applyMassConstraint( + const math::XYZTLorentzVector& vec,double mass) { + double factor = sqrt(vec.energy()*vec.energy()-mass*mass)/vec.P(); + return math::XYZTLorentzVector( + vec.px()*factor,vec.py()*factor,vec.pz()*factor,vec.energy()); + } +} + +class RecoTauPiZeroStripPlugin3 : public RecoTauPiZeroBuilderPlugin +{ + public: + explicit RecoTauPiZeroStripPlugin3(const edm::ParameterSet&, edm::ConsumesCollector &&iC); + virtual ~RecoTauPiZeroStripPlugin3(); + // Return type is auto_ptr + return_type operator()(const reco::PFJet&) const override; + // Hook to update PV information + virtual void beginEvent() override; + + private: + typedef std::vector PFCandPtrs; + void addCandsToStrip(RecoTauPiZero&, PFCandPtrs&, const std::vector&, std::set&, bool&) const; + + RecoTauVertexAssociator vertexAssociator_; + + std::unique_ptr qcuts_; + bool applyElecTrackQcuts_; + double minGammaEtStripSeed_; + double minGammaEtStripAdd_; + + double minStripEt_; + + std::vector inputPdgIds_; // type of candidates to clusterize + std::unique_ptr etaAssociationDistance_; // size of strip clustering window in eta direction + std::unique_ptr phiAssociationDistance_; // size of strip clustering window in phi direction + + bool updateStripAfterEachDaughter_; + int maxStripBuildIterations_; + + // Parameters for build strip combinations + bool combineStrips_; + int maxStrips_; + double combinatoricStripMassHypo_; + + AddFourMomenta p4Builder_; + + int verbosity_; +}; + +namespace +{ + std::unique_ptr makeFunction(const std::string& functionName, const edm::ParameterSet& pset) + { + TString formula = pset.getParameter("function"); + formula = formula.ReplaceAll("pT", "x"); + std::unique_ptr function(new TFormula(functionName.data(), formula.Data())); + int numParameter = function->GetNpar(); + for ( int idxParameter = 0; idxParameter < numParameter; ++idxParameter ) { + std::string parameterName = Form("par%i", idxParameter); + double parameter = pset.getParameter(parameterName); + function->SetParameter(idxParameter, 
parameter); + } + return function; + } +} + +RecoTauPiZeroStripPlugin3::RecoTauPiZeroStripPlugin3(const edm::ParameterSet& pset, edm::ConsumesCollector &&iC) + : RecoTauPiZeroBuilderPlugin(pset, std::move(iC)), + vertexAssociator_(pset.getParameter("qualityCuts"), std::move(iC)), qcuts_(nullptr), etaAssociationDistance_(nullptr), phiAssociationDistance_(nullptr) +{ + minGammaEtStripSeed_ = pset.getParameter("minGammaEtStripSeed"); + minGammaEtStripAdd_ = pset.getParameter("minGammaEtStripAdd"); + + minStripEt_ = pset.getParameter("minStripEt"); + + edm::ParameterSet qcuts_pset = pset.getParameterSet("qualityCuts").getParameterSet("signalQualityCuts"); +//------------------------------------------------------------------------------- +// CV: disable track quality cuts for PFElectronsPFElectron +// (treat PFElectrons like PFGammas for the purpose of building eta-phi strips) + applyElecTrackQcuts_ = pset.getParameter("applyElecTrackQcuts"); + if ( !applyElecTrackQcuts_ ) { + qcuts_pset.addParameter("minTrackPt", std::min(minGammaEtStripSeed_, minGammaEtStripAdd_)); + qcuts_pset.addParameter("maxTrackChi2", 1.e+9); + qcuts_pset.addParameter("maxTransverseImpactParameter", 1.e+9); + qcuts_pset.addParameter("maxDeltaZ", 1.e+9); + qcuts_pset.addParameter("minTrackVertexWeight", -1.); + qcuts_pset.addParameter("minTrackPixelHits", 0); + qcuts_pset.addParameter("minTrackHits", 0); + } +//------------------------------------------------------------------------------- + qcuts_pset.addParameter("minGammaEt", std::min(minGammaEtStripSeed_, minGammaEtStripAdd_)); + //qcuts_ = new RecoTauQualityCuts(qcuts_pset); + //std::unique_ptr qcuts_(new RecoTauQualityCuts(qcuts_pset)); + + qcuts_.reset(new RecoTauQualityCuts(qcuts_pset)); + + inputPdgIds_ = pset.getParameter >("stripCandidatesParticleIds"); + edm::ParameterSet stripSize_eta_pset = pset.getParameterSet("stripEtaAssociationDistance"); + etaAssociationDistance_ = makeFunction("etaAssociationDistance", stripSize_eta_pset); + edm::ParameterSet stripSize_phi_pset = pset.getParameterSet("stripPhiAssociationDistance"); + phiAssociationDistance_ = makeFunction("phiAssociationDistance", stripSize_phi_pset); + + updateStripAfterEachDaughter_ = pset.getParameter("updateStripAfterEachDaughter"); + maxStripBuildIterations_ = pset.getParameter("maxStripBuildIterations"); + + combineStrips_ = pset.getParameter("makeCombinatoricStrips"); + if ( combineStrips_ ) { + maxStrips_ = pset.getParameter("maxInputStrips"); + combinatoricStripMassHypo_ = pset.getParameter("stripMassWhenCombining"); + } + + verbosity_ = ( pset.exists("verbosity") ) ? 
+ pset.getParameter("verbosity") : 0; +} +RecoTauPiZeroStripPlugin3::~RecoTauPiZeroStripPlugin3() +{ +} + +// Update the primary vertex +void RecoTauPiZeroStripPlugin3::beginEvent() +{ + vertexAssociator_.setEvent(*evt()); +} + +void RecoTauPiZeroStripPlugin3::addCandsToStrip(RecoTauPiZero& strip, PFCandPtrs& cands, const std::vector& candFlags, + std::set& candIdsCurrentStrip, bool& isCandAdded) const +{ + if ( verbosity_ >= 1 ) { + edm::LogPrint("RecoTauPiZeroStripPlugin3") << ":" ; + } + size_t numCands = cands.size(); + for ( size_t candId = 0; candId < numCands; ++candId ) { + if ( (!candFlags[candId]) && candIdsCurrentStrip.find(candId) == candIdsCurrentStrip.end() ) { // do not include same cand twice + reco::PFCandidatePtr cand = cands[candId]; + double etaAssociationDistance_value = etaAssociationDistance_->Eval(strip.pt()) + etaAssociationDistance_->Eval(cand->pt()); + double phiAssociationDistance_value = phiAssociationDistance_->Eval(strip.pt()) + phiAssociationDistance_->Eval(cand->pt()); + if ( fabs(strip.eta() - cand->eta()) < etaAssociationDistance_value && // check if cand is within eta-phi window centered on strip + reco::deltaPhi(strip.phi(), cand->phi()) < phiAssociationDistance_value ) { + if ( verbosity_ >= 2 ) { + edm::LogPrint("RecoTauPiZeroStripPlugin3") << "--> adding PFCand #" << candId << " (" << cand.id() << ":" << cand.key() << "): Et = " << cand->et() << ", eta = " << cand->eta() << ", phi = " << cand->phi() ; + } + strip.addDaughter(cand); + if ( updateStripAfterEachDaughter_ ) p4Builder_.set(strip); + isCandAdded = true; + candIdsCurrentStrip.insert(candId); + } + } + } +} + +namespace +{ + void markCandsInStrip(std::vector& candFlags, const std::set& candIds) + { + for ( std::set::const_iterator candId = candIds.begin(); + candId != candIds.end(); ++candId ) { + candFlags[*candId] = true; + } + } + + inline const reco::TrackBaseRef getTrack(const PFCandidate& cand) + { + if ( cand.trackRef().isNonnull() ) return reco::TrackBaseRef(cand.trackRef()); + else if ( cand.gsfTrackRef().isNonnull() ) return reco::TrackBaseRef(cand.gsfTrackRef()); + else return reco::TrackBaseRef(); + } +} + +RecoTauPiZeroStripPlugin3::return_type RecoTauPiZeroStripPlugin3::operator()(const reco::PFJet& jet) const +{ + if ( verbosity_ >= 1 ) { + edm::LogPrint("RecoTauPiZeroStripPlugin3") << ":" ; + edm::LogPrint("RecoTauPiZeroStripPlugin3") << " minGammaEtStripSeed = " << minGammaEtStripSeed_ ; + edm::LogPrint("RecoTauPiZeroStripPlugin3") << " minGammaEtStripAdd = " << minGammaEtStripAdd_ ; + edm::LogPrint("RecoTauPiZeroStripPlugin3") << " minStripEt = " << minStripEt_ ; + } + + PiZeroVector output; + + // Get the candidates passing our quality cuts + qcuts_->setPV(vertexAssociator_.associatedVertex(jet)); + PFCandPtrs candsVector = qcuts_->filterCandRefs(pfCandidates(jet, inputPdgIds_)); + + // Convert to stl::list to allow fast deletions + PFCandPtrs seedCands; + PFCandPtrs addCands; + int idx = 0; + for ( PFCandPtrs::iterator cand = candsVector.begin(); + cand != candsVector.end(); ++cand ) { + if ( verbosity_ >= 1 ) { + edm::LogPrint("RecoTauPiZeroStripPlugin3") << "PFGamma #" << idx << " (" << cand->id() << ":" << cand->key() << "): Et = " << (*cand)->et() << ", eta = " << (*cand)->eta() << ", phi = " << (*cand)->phi() ; + } + if ( (*cand)->et() > minGammaEtStripSeed_ ) { + if ( verbosity_ >= 2 ) { + edm::LogPrint("RecoTauPiZeroStripPlugin3") << "--> assigning seedCandId = " << seedCands.size() ; + const reco::TrackBaseRef candTrack = getTrack(*cand); + if ( 
candTrack.isNonnull() ) { + edm::LogPrint("RecoTauPiZeroStripPlugin3") << "track: Pt = " << candTrack->pt() << " eta = " << candTrack->eta() << ", phi = " << candTrack->phi() << ", charge = " << candTrack->charge() ; + edm::LogPrint("RecoTauPiZeroStripPlugin3") << " (dZ = " << candTrack->dz(vertexAssociator_.associatedVertex(jet)->position()) << ", dXY = " << candTrack->dxy(vertexAssociator_.associatedVertex(jet)->position()) << "," + << " numHits = " << candTrack->hitPattern().numberOfValidTrackerHits() << ", numPxlHits = " << candTrack->hitPattern().numberOfValidPixelHits() << "," + << " chi2 = " << candTrack->normalizedChi2() << ", dPt/Pt = " << (candTrack->ptError()/candTrack->pt()) << ")" ; + } + edm::LogPrint("RecoTauPiZeroStripPlugin3") << "ECAL Et: calibrated = " << (*cand)->ecalEnergy()*sin((*cand)->theta()) << "," + << " raw = " << (*cand)->rawEcalEnergy()*sin((*cand)->theta()) ; + edm::LogPrint("RecoTauPiZeroStripPlugin3") << "HCAL Et: calibrated = " << (*cand)->hcalEnergy()*sin((*cand)->theta()) << "," + << " raw = " << (*cand)->rawHcalEnergy()*sin((*cand)->theta()) ; + } + seedCands.push_back(*cand); + } else if ( (*cand)->et() > minGammaEtStripAdd_ ) { + if ( verbosity_ >= 2 ) { + edm::LogPrint("RecoTauPiZeroStripPlugin3") << "--> assigning addCandId = " << addCands.size() ; + } + addCands.push_back(*cand); + } + ++idx; + } + + std::vector seedCandFlags(seedCands.size()); // true/false: seedCand is already/not yet included in strip + std::vector addCandFlags(addCands.size()); // true/false: addCand is already/not yet included in strip + + std::set seedCandIdsCurrentStrip; + std::set addCandIdsCurrentStrip; + + size_t idxSeed = 0; + while ( idxSeed < seedCands.size() ) { + if ( verbosity_ >= 2 ) edm::LogPrint("RecoTauPiZeroStripPlugin3") << "processing seed #" << idxSeed ; + + seedCandIdsCurrentStrip.clear(); + addCandIdsCurrentStrip.clear(); + + std::auto_ptr strip(new RecoTauPiZero(*seedCands[idxSeed], RecoTauPiZero::kStrips)); + strip->addDaughter(seedCands[idxSeed]); + seedCandIdsCurrentStrip.insert(idxSeed); + + bool isCandAdded; + int stripBuildIteration = 0; + do { + isCandAdded = false; + + //if ( verbosity_ >= 2 ) edm::LogPrint("RecoTauPiZeroStripPlugin3") << " adding seedCands to strip..." ; + addCandsToStrip(*strip, seedCands, seedCandFlags, seedCandIdsCurrentStrip, isCandAdded); + //if ( verbosity_ >= 2 ) edm::LogPrint("RecoTauPiZeroStripPlugin3") << " adding addCands to strip..." 
; + addCandsToStrip(*strip, addCands, addCandFlags, addCandIdsCurrentStrip, isCandAdded); + + if ( !updateStripAfterEachDaughter_ ) p4Builder_.set(*strip); + + ++stripBuildIteration; + } while ( isCandAdded && (stripBuildIteration < maxStripBuildIterations_ || maxStripBuildIterations_ == -1) ); + + if ( strip->et() > minStripEt_ ) { // strip passed Et cuts, add it to the event + if ( verbosity_ >= 2 ) edm::LogPrint("RecoTauPiZeroStripPlugin3") << "Building strip: Et = " << strip->et() << ", eta = " << strip->eta() << ", phi = " << strip->phi() ; + + // Update the vertex + if ( strip->daughterPtr(0).isNonnull() ) strip->setVertex(strip->daughterPtr(0)->vertex()); + output.push_back(strip); + + // Mark daughters as being part of this strip + markCandsInStrip(seedCandFlags, seedCandIdsCurrentStrip); + markCandsInStrip(addCandFlags, addCandIdsCurrentStrip); + } else { // strip failed Et cuts, just skip it + if ( verbosity_ >= 2 ) edm::LogPrint("RecoTauPiZeroStripPlugin3") << "Discarding strip: Et = " << strip->et() << ", eta = " << strip->eta() << ", phi = " << strip->phi() ; + } + + ++idxSeed; + while ( idxSeed < seedCands.size() && seedCandFlags[idxSeed] ) { + ++idxSeed; // fast-forward to next seed cand not yet included in any strip + } + } + + // Check if we want to combine our strips + if ( combineStrips_ && output.size() > 1 ) { + PiZeroVector stripCombinations; + // Sort the output by descending pt + output.sort(output.begin(), output.end(), + boost::bind(&RecoTauPiZero::pt, _1) > + boost::bind(&RecoTauPiZero::pt, _2)); + // Get the end of interesting set of strips to try and combine + PiZeroVector::const_iterator end_iter = takeNElements( + output.begin(), output.end(), maxStrips_); + + // Look at all the combinations + for ( PiZeroVector::const_iterator first = output.begin(); + first != end_iter-1; ++first ) { + for ( PiZeroVector::const_iterator second = first+1; + second != end_iter; ++second ) { + Candidate::LorentzVector firstP4 = first->p4(); + Candidate::LorentzVector secondP4 = second->p4(); + // If we assume a certain mass for each strip apply it here. + firstP4 = applyMassConstraint(firstP4, combinatoricStripMassHypo_); + secondP4 = applyMassConstraint(secondP4, combinatoricStripMassHypo_); + Candidate::LorentzVector totalP4 = firstP4 + secondP4; + // Make our new combined strip + std::auto_ptr combinedStrips( + new RecoTauPiZero(0, totalP4, + Candidate::Point(0, 0, 0), + //111, 10001, true, RecoTauPiZero::kCombinatoricStrips)); + 111, 10001, true, RecoTauPiZero::kUndefined)); + + // Now loop over the strip members + for ( auto const& gamma : first->daughterPtrVector()) { + combinedStrips->addDaughter(gamma); + } + for ( auto const& gamma : second->daughterPtrVector()) { + combinedStrips->addDaughter(gamma); + } + // Update the vertex + if ( combinedStrips->daughterPtr(0).isNonnull() ) { + combinedStrips->setVertex(combinedStrips->daughterPtr(0)->vertex()); + } + + // Add to our collection of combined strips + stripCombinations.push_back(combinedStrips); + } + } + // When done doing all the combinations, add the combined strips to the + // output. 
+ output.transfer(output.end(), stripCombinations); + } + + // Compute correction to account for spread of photon energy in eta and phi + // in case charged pions make nuclear interactions or photons convert within the tracking detector + for ( PiZeroVector::iterator strip = output.begin(); + strip != output.end(); ++strip ) { + double bendCorrEta = 0.; + double bendCorrPhi = 0.; + double energySum = 0.; + for (auto const& gamma : strip->daughterPtrVector()) { + bendCorrEta += (gamma->energy()*etaAssociationDistance_->Eval(gamma->pt())); + bendCorrPhi += (gamma->energy()*phiAssociationDistance_->Eval(gamma->pt())); + energySum += gamma->energy(); + } + if ( energySum > 1.e-2 ) { + bendCorrEta /= energySum; + bendCorrPhi /= energySum; + } + //std::cout << "stripPt = " << strip->pt() << ": bendCorrEta = " << bendCorrEta << ", bendCorrPhi = " << bendCorrPhi << std::endl; + strip->setBendCorrEta(bendCorrEta); + strip->setBendCorrPhi(bendCorrPhi); + } + + return output.release(); +} +}} // end namespace reco::tau + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_EDM_PLUGIN(RecoTauPiZeroBuilderPluginFactory, + reco::tau::RecoTauPiZeroStripPlugin3, "RecoTauPiZeroStripPlugin3"); diff --git a/RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByHPSSelection_cfi.py b/RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByHPSSelection_cfi.py index ee0b64a68ec57..48f680f011703 100644 --- a/RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByHPSSelection_cfi.py +++ b/RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByHPSSelection_cfi.py @@ -66,7 +66,6 @@ Prediscriminants = noPrediscriminants, matchingCone = PFRecoTauPFJetInputs.jetConeSize, minTauPt = cms.double(0.0), - coneSizeFormula = cms.string("max(min(0.1, 3.0/pt()), 0.05)"), decayModes = cms.VPSet( decayMode_1Prong0Pi0, decayMode_1Prong1Pi0, @@ -75,7 +74,9 @@ decayMode_2Prong1Pi0, decayMode_3Prong0Pi0 ), - requireTauChargedHadronsToBeChargedPFCands = cms.bool(False) + requireTauChargedHadronsToBeChargedPFCands = cms.bool(False), + # CV: require at least one pixel hit for the sum of all tracks + minPixelHits = cms.int32(1) ) diff --git a/RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByIsolation_cfi.py b/RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByIsolation_cfi.py index d395f033c7609..73c7eac0acad9 100644 --- a/RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByIsolation_cfi.py +++ b/RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByIsolation_cfi.py @@ -26,13 +26,16 @@ relativeSumPtCut = cms.double(0.0), relativeSumPtOffset = cms.double(0.0), - qualityCuts = PFTauQualityCuts,# set the standard quality cuts + applyPhotonPtSumOutsideSignalConeCut = cms.bool(False), + maxAbsPhotonSumPt_outsideSignalCone = cms.double(1.e+9), + maxRelPhotonSumPt_outsideSignalCone = cms.double(0.10), + + qualityCuts = PFTauQualityCuts, # set the standard quality cuts # Delta-Beta corrections to remove Pileup applyDeltaBetaCorrection = cms.bool(False), particleFlowSrc = cms.InputTag("particleFlow"), vertexSrc = PFTauQualityCuts.primaryVertexSrc, - # This must correspond to the cone size of the algorithm which built the # tau. (or if customOuterCone option is used, the custom cone size) isoConeSizeForDeltaBeta = cms.double(0.5), @@ -47,6 +50,31 @@ # Uncommenting the parameter below allows this threshold to be overridden. 
#deltaBetaPUTrackPtCutOverride = cms.double(1.5), + # Tau footprint correction + applyFootprintCorrection = cms.bool(False), + footprintCorrections = cms.VPSet( + cms.PSet( + selection = cms.string("decayMode() = 0"), + offset = cms.string("0.0") + ), + cms.PSet( + selection = cms.string("decayMode() = 1 || decayMode() = 2"), + offset = cms.string("0.0") + ), + cms.PSet( + selection = cms.string("decayMode() = 5"), + offset = cms.string("2.7") + ), + cms.PSet( + selection = cms.string("decayMode() = 6"), + offset = cms.string("0.0") + ), + cms.PSet( + selection = cms.string("decayMode() = 10"), + offset = cms.string("max(2.0, 0.22*pt() - 2.0)") + ) + ), + # Rho corrections applyRhoCorrection = cms.bool(False), rhoProducer = cms.InputTag("fixedGridRhoFastjetAll"), diff --git a/RecoTauTag/RecoTau/python/RecoTauCleanerPlugins.py b/RecoTauTag/RecoTau/python/RecoTauCleanerPlugins.py index c6bfc1c13f4fa..8dcfc7df1ca76 100644 --- a/RecoTauTag/RecoTau/python/RecoTauCleanerPlugins.py +++ b/RecoTauTag/RecoTau/python/RecoTauCleanerPlugins.py @@ -28,6 +28,18 @@ selectionFailValue = cms.double(0), ) +# similar to unitCharge but handles also cases where tau is made up of +# a combination of tracks and pf charged hadrons +charge = cms.PSet( + name = cms.string("Charge"), + plugin = cms.string("RecoTauChargeCleanerPlugin"), + # cleaner is applied to decay modes with the number of prongs given here + nprongs = cms.vuint32(1,3), + # taus with charge != 1 are rejected + passForCharge = cms.int32(1), + selectionFailValue = cms.double(0), +) + # Prefer taus with pt greater 15 ptGt15 = cms.PSet( name = cms.string("PtGt15"), @@ -60,6 +72,11 @@ tolerance = cms.double(1.e-2) # CV: consider candidates with almost equal pT to be of the same rank (to avoid sensitivity to rounding errors) ) +chargedHadronMultiplicity = cms.PSet( + name = cms.string("ChargedHadronMultiplicity"), + plugin = cms.string("RecoTauChargedHadronMultiplicityCleanerPlugin") +) + stripMultiplicity = cms.PSet( name = cms.string("StripMultiplicity"), plugin = cms.string("RecoTauStringCleanerPlugin"), diff --git a/RecoTauTag/RecoTau/python/RecoTauCleaner_cfi.py b/RecoTauTag/RecoTau/python/RecoTauCleaner_cfi.py index f990d3c474231..d387946755de3 100644 --- a/RecoTauTag/RecoTau/python/RecoTauCleaner_cfi.py +++ b/RecoTauTag/RecoTau/python/RecoTauCleaner_cfi.py @@ -2,36 +2,27 @@ import RecoTauTag.RecoTau.RecoTauCleanerPlugins as cleaners -RecoTauCleaner = cms.EDProducer( - "RecoTauCleaner", - src = cms.InputTag("combinatoricRecoTaus"), - cleaners = cms.VPSet( - # Reject taus that have charge == 3 - cleaners.unitCharge, - # Ignore taus reconstructed in pi0 decay modes in which the highest Pt ("leading") pi0 has pt below 2.5 GeV - # (in order to make decay mode reconstruction less sensitive to pile-up) - # NOTE: strips are sorted by decreasing pt - cms.PSet( - name = cms.string("leadStripPtLt2_5"), - plugin = cms.string("RecoTauStringCleanerPlugin"), - selection = cms.string("signalPiZeroCandidates().size() = 0 | signalPiZeroCandidates()[0].pt() > 2.5"), - selectionPassFunction = cms.string("0"), - selectionFailValue = cms.double(1e3) - ), - # Reject taus that are not within DR<0.1 of the jet axis - #cleaners.matchingConeCut, - # Reject taus that fail HPS selections - cms.PSet( - name = cms.string("HPS_Select"), - plugin = cms.string("RecoTauDiscriminantCleanerPlugin"), - src = cms.InputTag("hpsSelectionDiscriminator"), - ), - # CV: Take highes pT tau (use for testing of new high pT tau reconstruction and check if it can become the new default) - 
cleaners.pt, - # CV: in case two candidates have the same Pt, - # prefer candidates in which PFGammas are part of strips (rather than being merged with PFRecoTauChargedHadrons) - cleaners.stripMultiplicity, - # Take most isolated tau - cleaners.combinedIsolation - ) - ) +RecoTauCleaner = cms.EDProducer("RecoTauCleaner", + src = cms.InputTag("combinatoricRecoTaus"), + cleaners = cms.VPSet( + # Reject taus that have charge == 3 + cleaners.charge, + # Reject taus that are not within DR<0.1 of the jet axis + #cleaners.matchingConeCut, + # Reject taus that fail HPS selections + cms.PSet( + name = cms.string("HPS_Select"), + plugin = cms.string("RecoTauDiscriminantCleanerPlugin"), + src = cms.InputTag("hpsSelectionDiscriminator"), + ), + # CV: prefer 3-prong candidates over 2-prong candidates and 2-prong candidates over 1-prong candidates + cleaners.chargedHadronMultiplicity, + # CV: Take highest pT tau (use for testing of new high pT tau reconstruction and check if it can become the new default) + cleaners.pt, + # CV: in case two candidates have the same Pt, + # prefer candidates in which PFGammas are part of strips (rather than being merged with PFRecoTauChargedHadrons) + cleaners.stripMultiplicity, + # Take most isolated tau + cleaners.combinedIsolation + ) +) diff --git a/RecoTauTag/RecoTau/python/RecoTauCombinatoricProducer_cfi.py b/RecoTauTag/RecoTau/python/RecoTauCombinatoricProducer_cfi.py index 8a39046ee66d8..8023b7b429927 100644 --- a/RecoTauTag/RecoTau/python/RecoTauCombinatoricProducer_cfi.py +++ b/RecoTauTag/RecoTau/python/RecoTauCombinatoricProducer_cfi.py @@ -83,7 +83,12 @@ combinatoricDecayModeConfigs.config2prong0pi0, combinatoricDecayModeConfigs.config2prong1pi0, combinatoricDecayModeConfigs.config3prong0pi0 - ) + ), + signalConeSize = cms.string("max(min(0.1, 3.0/pt()), 0.05)"), + minAbsPhotonSumPt_insideSignalCone = cms.double(2.5), + minRelPhotonSumPt_insideSignalCone = cms.double(0.10), + minAbsPhotonSumPt_outsideSignalCone = cms.double(1.e+9), # CV: always require at least some photon energy inside signal cone + minRelPhotonSumPt_outsideSignalCone = cms.double(1.e+9) # for a tau to be reconstructed in a decay mode with pi0s ) combinatoricRecoTaus = cms.EDProducer("RecoTauProducer", diff --git a/RecoTauTag/RecoTau/python/RecoTauPiZeroBuilderPlugins_cfi.py b/RecoTauTag/RecoTau/python/RecoTauPiZeroBuilderPlugins_cfi.py index fafbeba289195..5a6ed014f35d3 100644 --- a/RecoTauTag/RecoTau/python/RecoTauPiZeroBuilderPlugins_cfi.py +++ b/RecoTauTag/RecoTau/python/RecoTauPiZeroBuilderPlugins_cfi.py @@ -36,9 +36,9 @@ plugin = cms.string("RecoTauPiZeroStripPlugin"), qualityCuts = PFTauQualityCuts, # Clusterize photons and electrons (PF numbering) - stripCandidatesParticleIds = cms.vint32(2, 4), - stripEtaAssociationDistance = cms.double(0.05), - stripPhiAssociationDistance = cms.double(0.2), + stripCandidatesParticleIds = cms.vint32(2, 4), + stripEtaAssociationDistance = cms.double(0.05), + stripPhiAssociationDistance = cms.double(0.2), makeCombinatoricStrips = cms.bool(False) ) @@ -47,9 +47,9 @@ plugin = cms.string("RecoTauPiZeroStripPlugin"), qualityCuts = PFTauQualityCuts, # Clusterize photons and electrons (PF numbering) - stripCandidatesParticleIds = cms.vint32(2, 4), - stripEtaAssociationDistance = cms.double(0.05), - stripPhiAssociationDistance = cms.double(0.2), + stripCandidatesParticleIds = cms.vint32(2, 4), + stripEtaAssociationDistance = cms.double(0.05), + stripPhiAssociationDistance = cms.double(0.2), makeCombinatoricStrips = cms.bool(True), maxInputStrips = 
cms.int32(5), stripMassWhenCombining = cms.double(0.0), # assume photon like @@ -67,3 +67,27 @@ maxStripBuildIterations = cms.int32(-1) ) +# Produce a "strips" of photons +# with no track quality cuts applied to PFElectrons +# and eta x phi size of strip increasing for low pT photons +modStrips2 = strips.clone( + plugin = cms.string('RecoTauPiZeroStripPlugin3'), + applyElecTrackQcuts = cms.bool(False), + minGammaEtStripSeed = cms.double(0.5), + minGammaEtStripAdd = cms.double(0.), + minStripEt = cms.double(0.5), + # CV: parametrization of strip size in eta and phi determined by Yuta Takahashi, + # chosen to contain 95% of photons from tau decays + stripEtaAssociationDistance = cms.PSet( + function = cms.string("TMath::Min(0.15, TMath::Max(0.05, [0]*TMath::Power(pT, -[1])))"), + par0 = cms.double(1.97077e-01), + par1 = cms.double(6.58701e-01) + ), + stripPhiAssociationDistance = cms.PSet( + function = cms.string("TMath::Min(0.3, TMath::Max(0.05, [0]*TMath::Power(pT, -[1])))"), + par0 = cms.double(3.52476e-01), + par1 = cms.double(7.07716e-01) + ), + updateStripAfterEachDaughter = cms.bool(False), + maxStripBuildIterations = cms.int32(-1) +) diff --git a/RecoTauTag/RecoTau/python/RecoTauPiZeroProducer_cfi.py b/RecoTauTag/RecoTau/python/RecoTauPiZeroProducer_cfi.py index 3e71ed9f9d487..00b5419b8a260 100644 --- a/RecoTauTag/RecoTau/python/RecoTauPiZeroProducer_cfi.py +++ b/RecoTauTag/RecoTau/python/RecoTauPiZeroProducer_cfi.py @@ -12,7 +12,8 @@ outputSelection = cms.string('pt > 0'), builders = cms.VPSet( #builders.strips - builders.modStrips + #builders.modStrips + builders.modStrips2 ), ranking = cms.VPSet( ranking.isInStrip @@ -39,7 +40,8 @@ builders = cms.VPSet( builders.combinatoricPhotonPairs, #builders.strips - builders.modStrips + #builders.modStrips + builders.modStrips2 ), ranking = cms.VPSet( ranking.nearPiZeroMassBarrel, # Prefer pi zeros +- 0.05 GeV correct mass diff --git a/RecoTauTag/RecoTau/src/RecoTauConstructor.cc b/RecoTauTag/RecoTau/src/RecoTauConstructor.cc index 5e724b429ee6f..c5f36486f0765 100644 --- a/RecoTauTag/RecoTau/src/RecoTauConstructor.cc +++ b/RecoTauTag/RecoTau/src/RecoTauConstructor.cc @@ -3,6 +3,8 @@ #include "RecoTauTag/RecoTau/interface/RecoTauCommonUtilities.h" #include "DataFormats/Common/interface/RefToPtr.h" +#include "DataFormats/Math/interface/deltaR.h" + #include "RecoTauTag/RecoTau/interface/pfRecoTauChargedHadronAuxFunctions.h" #include @@ -10,8 +12,17 @@ namespace reco { namespace tau { -RecoTauConstructor::RecoTauConstructor(const PFJetRef& jet, const edm::Handle& pfCands, bool copyGammasFromPiZeros) - : pfCands_(pfCands) +RecoTauConstructor::RecoTauConstructor(const PFJetRef& jet, const edm::Handle& pfCands, + bool copyGammasFromPiZeros, + const StringObjectFunction* signalConeSize, + double minAbsPhotonSumPt_insideSignalCone, double minRelPhotonSumPt_insideSignalCone, + double minAbsPhotonSumPt_outsideSignalCone, double minRelPhotonSumPt_outsideSignalCone) + : signalConeSize_(signalConeSize), + minAbsPhotonSumPt_insideSignalCone_(minAbsPhotonSumPt_insideSignalCone), + minRelPhotonSumPt_insideSignalCone_(minRelPhotonSumPt_insideSignalCone), + minAbsPhotonSumPt_outsideSignalCone_(minAbsPhotonSumPt_outsideSignalCone), + minRelPhotonSumPt_outsideSignalCone_(minRelPhotonSumPt_outsideSignalCone), + pfCands_(pfCands) { // Initialize tau tau_.reset(new PFTau()); @@ -296,6 +307,50 @@ void RecoTauConstructor::sortAndCopyIntoTau() { } } +namespace +{ + PFTau::hadronicDecayMode calculateDecayMode(const reco::PFTau& tau, double dRsignalCone, + double 
minAbsPhotonSumPt_insideSignalCone, double minRelPhotonSumPt_insideSignalCone,
+    double minAbsPhotonSumPt_outsideSignalCone, double minRelPhotonSumPt_outsideSignalCone)
+  {
+    unsigned int nCharged = tau.signalTauChargedHadronCandidates().size();
+    // If no tracks exist, this is definitely not a tau!
+    if ( !nCharged ) return PFTau::kNull;
+
+    unsigned int nPiZeros = 0;
+    const std::vector<RecoTauPiZero>& piZeros = tau.signalPiZeroCandidates();
+    for ( std::vector<RecoTauPiZero>::const_iterator piZero = piZeros.begin();
+          piZero != piZeros.end(); ++piZero ) {
+      double photonSumPt_insideSignalCone = 0.;
+      double photonSumPt_outsideSignalCone = 0.;
+      int numPhotons = piZero->numberOfDaughters();
+      for ( int idxPhoton = 0; idxPhoton < numPhotons; ++idxPhoton ) {
+        const reco::Candidate* photon = piZero->daughter(idxPhoton);
+        double dR = deltaR(photon->p4(), tau.p4());
+        if ( dR < dRsignalCone ) {
+          photonSumPt_insideSignalCone += photon->pt();
+        } else {
+          photonSumPt_outsideSignalCone += photon->pt();
+        }
+      }
+      if ( photonSumPt_insideSignalCone > minAbsPhotonSumPt_insideSignalCone || photonSumPt_insideSignalCone > (minRelPhotonSumPt_insideSignalCone*tau.pt()) ||
+           photonSumPt_outsideSignalCone > minAbsPhotonSumPt_outsideSignalCone || photonSumPt_outsideSignalCone > (minRelPhotonSumPt_outsideSignalCone*tau.pt()) ) ++nPiZeros;
+    }
+
+    // Find the maximum number of PiZeros our parameterization can hold
+    const unsigned int maxPiZeros = PFTau::kOneProngNPiZero;
+
+    // Determine our track index
+    unsigned int trackIndex = (nCharged - 1)*(maxPiZeros + 1);
+
+    // Check if we handle the given number of tracks
+    if ( trackIndex >= PFTau::kRareDecayMode ) return PFTau::kRareDecayMode;
+
+    if ( nPiZeros > maxPiZeros ) nPiZeros = maxPiZeros;
+    return static_cast<PFTau::hadronicDecayMode>(trackIndex + nPiZeros);
+  }
+}
+
 std::auto_ptr<reco::PFTau> RecoTauConstructor::get(bool setupLeadingObjects) {
 LogDebug("TauConstructorGet") << "Start getting" ;
@@ -334,14 +389,14 @@ std::auto_ptr<reco::PFTau> RecoTauConstructor::get(bool setupLeadingObjects)
   // Set P4
   tau_->setP4(p4_);
-//  tau_->setP4(
-//      sumPFCandP4(
-//        getCollection(kSignal, kAll)->begin(),
-//        getCollection(kSignal, kAll)->end()
-//        )
-//      );
+
+
   // Set Decay Mode
-  PFTau::hadronicDecayMode dm = tau_->calculateDecayMode();
+  double dRsignalCone = ( signalConeSize_ ) ? (*signalConeSize_)(*tau_) : 0.5;
+  tau_->setSignalConeSize(dRsignalCone);
+  PFTau::hadronicDecayMode dm = calculateDecayMode(
+    *tau_,
+    dRsignalCone,
+    minAbsPhotonSumPt_insideSignalCone_, minRelPhotonSumPt_insideSignalCone_, minAbsPhotonSumPt_outsideSignalCone_, minRelPhotonSumPt_outsideSignalCone_);
   tau_->setDecayMode(dm);
   LogDebug("TauConstructorGet") << "Pt = " << tau_->pt() << ", eta = " << tau_->eta() << ", phi = " << tau_->phi() << ", mass = " << tau_->mass() << ", dm = " << tau_->decayMode() ;
diff --git a/RecoVertex/VertexTools/src/SharedTracks.cc b/RecoVertex/VertexTools/src/SharedTracks.cc
index 610c22ca666d3..4614d42734798 100644
--- a/RecoVertex/VertexTools/src/SharedTracks.cc
+++ b/RecoVertex/VertexTools/src/SharedTracks.cc
@@ -25,7 +25,7 @@ namespace vertexTools {
       iter != svTracks.end(); iter++) {
     if( std::abs((*iter)->bestTrack()->dz()-pv.z())/(*iter)->bestTrack()->dzError() < maxsigma &&
-        std::abs((*iter)->bestTrack()->dxy(pv.position())/(*iter)->bestTrack()->dxyError() < maxsigma )
+        std::abs((*iter)->bestTrack()->dxy(pv.position())/(*iter)->bestTrack()->dxyError()) < maxsigma )
       count++;
     }
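
Note on the decay-mode encoding used by calculateDecayMode() in the RecoTauConstructor.cc hunk above: the mode index packs the charged-hadron and pi0 multiplicities as (nCharged - 1)*(maxPiZeros + 1) + nPiZeros, which is consistent with the decayMode() values 0, 1, 2 (one prong), 5, 6 (two prongs) and 10 (three prongs) referenced by the footprintCorrections selections earlier in this patch. The standalone sketch below is not part of the patch; it only reproduces that arithmetic outside CMSSW, and the numeric values assumed for PFTau::kOneProngNPiZero (4) and PFTau::kRareDecayMode (15) are inferred from those config values rather than taken from this diff.

// Minimal, self-contained sketch of the decay-mode index arithmetic (assumptions noted above).
#include <iostream>

int encodeDecayMode(unsigned int nCharged, unsigned int nPiZeros)
{
  const unsigned int maxPiZeros = 4;      // assumed value of PFTau::kOneProngNPiZero
  const unsigned int rareDecayMode = 15;  // assumed value of PFTau::kRareDecayMode
  if ( nCharged == 0 ) return -1;         // corresponds to PFTau::kNull: no charged hadrons, not a tau
  unsigned int trackIndex = (nCharged - 1)*(maxPiZeros + 1);
  if ( trackIndex >= rareDecayMode ) return rareDecayMode;  // too many prongs: saturate at the rare-decay-mode value
  if ( nPiZeros > maxPiZeros ) nPiZeros = maxPiZeros;       // clamp the pi0 count, as in the patch
  return trackIndex + nPiZeros;
}

int main()
{
  std::cout << encodeDecayMode(1, 1) << std::endl; // 1  -> one prong + one pi0
  std::cout << encodeDecayMode(2, 1) << std::endl; // 6  -> two prongs + one pi0
  std::cout << encodeDecayMode(3, 0) << std::endl; // 10 -> three prongs, no pi0
  return 0;
}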