diff --git a/Alignment/APEEstimation/python/ApeEstimator_cfi.py b/Alignment/APEEstimation/python/ApeEstimator_cfi.py
index 1ab006bfbfecc..c8894c01b754b 100644
--- a/Alignment/APEEstimation/python/ApeEstimator_cfi.py
+++ b/Alignment/APEEstimation/python/ApeEstimator_cfi.py
@@ -74,7 +74,7 @@
minGoodHitsPerTrack = cms.uint32(0),
#File containing TrackerTree with ideal Geometry
- TrackerTreeFile = cms.string(os.environ['CMSSW_BASE'] + '/src/Alignment/TrackerAlignment/hists/TrackerTree.root'),
+ TrackerTreeFile = cms.string(os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/TrackerTree.root'),
#Sectors defining set of modules for common overview plots resp. APE values
Sectors = cms.VPSet(),
diff --git a/Alignment/APEEstimation/python/samples/MC_UnitTest_TkAlMuonIsolated_cff.py b/Alignment/APEEstimation/python/samples/MC_UnitTest_TkAlMuonIsolated_cff.py
new file mode 100644
index 0000000000000..27e1538ec30d0
--- /dev/null
+++ b/Alignment/APEEstimation/python/samples/MC_UnitTest_TkAlMuonIsolated_cff.py
@@ -0,0 +1,20 @@
+import FWCore.ParameterSet.Config as cms
+
+maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
+readFiles = cms.untracked.vstring()
+secFiles = cms.untracked.vstring()
+source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
+
+
+readFiles.extend( [
+"/store/mc/Run3Winter22DRPremix/WJetsToLNu_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlMuonIsolated-TRKALCADesign_design_geometry_122X_mcRun3_2021_design_v9-v2/2520000/0027e6ed-2626-4ede-97a5-f0a44164b81b.root",
+"/store/mc/Run3Winter22DRPremix/WJetsToLNu_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlMuonIsolated-TRKALCADesign_design_geometry_122X_mcRun3_2021_design_v9-v2/2520000/00899b9d-32ab-46f2-b77b-0b0a8d666027.root",
+"/store/mc/Run3Winter22DRPremix/WJetsToLNu_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlMuonIsolated-TRKALCADesign_design_geometry_122X_mcRun3_2021_design_v9-v2/2520000/07851676-0c65-4630-bbab-7406defeb670.root",
+"/store/mc/Run3Winter22DRPremix/WJetsToLNu_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlMuonIsolated-TRKALCADesign_design_geometry_122X_mcRun3_2021_design_v9-v2/2520000/0a54c512-0f69-44e1-90bc-16da035cbe02.root",
+ ] );
+
+
+
+secFiles.extend( [
+ ] )
+
diff --git a/Alignment/APEEstimation/scripts/initialise.bash b/Alignment/APEEstimation/scripts/initialise.bash
index 083753a57639a..9f9adbba24439 100644
--- a/Alignment/APEEstimation/scripts/initialise.bash
+++ b/Alignment/APEEstimation/scripts/initialise.bash
@@ -2,8 +2,6 @@
DIRBASE="$CMSSW_BASE/src/Alignment/APEEstimation"
-mkdir $CMSSW_BASE/src/Alignment/TrackerAlignment/hists/
-
mkdir $DIRBASE/hists/
mkdir $DIRBASE/hists/workingArea/
mkdir $DIRBASE/hists/workingArea/apeObjects/
diff --git a/Alignment/APEEstimation/test/BuildFile.xml b/Alignment/APEEstimation/test/BuildFile.xml
new file mode 100644
index 0000000000000..1d697d08f8da8
--- /dev/null
+++ b/Alignment/APEEstimation/test/BuildFile.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py b/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py
index 42d16e267f987..b04d66eb6d921 100644
--- a/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py
+++ b/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py
@@ -10,15 +10,11 @@
import sys
options = VarParsing.VarParsing ('standard')
options.register('sample', 'data1', VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "Input sample")
-options.register('useTrackList', False, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.bool, "Use list of preselected tracks")
-options.register('isTest', False, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.bool, "Test run")
# get and parse the command line arguments
options.parseArguments()
print("Input sample: ", options.sample)
-print("Use list of preselected tracks: ", options.useTrackList)
-print("Test run: ", options.isTest)
##
@@ -79,11 +75,6 @@
outputName = 'MinBias.root'
#outputPath = "workingArea"
trackSelection = "MinBias"
-if options.sample == 'data2':
- process.load("Alignment.APEEstimation.samples.Data_TkAlMinBias_Run2018C_PromptReco_v3_cff")
- outputName = 'MinBias1.root'
- #outputPath = "workingArea"
- trackSelection = "MinBias"
if options.sample == 'data3':
process.load("Alignment.APEEstimation.samples.Data_TkAlMuonIsolated_22Jan2013C_v1_cff")
outputName = 'Data_TkAlMuonIsolated_22Jan2013C.root'
@@ -104,22 +95,14 @@
outputPath = '/eos/cms/store/caf/user/jschulz/Skims/MC/UL2016ReRecoRealistic'
outputName = 'Mc_TkAlMuonIsolated_WJetsToLNu_2016.root'
trackSelection = "SingleMu"
-if options.sample == 'zmumu':
- process.load("")
- outputName = ''
- trackSelection = "DoubleMu"
-if options.sample == 'zmumu10':
- process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer12_zmumu10_cff")
- outputName = 'Mc_TkAlMuonIsolated_Summer12_zmumu10.root'
- trackSelection = "DoubleMu"
-if options.sample == 'zmumu20':
- process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer12_zmumu20_cff")
- outputName = 'Mc_TkAlMuonIsolated_Summer12_zmumu20.root'
- trackSelection = "DoubleMu"
-if options.sample == 'zmumu50':
- process.load("Alignment.APEEstimation.samples.DYToMuMu_M-50_Tune4C_13TeV-pythia8_Spring14dr-TkAlMuonIsolated-castor_PU_S14_POSTLS170_V6-v1_ALCARECO_cff")
- outputName = 'Mc_DYToMuMu_M-50_Tune4C_13TeV-pythia8_Spring14dr-TkAlMuonIsolated-castor_PU_S14_POSTLS170_V6-v1.root'
- trackSelection = "DoubleMu"
+
+# For unit tests
+if options.sample == 'UnitTest':
+ process.load("Alignment.APEEstimation.samples.MC_UnitTest_TkAlMuonIsolated_cff")
+ outputName = 'MC_UnitTest_TkAlMuonIsolated.root'
+ maxEvents = 1000
+ globalTag = "auto:phase1_2022_design"
+ trackSelection = "SingleMu"
print("Using output name %s"%(outputName))
@@ -148,7 +131,6 @@
## Number of Events (should be after input file)
##
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(maxEvents) )
-if options.isTest: process.maxEvents.input = 1001
##
@@ -177,13 +159,6 @@
process.MuSkim = trackSelector
-##
-## If preselected track list is used
-##
-if options.useTrackList:
- process.MuSkim.src = 'TrackList'
- process.TriggerSelectionSequence *= process.TrackList
-
import Alignment.CommonAlignment.tools.trackselectionRefitting as trackselRefit
process.seqTrackselRefit = trackselRefit.getSequence(process, trackSelector.src.getModuleLabel())
@@ -226,10 +201,6 @@
process.out.outputCommands.extend(process.ApeSkimEventContent.outputCommands)
-if options.isTest:
- process.out.fileName = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_apeSkim.root'
-
-
##
## Outpath
##
diff --git a/Alignment/APEEstimation/test/SkimProducer/startSkim.py b/Alignment/APEEstimation/test/SkimProducer/startSkim.py
index f2eb82619d527..e73b035f71dcb 100644
--- a/Alignment/APEEstimation/test/SkimProducer/startSkim.py
+++ b/Alignment/APEEstimation/test/SkimProducer/startSkim.py
@@ -61,7 +61,7 @@ def condorSubmitSkim(sample, caf=False):
def localStartSkim(sample):
base = os.environ['CMSSW_BASE']
- execString = "cmsRun {base}/src/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py isTest=False useTrackList=False sample={sample}".format(sample=sample, base=base)
+ execString = "cmsRun {base}/src/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py sample={sample}".format(sample=sample, base=base)
print(execString)
toExec = execString.split(" ")
@@ -73,7 +73,7 @@ def localStartSkim(sample):
def get_output(proc):
while True:
- line = proc.stdout.readline().rstrip()
+ line = proc.stdout.readline().rstrip().decode()
if not line:
break
yield line
@@ -153,7 +153,7 @@ def main(argv):
if len(args.samples) == 0:
print("Usage: python startSkim.py -s ")
- sys.exit()
+ sys.exit(1)
finalSamples = []
for sample in args.samples:
diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py
index 27e0b0abe4882..a182f2922c9cf 100644
--- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py
+++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py
@@ -29,18 +29,10 @@ def save(name, object):
lock.release()
class Dataset:
- name = ""
- nFiles = 0
- maxEvents = -1
- baseDirectory = ""
- sampleType = "data1"
- fileList = []
- conditions = []
-
def __init__(self, config, name):
dsDict = dict(config.items("dataset:{}".format(name)))
self.name = name
- self.baseDirectory = dsDict["baseDirectory"]
+ self.baseDirectory = dsDict["baseDirectory"].replace("$CMSSW_BASE", os.environ['CMSSW_BASE'])
self.fileList = []
names = dsDict["fileNames"].split(" ")
@@ -49,21 +41,20 @@ def __init__(self, config, name):
for fileName in parsedNames:
self.fileList.append(self.baseDirectory+"/"+fileName)
self.nFiles = len(self.fileList)
-
+
+ self.maxEvents = -1
if "maxEvents" in dsDict:
self.maxEvents = int(dsDict["maxEvents"])
- if "isMC" in dsDict:
- if dsDict["isMC"] == "True":
- self.sampleType = "MC"
- else:
- self.sampleType ="data1"
-
+
+ self.sampleType ="data1"
+ if "isMC" in dsDict and dsDict["isMC"] == "True":
+ self.sampleType = "MC"
+
+ self.isCosmics = False
if "isCosmics" in dsDict:
self.isCosmics = (dsDict["isCosmics"] == "True")
- else:
- self.isCosmics = False
- self.conditions, dummy, self.validConditions = loadConditions(dsDict)
+ self.conditions, self.validConditions = loadConditions(dsDict)
# check if any of the sources used for conditions is invalid
if not self.validConditions:
@@ -75,42 +66,29 @@ def __init__(self, config, name):
if not self.existingFiles:
for fileName in missingFiles:
print("Invalid file name {} defined for dataset {}".format(fileName, self.name))
-
-
-class Alignment:
- name = ""
- alignmentName = None
- baselineDir = "Design"
- globalTag = "None"
- isDesign = False
- hasAlignmentCondition = False
- conditions = []
+class Alignment:
def __init__(self, config, name):
alDict = dict(config.items("alignment:{}".format(name)))
self.name = name
- if "alignmentName" in alDict:
- self.alignmentName = alDict["alignmentName"]
+
+ self.globalTag = "None"
if "globalTag" in alDict:
self.globalTag = alDict["globalTag"]
+ self.baselineDir = "Design"
if "baselineDir" in alDict:
self.baselineDir= alDict["baselineDir"]
+ self.isDesign = False
if "isDesign" in alDict:
self.isDesign= (alDict["isDesign"] == "True")
- # If self.hasAlignmentCondition is true, no other Alignment-Object is loaded in apeEstimator_cfg.py using the alignmentName
- self.conditions, self.hasAlignmentCondition, self.validConditions = loadConditions(alDict)
+        # Alignment conditions are now always taken from the conditions defined here; apeEstimator_cfg.py uses "fromConditions"
+ self.conditions, self.validConditions = loadConditions(alDict)
# check if any of the sources used for conditions is invalid
if not self.validConditions:
print("Invalid conditions defined for alignment {}".format(self.name))
-
- # check if at least one of the two ways to define the alignment was used
- if self.alignmentName == None and not self.hasAlignmentCondition:
- print("Error: No alignment object name or record was defined for alignment {}".format(self.name))
- sys.exit()
-
class ApeMeasurement:
name = "workingArea"
@@ -118,7 +96,6 @@ class ApeMeasurement:
firstIteration = 0
maxIterations = 15
maxEvents = None
- status = STATE_NONE
dataset = None
alignment = None
runningJobs = None
@@ -128,10 +105,10 @@ class ApeMeasurement:
def __init__(self, name, config, settings):
self.name = name
- self.status = STATE_ITERATION_START
+ self.status_ = STATE_ITERATION_START
self.runningJobs = []
self.failedJobs = []
- self.startTime = subprocess.check_output(["date"]).strip()
+ self.startTime = subprocess.check_output(["date"]).decode().strip()
# load conditions from dictionary, overwrite defaults if defined
for key, value in settings.items():
@@ -139,9 +116,6 @@ def __init__(self, name, config, settings):
setattr(self, key, value)
# Replace names with actual Dataset and Alignment objects
- # In principle, one could preload all these once so they are not
- # redefined for each measurement, but right now this does not
- # seem necessary
self.dataset = Dataset(config, settings["dataset"])
self.alignment = Alignment(config, settings["alignment"])
@@ -156,16 +130,16 @@ def __init__(self, name, config, settings):
if self.alignment.isDesign:
self.maxIterations = 0
- self.conditions, dummy, self.validConditions = loadConditions(settings)
+ self.conditions, self.validConditions = loadConditions(settings)
# see if sanity checks passed
if not self.alignment.validConditions or not self.dataset.validConditions or not self.dataset.existingFiles or not self.validConditions:
- self.status = STATE_INVALID_CONDITIONS
- self.print_status()
- self.finishTime = subprocess.check_output(["date"]).strip()
+ self.setStatus(STATE_INVALID_CONDITIONS, True)
+ return
+
+ if unitTest:
return
-
if self.alignment.isDesign and self.dataset.sampleType != "MC":
# For now, this won't immediately shut down the program
print("APE Measurement {} is scheduled to to an APE baseline measurement with a dataset that is not marked as isMC=True. Is this intended?".format(self.name))
@@ -173,20 +147,25 @@ def __init__(self, name, config, settings):
if not self.alignment.isDesign:
ensurePathExists('{}/hists/{}/apeObjects'.format(base, self.name))
-
- def get_status(self):
- return status_map[self.status]
+ def status(self):
+ return status_map[self.status_]
- def print_status(self):
- print("APE Measurement {} in iteration {} is now in status {}".format(self.name, self.curIteration, self.get_status()))
+ def printStatus(self):
+ print("APE Measurement {} in iteration {} is now in status {}".format(self.name, self.curIteration, self.status()))
+
+ def setStatus(self, status, terminal=False):
+ if self.status_ != status:
+ self.status_ = status
+ self.printStatus()
+ if terminal:
+ self.finishTime = subprocess.check_output(["date"]).decode().strip()
# submit jobs for track refit and hit categorization
- def submit_jobs(self):
+ def submitJobs(self):
toSubmit = []
allConditions = self.alignment.conditions+self.dataset.conditions+self.conditions
- allConditions = list({v['record']:v for v in allConditions}.values()) # should we clean for duplicate records? the record last defined (from dataset)
- # will be kept in case of overlap, which is the same as if there was no overlap removal
+ allConditions = list({v['record']:v for v in allConditions}.values()) # Removes double definitions of Records
ensurePathExists("{}/test/autoSubmitter/workingArea".format(base))
@@ -203,10 +182,7 @@ def submit_jobs(self):
for condition in allConditions:
fi.write(conditionsTemplate.format(record=condition["record"], connect=condition["connect"], tag=condition["tag"]))
-
- alignmentNameToUse = self.alignment.alignmentName
- if self.alignment.hasAlignmentCondition:
- alignmentNameToUse = "fromConditions"
+ alignmentNameToUse = "fromConditions"
lastIter = (self.curIteration==self.maxIterations) and not self.alignment.isDesign
@@ -227,7 +203,7 @@ def submit_jobs(self):
arguments += condorArgumentTemplate.format(fileNumber=fileNumber, inputFile=inputFile)
# build condor submit script
- date = subprocess.check_output(["date", "+%m_%d_%H_%M_%S"]).strip()
+ date = subprocess.check_output(["date", "+%m_%d_%H_%M_%S"]).decode().strip()
sub = "{}/test/autoSubmitter/workingArea/job_{}_iter{}".format(base, self.name, self.curIteration)
errorFileTemp = sub+"_error_{}.txt"
@@ -255,7 +231,7 @@ def submit_jobs(self):
# submit batch
from autoSubmitterTemplates import submitCondorTemplate
- subOut = subprocess.check_output(submitCondorTemplate.format(subFile=submitFileName), shell=True).strip()
+ subOut = subprocess.check_output(submitCondorTemplate.format(subFile=submitFileName), shell=True).decode().strip()
if len(subOut) == 0:
print("Running on environment that does not know bsub command or ssh session is timed out (ongoing for longer than 24h?), exiting")
@@ -266,12 +242,9 @@ def submit_jobs(self):
# list contains condor log files from which to read when job is terminated to detect errors
self.runningJobs.append((logFileTemp.format(i), errorFileTemp.format(i), "{}.{}".format(cluster, i)))
-
- self.status = STATE_BJOBS_WAITING
- self.print_status()
+ self.setStatus(STATE_BJOBS_WAITING)
- def check_jobs(self):
- lastStatus = self.status
+ def checkJobs(self):
stillRunningJobs = []
# check all still running jobs
for logName, errName, jobId in self.runningJobs:
@@ -313,10 +286,9 @@ def check_jobs(self):
# at least one job failed
if len(self.failedJobs) > 0:
- self.status = STATE_BJOBS_FAILED
- self.finishTime = subprocess.check_output(["date"]).strip()
+ self.setStatus(STATE_BJOBS_FAILED, True)
elif len(self.runningJobs) == 0:
- self.status = STATE_BJOBS_DONE
+ self.setStatus(STATE_BJOBS_DONE)
print("All condor jobs of APE measurement {} in iteration {} are done".format(self.name, self.curIteration))
# remove files
@@ -334,13 +306,10 @@ def check_jobs(self):
os.remove(errorFile)
os.remove(outputFile)
os.remove(logFile)
-
- if lastStatus != self.status:
- self.print_status()
# merges files from jobs
- def do_merge(self):
- self.status = STATE_MERGE_WAITING
+ def mergeFiles(self):
+ self.setStatus(STATE_MERGE_WAITING)
if self.alignment.isDesign:
folderName = '{}/hists/{}/baseline'.format(base, self.name)
else:
@@ -365,15 +334,13 @@ def do_merge(self):
os.remove(name)
if rootFileValid("{}/allData.root".format(folderName)) and merge_result == 0:
- self.status = STATE_MERGE_DONE
+ self.setStatus(STATE_MERGE_DONE)
else:
- self.status = STATE_MERGE_FAILED
- self.finishTime = subprocess.check_output(["date"]).strip()
- self.print_status()
+ self.setStatus(STATE_MERGE_FAILED, True)
# calculates APE
- def do_summary(self):
- self.status = STATE_SUMMARY_WAITING
+ def calculateApe(self):
+ self.status_ = STATE_SUMMARY_WAITING
from autoSubmitterTemplates import summaryTemplate
if self.alignment.isDesign:
#use measurement name as baseline folder name in this case
@@ -383,34 +350,29 @@ def do_summary(self):
summary_result = subprocess.call(summaryTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occured)
if summary_result == 0:
- self.status = STATE_SUMMARY_DONE
+ self.setStatus(STATE_SUMMARY_DONE)
else:
- self.status = STATE_SUMMARY_FAILED
- self.finishTime = subprocess.check_output(["date"]).strip()
- self.print_status()
+ self.setStatus(STATE_SUMMARY_FAILED, True)
# saves APE to .db file so it can be read out next iteration
- def do_local_setting(self):
- self.status = STATE_LOCAL_WAITING
+ def writeApeToDb(self):
+ self.setStatus(STATE_LOCAL_WAITING)
from autoSubmitterTemplates import localSettingTemplate
inputCommands = "iterNumber={} setBaseline={} measurementName={}".format(self.curIteration,self.alignment.isDesign,self.name)
local_setting_result = subprocess.call(localSettingTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occured)
if local_setting_result == 0:
- self.status = STATE_LOCAL_DONE
+ self.setStatus(STATE_LOCAL_DONE)
else:
- self.status = STATE_LOCAL_FAILED
- self.finishTime = subprocess.check_output(["date"]).strip()
- self.print_status()
+ self.setStatus(STATE_LOCAL_FAILED, True)
- def finish_iteration(self):
+ def finishIteration(self):
print("APE Measurement {} just finished iteration {}".format(self.name, self.curIteration))
if self.curIteration < self.maxIterations:
self.curIteration += 1
- self.status = STATE_ITERATION_START
+ self.setStatus(STATE_ITERATION_START)
else:
- self.status = STATE_FINISHED
- self.finishTime = subprocess.check_output(["date"]).strip()
+ self.setStatus(STATE_FINISHED, True)
print("APE Measurement {}, which was started at {} was finished after {} iterations, at {}".format(self.name, self.startTime, self.curIteration, self.finishTime))
def kill(self):
@@ -418,7 +380,7 @@ def kill(self):
for log, err, jobId in self.runningJobs:
subprocess.call(killJobTemplate.format(jobId=jobId), shell=True)
self.runningJobs = []
- self.status = STATE_NONE
+ self.setStatus(STATE_NONE)
def purge(self):
self.kill()
@@ -426,18 +388,17 @@ def purge(self):
shutil.rmtree(folderName)
# remove log-files as well?
- def run_iteration(self):
+ def runIteration(self):
global threadcounter
global measurements
threadcounter.acquire()
try:
- if self.status == STATE_ITERATION_START:
+ if self.status_ == STATE_ITERATION_START:
# start bjobs
print("APE Measurement {} just started iteration {}".format(self.name, self.curIteration))
-
try:
- self.submit_jobs()
+ self.submitJobs()
save("measurements", measurements)
except Exception as e:
# this is needed in case the scheduler goes down
@@ -445,56 +406,57 @@ def run_iteration(self):
print(e)
return
- if self.status == STATE_BJOBS_WAITING:
+ if self.status_ == STATE_BJOBS_WAITING:
# check if bjobs are finished
- self.check_jobs()
+ self.checkJobs()
save("measurements", measurements)
- if self.status == STATE_BJOBS_DONE:
+ if self.status_ == STATE_BJOBS_DONE:
# merge files
- self.do_merge()
+ self.mergeFiles()
save("measurements", measurements)
- if self.status == STATE_MERGE_DONE:
+ if self.status_ == STATE_MERGE_DONE:
# start summary
- self.do_summary()
+ self.calculateApe()
save("measurements", measurements)
- if self.status == STATE_SUMMARY_DONE:
+ if self.status_ == STATE_SUMMARY_DONE:
# start local setting (only if not a baseline measurement)
if self.alignment.isDesign:
- self.status = STATE_LOCAL_DONE
+ self.setStatus(STATE_LOCAL_DONE)
else:
- self.do_local_setting()
+ self.writeApeToDb()
save("measurements", measurements)
- if self.status == STATE_LOCAL_DONE:
- self.finish_iteration()
+ if self.status_ == STATE_LOCAL_DONE:
+ self.finishIteration()
save("measurements", measurements)
# go to next iteration or finish measurement
- if self.status == STATE_BJOBS_FAILED or \
- self.status == STATE_MERGE_FAILED or \
- self.status == STATE_SUMMARY_FAILED or \
- self.status == STATE_LOCAL_FAILED or \
- self.status == STATE_INVALID_CONDITIONS or \
- self.status == STATE_FINISHED:
+
+ if self.status_ == STATE_BJOBS_FAILED or \
+ self.status_ == STATE_MERGE_FAILED or \
+ self.status_ == STATE_SUMMARY_FAILED or \
+ self.status_ == STATE_LOCAL_FAILED or \
+ self.status_ == STATE_INVALID_CONDITIONS or \
+ self.status_ == STATE_FINISHED:
with open(history_file, "a") as fi:
- fi.write("APE measurement {name} which was started at {start} finished at {end} with state {state} in iteration {iteration}\n".format(name=self.name, start=self.startTime, end=self.finishTime, state=self.get_status(), iteration=self.curIteration))
- if self.status == STATE_FINISHED:
+ fi.write("APE measurement {name} which was started at {start} finished at {end} with state {state} in iteration {iteration}\n".format(name=self.name, start=self.startTime, end=self.finishTime, state=self.status(), iteration=self.curIteration))
+ if self.status_ == STATE_FINISHED:
global finished_measurements
finished_measurements[self.name] = self
save("finished", finished_measurements)
else:
global failed_measurements
failed_measurements[self.name] = self
- self.status = STATE_NONE
+
+ self.setStatus(STATE_NONE)
save("failed", failed_measurements)
save("measurements", measurements)
- if self.status == STATE_ITERATION_START: # this ensures that jobs do not go into idle if many measurements are done simultaneously
+ if self.status_ == STATE_ITERATION_START: # this ensures that jobs do not go into idle if many measurements are done simultaneously
# start bjobs
print("APE Measurement {} just started iteration {}".format(self.name, self.curIteration))
- self.submit_jobs()
+ self.submitJobs()
save("measurements", measurements)
finally:
threadcounter.release()
-
def main():
parser = argparse.ArgumentParser(description="Automatically run APE measurements")
parser.add_argument("-c", "--config", action="append", dest="configs", default=[],
@@ -511,6 +473,8 @@ def main():
help='Number of threads running in parallel')
parser.add_argument("-C", "--caf",action="store_true", dest="caf", default=False,
help="Use CAF queue for condor jobs")
+ parser.add_argument("-u", "--unitTest", action="store_true", dest="unitTest", default=False,
+ help='If this is used, as soon as a measurement fails, the program will exit and as exit code the status of the measurement, i.e., where it failed')
args = parser.parse_args()
global base
@@ -519,10 +483,10 @@ def main():
global threadcounter
global lock
global use_caf
+ global unitTest
use_caf = args.caf
- enableCAF(use_caf)
-
+ unitTest = args.unitTest
threadcounter = threading.BoundedSemaphore(args.ncores)
lock = threading.Lock()
@@ -535,9 +499,8 @@ def main():
base = os.environ['CMSSW_BASE']+"/src/Alignment/APEEstimation"
except KeyError:
print("No CMSSW environment was set, exiting")
- sys.exit()
-
-
+ sys.exit(1)
+
killTargets = []
purgeTargets = []
for toConvert in args.kill:
@@ -565,7 +528,7 @@ def main():
for res in resumed:
measurements.append(res)
- print("Measurement {} in state {} in iteration {} was resumed".format(res.name, res.get_status(), res.curIteration))
+ print("Measurement {} in state {} in iteration {} was resumed".format(res.name, res.status(), res.curIteration))
# Killing and purging is done here, because it doesn't make
# sense to kill or purge a measurement that was just started
for to_kill in args.kill:
@@ -580,7 +543,7 @@ def main():
except IOError:
print("Could not resume because {} could not be opened, exiting".format(shelve_name))
- sys.exit()
+ sys.exit(2)
# read out from config file
if args.configs != []:
@@ -599,22 +562,28 @@ def main():
measurement = ApeMeasurement(name, config, settings)
- if measurement.status >= STATE_ITERATION_START and measurement.status <= STATE_FINISHED:
+ if measurement.status_ >= STATE_ITERATION_START:
measurements.append(measurement)
print("APE Measurement {} was started".format(measurement.name))
-
-
+
+ if unitTest:
+ # status is 0 if successful, 101 if wrongly configured
+ sys.exit(measurement.status_)
+
+ initializeModuleLoading()
+ enableCAF(use_caf)
+
while True:
# remove finished and failed measurements
- measurements = [measurement for measurement in measurements if not (measurement.status==STATE_NONE or measurement.status == STATE_FINISHED)]
+ measurements = [measurement for measurement in measurements if not (measurement.status_==STATE_NONE or measurement.status_ == STATE_FINISHED)]
save("measurements", measurements)
save("failed", failed_measurements)
save("finished", finished_measurements)
list_threads = []
for measurement in measurements:
- t = threading.Thread(target=measurement.run_iteration)
+ t = threading.Thread(target=measurement.runIteration)
list_threads.append(t)
t.start()
@@ -624,7 +593,7 @@ def main():
if len(measurements) == 0:
print("No APE measurements are active, exiting")
- break
+ sys.exit(0)
try: # so that interrupting does not give an error message and just ends the program
time_remaining = clock_interval
@@ -639,7 +608,6 @@ def main():
sys.stdout.write("\033[K")
except KeyboardInterrupt:
sys.exit(0)
-
-
+
if __name__ == "__main__":
main()
diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py
index b09b1f091e673..5632d093973d3 100644
--- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py
+++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py
@@ -21,14 +21,13 @@
Error = {errorFile}
Log = {logFile}
request_memory = 2000M
-request_disk = 400M
+request_disk = 500M
batch_name = {jobName}
+JobFlavour = "workday"
Queue Arguments from (
{arguments})
"""
-
condorSubTemplateCAF="""
Executable = {jobFile}
Universe = vanilla
@@ -36,7 +35,7 @@
Error = {errorFile}
Log = {logFile}
request_memory = 2000M
-request_disk = 400M
+request_disk = 500M
batch_name = {jobName}
+JobFlavour = "workday"
+AccountingGroup = "group_u_CMS.CAF.ALCA"
diff --git a/Alignment/APEEstimation/test/autoSubmitter/helpers.py b/Alignment/APEEstimation/test/autoSubmitter/helpers.py
index 7c37fd6e39661..bb78557ff2b73 100644
--- a/Alignment/APEEstimation/test/autoSubmitter/helpers.py
+++ b/Alignment/APEEstimation/test/autoSubmitter/helpers.py
@@ -70,17 +70,18 @@ def rootFileValid(path):
result &= not file.IsZombie()
return result
-if not 'MODULEPATH' in os.environ:
- f = open(os.environ['MODULESHOME'] + "/init/.modulespath", "r")
- path = []
- for line in f.readlines():
- line = re.sub("#.*$", '', line)
- if line != '':
- path.append(line)
- os.environ['MODULEPATH'] = ':'.join(path)
-
-if not 'LOADEDMODULES' in os.environ:
- os.environ['LOADEDMODULES'] = ''
+def initializeModuleLoading():
+ if not 'MODULEPATH' in os.environ:
+ f = open(os.environ['MODULESHOME'] + "/init/.modulespath", "r")
+ path = []
+ for line in f.readlines():
+ line = re.sub("#.*$", '', line)
+ if line != '':
+ path.append(line)
+ os.environ['MODULEPATH'] = ':'.join(path)
+
+ if not 'LOADEDMODULES' in os.environ:
+ os.environ['LOADEDMODULES'] = ''
def module(*args):
if type(args[0]) == type([]):
@@ -95,8 +96,6 @@ def enableCAF(switch):
module('load', 'lxbatch/tzero')
else:
module('load', 'lxbatch/share')
-
-
def ensurePathExists(path):
try:
@@ -105,7 +104,6 @@ def ensurePathExists(path):
if exception.errno != errno.EEXIST:
raise
-
def replaceAllRanges(string):
if "[" in string and "]" in string:
strings = []
@@ -150,8 +148,6 @@ def allFilesExist(dataset):
passed = False
missingFiles.append(fileName)
return passed, missingFiles
-
-
def hasValidSource(condition):
if condition["connect"].startswith("frontier://FrontierProd/"):
@@ -165,7 +161,6 @@ def hasValidSource(condition):
return False
def loadConditions(dictionary):
- hasAlignmentCondition = False
goodConditions = True
conditions = []
for key, value in dictionary.items():
@@ -176,8 +171,6 @@ def loadConditions(dictionary):
# structure is "condition rcd:source tag"
record = key.split(" ")[1]
connect, tag = value.split(" ")
- if record == "TrackerAlignmentRcd":
- hasAlignmentCondition = True
conditions.append({"record":record, "connect":replaceShortcuts(connect), "tag":tag})
elif len(value.split(" ")) == 1 and len(key.split(" ")) == 2:
# structure is "condition tag:source", so we have to guess rcd from the tag. might also be "condition tag1+tag2+...+tagN:source"
@@ -188,8 +181,6 @@ def loadConditions(dictionary):
for possibleTag, possibleRcd in records.items():
if tag.startswith(possibleTag):
conditions.append({"record":possibleRcd, "connect":replaceShortcuts(connect), "tag":tag})
- if possibleRcd == "TrackerAlignmentRcd":
- hasAlignmentCondition = True
foundTag = True
break
if not foundTag:
@@ -207,4 +198,4 @@ def loadConditions(dictionary):
if not condition["record"].endswith("Rcd"):
goodConditions = False
print("'{}' is not a valid record name.".format(condition["record"]))
- return conditions, hasAlignmentCondition, goodConditions
+ return conditions, goodConditions
diff --git a/Alignment/APEEstimation/test/autoSubmitter/unitTest.ini b/Alignment/APEEstimation/test/autoSubmitter/unitTest.ini
new file mode 100644
index 0000000000000..3920aace7d635
--- /dev/null
+++ b/Alignment/APEEstimation/test/autoSubmitter/unitTest.ini
@@ -0,0 +1,12 @@
+[dataset:wlnu]
+baseDirectory=$CMSSW_BASE/unit_tests/ApeTest
+fileNames=MC_UnitTest_TkAlMuonIsolated_1.root
+isMC=True
+
+[alignment:FromGT]
+globalTag=auto:phase1_2022_design
+isDesign=True
+
+[ape:Design]
+dataset: wlnu
+alignment: FromGT
diff --git a/Alignment/APEEstimation/test/trackerTreeGenerator_cfg.py b/Alignment/APEEstimation/test/trackerTreeGenerator_cfg.py
new file mode 100644
index 0000000000000..4727e156f1327
--- /dev/null
+++ b/Alignment/APEEstimation/test/trackerTreeGenerator_cfg.py
@@ -0,0 +1,90 @@
+from __future__ import print_function
+import FWCore.ParameterSet.Config as cms
+
+import os
+
+
+##
+## Process definition
+##
+process = cms.Process("TrackerTreeGeneration")
+
+
+
+##
+## MessageLogger
+##
+process.load("FWCore.MessageService.MessageLogger_cfi")
+process.MessageLogger.cerr.threshold = 'INFO'
+process.MessageLogger.TrackerTreeGenerator=dict()
+process.MessageLogger.cerr.INFO.limit = 0
+process.MessageLogger.cerr.default.limit = -1
+process.MessageLogger.cerr.TrackerTreeGenerator = cms.untracked.PSet(limit = cms.untracked.int32(-1))
+
+
+
+##
+## Process options
+##
+process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
+
+
+
+##
+## Input source
+##
+process.source = cms.Source("EmptySource")
+
+
+
+##
+## Number of events
+##
+process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
+
+
+
+##
+## Geometry
+##
+process.load("Configuration.Geometry.GeometryRecoDB_cff")
+
+
+##
+## Conditions
+##
+# use always ideal conditions to get no influence from Alignment on absolute Positions, Orientations...
+# so it is clear that when choosing special regions in e.g. globalPhi, Modules of the same Rod are contained in the same region
+process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
+from Configuration.AlCa.GlobalTag import GlobalTag
+process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase1_2023_design', '')
+print("Using global tag "+process.GlobalTag.globaltag._value)
+
+
+##
+## Analyzer
+##
+process.load("Alignment.TrackerAlignment.TrackerTreeGenerator_cfi")
+
+
+
+##
+## Output File Configuration
+##
+process.TFileService = cms.Service("TFileService",
+ fileName = cms.string(os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/TrackerTree.root')
+)
+
+
+
+##
+## Path
+##
+process.p = cms.Path(process.TrackerTreeGenerator)
+
+
+
+
+
+
+
diff --git a/Alignment/APEEstimation/test/unitTest.sh b/Alignment/APEEstimation/test/unitTest.sh
new file mode 100755
index 0000000000000..a29c2099c3f24
--- /dev/null
+++ b/Alignment/APEEstimation/test/unitTest.sh
@@ -0,0 +1,11 @@
+#! /bin/bash
+function die { echo $1: status $2 ; exit $2; }
+
+echo " TESTING data set skimming"
+# skim the predefined data set
+python3 $CMSSW_BASE/src/Alignment/APEEstimation/test/SkimProducer/startSkim.py -s UnitTest || die "Failure skimming data set" $?
+
+echo " TESTING auto submitter"
+# start baseline measurement
+python3 $CMSSW_BASE/src/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py -c $CMSSW_BASE/src/Alignment/APEEstimation/test/autoSubmitter/unitTest.ini -u || die "Failure running autoSubmitter" $?
+