From 7df3aaae8b8c1ec9fe9904398f3cbe3ba8c8e852 Mon Sep 17 00:00:00 2001 From: mteroerd Date: Mon, 19 Jun 2023 15:34:19 +0200 Subject: [PATCH 1/5] Fix python3 compatibility and do some minor formatting changes --- .../test/autoSubmitter/autoSubmitter.py | 133 +++++++----------- .../autoSubmitter/autoSubmitterTemplates.py | 5 +- .../test/autoSubmitter/helpers.py | 5 - 3 files changed, 51 insertions(+), 92 deletions(-) diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py index 27e0b0abe4882..3f14900fdbb10 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py +++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py @@ -29,14 +29,6 @@ def save(name, object): lock.release() class Dataset: - name = "" - nFiles = 0 - maxEvents = -1 - baseDirectory = "" - sampleType = "data1" - fileList = [] - conditions = [] - def __init__(self, config, name): dsDict = dict(config.items("dataset:{}".format(name))) self.name = name @@ -49,19 +41,18 @@ def __init__(self, config, name): for fileName in parsedNames: self.fileList.append(self.baseDirectory+"/"+fileName) self.nFiles = len(self.fileList) - + + self.maxEvents = -1 if "maxEvents" in dsDict: self.maxEvents = int(dsDict["maxEvents"]) - if "isMC" in dsDict: - if dsDict["isMC"] == "True": - self.sampleType = "MC" - else: - self.sampleType ="data1" - + + self.sampleType ="data1" + if "isMC" in dsDict and dsDict["isMC"] == "True": + self.sampleType = "MC" + + self.isCosmics = False if "isCosmics" in dsDict: self.isCosmics = (dsDict["isCosmics"] == "True") - else: - self.isCosmics = False self.conditions, dummy, self.validConditions = loadConditions(dsDict) @@ -75,26 +66,22 @@ def __init__(self, config, name): if not self.existingFiles: for fileName in missingFiles: print("Invalid file name {} defined for dataset {}".format(fileName, self.name)) - - -class Alignment: - name = "" - alignmentName = None - baselineDir = "Design" - globalTag = "None" - isDesign = False - hasAlignmentCondition = False - conditions = [] +class Alignment: def __init__(self, config, name): alDict = dict(config.items("alignment:{}".format(name))) self.name = name + + alignmentName = None if "alignmentName" in alDict: self.alignmentName = alDict["alignmentName"] + self.globalTag = "None" if "globalTag" in alDict: self.globalTag = alDict["globalTag"] + self.baselineDir = "Design" if "baselineDir" in alDict: self.baselineDir= alDict["baselineDir"] + self.isDesign = False if "isDesign" in alDict: self.isDesign= (alDict["isDesign"] == "True") @@ -105,9 +92,9 @@ def __init__(self, config, name): if not self.validConditions: print("Invalid conditions defined for alignment {}".format(self.name)) - # check if at least one of the two ways to define the alignment was used - if self.alignmentName == None and not self.hasAlignmentCondition: + # for baseline (Design) measurements, this is not needed, as we usually take conditions from GT + if self.alignmentName == None and not self.hasAlignmentCondition and not self.isDesign: print("Error: No alignment object name or record was defined for alignment {}".format(self.name)) sys.exit() @@ -131,7 +118,7 @@ def __init__(self, name, config, settings): self.status = STATE_ITERATION_START self.runningJobs = [] self.failedJobs = [] - self.startTime = subprocess.check_output(["date"]).strip() + self.startTime = subprocess.check_output(["date"]).decode().strip() # load conditions from dictionary, overwrite defaults if defined for key, value in 
settings.items(): @@ -139,9 +126,6 @@ def __init__(self, name, config, settings): setattr(self, key, value) # Replace names with actual Dataset and Alignment objects - # In principle, one could preload all these once so they are not - # redefined for each measurement, but right now this does not - # seem necessary self.dataset = Dataset(config, settings["dataset"]) self.alignment = Alignment(config, settings["alignment"]) @@ -160,11 +144,9 @@ def __init__(self, name, config, settings): # see if sanity checks passed if not self.alignment.validConditions or not self.dataset.validConditions or not self.dataset.existingFiles or not self.validConditions: - self.status = STATE_INVALID_CONDITIONS - self.print_status() - self.finishTime = subprocess.check_output(["date"]).strip() + self.set_status(STATE_INVALID_CONDITIONS) + self.finishTime = subprocess.check_output(["date"]).decode().strip() return - if self.alignment.isDesign and self.dataset.sampleType != "MC": # For now, this won't immediately shut down the program @@ -173,20 +155,25 @@ def __init__(self, name, config, settings): if not self.alignment.isDesign: ensurePathExists('{}/hists/{}/apeObjects'.format(base, self.name)) - def get_status(self): return status_map[self.status] def print_status(self): print("APE Measurement {} in iteration {} is now in status {}".format(self.name, self.curIteration, self.get_status())) + def set_status(self, status, terminal=False): + if self.status != status: + self.status = status + self.print_status() + if terminal: + self.finishTime = subprocess.check_output(["date"]).decode().strip() + # submit jobs for track refit and hit categorization def submit_jobs(self): toSubmit = [] allConditions = self.alignment.conditions+self.dataset.conditions+self.conditions - allConditions = list({v['record']:v for v in allConditions}.values()) # should we clean for duplicate records? 
the record last defined (from dataset) - # will be kept in case of overlap, which is the same as if there was no overlap removal + allConditions = list({v['record']:v for v in allConditions}.values()) # Removes double definitions of Records ensurePathExists("{}/test/autoSubmitter/workingArea".format(base)) @@ -203,7 +190,6 @@ def submit_jobs(self): for condition in allConditions: fi.write(conditionsTemplate.format(record=condition["record"], connect=condition["connect"], tag=condition["tag"])) - alignmentNameToUse = self.alignment.alignmentName if self.alignment.hasAlignmentCondition: alignmentNameToUse = "fromConditions" @@ -227,7 +213,7 @@ def submit_jobs(self): arguments += condorArgumentTemplate.format(fileNumber=fileNumber, inputFile=inputFile) # build condor submit script - date = subprocess.check_output(["date", "+%m_%d_%H_%M_%S"]).strip() + date = subprocess.check_output(["date", "+%m_%d_%H_%M_%S"]).decode().strip() sub = "{}/test/autoSubmitter/workingArea/job_{}_iter{}".format(base, self.name, self.curIteration) errorFileTemp = sub+"_error_{}.txt" @@ -255,7 +241,7 @@ def submit_jobs(self): # submit batch from autoSubmitterTemplates import submitCondorTemplate - subOut = subprocess.check_output(submitCondorTemplate.format(subFile=submitFileName), shell=True).strip() + subOut = subprocess.check_output(submitCondorTemplate.format(subFile=submitFileName), shell=True).decode().strip() if len(subOut) == 0: print("Running on environment that does not know bsub command or ssh session is timed out (ongoing for longer than 24h?), exiting") @@ -266,12 +252,9 @@ def submit_jobs(self): # list contains condor log files from which to read when job is terminated to detect errors self.runningJobs.append((logFileTemp.format(i), errorFileTemp.format(i), "{}.{}".format(cluster, i))) - - self.status = STATE_BJOBS_WAITING - self.print_status() + self.set_status(STATE_BJOBS_WAITING) def check_jobs(self): - lastStatus = self.status stillRunningJobs = [] # check all still running jobs for logName, errName, jobId in self.runningJobs: @@ -313,10 +296,9 @@ def check_jobs(self): # at least one job failed if len(self.failedJobs) > 0: - self.status = STATE_BJOBS_FAILED - self.finishTime = subprocess.check_output(["date"]).strip() + self.set_status(STATE_BJOBS_FAILED, True) elif len(self.runningJobs) == 0: - self.status = STATE_BJOBS_DONE + self.set_status(STATE_BJOBS_DONE) print("All condor jobs of APE measurement {} in iteration {} are done".format(self.name, self.curIteration)) # remove files @@ -334,13 +316,10 @@ def check_jobs(self): os.remove(errorFile) os.remove(outputFile) os.remove(logFile) - - if lastStatus != self.status: - self.print_status() # merges files from jobs def do_merge(self): - self.status = STATE_MERGE_WAITING + self.set_status(STATE_MERGE_WAITING) if self.alignment.isDesign: folderName = '{}/hists/{}/baseline'.format(base, self.name) else: @@ -365,11 +344,9 @@ def do_merge(self): os.remove(name) if rootFileValid("{}/allData.root".format(folderName)) and merge_result == 0: - self.status = STATE_MERGE_DONE + self.set_status(STATE_MERGE_DONE) else: - self.status = STATE_MERGE_FAILED - self.finishTime = subprocess.check_output(["date"]).strip() - self.print_status() + self.set_status(STATE_MERGE_FAILED, True) # calculates APE def do_summary(self): @@ -383,34 +360,29 @@ def do_summary(self): summary_result = subprocess.call(summaryTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occured) if summary_result == 0: - self.status = STATE_SUMMARY_DONE + 
self.set_status(STATE_SUMMARY_DONE) else: - self.status = STATE_SUMMARY_FAILED - self.finishTime = subprocess.check_output(["date"]).strip() - self.print_status() + self.set_status(STATE_SUMMARY_FAILED, True) # saves APE to .db file so it can be read out next iteration def do_local_setting(self): - self.status = STATE_LOCAL_WAITING + self.set_status(STATE_LOCAL_WAITING) from autoSubmitterTemplates import localSettingTemplate inputCommands = "iterNumber={} setBaseline={} measurementName={}".format(self.curIteration,self.alignment.isDesign,self.name) local_setting_result = subprocess.call(localSettingTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occured) if local_setting_result == 0: - self.status = STATE_LOCAL_DONE + self.set_status(STATE_LOCAL_DONE) else: - self.status = STATE_LOCAL_FAILED - self.finishTime = subprocess.check_output(["date"]).strip() - self.print_status() + self.set_status(STATE_LOCAL_FAILED, True) def finish_iteration(self): print("APE Measurement {} just finished iteration {}".format(self.name, self.curIteration)) if self.curIteration < self.maxIterations: self.curIteration += 1 - self.status = STATE_ITERATION_START + self.set_status(STATE_ITERATION_START) else: - self.status = STATE_FINISHED - self.finishTime = subprocess.check_output(["date"]).strip() + self.set_status(STATE_FINISHED, True) print("APE Measurement {}, which was started at {} was finished after {} iterations, at {}".format(self.name, self.startTime, self.curIteration, self.finishTime)) def kill(self): @@ -418,7 +390,7 @@ def kill(self): for log, err, jobId in self.runningJobs: subprocess.call(killJobTemplate.format(jobId=jobId), shell=True) self.runningJobs = [] - self.status = STATE_NONE + self.set_status(STATE_NONE) def purge(self): self.kill() @@ -435,7 +407,6 @@ def run_iteration(self): # start bjobs print("APE Measurement {} just started iteration {}".format(self.name, self.curIteration)) - try: self.submit_jobs() save("measurements", measurements) @@ -460,7 +431,7 @@ def run_iteration(self): if self.status == STATE_SUMMARY_DONE: # start local setting (only if not a baseline measurement) if self.alignment.isDesign: - self.status = STATE_LOCAL_DONE + self.set_status(STATE_LOCAL_DONE) else: self.do_local_setting() save("measurements", measurements) @@ -483,7 +454,7 @@ def run_iteration(self): else: global failed_measurements failed_measurements[self.name] = self - self.status = STATE_NONE + self.set_status(STATE_NONE) save("failed", failed_measurements) save("measurements", measurements) if self.status == STATE_ITERATION_START: # this ensures that jobs do not go into idle if many measurements are done simultaneously @@ -494,7 +465,6 @@ def run_iteration(self): finally: threadcounter.release() - def main(): parser = argparse.ArgumentParser(description="Automatically run APE measurements") parser.add_argument("-c", "--config", action="append", dest="configs", default=[], @@ -523,7 +493,6 @@ def main(): use_caf = args.caf enableCAF(use_caf) - threadcounter = threading.BoundedSemaphore(args.ncores) lock = threading.Lock() @@ -535,9 +504,8 @@ def main(): base = os.environ['CMSSW_BASE']+"/src/Alignment/APEEstimation" except KeyError: print("No CMSSW environment was set, exiting") - sys.exit() - - + sys.exit(1) + killTargets = [] purgeTargets = [] for toConvert in args.kill: @@ -580,7 +548,7 @@ def main(): except IOError: print("Could not resume because {} could not be opened, exiting".format(shelve_name)) - sys.exit() + sys.exit(2) # read out from config file 
if args.configs != []: @@ -602,8 +570,6 @@ def main(): if measurement.status >= STATE_ITERATION_START and measurement.status <= STATE_FINISHED: measurements.append(measurement) print("APE Measurement {} was started".format(measurement.name)) - - while True: # remove finished and failed measurements @@ -624,7 +590,7 @@ def main(): if len(measurements) == 0: print("No APE measurements are active, exiting") - break + sys.exit(0) try: # so that interrupting does not give an error message and just ends the program time_remaining = clock_interval @@ -640,6 +606,5 @@ def main(): except KeyboardInterrupt: sys.exit(0) - if __name__ == "__main__": main() diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py index b09b1f091e673..5632d093973d3 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py +++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py @@ -21,14 +21,13 @@ Error = {errorFile} Log = {logFile} request_memory = 2000M -request_disk = 400M +request_disk = 500M batch_name = {jobName} +JobFlavour = "workday" Queue Arguments from ( {arguments}) """ - condorSubTemplateCAF=""" Executable = {jobFile} Universe = vanilla @@ -36,7 +35,7 @@ Error = {errorFile} Log = {logFile} request_memory = 2000M -request_disk = 400M +request_disk = 500M batch_name = {jobName} +JobFlavour = "workday" +AccountingGroup = "group_u_CMS.CAF.ALCA" diff --git a/Alignment/APEEstimation/test/autoSubmitter/helpers.py b/Alignment/APEEstimation/test/autoSubmitter/helpers.py index 7c37fd6e39661..c39943fc7d66d 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/helpers.py +++ b/Alignment/APEEstimation/test/autoSubmitter/helpers.py @@ -95,8 +95,6 @@ def enableCAF(switch): module('load', 'lxbatch/tzero') else: module('load', 'lxbatch/share') - - def ensurePathExists(path): try: @@ -105,7 +103,6 @@ def ensurePathExists(path): if exception.errno != errno.EEXIST: raise - def replaceAllRanges(string): if "[" in string and "]" in string: strings = [] @@ -150,8 +147,6 @@ def allFilesExist(dataset): passed = False missingFiles.append(fileName) return passed, missingFiles - - def hasValidSource(condition): if condition["connect"].startswith("frontier://FrontierProd/"): From 9ab4165f4b6baf26cc076d98712610bca2a4c084 Mon Sep 17 00:00:00 2001 From: mteroerd Date: Mon, 19 Jun 2023 15:39:42 +0200 Subject: [PATCH 2/5] removing redundancy --- Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py index 3f14900fdbb10..2cc8bfd6154a8 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py +++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py @@ -144,8 +144,7 @@ def __init__(self, name, config, settings): # see if sanity checks passed if not self.alignment.validConditions or not self.dataset.validConditions or not self.dataset.existingFiles or not self.validConditions: - self.set_status(STATE_INVALID_CONDITIONS) - self.finishTime = subprocess.check_output(["date"]).decode().strip() + self.set_status(STATE_INVALID_CONDITIONS, True) return if self.alignment.isDesign and self.dataset.sampleType != "MC": From e708d9b5e76e70774af0f34edbf9c67fe052051e Mon Sep 17 00:00:00 2001 From: mteroerd Date: Mon, 19 Jun 2023 16:03:47 +0200 Subject: [PATCH 3/5] Some smaller changes for code rules --- 
.../test/autoSubmitter/autoSubmitter.py | 121 +++++++++--------- 1 file changed, 60 insertions(+), 61 deletions(-) diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py index 2cc8bfd6154a8..1bf836deda79d 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py +++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py @@ -105,7 +105,6 @@ class ApeMeasurement: firstIteration = 0 maxIterations = 15 maxEvents = None - status = STATE_NONE dataset = None alignment = None runningJobs = None @@ -115,7 +114,7 @@ class ApeMeasurement: def __init__(self, name, config, settings): self.name = name - self.status = STATE_ITERATION_START + self.status_ = STATE_ITERATION_START self.runningJobs = [] self.failedJobs = [] self.startTime = subprocess.check_output(["date"]).decode().strip() @@ -144,7 +143,7 @@ def __init__(self, name, config, settings): # see if sanity checks passed if not self.alignment.validConditions or not self.dataset.validConditions or not self.dataset.existingFiles or not self.validConditions: - self.set_status(STATE_INVALID_CONDITIONS, True) + self.setStatus(STATE_INVALID_CONDITIONS, True) return if self.alignment.isDesign and self.dataset.sampleType != "MC": @@ -154,21 +153,21 @@ def __init__(self, name, config, settings): if not self.alignment.isDesign: ensurePathExists('{}/hists/{}/apeObjects'.format(base, self.name)) - def get_status(self): - return status_map[self.status] + def status(self): + return status_map[self.status_] - def print_status(self): - print("APE Measurement {} in iteration {} is now in status {}".format(self.name, self.curIteration, self.get_status())) + def printStatus(self): + print("APE Measurement {} in iteration {} is now in status {}".format(self.name, self.curIteration, self.status())) - def set_status(self, status, terminal=False): - if self.status != status: - self.status = status - self.print_status() + def setStatus(self, status, terminal=False): + if self.status_ != status: + self.status_ = status + self.printStatus() if terminal: self.finishTime = subprocess.check_output(["date"]).decode().strip() # submit jobs for track refit and hit categorization - def submit_jobs(self): + def submitJobs(self): toSubmit = [] allConditions = self.alignment.conditions+self.dataset.conditions+self.conditions @@ -251,9 +250,9 @@ def submit_jobs(self): # list contains condor log files from which to read when job is terminated to detect errors self.runningJobs.append((logFileTemp.format(i), errorFileTemp.format(i), "{}.{}".format(cluster, i))) - self.set_status(STATE_BJOBS_WAITING) + self.setStatus(STATE_BJOBS_WAITING) - def check_jobs(self): + def checkJobs(self): stillRunningJobs = [] # check all still running jobs for logName, errName, jobId in self.runningJobs: @@ -295,9 +294,9 @@ def check_jobs(self): # at least one job failed if len(self.failedJobs) > 0: - self.set_status(STATE_BJOBS_FAILED, True) + self.setStatus(STATE_BJOBS_FAILED, True) elif len(self.runningJobs) == 0: - self.set_status(STATE_BJOBS_DONE) + self.setStatus(STATE_BJOBS_DONE) print("All condor jobs of APE measurement {} in iteration {} are done".format(self.name, self.curIteration)) # remove files @@ -317,8 +316,8 @@ def check_jobs(self): os.remove(logFile) # merges files from jobs - def do_merge(self): - self.set_status(STATE_MERGE_WAITING) + def mergeFiles(self): + self.setStatus(STATE_MERGE_WAITING) if self.alignment.isDesign: folderName = '{}/hists/{}/baseline'.format(base, self.name) else: 
@@ -343,13 +342,13 @@ def do_merge(self): os.remove(name) if rootFileValid("{}/allData.root".format(folderName)) and merge_result == 0: - self.set_status(STATE_MERGE_DONE) + self.setStatus(STATE_MERGE_DONE) else: - self.set_status(STATE_MERGE_FAILED, True) + self.setStatus(STATE_MERGE_FAILED, True) # calculates APE - def do_summary(self): - self.status = STATE_SUMMARY_WAITING + def calculateApe(self): + self.status_ = STATE_SUMMARY_WAITING from autoSubmitterTemplates import summaryTemplate if self.alignment.isDesign: #use measurement name as baseline folder name in this case @@ -359,29 +358,29 @@ def do_summary(self): summary_result = subprocess.call(summaryTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occured) if summary_result == 0: - self.set_status(STATE_SUMMARY_DONE) + self.setStatus(STATE_SUMMARY_DONE) else: - self.set_status(STATE_SUMMARY_FAILED, True) + self.setStatus(STATE_SUMMARY_FAILED, True) # saves APE to .db file so it can be read out next iteration - def do_local_setting(self): - self.set_status(STATE_LOCAL_WAITING) + def writeApeToDb(self): + self.setStatus(STATE_LOCAL_WAITING) from autoSubmitterTemplates import localSettingTemplate inputCommands = "iterNumber={} setBaseline={} measurementName={}".format(self.curIteration,self.alignment.isDesign,self.name) local_setting_result = subprocess.call(localSettingTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occured) if local_setting_result == 0: - self.set_status(STATE_LOCAL_DONE) + self.setStatus(STATE_LOCAL_DONE) else: - self.set_status(STATE_LOCAL_FAILED, True) + self.setStatus(STATE_LOCAL_FAILED, True) - def finish_iteration(self): + def finishIteration(self): print("APE Measurement {} just finished iteration {}".format(self.name, self.curIteration)) if self.curIteration < self.maxIterations: self.curIteration += 1 - self.set_status(STATE_ITERATION_START) + self.setStatus(STATE_ITERATION_START) else: - self.set_status(STATE_FINISHED, True) + self.setStatus(STATE_FINISHED, True) print("APE Measurement {}, which was started at {} was finished after {} iterations, at {}".format(self.name, self.startTime, self.curIteration, self.finishTime)) def kill(self): @@ -389,7 +388,7 @@ def kill(self): for log, err, jobId in self.runningJobs: subprocess.call(killJobTemplate.format(jobId=jobId), shell=True) self.runningJobs = [] - self.set_status(STATE_NONE) + self.setStatus(STATE_NONE) def purge(self): self.kill() @@ -397,17 +396,17 @@ def purge(self): shutil.rmtree(folderName) # remove log-files as well? 
- def run_iteration(self): + def runIteration(self): global threadcounter global measurements threadcounter.acquire() try: - if self.status == STATE_ITERATION_START: + if self.status_ == STATE_ITERATION_START: # start bjobs print("APE Measurement {} just started iteration {}".format(self.name, self.curIteration)) try: - self.submit_jobs() + self.submitJobs() save("measurements", measurements) except Exception as e: # this is needed in case the scheduler goes down @@ -415,51 +414,51 @@ def run_iteration(self): print(e) return - if self.status == STATE_BJOBS_WAITING: + if self.status_ == STATE_BJOBS_WAITING: # check if bjobs are finished - self.check_jobs() + self.checkJobs() save("measurements", measurements) - if self.status == STATE_BJOBS_DONE: + if self.status_ == STATE_BJOBS_DONE: # merge files - self.do_merge() + self.mergeFiles() save("measurements", measurements) - if self.status == STATE_MERGE_DONE: + if self.status_ == STATE_MERGE_DONE: # start summary - self.do_summary() + self.calculateApe() save("measurements", measurements) - if self.status == STATE_SUMMARY_DONE: + if self.status_ == STATE_SUMMARY_DONE: # start local setting (only if not a baseline measurement) if self.alignment.isDesign: - self.set_status(STATE_LOCAL_DONE) + self.setStatus(STATE_LOCAL_DONE) else: - self.do_local_setting() + self.writeApeToDb() save("measurements", measurements) - if self.status == STATE_LOCAL_DONE: - self.finish_iteration() + if self.status_ == STATE_LOCAL_DONE: + self.finishIteration() save("measurements", measurements) # go to next iteration or finish measurement - if self.status == STATE_BJOBS_FAILED or \ - self.status == STATE_MERGE_FAILED or \ - self.status == STATE_SUMMARY_FAILED or \ - self.status == STATE_LOCAL_FAILED or \ - self.status == STATE_INVALID_CONDITIONS or \ - self.status == STATE_FINISHED: + if self.status_ == STATE_BJOBS_FAILED or \ + self.status_ == STATE_MERGE_FAILED or \ + self.status_ == STATE_SUMMARY_FAILED or \ + self.status_ == STATE_LOCAL_FAILED or \ + self.status_ == STATE_INVALID_CONDITIONS or \ + self.status_ == STATE_FINISHED: with open(history_file, "a") as fi: - fi.write("APE measurement {name} which was started at {start} finished at {end} with state {state} in iteration {iteration}\n".format(name=self.name, start=self.startTime, end=self.finishTime, state=self.get_status(), iteration=self.curIteration)) - if self.status == STATE_FINISHED: + fi.write("APE measurement {name} which was started at {start} finished at {end} with state {state} in iteration {iteration}\n".format(name=self.name, start=self.startTime, end=self.finishTime, state=self.status(), iteration=self.curIteration)) + if self.status_ == STATE_FINISHED: global finished_measurements finished_measurements[self.name] = self save("finished", finished_measurements) else: global failed_measurements failed_measurements[self.name] = self - self.set_status(STATE_NONE) + self.setStatus(STATE_NONE) save("failed", failed_measurements) save("measurements", measurements) - if self.status == STATE_ITERATION_START: # this ensures that jobs do not go into idle if many measurements are done simultaneously + if self.status_ == STATE_ITERATION_START: # this ensures that jobs do not go into idle if many measurements are done simultaneously # start bjobs print("APE Measurement {} just started iteration {}".format(self.name, self.curIteration)) - self.submit_jobs() + self.submitJobs() save("measurements", measurements) finally: threadcounter.release() @@ -532,7 +531,7 @@ def main(): for res in resumed: 
measurements.append(res) - print("Measurement {} in state {} in iteration {} was resumed".format(res.name, res.get_status(), res.curIteration)) + print("Measurement {} in state {} in iteration {} was resumed".format(res.name, res.status(), res.curIteration)) # Killing and purging is done here, because it doesn't make # sense to kill or purge a measurement that was just started for to_kill in args.kill: @@ -566,20 +565,20 @@ def main(): measurement = ApeMeasurement(name, config, settings) - if measurement.status >= STATE_ITERATION_START and measurement.status <= STATE_FINISHED: + if measurement.status_ >= STATE_ITERATION_START and measurement.status_ <= STATE_FINISHED: measurements.append(measurement) print("APE Measurement {} was started".format(measurement.name)) while True: # remove finished and failed measurements - measurements = [measurement for measurement in measurements if not (measurement.status==STATE_NONE or measurement.status == STATE_FINISHED)] + measurements = [measurement for measurement in measurements if not (measurement.status_==STATE_NONE or measurement.status_ == STATE_FINISHED)] save("measurements", measurements) save("failed", failed_measurements) save("finished", finished_measurements) list_threads = [] for measurement in measurements: - t = threading.Thread(target=measurement.run_iteration) + t = threading.Thread(target=measurement.runIteration) list_threads.append(t) t.start() From 307c8dd76f78f1661b0a9d8b9c778c299bf79779 Mon Sep 17 00:00:00 2001 From: mteroerd Date: Tue, 20 Jun 2023 16:04:05 +0200 Subject: [PATCH 4/5] Added unit test functionality, removed dependence on Alignment/TrackerAlignment package Reduced scope of unit test to account for limited permissions Changed autosubmitter so it doesnt load module for t0 submission for unit tests Further changes for unit testrelated to loading modules Fixed expected location of file created during unit test --- .../APEEstimation/python/ApeEstimator_cfi.py | 2 +- .../MC_UnitTest_TkAlMuonIsolated_cff.py | 20 ++ .../APEEstimation/scripts/initialise.bash | 2 - Alignment/APEEstimation/test/BuildFile.xml | 3 + .../test/SkimProducer/skimProducer_cfg.py | 45 +-- .../test/SkimProducer/startSkim.py | 6 +- .../test/autoSubmitter/autoSubmitter.py | 41 +-- .../test/autoSubmitter/helpers.py | 30 +- .../test/autoSubmitter/unitTest.ini | 12 + .../test/testApeestimatorSummary_cfg.py | 154 --------- .../test/testApeestimator_cfg.py | 313 ------------------ .../test/trackerTreeGenerator_cfg.py | 90 +++++ Alignment/APEEstimation/test/unitTest.sh | 11 + 13 files changed, 182 insertions(+), 547 deletions(-) create mode 100644 Alignment/APEEstimation/python/samples/MC_UnitTest_TkAlMuonIsolated_cff.py create mode 100644 Alignment/APEEstimation/test/BuildFile.xml create mode 100644 Alignment/APEEstimation/test/autoSubmitter/unitTest.ini delete mode 100644 Alignment/APEEstimation/test/testApeestimatorSummary_cfg.py delete mode 100644 Alignment/APEEstimation/test/testApeestimator_cfg.py create mode 100644 Alignment/APEEstimation/test/trackerTreeGenerator_cfg.py create mode 100755 Alignment/APEEstimation/test/unitTest.sh diff --git a/Alignment/APEEstimation/python/ApeEstimator_cfi.py b/Alignment/APEEstimation/python/ApeEstimator_cfi.py index 1ab006bfbfecc..c8894c01b754b 100644 --- a/Alignment/APEEstimation/python/ApeEstimator_cfi.py +++ b/Alignment/APEEstimation/python/ApeEstimator_cfi.py @@ -74,7 +74,7 @@ minGoodHitsPerTrack = cms.uint32(0), #File containing TrackerTree with ideal Geometry - TrackerTreeFile = 
cms.string(os.environ['CMSSW_BASE'] + '/src/Alignment/TrackerAlignment/hists/TrackerTree.root'), + TrackerTreeFile = cms.string(os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/TrackerTree.root'), #Sectors defining set of modules for common overview plots resp. APE values Sectors = cms.VPSet(), diff --git a/Alignment/APEEstimation/python/samples/MC_UnitTest_TkAlMuonIsolated_cff.py b/Alignment/APEEstimation/python/samples/MC_UnitTest_TkAlMuonIsolated_cff.py new file mode 100644 index 0000000000000..27e1538ec30d0 --- /dev/null +++ b/Alignment/APEEstimation/python/samples/MC_UnitTest_TkAlMuonIsolated_cff.py @@ -0,0 +1,20 @@ +import FWCore.ParameterSet.Config as cms + +maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) +readFiles = cms.untracked.vstring() +secFiles = cms.untracked.vstring() +source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles) + + +readFiles.extend( [ +"/store/mc/Run3Winter22DRPremix/WJetsToLNu_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlMuonIsolated-TRKALCADesign_design_geometry_122X_mcRun3_2021_design_v9-v2/2520000/0027e6ed-2626-4ede-97a5-f0a44164b81b.root", +"/store/mc/Run3Winter22DRPremix/WJetsToLNu_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlMuonIsolated-TRKALCADesign_design_geometry_122X_mcRun3_2021_design_v9-v2/2520000/00899b9d-32ab-46f2-b77b-0b0a8d666027.root", +"/store/mc/Run3Winter22DRPremix/WJetsToLNu_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlMuonIsolated-TRKALCADesign_design_geometry_122X_mcRun3_2021_design_v9-v2/2520000/07851676-0c65-4630-bbab-7406defeb670.root", +"/store/mc/Run3Winter22DRPremix/WJetsToLNu_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlMuonIsolated-TRKALCADesign_design_geometry_122X_mcRun3_2021_design_v9-v2/2520000/0a54c512-0f69-44e1-90bc-16da035cbe02.root", + ] ); + + + +secFiles.extend( [ + ] ) + diff --git a/Alignment/APEEstimation/scripts/initialise.bash b/Alignment/APEEstimation/scripts/initialise.bash index 083753a57639a..9f9adbba24439 100644 --- a/Alignment/APEEstimation/scripts/initialise.bash +++ b/Alignment/APEEstimation/scripts/initialise.bash @@ -2,8 +2,6 @@ DIRBASE="$CMSSW_BASE/src/Alignment/APEEstimation" -mkdir $CMSSW_BASE/src/Alignment/TrackerAlignment/hists/ - mkdir $DIRBASE/hists/ mkdir $DIRBASE/hists/workingArea/ mkdir $DIRBASE/hists/workingArea/apeObjects/ diff --git a/Alignment/APEEstimation/test/BuildFile.xml b/Alignment/APEEstimation/test/BuildFile.xml new file mode 100644 index 0000000000000..1d697d08f8da8 --- /dev/null +++ b/Alignment/APEEstimation/test/BuildFile.xml @@ -0,0 +1,3 @@ + + + diff --git a/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py b/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py index 42d16e267f987..b04d66eb6d921 100644 --- a/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py +++ b/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py @@ -10,15 +10,11 @@ import sys options = VarParsing.VarParsing ('standard') options.register('sample', 'data1', VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "Input sample") -options.register('useTrackList', False, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.bool, "Use list of preselected tracks") -options.register('isTest', False, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.bool, "Test run") # get and parse the command line arguments options.parseArguments() print("Input sample: ", options.sample) -print("Use list of preselected tracks: ", 
options.useTrackList) -print("Test run: ", options.isTest) ## @@ -79,11 +75,6 @@ outputName = 'MinBias.root' #outputPath = "workingArea" trackSelection = "MinBias" -if options.sample == 'data2': - process.load("Alignment.APEEstimation.samples.Data_TkAlMinBias_Run2018C_PromptReco_v3_cff") - outputName = 'MinBias1.root' - #outputPath = "workingArea" - trackSelection = "MinBias" if options.sample == 'data3': process.load("Alignment.APEEstimation.samples.Data_TkAlMuonIsolated_22Jan2013C_v1_cff") outputName = 'Data_TkAlMuonIsolated_22Jan2013C.root' @@ -104,22 +95,14 @@ outputPath = '/eos/cms/store/caf/user/jschulz/Skims/MC/UL2016ReRecoRealistic' outputName = 'Mc_TkAlMuonIsolated_WJetsToLNu_2016.root' trackSelection = "SingleMu" -if options.sample == 'zmumu': - process.load("") - outputName = '' - trackSelection = "DoubleMu" -if options.sample == 'zmumu10': - process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer12_zmumu10_cff") - outputName = 'Mc_TkAlMuonIsolated_Summer12_zmumu10.root' - trackSelection = "DoubleMu" -if options.sample == 'zmumu20': - process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer12_zmumu20_cff") - outputName = 'Mc_TkAlMuonIsolated_Summer12_zmumu20.root' - trackSelection = "DoubleMu" -if options.sample == 'zmumu50': - process.load("Alignment.APEEstimation.samples.DYToMuMu_M-50_Tune4C_13TeV-pythia8_Spring14dr-TkAlMuonIsolated-castor_PU_S14_POSTLS170_V6-v1_ALCARECO_cff") - outputName = 'Mc_DYToMuMu_M-50_Tune4C_13TeV-pythia8_Spring14dr-TkAlMuonIsolated-castor_PU_S14_POSTLS170_V6-v1.root' - trackSelection = "DoubleMu" + +# For unit tests +if options.sample == 'UnitTest': + process.load("Alignment.APEEstimation.samples.MC_UnitTest_TkAlMuonIsolated_cff") + outputName = 'MC_UnitTest_TkAlMuonIsolated.root' + maxEvents = 1000 + globalTag = "auto:phase1_2022_design" + trackSelection = "SingleMu" print("Using output name %s"%(outputName)) @@ -148,7 +131,6 @@ ## Number of Events (should be after input file) ## process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(maxEvents) ) -if options.isTest: process.maxEvents.input = 1001 ## @@ -177,13 +159,6 @@ process.MuSkim = trackSelector -## -## If preselected track list is used -## -if options.useTrackList: - process.MuSkim.src = 'TrackList' - process.TriggerSelectionSequence *= process.TrackList - import Alignment.CommonAlignment.tools.trackselectionRefitting as trackselRefit process.seqTrackselRefit = trackselRefit.getSequence(process, trackSelector.src.getModuleLabel()) @@ -226,10 +201,6 @@ process.out.outputCommands.extend(process.ApeSkimEventContent.outputCommands) -if options.isTest: - process.out.fileName = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_apeSkim.root' - - ## ## Outpath ## diff --git a/Alignment/APEEstimation/test/SkimProducer/startSkim.py b/Alignment/APEEstimation/test/SkimProducer/startSkim.py index f2eb82619d527..e73b035f71dcb 100644 --- a/Alignment/APEEstimation/test/SkimProducer/startSkim.py +++ b/Alignment/APEEstimation/test/SkimProducer/startSkim.py @@ -61,7 +61,7 @@ def condorSubmitSkim(sample, caf=False): def localStartSkim(sample): base = os.environ['CMSSW_BASE'] - execString = "cmsRun {base}/src/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py isTest=False useTrackList=False sample={sample}".format(sample=sample, base=base) + execString = "cmsRun {base}/src/Alignment/APEEstimation/test/SkimProducer/skimProducer_cfg.py sample={sample}".format(sample=sample, base=base) print(execString) toExec = execString.split(" ") @@ -73,7 
+73,7 @@ def localStartSkim(sample): def get_output(proc): while True: - line = proc.stdout.readline().rstrip() + line = proc.stdout.readline().rstrip().decode() if not line: break yield line @@ -153,7 +153,7 @@ def main(argv): if len(args.samples) == 0: print("Usage: python startSkim.py -s ") - sys.exit() + sys.exit(1) finalSamples = [] for sample in args.samples: diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py index 1bf836deda79d..f25594ecd749c 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py +++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py @@ -32,7 +32,7 @@ class Dataset: def __init__(self, config, name): dsDict = dict(config.items("dataset:{}".format(name))) self.name = name - self.baseDirectory = dsDict["baseDirectory"] + self.baseDirectory = dsDict["baseDirectory"].replace("$CMSSW_BASE", os.environ['CMSSW_BASE']) self.fileList = [] names = dsDict["fileNames"].split(" ") @@ -54,7 +54,7 @@ def __init__(self, config, name): if "isCosmics" in dsDict: self.isCosmics = (dsDict["isCosmics"] == "True") - self.conditions, dummy, self.validConditions = loadConditions(dsDict) + self.conditions, self.validConditions = loadConditions(dsDict) # check if any of the sources used for conditions is invalid if not self.validConditions: @@ -72,9 +72,6 @@ def __init__(self, config, name): alDict = dict(config.items("alignment:{}".format(name))) self.name = name - alignmentName = None - if "alignmentName" in alDict: - self.alignmentName = alDict["alignmentName"] self.globalTag = "None" if "globalTag" in alDict: self.globalTag = alDict["globalTag"] @@ -85,19 +82,13 @@ def __init__(self, config, name): if "isDesign" in alDict: self.isDesign= (alDict["isDesign"] == "True") - # If self.hasAlignmentCondition is true, no other Alignment-Object is loaded in apeEstimator_cfg.py using the alignmentName - self.conditions, self.hasAlignmentCondition, self.validConditions = loadConditions(alDict) + # If self.hasAlignmentCondition is true, no other Alignment-Object is loaded in apeEstimator_cfg.py using the + self.conditions, self.validConditions = loadConditions(alDict) # check if any of the sources used for conditions is invalid if not self.validConditions: print("Invalid conditions defined for alignment {}".format(self.name)) - # check if at least one of the two ways to define the alignment was used - # for baseline (Design) measurements, this is not needed, as we usually take conditions from GT - if self.alignmentName == None and not self.hasAlignmentCondition and not self.isDesign: - print("Error: No alignment object name or record was defined for alignment {}".format(self.name)) - sys.exit() - class ApeMeasurement: name = "workingArea" @@ -139,7 +130,7 @@ def __init__(self, name, config, settings): if self.alignment.isDesign: self.maxIterations = 0 - self.conditions, dummy, self.validConditions = loadConditions(settings) + self.conditions, self.validConditions = loadConditions(settings) # see if sanity checks passed if not self.alignment.validConditions or not self.dataset.validConditions or not self.dataset.existingFiles or not self.validConditions: @@ -188,9 +179,7 @@ def submitJobs(self): for condition in allConditions: fi.write(conditionsTemplate.format(record=condition["record"], connect=condition["connect"], tag=condition["tag"])) - alignmentNameToUse = self.alignment.alignmentName - if self.alignment.hasAlignmentCondition: - alignmentNameToUse = "fromConditions" + 
alignmentNameToUse = "fromConditions" lastIter = (self.curIteration==self.maxIterations) and not self.alignment.isDesign @@ -437,6 +426,7 @@ def runIteration(self): self.finishIteration() save("measurements", measurements) # go to next iteration or finish measurement + if self.status_ == STATE_BJOBS_FAILED or \ self.status_ == STATE_MERGE_FAILED or \ self.status_ == STATE_SUMMARY_FAILED or \ @@ -452,6 +442,7 @@ def runIteration(self): else: global failed_measurements failed_measurements[self.name] = self + self.setStatus(STATE_NONE) save("failed", failed_measurements) save("measurements", measurements) @@ -479,6 +470,8 @@ def main(): help='Number of threads running in parallel') parser.add_argument("-C", "--caf",action="store_true", dest="caf", default=False, help="Use CAF queue for condor jobs") + parser.add_argument("-u", "--unitTest", action="store_true", dest="unitTest", default=False, + help='If this is used, as soon as a measurement fails, the program will exit and as exit code the status of the measurement, i.e., where it failed') args = parser.parse_args() global base @@ -489,7 +482,7 @@ def main(): global use_caf use_caf = args.caf - enableCAF(use_caf) + unitTest = args.unitTest threadcounter = threading.BoundedSemaphore(args.ncores) lock = threading.Lock() @@ -565,10 +558,18 @@ def main(): measurement = ApeMeasurement(name, config, settings) - if measurement.status_ >= STATE_ITERATION_START and measurement.status_ <= STATE_FINISHED: + if measurement.status_ >= STATE_ITERATION_START: measurements.append(measurement) print("APE Measurement {} was started".format(measurement.name)) + if unitTest: + # status is 0 if successful, 101 if wrongly configured + sys.exit(measurement.status_) + + initializeModuleLoading() + enableCAF(use_caf) + + while True: # remove finished and failed measurements measurements = [measurement for measurement in measurements if not (measurement.status_==STATE_NONE or measurement.status_ == STATE_FINISHED)] @@ -603,6 +604,6 @@ def main(): sys.stdout.write("\033[K") except KeyboardInterrupt: sys.exit(0) - + if __name__ == "__main__": main() diff --git a/Alignment/APEEstimation/test/autoSubmitter/helpers.py b/Alignment/APEEstimation/test/autoSubmitter/helpers.py index c39943fc7d66d..bb78557ff2b73 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/helpers.py +++ b/Alignment/APEEstimation/test/autoSubmitter/helpers.py @@ -70,17 +70,18 @@ def rootFileValid(path): result &= not file.IsZombie() return result -if not 'MODULEPATH' in os.environ: - f = open(os.environ['MODULESHOME'] + "/init/.modulespath", "r") - path = [] - for line in f.readlines(): - line = re.sub("#.*$", '', line) - if line != '': - path.append(line) - os.environ['MODULEPATH'] = ':'.join(path) - -if not 'LOADEDMODULES' in os.environ: - os.environ['LOADEDMODULES'] = '' +def initializeModuleLoading(): + if not 'MODULEPATH' in os.environ: + f = open(os.environ['MODULESHOME'] + "/init/.modulespath", "r") + path = [] + for line in f.readlines(): + line = re.sub("#.*$", '', line) + if line != '': + path.append(line) + os.environ['MODULEPATH'] = ':'.join(path) + + if not 'LOADEDMODULES' in os.environ: + os.environ['LOADEDMODULES'] = '' def module(*args): if type(args[0]) == type([]): @@ -160,7 +161,6 @@ def hasValidSource(condition): return False def loadConditions(dictionary): - hasAlignmentCondition = False goodConditions = True conditions = [] for key, value in dictionary.items(): @@ -171,8 +171,6 @@ def loadConditions(dictionary): # structure is "condition rcd:source tag" record = key.split(" 
")[1] connect, tag = value.split(" ") - if record == "TrackerAlignmentRcd": - hasAlignmentCondition = True conditions.append({"record":record, "connect":replaceShortcuts(connect), "tag":tag}) elif len(value.split(" ")) == 1 and len(key.split(" ")) == 2: # structure is "condition tag:source", so we have to guess rcd from the tag. might also be "condition tag1+tag2+...+tagN:source" @@ -183,8 +181,6 @@ def loadConditions(dictionary): for possibleTag, possibleRcd in records.items(): if tag.startswith(possibleTag): conditions.append({"record":possibleRcd, "connect":replaceShortcuts(connect), "tag":tag}) - if possibleRcd == "TrackerAlignmentRcd": - hasAlignmentCondition = True foundTag = True break if not foundTag: @@ -202,4 +198,4 @@ def loadConditions(dictionary): if not condition["record"].endswith("Rcd"): goodConditions = False print("'{}' is not a valid record name.".format(condition["record"])) - return conditions, hasAlignmentCondition, goodConditions + return conditions, goodConditions diff --git a/Alignment/APEEstimation/test/autoSubmitter/unitTest.ini b/Alignment/APEEstimation/test/autoSubmitter/unitTest.ini new file mode 100644 index 0000000000000..3920aace7d635 --- /dev/null +++ b/Alignment/APEEstimation/test/autoSubmitter/unitTest.ini @@ -0,0 +1,12 @@ +[dataset:wlnu] +baseDirectory=$CMSSW_BASE/unit_tests/ApeTest +fileNames=MC_UnitTest_TkAlMuonIsolated_1.root +isMC=True + +[alignment:FromGT] +globalTag=auto:phase1_2022_design +isDesign=True + +[ape:Design] +dataset: wlnu +alignment: FromGT diff --git a/Alignment/APEEstimation/test/testApeestimatorSummary_cfg.py b/Alignment/APEEstimation/test/testApeestimatorSummary_cfg.py deleted file mode 100644 index 517cfb40010ec..0000000000000 --- a/Alignment/APEEstimation/test/testApeestimatorSummary_cfg.py +++ /dev/null @@ -1,154 +0,0 @@ -from __future__ import print_function -import os - -import FWCore.ParameterSet.Config as cms - - - - - -## -## Setup command line options -## -import FWCore.ParameterSet.VarParsing as VarParsing -import sys -options = VarParsing.VarParsing ('standard') -options.register('sample', 'wlnu', VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "wlnu") -options.register('isTest', True, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.bool, "Test run") - -# get and parse the command line arguments -if( hasattr(sys, "argv") ): - for args in sys.argv : - arg = args.split(',') - for val in arg: - val = val.split('=') - if(len(val)==2): - setattr(options,val[0], val[1]) - -print("Input sample: ", options.sample) -print("Test run: ", options.isTest) - - - -## -## Process definition -## -process = cms.Process("ApeEstimatorSummary") - - - -## -## Message Logger -## -process.load("FWCore.MessageService.MessageLogger_cfi") -process.MessageLogger.CalculateAPE=dict() -#process.MessageLogger.ApeEstimatorSummary=dict() -process.MessageLogger.cerr.INFO.limit = 0 -process.MessageLogger.cerr.default.limit = -1 -process.MessageLogger.cerr.CalculateAPE = cms.untracked.PSet(limit = cms.untracked.int32(-1)) -#process.MessageLogger.cerr.ApeEstimatorSummary = cms.untracked.PSet(limit = cms.untracked.int32(-1)) - -#process.MessageLogger.cout = cms.untracked.PSet(INFO = cms.untracked.PSet( -# reportEvery = cms.untracked.int32(100), # every 100th only -# limit = cms.untracked.int32(10), # or limit to 10 printouts... 
-#)) -process.MessageLogger.cerr.FwkReport.reportEvery = 1000 ## really show only every 1000th - - - -## -## Process options -## -process.options = cms.untracked.PSet( - wantSummary = cms.untracked.bool(True), -) - - - -## -## Input sample definition -## -isData1 = isData2 = False -isData = False -isQcd = isWlnu = isZmumu = isZtautau = isZmumu10 = isZmumu20 = False -isMc = False -if options.sample == 'data1': - isData1 = True - isData = True -elif options.sample == 'data2': - isData2 = True - isData = True -elif options.sample == 'qcd': - isQcd = True - isMc = True -elif options.sample == 'wlnu': - isWlnu = True - isMc = True -elif options.sample == 'zmumu': - isZmumu = True - isMc = True -elif options.sample == 'ztautau': - isZtautau = True - isMc = True -elif options.sample == 'zmumu10': - isZmumu10 = True - isMc = True -elif options.sample == 'zmumu20': - isZmumu20 = True - isMc = True -else: - print('ERROR --- incorrect data sammple: ', options.sample) - exit(8888) - - - -## -## Input Files -## -process.source = cms.Source("EmptySource") - - - -## -## Number of Events -## -process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) - - - -## -## ApeEstimatorSummary -## -from Alignment.APEEstimation.ApeEstimatorSummary_cff import * -process.ApeEstimatorSummary1 = ApeEstimatorSummaryBaseline.clone( - InputFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '.root', - ResultsFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_resultsFile1.root', - BaselineFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_baselineApe.root', -) -process.ApeEstimatorSummary2 = ApeEstimatorSummaryIter.clone( - correctionScaling = 0.6, - InputFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '.root', - ResultsFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_resultsFile2.root', - BaselineFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_baselineApe.root', - IterationFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_iterationApe2.root', - ApeOutputFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_apeOutput2.txt', -) -process.ApeEstimatorSummary3 = ApeEstimatorSummaryIter.clone( - correctionScaling = 0.6, - InputFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '.root', - ResultsFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_resultsFile3.root', - BaselineFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_baselineApe.root', - IterationFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_iterationApe3.root', - ApeOutputFile = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/test_' + options.sample + '_apeOutput3.txt', -) - - - -process.p = cms.Path( - process.ApeEstimatorSummary1* - process.ApeEstimatorSummary2 - #~ *process.ApeEstimatorSummary3 -) - - - diff --git a/Alignment/APEEstimation/test/testApeestimator_cfg.py b/Alignment/APEEstimation/test/testApeestimator_cfg.py deleted file mode 100644 index 21f58700b8a20..0000000000000 --- a/Alignment/APEEstimation/test/testApeestimator_cfg.py +++ /dev/null @@ -1,313 +0,0 @@ -from 
__future__ import print_function
-import os
-
-import FWCore.ParameterSet.Config as cms
-
-
-
-
-##
-## Setup command line options
-##
-import FWCore.ParameterSet.VarParsing as VarParsing
-import sys
-options = VarParsing.VarParsing ('standard')
-options.register('sample', 'wlnu', VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "Input sample")
-options.register('isTest', True, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.bool, "Test run")
-
-# get and parse the command line arguments
-if( hasattr(sys, "argv") ):
-    for args in sys.argv :
-        arg = args.split(',')
-        for val in arg:
-            val = val.split('=')
-            if(len(val)==2):
-                setattr(options,val[0], val[1])
-
-print("Input sample: ", options.sample)
-print("Test run: ", options.isTest)
-
-
-
-##
-## Process definition
-##
-process = cms.Process("ApeEstimator")
-
-
-
-##
-## Message Logger
-##
-process.load("FWCore.MessageService.MessageLogger_cfi")
-process.MessageLogger.SectorBuilder=dict()
-process.MessageLogger.ResidualErrorBinning=dict()
-process.MessageLogger.HitSelector=dict()
-process.MessageLogger.CalculateAPE=dict()
-process.MessageLogger.ApeEstimator=dict()
-#process.MessageLogger.TrackRefitter=dict()
-process.MessageLogger.AlignmentTrackSelector=dict()
-process.MessageLogger.cerr.INFO.limit = 0
-process.MessageLogger.cerr.default.limit = -1 # Do not use =0, else all error messages (except those listed below) are supressed
-process.MessageLogger.cerr.SectorBuilder = cms.untracked.PSet(limit = cms.untracked.int32(-1))
-process.MessageLogger.cerr.HitSelector = cms.untracked.PSet(limit = cms.untracked.int32(-1))
-process.MessageLogger.cerr.CalculateAPE = cms.untracked.PSet(limit = cms.untracked.int32(-1))
-process.MessageLogger.cerr.ApeEstimator = cms.untracked.PSet(limit = cms.untracked.int32(-1))
-process.MessageLogger.cerr.AlignmentTrackSelector = cms.untracked.PSet(limit = cms.untracked.int32(-1))
-process.MessageLogger.cerr.FwkReport.reportEvery = 1000 ## really show only every 1000th
-
-
-
-##
-## Process options
-##
-process.options = cms.untracked.PSet(
-    wantSummary = cms.untracked.bool(True),
-)
-
-
-
-##
-## Input sample definition
-##
-isData1 = isData2 = False
-isData = False
-isQcd = isWlnu = isZmumu = isZtautau = isZmumu10 = isZmumu20 = False
-isMc = False
-if options.sample == 'data1':
-    isData1 = True
-    isData = True
-elif options.sample == 'data2':
-    isData2 = True
-    isData = True
-elif options.sample == 'qcd':
-    isQcd = True
-    isMc = True
-elif options.sample == 'wlnu':
-    isWlnu = True
-    isMc = True
-elif options.sample == 'zmumu':
-    isZmumu = True
-    isMc = True
-elif options.sample == 'ztautau':
-    isZtautau = True
-    isMc = True
-elif options.sample == 'zmumu10':
-    isZmumu10 = True
-    isMc = True
-elif options.sample == 'zmumu20':
-    isZmumu20 = True
-    isMc = True
-else:
-    print('ERROR --- incorrect data sammple: ', options.sample)
-    exit(8888)
-
-
-
-##
-## Input Files
-##
-if isData1:
-    process.load("Alignment.APEEstimation.samples.Data_TkAlMuonIsolated_Run2011A_May10ReReco_ApeSkim_cff")
-elif isData2:
-    process.load("Alignment.APEEstimationsamples.Data_TkAlMuonIsolated_Run2011A_PromptV4_ApeSkim_cff")
-elif isQcd:
-    process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer11_qcd_ApeSkim_cff")
-elif isWlnu:
-    process.load("Alignment.APEEstimation.samples.Mc_WJetsToLNu_74XTest_ApeSkim_cff")
-elif isZmumu10:
-    process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer11_zmumu10_ApeSkim_cff")
-elif isZmumu20:
-    process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer11_zmumu20_ApeSkim_cff")
-
-
-
-##
-## Number of Events (should be after input file)
-##
-process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
-if options.isTest: process.maxEvents.input = 10001
-
-
-##
-## Check run and event numbers for Dublicates --- only for real data
-##
-#process.source.duplicateCheckMode = cms.untracked.string("noDuplicateCheck")
-#process.source.duplicateCheckMode = cms.untracked.string("checkEachFile")
-process.source.duplicateCheckMode = cms.untracked.string("checkEachRealDataFile")
-#process.source.duplicateCheckMode = cms.untracked.string("checkAllFilesOpened") # default value
-
-
-
-##
-## Whole Refitter Sequence
-##
-process.load("Alignment.APEEstimation.TrackRefitter_38T_cff")
-
-process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
-from Configuration.AlCa.GlobalTag import GlobalTag
-process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_design', '')
-
-
-##### To be used when running on Phys14MC with a CMSSW version > 72X
-#process.GlobalTag.toGet = cms.VPSet(
-#    cms.PSet(
-#        record = cms.string("BeamSpotObjectsRcd"),
-#        tag = cms.string("Realistic8TeVCollisions_START50_V13_v1_mc"),
-#        connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
-#    )
-#)
-print("Using global tag "+process.GlobalTag.globaltag._value)
-
-
-
-##
-## New pixel templates
-##
-process.GlobalTag.toGet = cms.VPSet(
-    cms.PSet(
-        record = cms.string("SiPixelTemplateDBObjectRcd"),
-        tag = cms.string("SiPixelTemplateDBObject_38T_v3_mc"),
-        connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
-    )
-)
-
-
-
-##
-## Alignment and APE
-##
-import CalibTracker.Configuration.Common.PoolDBESSource_cfi
-## Choose Alignment (w/o touching APE)
-if isMc:
-    process.myTrackerAlignment = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone(
-        connect = 'frontier://FrontierProd/CMS_CONDITIONS', # or your sqlite file
-        toGet = [
-            cms.PSet(
-                record = cms.string('TrackerAlignmentRcd'),
-                tag = cms.string('TrackerIdealGeometry210_mc') # 'TrackerAlignment_2009_v2_offline'
-            ),
-        ],
-    )
-    process.es_prefer_trackerAlignment = cms.ESPrefer("PoolDBESSource","myTrackerAlignment")
-
-process.es_prefer_trackerAlignment = cms.ESPrefer("PoolDBESSource","myTrackerAlignment")
-if isData:
-    # Recent geometry
-    process.myTrackerAlignment = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone(
-        connect = 'frontier://FrontierProd/CMS_CONDITIONS',
-        toGet = [
-            cms.PSet(
-                record = cms.string('TrackerAlignmentRcd'),
-                tag = cms.string('TrackerAlignment_GR10_v6_offline'),
-            ),
-        ],
-    )
-    process.es_prefer_trackerAlignment = cms.ESPrefer("PoolDBESSource","myTrackerAlignment")
-    # Kinks and bows
-    process.myTrackerAlignmentKinksAndBows = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone(
-        connect = 'frontier://FrontierProd/CMS_CONDITIONS',
-        toGet = [
-            cms.PSet(
-                record = cms.string('TrackerSurfaceDeformationRcd'),
-                tag = cms.string('TrackerSurfaceDeformations_v1_offline'),
-            ),
-        ],
-    )
-    process.es_prefer_trackerAlignmentKinksAndBows = cms.ESPrefer("PoolDBESSource","myTrackerAlignmentKinksAndBows")
-
-## APE (set to zero)
-process.myTrackerAlignmentErr = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone(
-    connect = 'frontier://FrontierProd/CMS_CONDITIONS',
-    toGet = [
-        cms.PSet(
-            record = cms.string('TrackerAlignmentErrorExtendedRcd'),
-            tag = cms.string('TrackerIdealGeometryErrorsExtended210_mc')
-        ),
-    ],
-)
-process.es_prefer_trackerAlignmentErr = cms.ESPrefer("PoolDBESSource","myTrackerAlignmentErr")
-
-
-
-##
-## Trigger Selection
-##
-process.load("Alignment.APEEstimation.TriggerSelection_cff")
-
-
-
-##
-## ApeEstimator
-##
-from Alignment.APEEstimation.ApeEstimator_cff import *
-process.ApeEstimator1 = ApeEstimator.clone(
-    #~ tjTkAssociationMapTag = "TrackRefitterHighPurityForApeEstimator",
-    tjTkAssociationMapTag = "TrackRefitterForApeEstimator",
-    maxTracksPerEvent = 0,
-    applyTrackCuts = False,
-    Sectors = RecentSectors,
-    analyzerMode = False,
-    calculateApe = True
-)
-process.ApeEstimator1.HitSelector.width = []
-process.ApeEstimator1.HitSelector.maxIndex = []
-process.ApeEstimator1.HitSelector.widthProj = []
-process.ApeEstimator1.HitSelector.widthDiff = []
-process.ApeEstimator1.HitSelector.edgeStrips = []
-process.ApeEstimator1.HitSelector.sOverN = []
-process.ApeEstimator1.HitSelector.maxCharge = []
-process.ApeEstimator1.HitSelector.chargeOnEdges = []
-process.ApeEstimator1.HitSelector.probX = []
-process.ApeEstimator1.HitSelector.phiSensX = []
-process.ApeEstimator1.HitSelector.phiSensY = []
-process.ApeEstimator1.HitSelector.errXHit = []
-process.ApeEstimator1.HitSelector.chargePixel = []
-process.ApeEstimator1.HitSelector.widthX = []
-process.ApeEstimator1.HitSelector.widthY = []
-process.ApeEstimator1.HitSelector.logClusterProbability = []
-process.ApeEstimator1.HitSelector.isOnEdge = []
-process.ApeEstimator1.HitSelector.qBin = []
-
-
-process.ApeEstimator2 = process.ApeEstimator1.clone(
-    Sectors = ValidationSectors,
-    analyzerMode = True,
-    calculateApe = False,
-)
-
-process.ApeEstimator3 = process.ApeEstimator2.clone(
-    zoomHists = False,
-)
-
-
-
-##
-## Output File Configuration
-##
-outputName = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/'
-if options.isTest:
-    outputName = outputName + 'test_'
-outputName = outputName + options.sample + '.root'
-
-process.TFileService = cms.Service("TFileService",
-    fileName = cms.string(outputName),
-    closeFileFast = cms.untracked.bool(True)
-)
-
-
-
-##
-## Path
-##
-process.p = cms.Path(
-    process.TriggerSelectionSequence*
-    process.RefitterHighPuritySequence*
-    (process.ApeEstimator1+
-     process.ApeEstimator2+
-     process.ApeEstimator3
-    )
-)
-
-
-
diff --git a/Alignment/APEEstimation/test/trackerTreeGenerator_cfg.py b/Alignment/APEEstimation/test/trackerTreeGenerator_cfg.py
new file mode 100644
index 0000000000000..4727e156f1327
--- /dev/null
+++ b/Alignment/APEEstimation/test/trackerTreeGenerator_cfg.py
@@ -0,0 +1,90 @@
+from __future__ import print_function
+import FWCore.ParameterSet.Config as cms
+
+import os
+
+
+##
+## Process definition
+##
+process = cms.Process("TrackerTreeGeneration")
+
+
+
+##
+## MessageLogger
+##
+process.load("FWCore.MessageService.MessageLogger_cfi")
+process.MessageLogger.cerr.threshold = 'INFO'
+process.MessageLogger.TrackerTreeGenerator=dict()
+process.MessageLogger.cerr.INFO.limit = 0
+process.MessageLogger.cerr.default.limit = -1
+process.MessageLogger.cerr.TrackerTreeGenerator = cms.untracked.PSet(limit = cms.untracked.int32(-1))
+
+
+
+##
+## Process options
+##
+process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
+
+
+
+##
+## Input source
+##
+process.source = cms.Source("EmptySource")
+
+
+
+##
+## Number of events
+##
+process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
+
+
+
+##
+## Geometry
+##
+process.load("Configuration.Geometry.GeometryRecoDB_cff")
+
+
+##
+## Conditions
+##
+# use always ideal conditions to get no influence from Alignment on absolute Positions, Orientations...
+# so it is clear that when choosing special regions in e.g. globalPhi, Modules of the same Rod are contained in the same region
+process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
+from Configuration.AlCa.GlobalTag import GlobalTag
+process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase1_2023_design', '')
+print("Using global tag "+process.GlobalTag.globaltag._value)
+
+
+##
+## Analyzer
+##
+process.load("Alignment.TrackerAlignment.TrackerTreeGenerator_cfi")
+
+
+
+##
+## Output File Configuration
+##
+process.TFileService = cms.Service("TFileService",
+    fileName = cms.string(os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/TrackerTree.root')
+)
+
+
+
+##
+## Path
+##
+process.p = cms.Path(process.TrackerTreeGenerator)
+
+
+
+
+
+
+
diff --git a/Alignment/APEEstimation/test/unitTest.sh b/Alignment/APEEstimation/test/unitTest.sh
new file mode 100755
index 0000000000000..a29c2099c3f24
--- /dev/null
+++ b/Alignment/APEEstimation/test/unitTest.sh
@@ -0,0 +1,11 @@
+#! /bin/bash
+function die { echo $1: status $2 ; exit $2; }
+
+echo " TESTING data set skimming"
+# skim the predefined data set
+python3 $CMSSW_BASE/src/Alignment/APEEstimation/test/SkimProducer/startSkim.py -s UnitTest || die "Failure skimming data set" $?
+
+echo " TESTING auto submitter"
+# start baseline measurement
+python3 $CMSSW_BASE/src/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py -c $CMSSW_BASE/src/Alignment/APEEstimation/test/autoSubmitter/unitTest.ini -u || die "Failure running autoSubmitter" $?
+

From 40ed46f4f4bc83f94fd83c0da8e32460a0d058b3 Mon Sep 17 00:00:00 2001
From: mteroerd
Date: Thu, 22 Jun 2023 09:38:45 +0200
Subject: [PATCH 5/5] Make code not create folders for unit tests

---
 Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py
index f25594ecd749c..a182f2922c9cf 100644
--- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py
+++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py
@@ -136,7 +136,10 @@ def __init__(self, name, config, settings):
         if not self.alignment.validConditions or not self.dataset.validConditions or not self.dataset.existingFiles or not self.validConditions:
             self.setStatus(STATE_INVALID_CONDITIONS, True)
             return
-
+
+        if unitTest:
+            return
+
         if self.alignment.isDesign and self.dataset.sampleType != "MC":
             # For now, this won't immediately shut down the program
             print("APE Measurement {} is scheduled to to an APE baseline measurement with a dataset that is not marked as isMC=True. Is this intended?".format(self.name))
@@ -480,6 +483,7 @@ def main():
     global threadcounter
     global lock
     global use_caf
+    global unitTest
 
     use_caf = args.caf
    unitTest = args.unitTest