diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index 755fbbf5c46..8c55b92d2a3 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -214,7 +214,7 @@ none me@my.address 8 - 4 + 8 FALSE mpiexec diff --git a/scripts/Tools/case.build b/scripts/Tools/case.build index c8ec78096c0..4ac984e3745 100755 --- a/scripts/Tools/case.build +++ b/scripts/Tools/case.build @@ -84,7 +84,7 @@ def _main_func(description): if cleanlist is not None or clean_all: build.clean(case, cleanlist, clean_all) elif(testname is not None): - logging.warn("Building test for {} in directory {}".format(testname, + logging.warning("Building test for {} in directory {}".format(testname, caseroot)) try: # The following line can throw exceptions if the testname is diff --git a/scripts/Tools/case_diff b/scripts/Tools/case_diff index ae5dac241ee..6568632afb1 100755 --- a/scripts/Tools/case_diff +++ b/scripts/Tools/case_diff @@ -68,8 +68,8 @@ def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): for dirname, set_obj in [(dir1, dir1_only), (dir2, dir2_only)]: for item in sorted(set_obj): if (item not in skip_list): - print "===============================================================================" - print os.path.join(dirname, item), "is unique" + print ("===============================================================================") + print (os.path.join(dirname, item), "is unique") num_differing_files += 1 # Handling of the common items is trickier @@ -82,8 +82,8 @@ def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): # If the directory status of the files differs, report diff if (path1isdir != os.path.isdir(path2)): - print "===============================================================================" - print path1 + " DIFFERS (directory status)" + print ("===============================================================================") + print (path1 + " 
DIFFERS (directory status)") num_differing_files += 1 continue @@ -100,7 +100,7 @@ def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): stat, out, err = run_cmd("file {}".format(path1)) if (stat != 0): - logging.warn("Failed to probe file '{}', out: '{}', err: '{}'".format(path1, out, err)) + logging.warning("Failed to probe file '{}', out: '{}', err: '{}'".format(path1, out, err)) continue is_text_file = "text" in out @@ -111,10 +111,10 @@ def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): stat, out, _ = run_cmd("diff -w {} -".format(path1), input_str=the_text) if (stat != 0): - print "===============================================================================" - print path1 + " DIFFERS (contents)" + print ("===============================================================================") + print (path1 + " DIFFERS (contents)") num_differing_files += 1 - print " ", out + print (" "+ out) return num_differing_files @@ -135,7 +135,7 @@ def _main_func(description): repls[val2] = val1 num_differing_files = recursive_diff(case1, case2, repls, show_binary, skip_list) - print num_differing_files, "files are different" + print (num_differing_files, "files are different") sys.exit(0 if num_differing_files == 0 else 1) ############################################################################### diff --git a/scripts/Tools/cime_bisect b/scripts/Tools/cime_bisect index da144df52f1..23d3dde1ecc 100755 --- a/scripts/Tools/cime_bisect +++ b/scripts/Tools/cime_bisect @@ -95,9 +95,9 @@ def cime_bisect(testargs, good, bad, testroot, compiler, project, baseline_name, commits_we_want_to_test = run_cmd_no_fail("git rev-list {}..{} --merges --first-parent".format(good, bad)).splitlines() all_commits_ = run_cmd_no_fail("git rev-list {}..{}".format(good, bad)).splitlines() commits_to_skip = set(all_commits_) - set(commits_we_want_to_test) - print "Skipping these non-merge commits" + print("Skipping these non-merge commits") for item in 
commits_to_skip: - print item + print(item) else: commits_to_skip = set() @@ -145,8 +145,8 @@ def cime_bisect(testargs, good, bad, testroot, compiler, project, baseline_name, bad_commits_filtered = bad_commits - commits_to_skip expect(len(bad_commits_filtered) == 1, bad_commits_filtered) - print "Bad merge is:" - print run_cmd_no_fail("git show {}".format(bad_commits_filtered.pop())) + print("Bad merge is:") + print(run_cmd_no_fail("git show {}".format(bad_commits_filtered.pop()))) finally: run_cmd_no_fail("git bisect reset") diff --git a/scripts/Tools/cimeteststatus b/scripts/Tools/cimeteststatus index 7c8380ffc6a..4978d6e8ff1 100755 --- a/scripts/Tools/cimeteststatus +++ b/scripts/Tools/cimeteststatus @@ -5,9 +5,9 @@ Purpose: Give basic and detailed summaries of CIME(CESM) tests, and send the test results back to the test database. Authors: Jay Shollenberger and Ben Andre """ - -from __future__ import print_function -import sys +from standard_script_setup import * +#pylint: disable=import-error +from six.moves import urllib if sys.hexversion < 0x02070000: print(70 * "*") print("ERROR: {0} requires python >= 2.7.x. 
".format(sys.argv[0])) @@ -17,12 +17,11 @@ if sys.hexversion < 0x02070000: sys.exit(1) import xml.etree.ElementTree as etree import argparse -import os, glob, re -import urllib -import urllib2 +import glob, re import pprint import getpass + testdburl = "https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" class CimeTestStatus(): @@ -354,10 +353,10 @@ def getSuiteInfo(specfile): for t in root.findall('test'): testlist.append(t.get('case')) - if 'machine' not in suiteinfo.keys(): + if 'machine' not in suiteinfo: machnodelist = t.findall('mach') suiteinfo['machine'] = machnodelist[0].text - if 'compiler' not in suiteinfo.keys(): + if 'compiler' not in suiteinfo: compnodelist = t.findall('compiler') suiteinfo['compiler'] = compnodelist[0].text @@ -527,13 +526,13 @@ def sendTestReport(args, suiteinfo, cimetests, auth): # Get the testdb username/password, and POST # the data. print("sending test report for " + suiteinfo['machine'] + " " + suiteinfo['compiler']) - data = urllib.urlencode({'username':auth['username'], + data = urllib.parse.urlencode({'username':auth['username'], 'password':auth['password'], 'testXML':testrecord}) - req = urllib2.Request(testdburl, data) + req = urllib.request.Request(testdburl, data) try: - urllib2.urlopen(req) - except urllib2.URLError as e: + urllib.request.urlopen(req) + except urllib.error.URLError as e: print("Error when posting data: " + e.reason) if(args.debug): @@ -552,7 +551,7 @@ def authenticate(): auth['password'] = getpass.getpass("enter TestDB password:") return auth -def main(): +def _main_func(): """ Parse the arguments, get the suite information from the test spec, get the test statuses, then print a raw status, test summary, or send the test report. 
@@ -596,5 +595,4 @@ def main(): if __name__ == "__main__": - main() - + _main_func() diff --git a/scripts/Tools/code_checker b/scripts/Tools/code_checker index 63c04e88139..f5d63db1247 100755 --- a/scripts/Tools/code_checker +++ b/scripts/Tools/code_checker @@ -63,7 +63,7 @@ def _main_func(description): num_procs, files = parse_command_line(sys.argv, description) results = check_code(files, num_procs=num_procs, interactive=True) - for result in results.itervalues(): + for result in results.values(): if result != "": sys.exit(1) diff --git a/scripts/Tools/compare_namelists b/scripts/Tools/compare_namelists index 56d6eb52eea..03ca3c808d4 100755 --- a/scripts/Tools/compare_namelists +++ b/scripts/Tools/compare_namelists @@ -67,7 +67,7 @@ def _main_func(description): expect(success, "Namelist diff between files {} and {}\n{}".format(gold_file, compare_file, comments)) - print "Files {} and {} MATCH".format(gold_file, compare_file) + print("Files {} and {} MATCH".format(gold_file, compare_file)) ############################################################################### diff --git a/scripts/Tools/component_compare_baseline b/scripts/Tools/component_compare_baseline index b43b08c8caf..970d12600d5 100755 --- a/scripts/Tools/component_compare_baseline +++ b/scripts/Tools/component_compare_baseline @@ -45,7 +45,7 @@ def _main_func(description): caseroot, baseline_dir = parse_command_line(sys.argv, description) with Case(caseroot) as case: success, comments = compare_baseline(case, baseline_dir) - print comments + print(comments) sys.exit(0 if success else 1) diff --git a/scripts/Tools/component_compare_test b/scripts/Tools/component_compare_test index 8deace7958f..0af20a07b15 100755 --- a/scripts/Tools/component_compare_test +++ b/scripts/Tools/component_compare_test @@ -48,7 +48,7 @@ def _main_func(description): suffix1, suffix2, caseroot = parse_command_line(sys.argv, description) with Case(caseroot) as case: success, comments = compare_test(case, suffix1, suffix2) - 
print comments + print(comments) sys.exit(0 if success else 1) diff --git a/scripts/Tools/component_generate_baseline b/scripts/Tools/component_generate_baseline index 9a549c3ee1d..9268ab90acf 100755 --- a/scripts/Tools/component_generate_baseline +++ b/scripts/Tools/component_generate_baseline @@ -52,7 +52,7 @@ def _main_func(description): with Case(caseroot) as case: success, comments = generate_baseline(case, baseline_dir, allow_baseline_overwrite) - print comments + print(comments) sys.exit(0 if success else 1) diff --git a/scripts/Tools/cs.status b/scripts/Tools/cs.status index e75234b4fc7..2c57dc7652e 100755 --- a/scripts/Tools/cs.status +++ b/scripts/Tools/cs.status @@ -65,8 +65,9 @@ def cs_status(test_paths, summary=False): test_id_output[test_id] = output for test_id in sorted(test_id_output): - print test_id - print test_id_output[test_id], + print(test_id) + print(test_id_output[test_id]) + print(' ') ############################################################################### def _main_func(description): diff --git a/scripts/Tools/pelayout b/scripts/Tools/pelayout index 7effc06288c..6422e450287 100755 --- a/scripts/Tools/pelayout +++ b/scripts/Tools/pelayout @@ -113,10 +113,10 @@ def print_pelayout(case, ntasks, nthreads, rootpes, arg_format, header): comp_classes = case.get_values("COMP_CLASSES") if (header is not None): - print header + print(header) # End if for comp in comp_classes: - print format_pelayout(comp, ntasks[comp], nthreads[comp], rootpes[comp], arg_format) + print(format_pelayout(comp, ntasks[comp], nthreads[comp], rootpes[comp], arg_format)) # End for # End def print_pelayout diff --git a/scripts/Tools/preview_run b/scripts/Tools/preview_run index a64d07a5172..c4eeade3216 100755 --- a/scripts/Tools/preview_run +++ b/scripts/Tools/preview_run @@ -57,13 +57,13 @@ def _main_func(description): logging.disable(logging.CRITICAL) with Case(caseroot, read_only=False) as case: - print "BATCH SUBMIT:" + print("BATCH SUBMIT:") job = "case.test" 
if case.get_value("TEST") else "case.run" job_id_to_cmd = case.submit_jobs(dry_run=True, job=job) for job_id, cmd in job_id_to_cmd: - print " ", job_id, "->", case.get_resolved_value(cmd) - print - print "MPIRUN:", case.get_resolved_value(case.get_mpirun_cmd()) + print(" ", job_id, "->", case.get_resolved_value(cmd)) + print() + print("MPIRUN:", case.get_resolved_value(case.get_mpirun_cmd())) if __name__ == "__main__": _main_func(__doc__) diff --git a/scripts/Tools/xmlquery b/scripts/Tools/xmlquery index 93239b4076f..05a7b8bc3e0 100755 --- a/scripts/Tools/xmlquery +++ b/scripts/Tools/xmlquery @@ -327,38 +327,38 @@ def _main_func(): wrapper.fix_sentence_endings = True for group in sorted(iter(results)): - if (len(variables) > 1 or len(results.keys()) > 1 or full) and not get_group: - print "\nResults in group %s"%group + if (len(variables) > 1 or len(results) > 1 or full) and not get_group: + print("\nResults in group %s"%group) for var in variables: if var in results[group]: if raw: - print results[group][var]['raw'] + print(results[group][var]['raw']) elif get_group: - print "\t%s: %s"%(var, results[group][var]['get_group']) + print("\t%s: %s"%(var, results[group][var]['get_group'])) elif value: sys.stdout.write("%s"%results[group][var]['value']) elif description: if results[group][var]['desc'][0] is not None: desc_text = ' '.join(results[group][var]['desc'][0].split()) - print "\t%s: %s"%(var, wrapper.fill(desc_text)) + print("\t%s: %s"%(var, wrapper.fill(desc_text))) elif fileonly: - print "\t%s: %s"%(var, results[group][var]['file']) + print("\t%s: %s"%(var, results[group][var]['file'])) elif dtype: - print "\t%s: %s"%(var, results[group][var]['type']) + print("\t%s: %s"%(var, results[group][var]['type'])) elif valid_values: if 'valid_values' in results[group][var]: - print "\t%s: %s"%(var, results[group][var]["valid_values"]) + print("\t%s: %s"%(var, results[group][var]["valid_values"])) elif full: if results[group][var]['desc'][0] is not None: desc_text = ' 
'.join(results[group][var]['desc'][0].split()) - print "\t%s: value=%s"%(var, results[group][var]['value']) - print "\t\ttype: %s"%(results[group][var]['type'][0]) + print("\t%s: value=%s"%(var, results[group][var]['value'])) + print("\t\ttype: %s"%(results[group][var]['type'][0])) if 'valid_values' in results[group][var]: - print "\t\tvalid_values: %s"%(results[group][var]["valid_values"]) - print "\t\tdescription: %s"%(wrapper.fill(desc_text)) - print "\t\tfile: %s"%(results[group][var]['file'][0]) + print("\t\tvalid_values: %s"%(results[group][var]["valid_values"])) + print("\t\tdescription: %s"%(wrapper.fill(desc_text))) + print("\t\tfile: %s"%(results[group][var]['file'][0])) else: - print "\t%s: %s"%(var, results[group][var]['value']) + print("\t%s: %s"%(var, results[group][var]['value'])) if (__name__ == "__main__"): diff --git a/scripts/lib/CIME/BuildTools/configure.py b/scripts/lib/CIME/BuildTools/configure.py index 90825dde97d..81bb3c331bc 100644 --- a/scripts/lib/CIME/BuildTools/configure.py +++ b/scripts/lib/CIME/BuildTools/configure.py @@ -73,7 +73,7 @@ def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, """ ems_path = os.path.join(output_dir, "env_mach_specific.xml") if os.path.exists(ems_path): - logger.warn("{} already exists, delete to replace".format(ems_path)) + logger.warning("{} already exists, delete to replace".format(ems_path)) return ems_file = EnvMachSpecific(output_dir, unit_testing=unit_testing) ems_file.populate(machobj) diff --git a/scripts/lib/CIME/BuildTools/macrowriterbase.py b/scripts/lib/CIME/BuildTools/macrowriterbase.py index db00e8383c4..7ccf9975336 100644 --- a/scripts/lib/CIME/BuildTools/macrowriterbase.py +++ b/scripts/lib/CIME/BuildTools/macrowriterbase.py @@ -14,6 +14,8 @@ from abc import ABCMeta, abstractmethod from CIME.XML.standard_module_setup import * from CIME.utils import get_cime_root +from six import add_metaclass + logger = logging.getLogger(__name__) def _get_components(value): @@ -69,6 
+71,7 @@ def _get_components(value): return components +@add_metaclass(ABCMeta) class MacroWriterBase(object): """Abstract base class for macro file writers. @@ -101,8 +104,6 @@ class MacroWriterBase(object): end_ifeq """ - __metaclass__ = ABCMeta - indent_increment = 2 def __init__(self, output): @@ -132,7 +133,7 @@ def write_line(self, line): A trailing newline is added, whether or not the input has one. """ - self.output.write(unicode(self.indent_string() + line + "\n")) + self.output.write(str(self.indent_string() + line + "\n")) @abstractmethod def environment_variable_string(self, name): @@ -207,7 +208,7 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o # A few things can be used from environ if not in XML for item in ["MPI_PATH", "NETCDF_PATH"]: if not item in macros and item in os.environ: - logger.warn("Setting {} from Environment".format(item)) + logger.warning("Setting {} from Environment".format(item)) macros[item] = os.environ[item] with open(macros_file, "w") as fd: @@ -222,7 +223,7 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o fd.write("#\n# Makefile Macros \n") # print the settings out to the Macros file - for key, value in sorted(macros.iteritems()): + for key, value in sorted(macros.items()): if key == "_COND_": pass elif key.startswith("ADD_"): @@ -248,7 +249,7 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o # print the settings out to the Macros file, do it in # two passes so that path values appear first in the # file. 
- for key, value in sorted(macros.iteritems()): + for key, value in sorted(macros.items()): if key == "_COND_": pass else: @@ -262,7 +263,7 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o fd.write("set({} {})\n".format(cmake_var, value)) fd.write("list(APPEND CMAKE_PREFIX_PATH {})\n\n".format(value)) - for key, value in sorted(macros.iteritems()): + for key, value in sorted(macros.items()): if key == "_COND_": pass else: @@ -301,10 +302,10 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o def _parse_hash(macros, fd, depth, output_format, cmakedebug=""): width = 2 * depth - for key, value in macros.iteritems(): + for key, value in macros.items(): if type(value) is dict: if output_format == "make" or "DEBUG" in key: - for key2, value2 in value.iteritems(): + for key2, value2 in value.items(): if output_format == "make": fd.write("{}ifeq ($({}), {}) \n".format(" " * width, key, key2)) diff --git a/scripts/lib/CIME/SystemTests/dae.py b/scripts/lib/CIME/SystemTests/dae.py index 5e70a6b2c5b..cdd062c9c04 100644 --- a/scripts/lib/CIME/SystemTests/dae.py +++ b/scripts/lib/CIME/SystemTests/dae.py @@ -54,7 +54,7 @@ def _case_two_setup(self): self._case.set_value("DATA_ASSIMILATION_CYCLES", 2) stopn = self._case.get_value("STOP_N") expect((stopn % 2) == 0, "ERROR: DAE test requires that STOP_N be even") - stopn = stopn / 2 + stopn = int(stopn / 2) self._case.set_value("STOP_N", stopn) self._case.flush() diff --git a/scripts/lib/CIME/SystemTests/eri.py b/scripts/lib/CIME/SystemTests/eri.py index 955b1918019..55446269aa2 100644 --- a/scripts/lib/CIME/SystemTests/eri.py +++ b/scripts/lib/CIME/SystemTests/eri.py @@ -52,12 +52,12 @@ def run_phase(self): stop_option = self._case.get_value("STOP_OPTION") run_startdate = self._case.get_value("RUN_STARTDATE") - stop_n1 = stop_n / 6 + stop_n1 = int(stop_n / 6) rest_n1 = stop_n1 start_1 = run_startdate stop_n2 = stop_n - stop_n1 - rest_n2 = stop_n2 / 2 + 1 + rest_n2 = 
int(stop_n2 / 2 + 1) hist_n = stop_n2 start_1_year, start_1_month, start_1_day = [int(item) for item in start_1.split("-")] @@ -65,7 +65,7 @@ def run_phase(self): start_2 = "{:04d}-{:02d}-{:02d}".format(start_2_year, start_1_month, start_1_day) stop_n3 = stop_n2 - rest_n2 - rest_n3 = stop_n3 / 2 + 1 + rest_n3 = int(stop_n3 / 2 + 1) stop_n4 = stop_n3 - rest_n3 diff --git a/scripts/lib/CIME/SystemTests/ers.py b/scripts/lib/CIME/SystemTests/ers.py index 28698084c1d..2de668606a0 100644 --- a/scripts/lib/CIME/SystemTests/ers.py +++ b/scripts/lib/CIME/SystemTests/ers.py @@ -28,7 +28,7 @@ def _ers_second_phase(self): stop_n = self._case.get_value("STOP_N") stop_option = self._case.get_value("STOP_OPTION") - rest_n = stop_n/2 + 1 + rest_n = int(stop_n/2 + 1) stop_new = stop_n - rest_n expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n)) diff --git a/scripts/lib/CIME/SystemTests/nodefail.py b/scripts/lib/CIME/SystemTests/nodefail.py index 4a1204f8876..daf5a0f8d5a 100644 --- a/scripts/lib/CIME/SystemTests/nodefail.py +++ b/scripts/lib/CIME/SystemTests/nodefail.py @@ -49,7 +49,7 @@ def _restart_fake_phase(self): with open(fake_exe_file, "w") as fd: fd.write(fake_exe) - os.chmod(fake_exe_file, 0755) + os.chmod(fake_exe_file, 0o755) prev_run_exe = self._case.get_value("run_exe") env_mach_specific = self._case.get_env("mach_specific") diff --git a/scripts/lib/CIME/SystemTests/pet.py b/scripts/lib/CIME/SystemTests/pet.py index fe4a526db7f..1cb15939f39 100644 --- a/scripts/lib/CIME/SystemTests/pet.py +++ b/scripts/lib/CIME/SystemTests/pet.py @@ -45,7 +45,7 @@ def _case_two_setup(self): # machines, if the mpiexec tries to exceed the procs-per-node that were given # to the batch submission, things break. Setting MAX_TASKS_PER_NODE to half of # it original value prevents this. 
- self._case.set_value("MAX_TASKS_PER_NODE", self._case.get_value("MAX_TASKS_PER_NODE") / 2) + self._case.set_value("MAX_TASKS_PER_NODE", int(self._case.get_value("MAX_TASKS_PER_NODE") / 2)) # Need to redo case_setup because we may have changed the number of threads case_setup(self._case, reset=True) diff --git a/scripts/lib/CIME/SystemTests/system_tests_common.py b/scripts/lib/CIME/SystemTests/system_tests_common.py index 6f315d238bc..a4e97d1b116 100644 --- a/scripts/lib/CIME/SystemTests/system_tests_common.py +++ b/scripts/lib/CIME/SystemTests/system_tests_common.py @@ -13,7 +13,7 @@ import CIME.build as build -import shutil, glob, gzip, time, traceback +import shutil, glob, gzip, time, traceback, six logger = logging.getLogger(__name__) @@ -88,16 +88,15 @@ def build(self, sharedlib_only=False, model_only=False): try: self.build_phase(sharedlib_only=(phase_name==SHAREDLIB_BUILD_PHASE), model_only=(phase_name==MODEL_BUILD_PHASE)) - except: + except BaseException as e: success = False - msg = sys.exc_info()[1].message - + msg = e.__str__() if "BUILD FAIL" in msg: # Don't want to print stacktrace for a model failure since that # is not a CIME/infrastructure problem. excmsg = msg else: - excmsg = "Exception during build:\n{}\n{}".format(sys.exc_info()[1], traceback.format_exc()) + excmsg = "Exception during build:\n{}\n{}".format(msg, traceback.format_exc()) logger.warning(excmsg) append_testlog(excmsg) @@ -155,15 +154,15 @@ def run(self): self._check_for_memleak() - except: + except BaseException as e: success = False - msg = sys.exc_info()[1].message + msg = e.__str__() if "RUN FAIL" in msg: # Don't want to print stacktrace for a model failure since that # is not a CIME/infrastructure problem. 
excmsg = msg else: - excmsg = "Exception during run:\n{}\n{}".format(sys.exc_info()[1], traceback.format_exc()) + excmsg = "Exception during run:\n{}\n{}".format(msg, traceback.format_exc()) logger.warning(excmsg) append_testlog(excmsg) @@ -240,10 +239,12 @@ def _coupler_log_indicates_run_complete(self): allgood = len(newestcpllogfiles) for cpllog in newestcpllogfiles: try: - if "SUCCESSFUL TERMINATION" in gzip.open(cpllog, 'rb').read(): + if six.b("SUCCESSFUL TERMINATION") in gzip.open(cpllog, 'rb').read(): allgood = allgood - 1 - except: - logger.info("{} is not compressed, assuming run failed".format(cpllog)) + except BaseException as e: + msg = e.__str__() + + logger.info("{} is not compressed, assuming run failed {}".format(cpllog, msg)) return allgood==0 @@ -281,7 +282,7 @@ def _get_mem_usage(self, cpllog): fopen = open with fopen(cpllog, "rb") as f: for line in f: - m = meminfo.match(line) + m = meminfo.match(line.decode('utf-8')) if m: memlist.append((float(m.group(1)), float(m.group(2)))) # Remove the last mem record, it's sometimes artificially high @@ -296,7 +297,7 @@ def _get_throughput(self, cpllog): """ if cpllog is not None and os.path.isfile(cpllog): with gzip.open(cpllog, "rb") as f: - cpltext = f.read() + cpltext = f.read().decode('utf-8') m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s",cpltext) if m: return float(m.group(1)) @@ -345,7 +346,7 @@ def compare_env_run(self, expected=None): diffs = f1obj.compare_xml(f2obj) for key in diffs.keys(): if expected is not None and key in expected: - logging.warn(" Resetting {} for test".format(key)) + logging.warning(" Resetting {} for test".format(key)) f1obj.set_value(key, f2obj.get_value(key, resolved=False)) else: print("WARNING: Found difference in test {}: case: {} original value {}".format(key, diffs[key][0], diffs[key][1])) @@ -463,7 +464,7 @@ def build_phase(self, sharedlib_only=False, model_only=False): f.write("#!/bin/bash\n") f.write(self._script) - os.chmod(modelexe, 0755) + 
os.chmod(modelexe, 0o755) build.post_build(self._case, []) diff --git a/scripts/lib/CIME/XML/compilerblock.py b/scripts/lib/CIME/XML/compilerblock.py index c3fee91e4ec..a975c7efb27 100644 --- a/scripts/lib/CIME/XML/compilerblock.py +++ b/scripts/lib/CIME/XML/compilerblock.py @@ -201,7 +201,7 @@ def _elem_to_setting(self, elem): variables that this setting depends on. """ # Attributes on an element are the conditions on that element. - conditions = dict(elem.items()) + conditions = dict(list(elem.items())) if self._compiler is not None: conditions["COMPILER"] = self._compiler # Deal with internal markup. diff --git a/scripts/lib/CIME/XML/compilers.py b/scripts/lib/CIME/XML/compilers.py index 41d4bdfeaf3..1bbcf93d341 100644 --- a/scripts/lib/CIME/XML/compilers.py +++ b/scripts/lib/CIME/XML/compilers.py @@ -10,6 +10,7 @@ from CIME.BuildTools.makemacroswriter import MakeMacroWriter from CIME.BuildTools.cmakemacroswriter import CMakeMacroWriter from CIME.BuildTools.macroconditiontree import merge_optional_trees +import six logger = logging.getLogger(__name__) @@ -160,7 +161,7 @@ def write_macros_file(self, macros_file="Macros.make", output_format="make", xml else: format_ = output_format - if isinstance(macros_file, basestring): + if isinstance(macros_file, six.string_types): with open(macros_file, "w") as macros: self._write_macros_file_v2(format_, macros) else: @@ -205,7 +206,7 @@ def _write_macros_file_v2(self, build_system, output, xml=None): while value_lists: # Variables that are ready to be written. 
ready_variables = [ - var_name for var_name in value_lists.keys() + var_name for var_name in value_lists if value_lists[var_name].depends <= vars_written ] expect(len(ready_variables) > 0, @@ -254,7 +255,7 @@ def _add_to_macros(node, macros): else: cond_macros = macros["_COND_"] - for key, value2 in attrib.iteritems(): + for key, value2 in attrib.items(): if key not in cond_macros: cond_macros[key] = {} if value2 not in cond_macros[key]: diff --git a/scripts/lib/CIME/XML/component.py b/scripts/lib/CIME/XML/component.py index e0eed6c417b..1f4ae93162c 100644 --- a/scripts/lib/CIME/XML/component.py +++ b/scripts/lib/CIME/XML/component.py @@ -75,7 +75,7 @@ def _get_value_match(self, node, attributes=None, exact_match=False): for valnode in self.get_nodes("value", root=node): # loop through all the keys in valnode (value nodes) attributes - for key,value in valnode.attrib.iteritems(): + for key,value in valnode.attrib.items(): # determine if key is in attributes dictionary match_count = 0 if attributes is not None and key in attributes: @@ -281,6 +281,6 @@ def print_values(self): compsets[attrib] = text logger.info(" {}".format(helptext)) - for v in sorted(compsets.iteritems()): + for v in sorted(compsets.items()): label, definition = v logger.info(" {:20s} : {}".format(label, definition)) diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index f883511689b..d120bb30602 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -84,7 +84,7 @@ def print_values(self, arg_help=True): logger.info(" --------------------------------------") logger.info(" Compset Alias: Compset Long Name ") logger.info(" --------------------------------------") - for key in sorted(compsets_text.iterkeys()): + for key in sorted(compsets_text.keys()): logger.info(" {:20} : {}".format(key, compsets_text[key])) def return_all_values(self): @@ -92,11 +92,11 @@ def return_all_values(self): science_compsets = dict() help_text = 
self.get_value(name="help") compsets_text = self.get_value("names") - for key in sorted(compsets_text.iterkeys()): + for key in sorted(compsets_text.keys()): all_compsets[key] = compsets_text[key] # get the matching science support grids - for alias in all_compsets.iterkeys(): + for alias in all_compsets.keys(): science_compsets[alias] = self.get_compset_match(alias) return help_text, all_compsets diff --git a/scripts/lib/CIME/XML/entry_id.py b/scripts/lib/CIME/XML/entry_id.py index 94b2aca8423..8928a52add5 100644 --- a/scripts/lib/CIME/XML/entry_id.py +++ b/scripts/lib/CIME/XML/entry_id.py @@ -39,7 +39,7 @@ def set_default_value(self, vid, val): if node is not None: val = self.set_element_text("default_value", val, root=node) if val is None: - logger.warn("Called set_default_value on a node without default_value field") + logger.warning("Called set_default_value on a node without default_value field") return val @@ -211,7 +211,7 @@ def _set_valid_values(self, node, new_valid_values): current_value = node.get("value") valid_values_list = self._get_valid_values(node) if current_value is not None and current_value not in valid_values_list: - logger.warn("WARNING: Current setting for {} not in new valid values. Updating setting to \"{}\"".format(node.get("id"), valid_values_list[0])) + logger.warning("WARNING: Current setting for {} not in new valid values. Updating setting to \"{}\"".format(node.get("id"), valid_values_list[0])) self._set_value(node, valid_values_list[0]) return new_valid_values diff --git a/scripts/lib/CIME/XML/env_base.py b/scripts/lib/CIME/XML/env_base.py index 21bc79fbf6d..3acfb36a3cd 100644 --- a/scripts/lib/CIME/XML/env_base.py +++ b/scripts/lib/CIME/XML/env_base.py @@ -1,7 +1,6 @@ """ Base class for env files. 
This class inherits from EntryID.py """ -import string from CIME.XML.standard_module_setup import * from CIME.XML.entry_id import EntryID from CIME.XML.headers import Headers @@ -48,9 +47,9 @@ def check_if_comp_var(self, vid, attribute=None): new_vid = None for comp in self._components: if "_"+comp in vid: - new_vid = string.replace(vid, '_'+comp, '', 1) + new_vid = vid.replace('_'+comp, '', 1) elif comp+"_" in vid: - new_vid = string.replace(vid, comp+'_', '', 1) + new_vid = vid.replace(comp+'_', '', 1) if new_vid is not None: break diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py index b0ac5843bd7..add5de20900 100644 --- a/scripts/lib/CIME/XML/env_batch.py +++ b/scripts/lib/CIME/XML/env_batch.py @@ -359,7 +359,7 @@ def submit_jobs(self, case, no_batch=False, job=None, skip_pnl=False, if self._prereq_jobid is not None: jobid = self._prereq_jobid for dep in deps: - if dep in depid.keys() and depid[dep] is not None: + if dep in depid and depid[dep] is not None: jobid += " " + str(depid[dep]) #TODO: doubt these will be used # elif dep == "and": @@ -372,7 +372,7 @@ def submit_jobs(self, case, no_batch=False, job=None, skip_pnl=False, if slen == 0: jobid = None - logger.warn("job is {}".format(job)) + logger.warning("job is {}".format(job)) result = self._submit_single_job(case, job, jobid, no_batch=no_batch, skip_pnl=skip_pnl, @@ -394,7 +394,7 @@ def submit_jobs(self, case, no_batch=False, job=None, skip_pnl=False, def _submit_single_job(self, case, job, depid=None, no_batch=False, skip_pnl=False, mail_user=None, mail_type='never', batch_args=None, dry_run=False): - logger.warn("Submit job {}".format(job)) + logger.warning("Submit job {}".format(job)) batch_system = self.get_value("BATCH_SYSTEM", subgroup=None) if batch_system is None or batch_system == "none" or no_batch: # Import here to avoid circular include diff --git a/scripts/lib/CIME/XML/env_mach_pes.py b/scripts/lib/CIME/XML/env_mach_pes.py index 172965fce09..c70902ba9a1 
100644 --- a/scripts/lib/CIME/XML/env_mach_pes.py +++ b/scripts/lib/CIME/XML/env_mach_pes.py @@ -25,7 +25,8 @@ def get_value(self, vid, attribute=None, resolved=True, subgroup=None, MAX_MPITA if vid == "NINST_MAX": value = 1 for comp in self._components: - value = max(value, self.get_value("NINST_{}".format(comp))) + if comp != "CPL": + value = max(value, self.get_value("NINST_{}".format(comp))) return value value = EnvBase.get_value(self, vid, attribute, resolved, subgroup) @@ -88,7 +89,9 @@ def get_total_tasks(self, comp_classes): ntasks = self.get_value("NTASKS", attribute={"component":comp}) rootpe = self.get_value("ROOTPE", attribute={"component":comp}) pstrid = self.get_value("PSTRID", attribute={"component":comp}) - maxinst = max(maxinst, self.get_value("NINST", attribute={"component":comp})) + if comp != "CPL": + ninst = self.get_value("NINST", attribute={"component":comp}) + maxinst = max(maxinst, ninst) tt = rootpe + (ntasks - 1) * pstrid + 1 total_tasks = max(tt, total_tasks) if self.get_value("MULTI_DRIVER"): diff --git a/scripts/lib/CIME/XML/env_mach_specific.py b/scripts/lib/CIME/XML/env_mach_specific.py index 64fa53c66de..0bea8710df0 100644 --- a/scripts/lib/CIME/XML/env_mach_specific.py +++ b/scripts/lib/CIME/XML/env_mach_specific.py @@ -258,9 +258,9 @@ def _load_modules_generic(self, modules_to_load): cmd = "source {}".format(sh_init_cmd) - if os.environ.has_key("SOFTENV_ALIASES"): + if "SOFTENV_ALIASES" in os.environ: cmd += " && source $SOFTENV_ALIASES" - if os.environ.has_key("SOFTENV_LOAD"): + if "SOFTENV_LOAD" in os.environ: cmd += " && source $SOFTENV_LOAD" for action,argument in modules_to_load: @@ -356,7 +356,7 @@ def get_mpirun(self, case, attribs, job="case.run", exe_only=False): matches = 0 is_default = False - for key, value in attribs.iteritems(): + for key, value in attribs.items(): expect(key in self._allowed_mpi_attributes, "Unexpected key {} in mpirun attributes".format(key)) if key in xml_attribs: if xml_attribs[key].lower() == 
"false": diff --git a/scripts/lib/CIME/XML/generic_xml.py b/scripts/lib/CIME/XML/generic_xml.py index efad45fa2ee..0ba9f44b336 100644 --- a/scripts/lib/CIME/XML/generic_xml.py +++ b/scripts/lib/CIME/XML/generic_xml.py @@ -6,6 +6,7 @@ from distutils.spawn import find_executable from xml.dom import minidom import getpass +import six logger = logging.getLogger(__name__) @@ -123,9 +124,9 @@ def get_nodes(self, nodename, attributes=None, root=None, xpath=None): # one attribute in an xpath query so we query seperately for each attribute # and create a result with the intersection of those lists - for key, value in attributes.iteritems(): + for key, value in attributes.items(): if value is not None: - expect(isinstance(value, basestring), + expect(isinstance(value, six.string_types), " Bad value passed for key {}".format(key)) xpath = ".//{}[@{}=\'{}\']".format(nodename, key, value) logger.debug("xpath is {}".format(xpath)) @@ -196,8 +197,8 @@ def get_resolved_value(self, raw_value): '4' >>> obj.get_resolved_value("0001-01-01") '0001-01-01' - >>> obj.get_resolved_value("$SHELL{echo hi}") - 'hi' + >>> obj.get_resolved_value("$SHELL{echo hi}") == 'hi' + True """ logger.debug("raw_value {}".format(raw_value)) reference_re = re.compile(r'\${?(\w+)}?') @@ -267,7 +268,7 @@ def validate_xml_file(self, filename, schema): logger.debug("Checking file {} against schema {}".format(filename, schema)) run_cmd_no_fail("{} --noout --schema {} {}".format(xmllint, schema, filename)) else: - logger.warn("xmllint not found, could not validate file {}".format(filename)) + logger.warning("xmllint not found, could not validate file {}".format(filename)) def get_element_text(self, element_name, attributes=None, root=None, xpath=None): element_node = self.get_optional_node(element_name, attributes, root, xpath) diff --git a/scripts/lib/CIME/XML/grids.py b/scripts/lib/CIME/XML/grids.py index 634a4be28eb..6d0a208ac3a 100644 --- a/scripts/lib/CIME/XML/grids.py +++ b/scripts/lib/CIME/XML/grids.py @@ 
-601,7 +601,7 @@ def _get_all_values_v1(self): domain_list.append({'domain':child.tag, 'text':child.text}) grid_info.update({'domains': domain_list}) - + # add mapping files grids = [ ("atm_grid", component_grids[0]), ("lnd_grid", component_grids[1]), ("ocn_grid", component_grids[2]), \ ("rof_grid", component_grids[3]), ("glc_grid", component_grids[5]), ("wav_grid", component_grids[6]) ] @@ -682,16 +682,16 @@ def _get_all_values_v2(self): gridnames = [] for grid_node in grid_nodes: gridnames.append(grid_node.text) - grids += grid_node.get("name") + ":" + grid_node.text + " " + grids += grid_node.get("name") + ":" + grid_node.text + " " grids = " non-default grids are: %s" %grids mask = "" mask_nodes = self.get_nodes("mask", root=model_grid_node) for mask_node in mask_nodes: mask += "\n mask is: %s" %(mask_node.text) - - grids_dict[alias] = {'aliases':aliases, - 'grids':grids, + + grids_dict[alias] = {'aliases':aliases, + 'grids':grids, 'mask':mask } gridnames = set(gridnames) @@ -712,4 +712,3 @@ def return_all_values(self): (default_comp_grids, all_grids) = self._get_all_values_v2() return help_text, default_comp_grids, all_grids - diff --git a/scripts/lib/CIME/XML/machines.py b/scripts/lib/CIME/XML/machines.py index fc39ac556b4..4108c84e0fa 100644 --- a/scripts/lib/CIME/XML/machines.py +++ b/scripts/lib/CIME/XML/machines.py @@ -304,9 +304,9 @@ def print_values(self): print( " os ", os_.text) print( " compilers ",compilers.text) if MAX_MPITASKS_PER_NODE is not None: - print( " pes/node ",MAX_MPITASKS_PER_NODE.text) + print(" pes/node ",MAX_MPITASKS_PER_NODE.text) if max_tasks_per_node is not None: - print( " max_tasks/node ",max_tasks_per_node.text) + print(" max_tasks/node ",max_tasks_per_node.text) def return_all_values(self): # return a dictionary of machines @@ -327,11 +327,10 @@ def return_all_values(self): if max_tasks_per_node is not None: max_tasks_pn = max_tasks_per_node.text - mach_dict[name] = { 'description' : desc.text, + mach_dict[name] = { 
'description' : desc.text, 'os' : os_.text, 'compilers' : compilers.text, 'pes/node' : ppn, 'max_tasks/node' : max_tasks_pn } return mach_dict - diff --git a/scripts/lib/CIME/XML/namelist_definition.py b/scripts/lib/CIME/XML/namelist_definition.py index 6a8a631015c..a8054d6b6e5 100644 --- a/scripts/lib/CIME/XML/namelist_definition.py +++ b/scripts/lib/CIME/XML/namelist_definition.py @@ -270,7 +270,7 @@ def is_valid_value(self, name, value): if not is_valid_fortran_namelist_literal(type_, scalar): invalid.append(scalar) if len(invalid) > 0: - logger.warn("Invalid values {}".format(invalid)) + logger.warning("Invalid values {}".format(invalid)) return False # Now that we know that the strings as input are valid Fortran, do some @@ -296,7 +296,7 @@ def is_valid_value(self, name, value): if scalar not in compare_list: invalid.append(scalar) if len(invalid) > 0: - logger.warn("Invalid values {}".format(invalid)) + logger.warning("Invalid values {}".format(invalid)) return False # Check size of input array. 
diff --git a/scripts/lib/CIME/XML/pes.py b/scripts/lib/CIME/XML/pes.py index 05e077a3e95..583d308250f 100644 --- a/scripts/lib/CIME/XML/pes.py +++ b/scripts/lib/CIME/XML/pes.py @@ -112,11 +112,11 @@ def _find_matches(self, grid_nodes, grid, compset, machine, pesize_opts, overrid compset_choice = compset_match pesize_choice = pesize_match elif points == max_points: - logger.warn("mach_choice {} mach_match {}".format(mach_choice, mach_match)) - logger.warn("grid_choice {} grid_match {}".format(grid_choice, grid_match)) - logger.warn("compset_choice {} compset_match {}".format(compset_choice, compset_match)) - logger.warn("pesize_choice {} pesize_match {}".format(pesize_choice, pesize_match)) - logger.warn("points = {:d}".format(points)) + logger.warning("mach_choice {} mach_match {}".format(mach_choice, mach_match)) + logger.warning("grid_choice {} grid_match {}".format(grid_choice, grid_match)) + logger.warning("compset_choice {} compset_match {}".format(compset_choice, compset_match)) + logger.warning("pesize_choice {} pesize_match {}".format(pesize_choice, pesize_match)) + logger.warning("points = {:d}".format(points)) expect(False, "More than one PE layout matches given PE specs") if not override: for node in pe_select: diff --git a/scripts/lib/CIME/XML/test_reporter.py b/scripts/lib/CIME/XML/test_reporter.py index 1dd841bc6a2..35257498b69 100644 --- a/scripts/lib/CIME/XML/test_reporter.py +++ b/scripts/lib/CIME/XML/test_reporter.py @@ -2,15 +2,12 @@ Interface to the testreporter xml. 
This class inherits from GenericXML.py """ - +#pylint: disable=import-error +from six.moves import urllib from CIME.XML.standard_module_setup import * from CIME.XML.generic_xml import GenericXML from CIME.utils import expect,get_model -import urllib - - - class TestReporter(GenericXML): def __init__(self): @@ -97,17 +94,16 @@ def push2testdb(self): # Post test result XML to CESM test database # xmlstr = ET.tostring(self.root,method="xml",encoding="UTF-8") - username=raw_input("Username:") + username=input("Username:") os.system("stty -echo") - password=raw_input("Password:") + password=input("Password:") os.system("stty echo") params={'username':username,'password':password,'testXML':xmlstr} url="https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" - params = urllib.urlencode(params) - f = urllib.urlopen(url, params) + params = urllib.parse.urlencode(params) + f = urllib.request.urlopen(url, params) # # Print any messages from the post command # print(f.read()) print(f.code) - diff --git a/scripts/lib/CIME/XML/testlist.py b/scripts/lib/CIME/XML/testlist.py index c1fd4c8d48f..38a49650fa2 100644 --- a/scripts/lib/CIME/XML/testlist.py +++ b/scripts/lib/CIME/XML/testlist.py @@ -157,4 +157,3 @@ def get_tests(self, machine=None, category=None, compiler=None, compset=None, gr else: logger.critical("Did not recognize testlist file version {} for file {}" .format(self.get_version(), self.filename)) - diff --git a/scripts/lib/CIME/aprun.py b/scripts/lib/CIME/aprun.py index c09574912d2..97f572cd205 100755 --- a/scripts/lib/CIME/aprun.py +++ b/scripts/lib/CIME/aprun.py @@ -65,16 +65,16 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, c2 += 1 # make sure all maxt values at least 1 - for c1 in xrange(0, total_tasks): + for c1 in range(0, total_tasks): if maxt[c1] < 1: maxt[c1] = 1 # Compute task and thread settings for batch commands tasks_per_node, task_count, thread_count, max_thread_count, total_node_count, aprun_args = \ 0, 1, maxt[0], 
maxt[0], 0, "" - for c1 in xrange(1, total_tasks): + for c1 in range(1, total_tasks): if maxt[c1] != thread_count: - tasks_per_node = min(MAX_MPITASKS_PER_NODE, max_tasks_per_node / thread_count) + tasks_per_node = min(MAX_MPITASKS_PER_NODE, int(max_tasks_per_node / thread_count)) tasks_per_node = min(task_count, tasks_per_node) @@ -99,7 +99,7 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, task_count += 1 if MAX_MPITASKS_PER_NODE > 0: - tasks_per_node = min(MAX_MPITASKS_PER_NODE, max_tasks_per_node / thread_count) + tasks_per_node = min(MAX_MPITASKS_PER_NODE, int(max_tasks_per_node / thread_count)) else: tasks_per_node = max_tasks_per_node / thread_count diff --git a/scripts/lib/CIME/bless_test_results.py b/scripts/lib/CIME/bless_test_results.py index b89b21a6826..0b7609d89c7 100644 --- a/scripts/lib/CIME/bless_test_results.py +++ b/scripts/lib/CIME/bless_test_results.py @@ -17,7 +17,7 @@ def bless_namelists(test_name, report_only, force, baseline_name, baseline_root) # Update namelist files print("Test '{}' had namelist diff".format(test_name)) if (not report_only and - (force or raw_input("Update namelists (y/n)? ").upper() in ["Y", "YES"])): + (force or input("Update namelists (y/n)? ").upper() in ["Y", "YES"])): create_test_gen_args = " -g {} ".format(baseline_name if get_model() == "cesm" else " -g -b {} ".format(baseline_name)) stat, _, err = run_cmd("{}/create_test {} -n {} --baseline-root {} -o".format(get_scripts_root(), test_name, create_test_gen_args, baseline_root)) if stat != 0: @@ -42,7 +42,7 @@ def bless_history(test_name, testcase_dir_for_test, baseline_name, baseline_root else: print(comments) if (not report_only and - (force or raw_input("Update this diff (y/n)? ").upper() in ["Y", "YES"])): + (force or input("Update this diff (y/n)? 
").upper() in ["Y", "YES"])): result, comments = generate_baseline(case, baseline_dir=baseline_full_dir) if not result: logging.warning("Hist file bless FAILED for test {}".format(test_name)) diff --git a/scripts/lib/CIME/build.py b/scripts/lib/CIME/build.py index bf7b3909f51..16eba36a28a 100644 --- a/scripts/lib/CIME/build.py +++ b/scripts/lib/CIME/build.py @@ -252,7 +252,7 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid bldlog = open(file_build, "r") for line in bldlog: if re.search("Current setting for", line): - logger.warn(line) + logger.warning(line) # clm not a shared lib for ACME if get_model() != "acme" and (buildlist is None or "lnd" in buildlist): @@ -290,7 +290,7 @@ def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldr t1 = time.time() cmd = os.path.join(caseroot, "SourceMods", "src." + compname, "buildlib") if os.path.isfile(cmd): - logger.warn("WARNING: using local buildlib script for {}".format(compname)) + logger.warning("WARNING: using local buildlib script for {}".format(compname)) else: cmd = os.path.join(config_dir, "buildlib") expect(os.path.isfile(cmd), "Could not find buildlib for {}".format(compname)) @@ -324,7 +324,7 @@ def _clean_impl(case, cleanlist, clean_all): sharedlibroot = os.path.abspath(case.get_value("SHAREDLIBROOT")) expect(sharedlibroot is not None,"No SHAREDLIBROOT defined in case") if sharedlibroot != exeroot and os.path.isdir(sharedlibroot): - logging.warn("cleaning directory {}".format(sharedlibroot)) + logging.warning("cleaning directory {}".format(sharedlibroot)) shutil.rmtree(sharedlibroot) else: expect(cleanlist is not None and len(cleanlist) > 0,"Empty cleanlist not expected") @@ -387,7 +387,7 @@ def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist): # Retrieve relevant case data # This environment variable gets set for cesm Make and # needs to be unset before building again. 
- if "MODEL" in os.environ.keys(): + if "MODEL" in os.environ: del os.environ["MODEL"] build_threaded = case.get_build_threaded() casetools = case.get_value("CASETOOLS") diff --git a/scripts/lib/CIME/buildlib.py b/scripts/lib/CIME/buildlib.py index d04860bce39..551f6325d75 100644 --- a/scripts/lib/CIME/buildlib.py +++ b/scripts/lib/CIME/buildlib.py @@ -6,7 +6,6 @@ from CIME.case import Case from CIME.utils import parse_args_and_handle_standard_logging_options, setup_standard_logging_options import sys, os, argparse, doctest - logger = logging.getLogger(__name__) ############################################################################### @@ -86,7 +85,4 @@ def run_gmake(case, compclass, libroot, bldroot, libname="", user_cppdefs=""): if user_cppdefs: cmd = cmd + "USER_CPPDEFS='{}'".format(user_cppdefs ) - rc, out, err = run_cmd(cmd) - expect(rc == 0, "Command {} failed rc={:d}\nout={}\nerr={}".format(cmd, rc, out, err)) - - print "Command {} completed with output {}\nerr {}".format(cmd, out, err) + run_cmd_no_fail(cmd, combine_output=True) diff --git a/scripts/lib/CIME/buildnml.py b/scripts/lib/CIME/buildnml.py index a600f681432..85c4f5b96ce 100644 --- a/scripts/lib/CIME/buildnml.py +++ b/scripts/lib/CIME/buildnml.py @@ -98,7 +98,7 @@ def create_namelist_infile(case, user_nl_file, namelist_infile, infile_text=""): with open(user_nl_file, "r") as file_usernl: lines_input = file_usernl.readlines() else: - logger.warn("WARNING: No file {} found in case directory".format(user_nl_file)) + logger.warning("WARNING: No file {} found in case directory".format(user_nl_file)) lines_output = [] lines_output.append("&comp_inparm \n") diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 997c3a4c2d9..cc96df01db4 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -7,7 +7,8 @@ from copy import deepcopy import glob, os, shutil, math from CIME.XML.standard_module_setup import * - +#pylint: disable=import-error,redefined-builtin +from six.moves 
import input from CIME.utils import expect, get_cime_root, append_status from CIME.utils import convert_to_type, get_model, get_project from CIME.utils import get_current_commit, check_name @@ -402,15 +403,15 @@ def set_valid_values(self, item, valid_values): return result def set_lookup_value(self, item, value): - if item in self.lookups.keys() and self.lookups[item] is not None: - logger.warn("Item {} already in lookups with value {}".format(item,self.lookups[item])) + if item in self.lookups and self.lookups[item] is not None: + logger.warning("Item {} already in lookups with value {}".format(item,self.lookups[item])) else: logger.debug("Setting in lookups: item {}, value {}".format(item,value)) self.lookups[item] = value def clean_up_lookups(self, allow_undefined=False): # put anything in the lookups table into existing env objects - for key,value in self.lookups.items(): + for key,value in list(self.lookups.items()): logger.debug("lookup key {} value {}".format(key, value)) result = self.set_value(key,value, allow_undefined=allow_undefined) if result is not None: @@ -614,7 +615,7 @@ def _get_component_config_data(self, files): if len(self._component_classes) > len(self._components): self._components.append('sesp') - for i in xrange(1,len(self._component_classes)): + for i in range(1,len(self._component_classes)): comp_class = self._component_classes[i] comp_name = self._components[i-1] node_name = 'CONFIG_' + comp_class + '_FILE' @@ -733,10 +734,10 @@ def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): mach_pes_obj.set_value(key, ninst) key = "NTASKS_{}".format(compclass) - if key not in pes_ntasks.keys(): + if key not in pes_ntasks: mach_pes_obj.set_value(key,1) key = "NTHRDS_{}".format(compclass) - if compclass not in pes_nthrds.keys(): + if compclass not in pes_nthrds: mach_pes_obj.set_value(compclass,1) return pesize @@ -887,7 +888,7 @@ def configure(self, compset_name, grid_name, machine_name=None, if os.path.exists(wdir): 
expect(not test, "Directory {} already exists, aborting test".format(wdir)) if answer is None: - response = raw_input("\nDirectory {} already exists, (r)eplace, (a)bort, or (u)se existing?".format(wdir)) + response = input("\nDirectory {} already exists, (r)eplace, (a)bort, or (u)se existing?".format(wdir)) else: response = answer @@ -1176,7 +1177,7 @@ def report_job_status(self): if not jobmap: logger.info("No job ids associated with this case. Either case.submit was not run or was run with no-batch") else: - for jobname, jobid in jobmap.iteritems(): + for jobname, jobid in jobmap.items(): status = self.get_env("batch").get_status(jobid) if status: logger.info("{}: {}".format(jobname, status)) @@ -1243,7 +1244,7 @@ def set_model_version(self, model): if version != "unknown": logger.info("{} model version found: {}".format(model, version)) else: - logger.warn("WARNING: No {} Model version found.".format(model)) + logger.warning("WARNING: No {} Model version found.".format(model)) def load_env(self): if not self._is_env_loaded: @@ -1284,9 +1285,9 @@ def _check_testlists(self, compset_alias, grid_name, files): if test["category"] == "prealpha" or test["category"] == "prebeta" or "aux_" in test["category"]: testcnt += 1 if testcnt > 0: - logger.warn("\n*********************************************************************************************************************************") - logger.warn("This compset and grid combination is not scientifically supported, however it is used in {:d} tests.".format(testcnt)) - logger.warn("*********************************************************************************************************************************\n") + logger.warning("\n*********************************************************************************************************************************") + logger.warning("This compset and grid combination is not scientifically supported, however it is used in {:d} tests.".format(testcnt)) + 
logger.warning("*********************************************************************************************************************************\n") else: expect(False, "\nThis compset and grid combination is untested in CESM. " "Override this warning with the --run-unsupported option to create_newcase.", @@ -1304,7 +1305,7 @@ def set_file(self, xmlfile): gfile = GenericXML(infile=xmlfile) ftype = gfile.get_id() components = self.get_value("COMP_CLASSES") - logger.warn("setting case file to {}".format(xmlfile)) + logger.warning("setting case file to {}".format(xmlfile)) new_env_file = None for env_file in self._env_entryid_files: if os.path.basename(env_file.filename) == ftype: @@ -1411,10 +1412,10 @@ def create(self, casename, srcroot, compset_name, grid_name, except: if os.path.exists(self._caseroot): if not logger.isEnabledFor(logging.DEBUG) and not test: - logger.warn("Failed to setup case, removing {}\nUse --debug to force me to keep caseroot".format(self._caseroot)) + logger.warning("Failed to setup case, removing {}\nUse --debug to force me to keep caseroot".format(self._caseroot)) shutil.rmtree(self._caseroot) else: - logger.warn("Leaving broken case dir {}".format(self._caseroot)) + logger.warning("Leaving broken case dir {}".format(self._caseroot)) raise diff --git a/scripts/lib/CIME/case_clone.py b/scripts/lib/CIME/case_clone.py index 0b50818501f..00980feb854 100644 --- a/scripts/lib/CIME/case_clone.py +++ b/scripts/lib/CIME/case_clone.py @@ -1,4 +1,4 @@ -import os, glob, shutil, string +import os, glob, shutil from CIME.XML.standard_module_setup import * from CIME.utils import expect from CIME.user_mod_support import apply_user_mods @@ -44,7 +44,7 @@ def create_case_clone(case, newcase, keepexe=False, mach_dir=None, project=None, olduser = case.get_value("USER") newuser = os.environ.get("USER") if olduser != newuser: - cime_output_root = string.replace(cime_output_root, olduser, newuser) + cime_output_root = cime_output_root.replace(olduser, newuser) 
newcase.set_value("USER", newuser) newcase.set_value("CIME_OUTPUT_ROOT", cime_output_root) @@ -68,8 +68,8 @@ def create_case_clone(case, newcase, keepexe=False, mach_dir=None, project=None, newcase.set_value("BUILD_COMPLETE","TRUE") orig_bld_complete = case.get_value("BUILD_COMPLETE") if not orig_bld_complete: - logger.warn("\nWARNING: Creating a clone with --keepexe before building the original case may cause PIO_TYPENAME to be invalid in the clone") - logger.warn("Avoid this message by building case one before you clone.\n") + logger.warning("\nWARNING: Creating a clone with --keepexe before building the original case may cause PIO_TYPENAME to be invalid in the clone") + logger.warning("Avoid this message by building case one before you clone.\n") else: newcase.set_value("BUILD_COMPLETE","FALSE") @@ -133,7 +133,7 @@ def create_case_clone(case, newcase, keepexe=False, mach_dir=None, project=None, success, comment = compare_files(os.path.join(newcaseroot, "env_build.xml"), os.path.join(newcaseroot, "LockedFiles", "env_build.xml")) if not success: - logger.warn(comment) + logger.warning(comment) shutil.rmtree(newcase_root) expect(False, "env_build.xml cannot be changed via usermods if keepexe is an option: \n " "Failed to clone case, removed {}\n".format(newcase_root)) diff --git a/scripts/lib/CIME/case_run.py b/scripts/lib/CIME/case_run.py index 0c657c7e5e8..0b5f756ff8f 100644 --- a/scripts/lib/CIME/case_run.py +++ b/scripts/lib/CIME/case_run.py @@ -210,12 +210,12 @@ def resubmit_check(case): # Note that Mira requires special logic dout_s = case.get_value("DOUT_S") - logger.warn("dout_s {} ".format(dout_s)) + logger.warning("dout_s {} ".format(dout_s)) mach = case.get_value("MACH") - logger.warn("mach {} ".format(mach)) + logger.warning("mach {} ".format(mach)) testcase = case.get_value("TESTCASE") resubmit_num = case.get_value("RESUBMIT") - logger.warn("resubmit_num {}".format(resubmit_num)) + logger.warning("resubmit_num {}".format(resubmit_num)) # If dout_s is 
True than short-term archiving handles the resubmit # If dout_s is True and machine is mira submit the st_archive script resubmit = False @@ -310,7 +310,7 @@ def case_run(case, skip_pnl=False): save_postrun_provenance(case) - logger.warn("check for resubmit") + logger.warning("check for resubmit") resubmit_check(case) return True diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index 4c2750412e9..532a250f0d3 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -49,7 +49,7 @@ def _build_usernl_files(case, model, comp): nlfile = "user_nl_{}".format(comp) model_nl = os.path.join(model_dir, nlfile) if ninst > 1: - for inst_counter in xrange(1, ninst+1): + for inst_counter in range(1, ninst+1): inst_nlfile = "{}_{:04d}".format(nlfile, inst_counter) if not os.path.exists(inst_nlfile): # If there is a user_nl_foo in the case directory, copy it @@ -144,7 +144,7 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False): ntasks = ninst else: expect(False, "NINST_{} value {:d} greater than NTASKS_{} {:d}".format(comp, ninst, comp, ntasks)) - case.set_value("NTASKS_PER_INST_{}".format(comp), ntasks / ninst) + case.set_value("NTASKS_PER_INST_{}".format(comp), int(ntasks / ninst)) if os.path.exists("case.run"): logger.info("Machine/Decomp/Pes configuration has already been done ...skipping") diff --git a/scripts/lib/CIME/case_st_archive.py b/scripts/lib/CIME/case_st_archive.py index 9c756d93a5d..ea8931a88a6 100644 --- a/scripts/lib/CIME/case_st_archive.py +++ b/scripts/lib/CIME/case_st_archive.py @@ -392,9 +392,9 @@ def _archive_restarts_date_comp(case, archive, archive_entry, try: os.remove(srcfile) except OSError: - logger.warn("unable to remove interim restart file {}".format(srcfile)) + logger.warning("unable to remove interim restart file {}".format(srcfile)) else: - logger.warn("interim restart file {} does not exist".format(srcfile)) + logger.warning("interim restart file {} does not 
exist".format(srcfile)) return histfiles_savein_rundir @@ -512,7 +512,7 @@ def case_st_archive(case, last_date=None, archive_incomplete_logs=True, copy_onl rest_n = case.get_value('REST_N') stop_n = case.get_value('STOP_N') if rest_n < stop_n: - logger.warn('Restart files from end of run will be saved' + logger.warning('Restart files from end of run will be saved' 'interim restart files will be deleted') logger.info("st_archive starting") diff --git a/scripts/lib/CIME/case_submit.py b/scripts/lib/CIME/case_submit.py index 90176e85571..20243b6368e 100644 --- a/scripts/lib/CIME/case_submit.py +++ b/scripts/lib/CIME/case_submit.py @@ -62,13 +62,13 @@ def _submit(case, job=None, resubmit=False, no_batch=False, skip_pnl=False, case.set_value("RUN_WITH_SUBMIT", True) case.flush() - logger.warn("submit_jobs {}".format(job)) + logger.warning("submit_jobs {}".format(job)) job_ids = case.submit_jobs(no_batch=no_batch, job=job, skip_pnl=skip_pnl, mail_user=mail_user, mail_type=mail_type, batch_args=batch_args) xml_jobids = [] - for jobname, jobid in job_ids.iteritems(): + for jobname, jobid in job_ids.items(): logger.info("Submitted job {} with id {}".format(jobname, jobid)) if jobid: xml_jobids.append("{}:{}".format(jobname, jobid)) diff --git a/scripts/lib/CIME/case_test.py b/scripts/lib/CIME/case_test.py index 1136a133186..0f752b5558f 100644 --- a/scripts/lib/CIME/case_test.py +++ b/scripts/lib/CIME/case_test.py @@ -47,7 +47,7 @@ def case_test(case, testname=None, reset=False): testname = case.get_value('TESTCASE') expect(testname is not None, "testname argument not resolved") - logging.warn("Running test for {}".format(testname)) + logging.warning("Running test for {}".format(testname)) _set_up_signal_handlers() diff --git a/scripts/lib/CIME/check_lockedfiles.py b/scripts/lib/CIME/check_lockedfiles.py index f2bc049385d..12959afb6e8 100644 --- a/scripts/lib/CIME/check_lockedfiles.py +++ b/scripts/lib/CIME/check_lockedfiles.py @@ -71,7 +71,7 @@ def 
check_pelayouts_require_rebuild(case, models): new_inst = case.get_value("NINST_{}".format(comp)) if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst: - logging.warn("{} pe change requires clean build {} {}".format(comp, old_tasks, new_tasks)) + logging.warning("{} pe change requires clean build {} {}".format(comp, old_tasks, new_tasks)) cleanflag = comp.lower() run_cmd_no_fail("./case.build --clean {}".format(cleanflag)) @@ -107,11 +107,11 @@ def check_lockedfiles(case): f1obj = case.get_env('batch') f2obj = EnvBatch(caseroot, lfile) else: - logging.warn("Locked XML file '{}' is not current being handled".format(fpart)) + logging.warning("Locked XML file '{}' is not current being handled".format(fpart)) continue diffs = f1obj.compare_xml(f2obj) if diffs: - logging.warn("File {} has been modified".format(lfile)) + logging.warning("File {} has been modified".format(lfile)) for key in diffs.keys(): print(" found difference in {} : case {} locked {}" .format(key, repr(diffs[key][0]), repr(diffs[key][1]))) @@ -122,9 +122,9 @@ def check_lockedfiles(case): expect(False, "Cannot change file env_case.xml, please" " recover the original copy from LockedFiles") elif objname == "env_build": - logging.warn("Setting build complete to False") + logging.warning("Setting build complete to False") case.set_value("BUILD_COMPLETE", False) - if "PIO_VERSION" in diffs.keys(): + if "PIO_VERSION" in diffs: case.set_value("BUILD_STATUS", 2) logging.critical("Changing PIO_VERSION requires running " "case.build --clean-all and rebuilding") diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py index 6aae473c5d4..03eac9f1347 100644 --- a/scripts/lib/CIME/code_checker.py +++ b/scripts/lib/CIME/code_checker.py @@ -16,7 +16,9 @@ def _run_pylint(on_file, interactive): ############################################################################### pylint = find_executable("pylint") - cmd_options = " 
--disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement,logging-format-interpolation" + cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import" + cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement" + cmd_options += ",logging-format-interpolation,no-name-in-module" cimeroot = get_cime_root() if "scripts/Tools" in on_file: @@ -58,6 +60,8 @@ def _should_pylint_skip(filepath): for dir_to_skip in list_of_directories_to_ignore: if dir_to_skip + "/" in filepath: return True + if filepath == "scripts/lib/six.py": + return True return False @@ -100,6 +104,10 @@ def check_code(files, num_procs=10, interactive=False): # Check every python file files_to_check = get_all_checkable_files() + if "scripts/lib/six.py" in files_to_check: + files_to_check.remove("scripts/lib/six.py") + logger.info("Not checking contributed file six.py") + expect(len(files_to_check) > 0, "No matching files found") # No point in using more threads than files diff --git a/scripts/lib/CIME/compare_namelists.py b/scripts/lib/CIME/compare_namelists.py index b8d75648027..d4b0fca41b6 100644 --- a/scripts/lib/CIME/compare_namelists.py +++ b/scripts/lib/CIME/compare_namelists.py @@ -353,7 +353,7 @@ def _compare_values(name, gold_value, comp_value, case): comments += " list variable '{}' has extra value {}\n".format(name, comp_value_list_item) elif (type(gold_value) is OrderedDict): - for key, gold_value_dict_item in gold_value.iteritems(): + for key, gold_value_dict_item in gold_value.items(): if (key in comp_value): comments += _compare_values("{} dict item {}".format(name, key), gold_value_dict_item, comp_value[key], case) @@ -425,7 +425,7 @@ def _compare_namelists(gold_namelists, comp_namelists, case): ... val3 = .false. ... 
/''' >>> comments = _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), None) - >>> print comments + >>> print(comments) Missing namelist: nml1 Differences in namelist 'nml2': BASE: val21 = 'foo' @@ -493,12 +493,12 @@ def _compare_namelists(gold_namelists, comp_namelists, case): '' """ different_namelists = OrderedDict() - for namelist, gold_names in gold_namelists.iteritems(): + for namelist, gold_names in gold_namelists.items(): if (namelist not in comp_namelists): different_namelists[namelist] = ["Missing namelist: {}\n".format(namelist)] else: comp_names = comp_namelists[namelist] - for name, gold_value in gold_names.iteritems(): + for name, gold_value in gold_names.items(): if (name not in comp_names): different_namelists.setdefault(namelist, []).append(" missing variable: '{}'\n".format(name)) else: @@ -516,7 +516,7 @@ def _compare_namelists(gold_namelists, comp_namelists, case): different_namelists[namelist] = ["Found extra namelist: {}\n".format(namelist)] comments = "" - for namelist, nlcomment in different_namelists.iteritems(): + for namelist, nlcomment in different_namelists.items(): if len(nlcomment) == 1: comments += nlcomment[0] else: diff --git a/scripts/lib/CIME/compare_test_results.py b/scripts/lib/CIME/compare_test_results.py index ef476df35e0..4d486807060 100644 --- a/scripts/lib/CIME/compare_test_results.py +++ b/scripts/lib/CIME/compare_test_results.py @@ -152,7 +152,7 @@ def compare_test_results(baseline_name, baseline_root, test_root, compiler, test brief_result += " {}".format(compare_comment) brief_result += "\n" - print(brief_result,) + print(brief_result) append_status_cprnc_log(brief_result, logfile_name, test_dir) diff --git a/scripts/lib/CIME/get_timing.py b/scripts/lib/CIME/get_timing.py index 2e128bf56ad..4ae32edbce4 100644 --- a/scripts/lib/CIME/get_timing.py +++ b/scripts/lib/CIME/get_timing.py @@ -125,8 +125,11 @@ def _getTiming(self, inst=0): ncpl_base_period = 
self.case.get_value("NCPL_BASE_PERIOD") ncpl = 0 for compclass in self.case.get_values("COMP_CLASSES"): - ncpl = max(ncpl, self.case.get_value("{}_NCPL".format(compclass))) - ocn_ncpl = self.case.get_value("OCN_NCPL") + comp_ncpl = self.case.get_value("{}_NCPL".format(compclass)) + if compclass == "OCN": + ocn_ncpl = comp_ncpl + if comp_ncpl is not None: + ncpl = max(ncpl, comp_ncpl) compset = self.case.get_value("COMPSET") if compset is None: @@ -179,7 +182,7 @@ def _getTiming(self, inst=0): try: shutil.copyfile(binfilename, finfilename) - except Exception, e: + except Exception as e: if not os.path.isfile(binfilename): logger.critical("File {} not found".format(binfilename)) else: @@ -191,7 +194,7 @@ def _getTiming(self, inst=0): fin = open(finfilename, "r") self.finlines = fin.readlines() fin.close() - except Exception, e: + except Exception as e: logger.critical("Unable to open file {}".format(finfilename)) raise e @@ -228,7 +231,7 @@ def _getTiming(self, inst=0): cpl.offset = 0 try: self.fout = open(foutfilename, "w") - except Exception, e: + except Exception as e: logger.critical("Could not open file for writing: {}".format(foutfilename)) raise e diff --git a/scripts/lib/CIME/hist_utils.py b/scripts/lib/CIME/hist_utils.py index bb9714683ee..045e913f719 100644 --- a/scripts/lib/CIME/hist_utils.py +++ b/scripts/lib/CIME/hist_utils.py @@ -403,7 +403,7 @@ def generate_baseline(case, baseline_dir=None, allow_baseline_overwrite=False): # drop the date so that the name is generic newestcpllogfile = case.get_latest_cpl_log(coupler_log_path=case.get_value("LOGDIR")) if newestcpllogfile is None: - logger.warn("No cpl.log file found in log directory {}".format(case.get_value("LOGDIR"))) + logger.warning("No cpl.log file found in log directory {}".format(case.get_value("LOGDIR"))) else: shutil.copyfile(newestcpllogfile, os.path.join(basegen_dir, "cpl.log.gz")) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index f3115a1845e..b4613d63ba1 100644 
--- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -108,6 +108,7 @@ from CIME.XML.standard_module_setup import * from CIME.utils import expect +import six logger = logging.getLogger(__name__) @@ -640,10 +641,7 @@ def literal_to_python_value(literal, type_=None): >>> literal_to_python_value("") >>> literal_to_python_value("-1.D+10") -10000000000.0 - >>> literal_to_python_value("nan(1234)") - Traceback (most recent call last): - ... - ValueError: invalid literal for float(): nan(1234) + >>> shouldRaise(ValueError, literal_to_python_value, "nan(1234)") """ expect(FORTRAN_REPEAT_PREFIX_REGEX.search(literal) is None, "Cannot use repetition syntax in literal_to_python_value") @@ -688,8 +686,8 @@ def expand_literal_list(literals): ['true'] >>> expand_literal_list(['1', '2', 'f*', '3*3', '5']) ['1', '2', 'f*', '3', '3', '3', '5'] - >>> expand_literal_list([u'2*f*']) - [u'f*', u'f*'] + >>> expand_literal_list(['2*f*']) + ['f*', 'f*'] """ expanded = [] for literal in literals: @@ -711,8 +709,8 @@ def compress_literal_list(literals): ['true'] >>> compress_literal_list(['1', '2', 'f*', '3', '3', '3', '5']) ['1', '2', 'f*', '3', '3', '3', '5'] - >>> compress_literal_list([u'f*', u'f*']) - [u'f*', u'f*'] + >>> compress_literal_list(['f*', 'f*']) + ['f*', 'f*'] """ compressed = [] if len(literals) == 0: @@ -732,21 +730,21 @@ def compress_literal_list(literals): # Otherwise, write out the previous literal and start tracking the # new one. 
rep_str = str(num_reps) + '*' if num_reps > 1 else '' - if isinstance(old_literal, basestring): + if isinstance(old_literal, six.string_types): compressed.append(rep_str + old_literal) else: compressed.append(rep_str + str(old_literal)) old_literal = literal num_reps = 1 rep_str = str(num_reps) + '*' if num_reps > 1 else '' - if isinstance(old_literal, basestring): + if isinstance(old_literal, six.string_types): compressed.append(rep_str + old_literal) else: compressed.append(rep_str + str(old_literal)) return compressed else: for literal in literals: - if isinstance(literal, basestring): + if isinstance(literal, six.string_types): compressed.append(literal) else: compressed.append(str(literal)) @@ -833,7 +831,7 @@ def parse(in_file=None, text=None, groupless=False, convert_tab_to_space=True): "Must specify an input file or text to the namelist parser.") expect(in_file is None or text is None, "Cannot specify both input file and text to the namelist parser.") - if isinstance(in_file, str) or isinstance(in_file, unicode): + if isinstance(in_file, six.string_types): logger.debug("Reading namelist at: {}".format(in_file)) with open(in_file) as in_file_obj: text = in_file_obj.read() @@ -853,6 +851,23 @@ def parse(in_file=None, text=None, groupless=False, convert_tab_to_space=True): return Namelist(namelist_dict) +def shouldRaise(eclass, method, *args, **kw): + """ + A helper function to make doctests py3 compatible + http://python3porting.com/problems.html#running-doctests + """ + try: + method(*args, **kw) + except: + e = sys.exc_info()[1] + if not isinstance(e, eclass): + raise + return + raise Exception("Expected exception %s not raised" % + str(eclass)) + + + class Namelist(object): """Class representing a Fortran namelist. 
@@ -899,9 +914,9 @@ def get_group_names(self): >>> Namelist().get_group_names() [] >>> sorted(parse(text='&foo / &bar /').get_group_names()) - [u'bar', u'foo'] + ['bar', 'foo'] """ - return self._groups.keys() + return list(self._groups.keys()) def get_variable_names(self, group_name): """Return a list of all variables in the given namelist group. @@ -912,38 +927,38 @@ def get_variable_names(self, group_name): [] >>> x = parse(text='&foo bar=,bazz=true,bazz(2)=fred,bang=6*""/') >>> sorted(x.get_variable_names('fOo')) - [u'bang', u'bar', u'bazz', u'bazz(2)'] + ['bang', 'bar', 'bazz', 'bazz(2)'] >>> x = parse(text='&foo bar=,bazz=true,bang=6*""/') >>> sorted(x.get_variable_names('fOo')) - [u'bang', u'bar', u'bazz'] + ['bang', 'bar', 'bazz'] >>> x = parse(text='&foo bar(::)=,bazz=false,bazz(2)=true,bazz(:2:)=6*""/') >>> sorted(x.get_variable_names('fOo')) - [u'bar(::)', u'bazz', u'bazz(2)', u'bazz(:2:)'] + ['bar(::)', 'bazz', 'bazz(2)', 'bazz(:2:)'] """ group_name = group_name.lower() if group_name not in self._groups: return [] - return self._groups[group_name].keys() + return list(self._groups[group_name].keys()) def get_variable_value(self, group_name, variable_name): """Return the value of the specified variable. This function always returns a non-empty list containing strings. If the - specified `group_name` or `variable_name` is not present, `[u'']` is + specified `group_name` or `variable_name` is not present, `['']` is returned. 
>>> Namelist().get_variable_value('foo', 'bar') - [u''] + [''] >>> parse(text='&foo bar=1,2 /').get_variable_value('foo', 'bazz') - [u''] + [''] >>> parse(text='&foo bar=1,2 /').get_variable_value('foO', 'Bar') - [u'1', u'2'] + ['1', '2'] """ group_name = group_name.lower() variable_name = variable_name.lower() if group_name not in self._groups or \ variable_name not in self._groups[group_name]: - return [u''] + return [''] return self._groups[group_name][variable_name] @@ -953,17 +968,17 @@ def get_value(self, variable_name): This function is similar to `get_variable_value`, except that it does not require a `group_name`, and it requires that the `variable_name` be unique across all groups. - - >>> parse(text='&foo bar=1 / &bazz bar=1 /').get_value('bar') + + >>> parse(text='&foo bar=1 / &bazz bar=1 /').get_value('bar') # doctest: +ELLIPSIS Traceback (most recent call last): ... - SystemExit: ERROR: Namelist.get_value: Variable {} is present in multiple groups: [u'bazz', u'foo'] + SystemExit: ERROR: Namelist.get_value: Variable {} is present in multiple groups: ... >>> parse(text='&foo bar=1 / &bazz /').get_value('Bar') - [u'1'] + ['1'] >>> parse(text='&foo bar(2)=1 / &bazz /').get_value('Bar(2)') - [u'1'] + ['1'] >>> parse(text='&foo / &bazz /').get_value('bar') - [u''] + [''] """ variable_name = variable_name.lower() possible_groups = [group_name for group_name in self._groups @@ -974,32 +989,32 @@ def get_value(self, variable_name): if possible_groups: return self._groups[possible_groups[0]][variable_name] else: - return [u''] + return [''] def set_variable_value(self, group_name, variable_name, value, var_size=1): """Set the value of the specified variable. 
>>> x = parse(text='&foo bar=1 /') >>> x.get_variable_value('foo', 'bar') - [u'1'] - >>> x.set_variable_value('foo', 'bar(2)', [u'3'], var_size=4) + ['1'] + >>> x.set_variable_value('foo', 'bar(2)', ['3'], var_size=4) >>> x.get_variable_value('foo', 'bar') - [u'1', u'3'] - >>> x.set_variable_value('foo', 'bar(1)', [u'2']) + ['1', '3'] + >>> x.set_variable_value('foo', 'bar(1)', ['2']) >>> x.get_variable_value('foo', 'bar') - [u'2', u'3'] - >>> x.set_variable_value('foo', 'bar', [u'1']) + ['2', '3'] + >>> x.set_variable_value('foo', 'bar', ['1']) >>> x.get_variable_value('foo', 'bar') - [u'1', u'3'] - >>> x.set_variable_value('foo', 'bazz', [u'3']) - >>> x.set_variable_value('Brack', 'baR', [u'4']) + ['1', '3'] + >>> x.set_variable_value('foo', 'bazz', ['3']) + >>> x.set_variable_value('Brack', 'baR', ['4']) >>> x.get_variable_value('foo', 'bazz') - [u'3'] + ['3'] >>> x.get_variable_value('brack', 'bar') - [u'4'] - >>> x.set_variable_value('foo', 'red(2:6:2)', [u'2', u'4', u'6'], var_size=12) + ['4'] + >>> x.set_variable_value('foo', 'red(2:6:2)', ['2', '4', '6'], var_size=12) >>> x.get_variable_value('foo', 'red') - ['', u'2', '', u'4', '', u'6'] + ['', '2', '', '4', '', '6'] """ group_name = group_name.lower() @@ -1058,43 +1073,43 @@ def merge_nl(self, other, overwrite=False): >>> x = parse(text='&foo bar=1 bazz=,2 brat=3/') >>> y = parse(text='&foo bar=2 bazz=3*1 baker=4 / &foo2 barter=5 /') >>> y.get_value('bazz') - [u'1', u'1', u'1'] + ['1', '1', '1'] >>> x.merge_nl(y) >>> sorted(x.get_group_names()) - [u'foo', u'foo2'] + ['foo', 'foo2'] >>> sorted(x.get_variable_names('foo')) - [u'baker', u'bar', u'bazz', u'brat'] + ['baker', 'bar', 'bazz', 'brat'] >>> sorted(x.get_variable_names('foo2')) - [u'barter'] + ['barter'] >>> x.get_value('bar') - [u'1'] + ['1'] >>> x.get_value('bazz') - [u'1', u'2', u'1'] + ['1', '2', '1'] >>> x.get_value('brat') - [u'3'] + ['3'] >>> x.get_value('baker') - [u'4'] + ['4'] >>> x.get_value('barter') - [u'5'] + ['5'] >>> x = 
parse(text='&foo bar=1 bazz=,2 brat=3/') >>> y = parse(text='&foo bar=2 bazz=3*1 baker=4 / &foo2 barter=5 /') >>> x.merge_nl(y, overwrite=True) >>> sorted(x.get_group_names()) - [u'foo', u'foo2'] + ['foo', 'foo2'] >>> sorted(x.get_variable_names('foo')) - [u'baker', u'bar', u'bazz', u'brat'] + ['baker', 'bar', 'bazz', 'brat'] >>> sorted(x.get_variable_names('foo2')) - [u'barter'] + ['barter'] >>> x.get_value('bar') - [u'2'] + ['2'] >>> x.get_value('bazz') - [u'1', u'1', u'1'] + ['1', '1', '1'] >>> x.get_value('brat') - [u'3'] + ['3'] >>> x.get_value('baker') - [u'4'] + ['4'] >>> x.get_value('barter') - [u'5'] + ['5'] """ # Pretty simple strategy: go through the entire other namelist, and # merge all values with this one's. @@ -1134,7 +1149,7 @@ def write(self, out_file, groups=None, append=False, format_='nml', sorted_group """ expect(format_ in ('nml', 'rc', 'nmlcontents'), "Namelist.write: unexpected output format {!r}".format(str(format_))) - if isinstance(out_file, str) or isinstance(out_file, unicode): + if isinstance(out_file, six.string_types): logger.debug("Writing namelist to: {}".format(out_file)) flag = 'a' if append else 'w' with open(out_file, flag) as file_obj: @@ -1146,7 +1161,7 @@ def write(self, out_file, groups=None, append=False, format_='nml', sorted_group def _write(self, out_file, groups, format_, sorted_groups): """Unwrapped version of `write` assuming that a file object is input.""" if groups is None: - groups = self._groups.keys() + groups = list(self._groups.keys()) if format_ == 'nml' or format_ == 'nmlcontents': equals = ' =' elif format_ == 'rc': @@ -1249,7 +1264,7 @@ def __init__(self, text, groupless=False): self._line = 1 self._col = 0 # Text and its size. - self._text = unicode(text) + self._text = str(text) self._len = len(self._text) # Dictionary with group names as keys, and dictionaries of variable # name-value pairs as values. 
(Or a single flat dictionary if @@ -1277,10 +1292,8 @@ def _curr(self): def _next(self): """Return the character at the next position. - >>> _NamelistParser(' ')._next() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, _NamelistParser(' ')._next) + """ # If at the end of the file, we should raise _NamelistEOF. The easiest # way to do this is to just advance. @@ -1319,19 +1332,15 @@ def _advance(self, nchars=1, check_eof=False): >>> x._advance(3) >>> (x._pos, x._line, x._col) (7, 3, 1) - >>> x._advance(1) - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. - >>> _NamelistParser('abc\n')._advance(4) - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, x._advance, 1) + + >>> shouldRaise(_NamelistEOF, _NamelistParser('abc\n')._advance, 4) + >>> x = _NamelistParser('ab') >>> x._advance(check_eof=True) False >>> x._curr() - u'b' + 'b' >>> x._advance(check_eof=True) True """ @@ -1365,34 +1374,32 @@ def _eat_whitespace(self, allow_initial_comment=False): >>> x._eat_whitespace() True >>> x._curr() - u'a' + 'a' >>> x._eat_whitespace() False >>> x._advance() - >>> x._eat_whitespace() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, x._eat_whitespace) + >>> x = _NamelistParser(' \n! blah\n ! blah\n a') >>> x._eat_whitespace() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser('! blah\n a') >>> x._eat_whitespace() False >>> x._curr() - u'!' + '!' >>> x = _NamelistParser(' ! blah\n a') >>> x._eat_whitespace() True >>> x._curr() - u'!' + '!' >>> x = _NamelistParser(' ! 
blah\n a') >>> x._eat_whitespace(allow_initial_comment=True) True >>> x._curr() - u'a' + 'a' """ eaten = False comment_allowed = allow_initial_comment @@ -1416,7 +1423,7 @@ def _eat_comment(self): >>> x._eat_comment() True >>> x._curr() - u' ' + ' ' >>> x._eat_comment() False >>> x._eat_whitespace() @@ -1424,17 +1431,13 @@ def _eat_comment(self): >>> x._eat_comment() True >>> x._curr() - u'a' + 'a' >>> x._advance(2) - >>> x._eat_comment() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, x._eat_comment) + >>> x = _NamelistParser('! foo\n') - >>> x._eat_comment() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, x._eat_comment) + """ if self._curr() != '!': return False @@ -1458,10 +1461,8 @@ def _expect_char(self, chars): >>> x = _NamelistParser('ab') >>> x._expect_char('a') >>> x._advance() - >>> x._expect_char('a') - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected 'a' but found 'b' + >>> shouldRaise(_NamelistParseError, x._expect_char, 'a') + >>> x._expect_char('ab') """ if self._curr() not in chars: @@ -1474,30 +1475,20 @@ def _expect_char(self, chars): def _parse_namelist_group_name(self): r"""Parses and returns a namelist group name at the current position. - >>> _NamelistParser('abc')._parse_namelist_group_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected '&' but found 'a' - >>> _NamelistParser('&abc')._parse_namelist_group_name() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. 
+ >>> shouldRaise(_NamelistParseError, _NamelistParser('abc')._parse_namelist_group_name) + + >>> shouldRaise(_NamelistEOF, _NamelistParser('&abc')._parse_namelist_group_name) + >>> _NamelistParser('&abc ')._parse_namelist_group_name() - u'abc' + 'abc' >>> _NamelistParser('&abc\n')._parse_namelist_group_name() - u'abc' - >>> _NamelistParser('&abc/ ')._parse_namelist_group_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: 'abc/' is not a valid variable name - >>> _NamelistParser('&abc= ')._parse_namelist_group_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: 'abc=' is not a valid variable name - >>> _NamelistParser('& ')._parse_namelist_group_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: '' is not a valid variable name + 'abc' + >>> shouldRaise(_NamelistParseError, _NamelistParser('&abc/ ')._parse_namelist_group_name) + + >>> shouldRaise(_NamelistParseError, _NamelistParser('&abc= ')._parse_namelist_group_name) + + >>> shouldRaise(_NamelistParseError, _NamelistParser('& ')._parse_namelist_group_name) + """ self._expect_char("&") self._advance() @@ -1510,40 +1501,41 @@ def _parse_variable_name(self, allow_equals=True): variable name; if it is `False`, only white space can be used for this purpose. - >>> _NamelistParser('abc')._parse_variable_name() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. 
+ >>> shouldRaise(_NamelistEOF, _NamelistParser('abc')._parse_variable_name) + >>> _NamelistParser('foo(2)= ')._parse_variable_name() - u'foo(2)' + 'foo(2)' >>> _NamelistParser('abc ')._parse_variable_name() - u'abc' + 'abc' >>> _NamelistParser('ABC ')._parse_variable_name() - u'abc' + 'abc' >>> _NamelistParser('abc\n')._parse_variable_name() - u'abc' + 'abc' >>> _NamelistParser('abc%fred\n')._parse_variable_name() - u'abc%fred' + 'abc%fred' >>> _NamelistParser('abc(2)@fred\n')._parse_variable_name() - u'abc(2)@fred' + 'abc(2)@fred' >>> _NamelistParser('abc(1:2:3)\n')._parse_variable_name() - u'abc(1:2:3)' + 'abc(1:2:3)' >>> _NamelistParser('abc=')._parse_variable_name() - u'abc' - >>> _NamelistParser('abc(1,2) ')._parse_variable_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: Multiple dimensions not supported in CIME namelist variables 'abc(1,2)' - >>> _NamelistParser('abc, ')._parse_variable_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: 'abc,' is not a valid variable name - >>> _NamelistParser(' ')._parse_variable_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: '' is not a valid variable name + 'abc' + >>> try: + ... _NamelistParser('abc(1,2) ')._parse_variable_name() + ... raise AssertionError("_NamelistParseError not raised") + ... except _NamelistParseError: + ... pass + >>> try: + ... _NamelistParser('abc, ')._parse_variable_name() + ... raise AssertionError("_NamelistParseError not raised") + ... except _NamelistParseError: + ... pass + >>> try: + ... _NamelistParser(' ')._parse_variable_name() + ... raise AssertionError("_NamelistParseError not raised") + ... except _NamelistParseError: + ... 
pass >>> _NamelistParser('foo+= ')._parse_variable_name() - u'foo' + 'foo' """ old_pos = self._pos separators = (' ', '\n', '=', '+') if allow_equals else (' ', '\n') @@ -1581,24 +1573,20 @@ def _parse_character_literal(self): Position on return is the last character of the string; we avoid advancing past that in order to avoid potential EOF errors. - >>> _NamelistParser('"abc')._parse_character_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, _NamelistParser('"abc')._parse_character_literal) + >>> _NamelistParser('"abc" ')._parse_character_literal() - u'"abc"' + '"abc"' >>> _NamelistParser("'abc' ")._parse_character_literal() - u"'abc'" - >>> _NamelistParser("*abc* ")._parse_character_literal() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: *abc* is not a valid character literal + "'abc'" + >>> shouldRaise(_NamelistParseError, _NamelistParser("*abc* ")._parse_character_literal) + >>> _NamelistParser("'abc''def' ")._parse_character_literal() - u"'abc''def'" + "'abc''def'" >>> _NamelistParser("'abc''' ")._parse_character_literal() - u"'abc'''" + "'abc'''" >>> _NamelistParser("'''abc' ")._parse_character_literal() - u"'''abc'" + "'''abc'" """ delimiter = self._curr() old_pos = self._pos @@ -1625,16 +1613,12 @@ def _parse_complex_literal(self): Position on return is the last character of the string; we avoid advancing past that in order to avoid potential EOF errors. - >>> _NamelistParser('(1.,2.')._parse_complex_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, _NamelistParser('(1.,2.')._parse_complex_literal) + >>> _NamelistParser('(1.,2.) ')._parse_complex_literal() - u'(1.,2.)' - >>> _NamelistParser("(A,B) ")._parse_complex_literal() - Traceback (most recent call last): - ... 
- _NamelistParseError: Error in parsing namelist: '(A,B)' is not a valid complex literal + '(1.,2.)' + >>> shouldRaise(_NamelistParseError, _NamelistParser("(A,B) ")._parse_complex_literal) + """ old_pos = self._pos while self._curr() != ')': @@ -1710,97 +1694,81 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): to mark the end of a literal. >>> _NamelistParser('"abc" ')._parse_literal() - u'"abc"' + '"abc"' >>> _NamelistParser("'abc' ")._parse_literal() - u"'abc'" - >>> _NamelistParser('"abc"')._parse_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + "'abc'" + >>> shouldRaise(_NamelistEOF, _NamelistParser('"abc"')._parse_literal) + >>> _NamelistParser('"abc"')._parse_literal(allow_eof_end=True) - u'"abc"' + '"abc"' >>> _NamelistParser('(1.,2.) ')._parse_literal() - u'(1.,2.)' - >>> _NamelistParser('(1.,2.)')._parse_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + '(1.,2.)' + >>> shouldRaise(_NamelistEOF, _NamelistParser('(1.,2.)')._parse_literal) + >>> _NamelistParser('(1.,2.)')._parse_literal(allow_eof_end=True) - u'(1.,2.)' + '(1.,2.)' >>> _NamelistParser('5 ')._parse_literal() - u'5' + '5' >>> _NamelistParser('6.9 ')._parse_literal() - u'6.9' + '6.9' >>> _NamelistParser('inf ')._parse_literal() - u'inf' + 'inf' >>> _NamelistParser('nan(booga) ')._parse_literal() - u'nan(booga)' + 'nan(booga)' >>> _NamelistParser('.FLORIDA$ ')._parse_literal() - u'.FLORIDA$' - >>> _NamelistParser('hamburger ')._parse_literal() - Traceback (most recent call last): - ... 
- _NamelistParseError: Error in parsing namelist: expected literal value, but got 'hamburger' + '.FLORIDA$' + >>> shouldRaise(_NamelistParseError, _NamelistParser('hamburger ')._parse_literal) + >>> _NamelistParser('5,')._parse_literal() - u'5' + '5' >>> _NamelistParser('5\n')._parse_literal() - u'5' + '5' >>> _NamelistParser('5/')._parse_literal() - u'5' + '5' >>> _NamelistParser(',')._parse_literal() - u'' + '' >>> _NamelistParser('6*5 ')._parse_literal() - u'6*5' + '6*5' >>> _NamelistParser('6*(1., 2.) ')._parse_literal() - u'6*(1., 2.)' + '6*(1., 2.)' >>> _NamelistParser('6*"a" ')._parse_literal() - u'6*"a"' - >>> _NamelistParser('6*')._parse_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + '6*"a"' + >>> shouldRaise(_NamelistEOF, _NamelistParser('6*')._parse_literal) + >>> _NamelistParser('6*')._parse_literal(allow_eof_end=True) - u'6*' - >>> _NamelistParser('foo= ')._parse_literal() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected literal value, but got 'foo=' - >>> _NamelistParser('foo+= ')._parse_literal() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected literal value, but got 'foo+=' + '6*' + >>> shouldRaise(_NamelistParseError, _NamelistParser('foo= ')._parse_literal) + + >>> shouldRaise(_NamelistParseError, _NamelistParser('foo+= ')._parse_literal) + >>> _NamelistParser('5,')._parse_literal(allow_name=True) - u'5' + '5' >>> x = _NamelistParser('foo= ') >>> x._parse_literal(allow_name=True) >>> x._curr() - u'f' + 'f' >>> x = _NamelistParser('foo+= ') >>> x._parse_literal(allow_name=True) >>> x._curr() - u'f' - >>> _NamelistParser('6*foo= ')._parse_literal(allow_name=True) - Traceback (most recent call last): - ... 
- _NamelistParseError: Error in parsing namelist: expected literal value, but got '6*foo=' - >>> _NamelistParser('6*foo+= ')._parse_literal(allow_name=True) - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected literal value, but got '6*foo+=' + 'f' + >>> shouldRaise(_NamelistParseError, _NamelistParser('6*foo= ')._parse_literal, allow_name=True) + + >>> shouldRaise(_NamelistParseError, _NamelistParser('6*foo+= ')._parse_literal, allow_name=True) + >>> x = _NamelistParser('foo = ') >>> x._parse_literal(allow_name=True) >>> x._curr() - u'f' + 'f' >>> x = _NamelistParser('foo\n= ') >>> x._parse_literal(allow_name=True) >>> x._curr() - u'f' + 'f' >>> _NamelistParser('')._parse_literal(allow_eof_end=True) - u'' + '' """ # Deal with empty input string. if allow_eof_end and self._pos == self._len: - return u'' + return '' # Deal with a repeated value prefix. old_pos = self._pos if FORTRAN_REPEAT_PREFIX_REGEX.search(self._text[self._pos:]): @@ -1871,47 +1839,45 @@ def _expect_separator(self, allow_eof=False): >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser(" a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser(",a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser("/a") >>> x._expect_separator() False >>> x._curr() - u'/' + '/' >>> x = _NamelistParser("a") - >>> x._expect_separator() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected one of the characters in ' \n,/' but found 'a' + >>> shouldRaise(_NamelistParseError, x._expect_separator) + >>> x = _NamelistParser(" , a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser(" / a") >>> x._expect_separator() False >>> x._curr() - u'/' + '/' >>> x = _NamelistParser(" , ! Some stuff\n a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser(" , ! Some stuff\n ! 
Other stuff\n a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> _NamelistParser("")._expect_separator(allow_eof=True) False >>> x = _NamelistParser(" ") @@ -1921,10 +1887,8 @@ def _expect_separator(self, allow_eof=False): >>> x._expect_separator(allow_eof=True) True >>> x = _NamelistParser(" / ") - >>> x._expect_separator(allow_eof=True) - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: found group-terminating '/' in file without group names + >>> shouldRaise(_NamelistParseError, x._expect_separator, allow_eof=True) + """ errstring = "found group-terminating '/' in file without group names" # Deal with the possibility that we are already at EOF. @@ -1966,53 +1930,47 @@ def _parse_name_and_values(self, allow_eof_end=False): alternate file format in "groupless" mode.) >>> _NamelistParser("foo='bar' /")._parse_name_and_values() - (u'foo', [u"'bar'"], False) + ('foo', ["'bar'"], False) >>> _NamelistParser("foo(3)='bar' /")._parse_name_and_values() - (u'foo(3)', [u"'bar'"], False) + ('foo(3)', ["'bar'"], False) >>> _NamelistParser("foo ='bar' /")._parse_name_and_values() - (u'foo', [u"'bar'"], False) + ('foo', ["'bar'"], False) >>> _NamelistParser("foo=\n'bar' /")._parse_name_and_values() - (u'foo', [u"'bar'"], False) - >>> _NamelistParser("foo 'bar' /")._parse_name_and_values() - Traceback (most recent call last): - ... 
- _NamelistParseError: Error in parsing namelist: expected '=' but found "'" + ('foo', ["'bar'"], False) + >>> shouldRaise(_NamelistParseError, _NamelistParser("foo 'bar' /")._parse_name_and_values) + >>> _NamelistParser("foo='bar','bazz' /")._parse_name_and_values() - (u'foo', [u"'bar'", u"'bazz'"], False) + ('foo', ["'bar'", "'bazz'"], False) >>> _NamelistParser("foo=,,'bazz',6*/")._parse_name_and_values() - (u'foo', [u'', u'', u"'bazz'", u'6*'], False) + ('foo', ['', '', "'bazz'", '6*'], False) >>> _NamelistParser("foo='bar' 'bazz' foo2='ban'")._parse_name_and_values() - (u'foo', [u"'bar'", u"'bazz'"], False) + ('foo', ["'bar'", "'bazz'"], False) >>> _NamelistParser("foo='bar' 'bazz' foo2(2)='ban'")._parse_name_and_values() - (u'foo', [u"'bar'", u"'bazz'"], False) - >>> _NamelistParser("foo= foo2='ban' ")._parse_name_and_values() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected literal value, but got "foo2='ban'" + ('foo', ["'bar'", "'bazz'"], False) + >>> shouldRaise(_NamelistParseError, _NamelistParser("foo= foo2='ban' ")._parse_name_and_values) + >>> _NamelistParser("foo=,,'bazz',6* ")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'', u'', u"'bazz'", u'6*'], False) + ('foo', ['', '', "'bazz'", '6*'], False) >>> _NamelistParser("foo(3)='bazz'")._parse_name_and_values(allow_eof_end=True) - (u'foo(3)', [u"'bazz'"], False) - >>> _NamelistParser("foo=")._parse_name_and_values() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. 
+ ('foo(3)', ["'bazz'"], False) + >>> shouldRaise(_NamelistEOF, _NamelistParser("foo=")._parse_name_and_values) + >>> _NamelistParser("foo=")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u''], False) + ('foo', [''], False) >>> _NamelistParser("foo= ")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u''], False) + ('foo', [''], False) >>> _NamelistParser("foo=2")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'2'], False) + ('foo', ['2'], False) >>> _NamelistParser("foo=1,2")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'1', u'2'], False) + ('foo', ['1', '2'], False) >>> _NamelistParser("foo(1:2)=1,2,3 ")._parse_name_and_values(allow_eof_end=True) Traceback (most recent call last): ... SystemExit: ERROR: Too many values for array foo(1:2) >>> _NamelistParser("foo=1,")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'1', u''], False) + ('foo', ['1', ''], False) >>> _NamelistParser("foo+=1")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'1'], True) + ('foo', ['1'], True) """ name = self._parse_variable_name() addto = False # This keeps track of whether += existed @@ -2029,7 +1987,7 @@ def _parse_name_and_values(self, allow_eof_end=False): except _NamelistEOF: # If we hit the end of file, return a name assigned to a null value. if allow_eof_end: - return name, [u''], addto + return name, [''], addto else: raise # Expect at least one literal, even if it's a null value. 
@@ -2059,54 +2017,52 @@ def _parse_namelist_group(self): >>> x = _NamelistParser("&group /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {})]) + OrderedDict([('group', {})]) >>> x._curr() - u'/' + '/' >>> x = _NamelistParser("&group\n foo='bar','bazz'\n,, foo2=2*5\n /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bar'", u"'bazz'", u''], u'foo2': [u'5', u'5']})]) + OrderedDict([('group', {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['5', '5']})]) >>> x = _NamelistParser("&group\n foo='bar','bazz'\n,, foo2=2*5\n /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u'']), (u'foo2', [u'5', u'5'])]) + OrderedDict([('foo', ["'bar'", "'bazz'", '']), ('foo2', ['5', '5'])]) >>> x._curr() - u'/' + '/' >>> x = _NamelistParser("&group /&group /") >>> x._parse_namelist_group() >>> x._advance() - >>> x._parse_namelist_group() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: Namelist group 'group' encountered twice. 
+ >>> shouldRaise(_NamelistParseError, x._parse_namelist_group) + >>> x = _NamelistParser("&group foo='bar', foo='bazz' /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bazz'"]})]) + OrderedDict([('group', {'foo': ["'bazz'"]})]) >>> x = _NamelistParser("&group foo='bar', foo= /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bar'"]})]) + OrderedDict([('group', {'foo': ["'bar'"]})]) >>> x = _NamelistParser("&group foo='bar', foo= /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) >>> x = _NamelistParser("&group foo='bar', foo+='baz' /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'foo', [u"'bar'", u"'baz'"])]) + OrderedDict([('foo', ["'bar'", "'baz'"])]) >>> x = _NamelistParser("&group foo+='bar' /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) >>> x = _NamelistParser("&group foo='bar', foo+='baz' /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bar'", u"'baz'"]})]) + OrderedDict([('group', {'foo': ["'bar'", "'baz'"]})]) >>> x = _NamelistParser("&group foo+='bar' /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bar'"]})]) + OrderedDict([('group', {'foo': ["'bar'"]})]) """ group_name = self._parse_namelist_group_name() if not self._groupless: @@ -2147,37 +2103,37 @@ def parse_namelist(self): >>> _NamelistParser(" \n!Comment").parse_namelist() OrderedDict() >>> _NamelistParser(" &group /").parse_namelist() - OrderedDict([(u'group', {})]) + OrderedDict([('group', {})]) >>> _NamelistParser("! Comment \n &group /! Comment\n ").parse_namelist() - OrderedDict([(u'group', {})]) + OrderedDict([('group', {})]) >>> _NamelistParser("! Comment \n &group /! 
Comment ").parse_namelist() - OrderedDict([(u'group', {})]) + OrderedDict([('group', {})]) >>> _NamelistParser("&group1\n foo='bar','bazz'\n,, foo2=2*5\n / &group2 /").parse_namelist() - OrderedDict([(u'group1', {u'foo': [u"'bar'", u"'bazz'", u''], u'foo2': [u'5', u'5']}), (u'group2', {})]) + OrderedDict([('group1', {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['5', '5']}), ('group2', {})]) >>> _NamelistParser("!blah \n foo='bar','bazz'\n,, foo2=2*5\n ", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u'']), (u'foo2', [u'2*5'])]) + OrderedDict([('foo', ["'bar'", "'bazz'", '']), ('foo2', ['2*5'])]) >>> _NamelistParser("!blah \n foo='bar','bazz'\n,, foo2=2*5,6\n ", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u'']), (u'foo2', [u'2*5', u'6'])]) + OrderedDict([('foo', ["'bar'", "'bazz'", '']), ('foo2', ['2*5', '6'])]) >>> _NamelistParser("!blah \n foo='bar'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) >>> _NamelistParser("foo='bar', foo(3)='bazz'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'"]), (u'foo(3)', [u"'bazz'"])]) + OrderedDict([('foo', ["'bar'"]), ('foo(3)', ["'bazz'"])]) >>> _NamelistParser("foo(2)='bar'", groupless=True).parse_namelist() - OrderedDict([(u'foo(2)', [u"'bar'"])]) + OrderedDict([('foo(2)', ["'bar'"])]) >>> _NamelistParser("foo(2)='bar', foo(3)='bazz'", groupless=True).parse_namelist() - OrderedDict([(u'foo(2)', [u"'bar'"]), (u'foo(3)', [u"'bazz'"])]) + OrderedDict([('foo(2)', ["'bar'"]), ('foo(3)', ["'bazz'"])]) >>> _NamelistParser("foo='bar', foo='bazz'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bazz'"])]) + OrderedDict([('foo', ["'bazz'"])]) >>> _NamelistParser("foo='bar'\n foo+='bazz'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'", u"'bazz'"])]) + OrderedDict([('foo', ["'bar'", "'bazz'"])]) >>> _NamelistParser("foo='bar', foo='bazz'", 
groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bazz'"])]) + OrderedDict([('foo', ["'bazz'"])]) >>> _NamelistParser("foo='bar', foo=", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) >>> _NamelistParser("foo='bar', 'bazz'\n foo+='ban'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u"'ban'"])]) + OrderedDict([('foo', ["'bar'", "'bazz'", "'ban'"])]) >>> _NamelistParser("foo+='bar'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) """ # Return empty dictionary for empty files. if self._len == 0: diff --git a/scripts/lib/CIME/nmlgen.py b/scripts/lib/CIME/nmlgen.py index 0f698f1255b..fe2bc304bff 100644 --- a/scripts/lib/CIME/nmlgen.py +++ b/scripts/lib/CIME/nmlgen.py @@ -329,7 +329,7 @@ def _sub_fields(self, varnames): if self._case.get_value('GLC_NEC') == 0: glc_nec_indices = [0] else: - glc_nec_indices = range(self._case.get_value('GLC_NEC')) + glc_nec_indices = list(range(self._case.get_value('GLC_NEC'))) glc_nec_indices.append(glc_nec_indices[-1] + 1) glc_nec_indices.pop(0) for i in glc_nec_indices: @@ -570,7 +570,7 @@ def add_default(self, name, value=None, ignore_abs_path=None): continue file_path = self.set_abs_file_path(file_path) if not os.path.exists(file_path): - logger.warn("File not found: {} = {}, will attempt to download in check_input_data phase".format(name, literal)) + logger.warning("File not found: {} = {}, will attempt to download in check_input_data phase".format(name, literal)) current_literals[i] = string_to_character_literal(file_path) current_literals = compress_literal_list(current_literals) diff --git a/scripts/lib/CIME/preview_namelists.py b/scripts/lib/CIME/preview_namelists.py index fff00b22145..8706594d2ec 100644 --- a/scripts/lib/CIME/preview_namelists.py +++ b/scripts/lib/CIME/preview_namelists.py @@ -80,7 +80,7 @@ def create_namelists(case, component=None): # first look in 
the case SourceMods directory cmd = os.path.join(caseroot, "SourceMods", "src."+compname, "buildnml") if os.path.isfile(cmd): - logger.warn("\nWARNING: Using local buildnml file {}\n".format(cmd)) + logger.warning("\nWARNING: Using local buildnml file {}\n".format(cmd)) else: # otherwise look in the component config_dir cmd = os.path.join(config_dir, "buildnml") diff --git a/scripts/lib/CIME/simple_compare.py b/scripts/lib/CIME/simple_compare.py index 086297b4d1b..5d86b175764 100644 --- a/scripts/lib/CIME/simple_compare.py +++ b/scripts/lib/CIME/simple_compare.py @@ -65,7 +65,7 @@ def _compare_data(gold_lines, comp_lines, case): ... data00 ... ''' >>> results = _compare_data(teststr.splitlines(), teststr2.splitlines(), None) - >>> print results + >>> print(results) Inequivalent lines data2 data3 != data2 data30 NORMALIZED: data2 data3 != data2 data30 Found extra lines diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index 6a5e0c8f04c..c3f7aa87e6e 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -249,7 +249,7 @@ def __init__(self, test_names, test_data=None, ########################################################################### def get_testnames(self): - return self._tests.keys() + return list(self._tests.keys()) ########################################################################### def _log_output(self, test, output): @@ -547,7 +547,7 @@ def _xml_phase(self, test): # handled in create_newcase continue elif opt.startswith('IOP'): - logger.warn("IOP test option not yet implemented") + logger.warning("IOP test option not yet implemented") else: expect(False, "Could not parse option '{}' ".format(opt)) @@ -660,7 +660,7 @@ def _wait_for_something_to_finish(self, threads_in_flight): expect(len(threads_in_flight) <= self._parallel_jobs, "Oversubscribed?") finished_tests = [] while not finished_tests: - for test, thread_info in threads_in_flight.iteritems(): + for test, thread_info in 
threads_in_flight.items(): if not thread_info[0].is_alive(): finished_tests.append((test, thread_info[1])) diff --git a/scripts/lib/CIME/test_status.py b/scripts/lib/CIME/test_status.py index d560804dc11..9bbb4a9c397 100644 --- a/scripts/lib/CIME/test_status.py +++ b/scripts/lib/CIME/test_status.py @@ -132,7 +132,7 @@ def __exit__(self, *_): self.flush() def __iter__(self): - for phase, data in self._phase_statuses.iteritems(): + for phase, data in self._phase_statuses.items(): yield phase, data[0] def get_name(self): @@ -215,7 +215,7 @@ def phase_statuses_dump(self, prefix=''): """ result = "" if self._phase_statuses: - for phase, data in self._phase_statuses.iteritems(): + for phase, data in self._phase_statuses.items(): status, comments = data if not comments: result += "{}{} {} {}\n".format(prefix, status, self._test_name, phase) @@ -314,7 +314,7 @@ def get_overall_test_status(self, wait_for_run=False, check_throughput=False, ch """ rv = TEST_PASS_STATUS run_phase_found = False - for phase, data in self._phase_statuses.iteritems(): + for phase, data in self._phase_statuses.items(): status = data[0] if phase == RUN_PHASE: run_phase_found = True diff --git a/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py b/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py index 67646109673..9164530e3f0 100644 --- a/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py +++ b/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py @@ -5,6 +5,7 @@ import shutil import tempfile from CIME.SystemTests.test_utils import user_nl_utils +import six class TestUserNLCopier(unittest.TestCase): @@ -108,7 +109,7 @@ def test_append_without_files_raises_exception(self): self.write_user_nl_file(component_exists, 'irrelevant contents') # Exercise & verify - self.assertRaisesRegexp(RuntimeError, "No user_nl files found", + six.assertRaisesRegex(self, RuntimeError, "No user_nl files found", user_nl_utils.append_to_user_nl_files, 
caseroot = self._caseroot, component = component_for_append, diff --git a/scripts/lib/CIME/tests/test_user_mod_support.py b/scripts/lib/CIME/tests/test_user_mod_support.py index a1bd9b3048a..75bcf33f03f 100644 --- a/scripts/lib/CIME/tests/test_user_mod_support.py +++ b/scripts/lib/CIME/tests/test_user_mod_support.py @@ -5,7 +5,7 @@ import tempfile import os from CIME.user_mod_support import apply_user_mods - +import six # ======================================================================== # Define some parameters # ======================================================================== @@ -114,7 +114,7 @@ def test_basic(self): def test_keepexe(self): self.createUserMod("foo") - with self.assertRaisesRegexp(SystemExit, "cannot have any source mods"): + with six.assertRaisesRegex(self, SystemExit, "cannot have any source mods"): apply_user_mods(self._caseroot, os.path.join(self._user_mods_parent_dir, "foo"), keepexe=True) diff --git a/scripts/lib/CIME/user_mod_support.py b/scripts/lib/CIME/user_mod_support.py index 6c3ef34071b..91bd3d65536 100644 --- a/scripts/lib/CIME/user_mod_support.py +++ b/scripts/lib/CIME/user_mod_support.py @@ -62,7 +62,7 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): # We overwrite any existing SourceMods file so that later # include_dirs take precedence over earlier ones if os.path.isfile(case_source_mods): - logger.warn("WARNING: Overwriting existing SourceMods in {}".format(case_source_mods)) + logger.warning("WARNING: Overwriting existing SourceMods in {}".format(case_source_mods)) else: logger.info("Adding SourceMod to case {}".format(case_source_mods)) try: @@ -81,7 +81,7 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): # Note that use of xmlchange_cmnds has been deprecated and will soon # be removed altogether, so new tests should rely on shell_commands if shell_commands_file.endswith("xmlchange_cmnds"): - logger.warn("xmlchange_cmnds is deprecated and will be removed " +\ + 
logger.warning("xmlchange_cmnds is deprecated and will be removed " +\ "in a future release; please rename {} shell_commands".format(shell_commands_file)) with open(shell_commands_file,"r") as fd: new_shell_commands = fd.read().replace("xmlchange","xmlchange --force") @@ -92,7 +92,7 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): for shell_command_file in case_shell_command_files: if os.path.isfile(shell_command_file): - os.chmod(shell_command_file, 0777) + os.chmod(shell_command_file, 0o777) run_cmd_no_fail(shell_command_file) @@ -130,6 +130,6 @@ def build_include_dirs_list(user_mods_path, include_dirs=None): if os.path.isabs(newpath): build_include_dirs_list(newpath, include_dirs) else: - logger.warn("Could not resolve path '{}' in file '{}'".format(newpath, include_file)) + logger.warning("Could not resolve path '{}' in file '{}'".format(newpath, include_file)) return include_dirs diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index 6fbef55c61b..f703b9740ef 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -2,10 +2,13 @@ Common functions used by cime python scripts Warning: you cannot use CIME Classes in this module as it causes circular dependencies """ -import logging, gzip, sys, os, time, re, shutil, glob, string, random, imp, errno, signal +import io, logging, gzip, sys, os, time, re, shutil, glob, string, random, imp, errno, signal import stat as statlib import warnings +import six from contextlib import contextmanager +#pylint: disable=import-error +from six.moves import configparser # Return this error code if the scripts worked but tests failed TESTS_FAILED_ERR_CODE = 100 @@ -74,7 +77,7 @@ def check_name(fullname, additional_chars=None, fullpath=False): name = fullname match = re.search(r"["+re.escape(chars)+"]", name) if match is not None: - logger.warn("Illegal character {} found in name {}".format(match.group(0), name)) + logger.warning("Illegal character {} found in name 
{}".format(match.group(0), name)) return False return True @@ -86,11 +89,10 @@ def _read_cime_config_file(): CIME_MODEL=acme,cesm PROJECT=someprojectnumber """ - from ConfigParser import SafeConfigParser as config_parser cime_config_file = os.path.abspath(os.path.join(os.path.expanduser("~"), ".cime","config")) - cime_config = config_parser() + cime_config = configparser.SafeConfigParser() if(os.path.isfile(cime_config_file)): cime_config.read(cime_config_file) else: @@ -247,12 +249,12 @@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, # Real defaults for these value should be subprocess.PIPE if arg_stdout is _hack: arg_stdout = subprocess.PIPE - elif isinstance(arg_stdout, str): + elif isinstance(arg_stdout, six.string_types): arg_stdout = _convert_to_fd(arg_stdout, from_dir) if arg_stderr is _hack: arg_stderr = subprocess.STDOUT if combine_output else subprocess.PIPE - elif isinstance(arg_stderr, str): + elif isinstance(arg_stderr, six.string_types): arg_stderr = _convert_to_fd(arg_stdout, from_dir) if (verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG))): @@ -272,15 +274,29 @@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, env=env) output, errput = proc.communicate(input_str) - output = output.strip() if output is not None else output - errput = errput.strip() if errput is not None else errput - stat = proc.wait() - - if isinstance(arg_stdout, file): - arg_stdout.close() # pylint: disable=no-member + if output is not None: + try: + output = output.decode('utf-8').strip() + except AttributeError: + pass + if errput is not None: + try: + errput = errput.decode('utf-8').strip() + except AttributeError: + pass - if isinstance(arg_stderr, file) and arg_stderr is not arg_stdout: - arg_stderr.close() # pylint: disable=no-member + stat = proc.wait() + if six.PY2: + if isinstance(arg_stdout, file): # pylint: disable=undefined-variable + arg_stdout.close() # pylint: disable=no-member + if isinstance(arg_stderr, file) and 
arg_stderr is not arg_stdout: # pylint: disable=undefined-variable + arg_stderr.close() # pylint: disable=no-member + else: + if isinstance(arg_stdout, io.IOBase): + arg_stdout.close() # pylint: disable=no-member + if isinstance(arg_stderr, io.IOBase) and arg_stderr is not arg_stdout: + arg_stderr.close() # pylint: disable=no-member + if (verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG))): if stat != 0: @@ -298,19 +314,17 @@ def run_cmd_no_fail(cmd, input_str=None, from_dir=None, verbose=None, Wrapper around subprocess to make it much more convenient to run shell commands. Expects command to work. Just returns output string. - >>> run_cmd_no_fail('echo foo') - 'foo' - + >>> run_cmd_no_fail('echo foo') == 'foo' + True >>> run_cmd_no_fail('echo THE ERROR >&2; false') # doctest:+ELLIPSIS Traceback (most recent call last): ... SystemExit: ERROR: Command: 'echo THE ERROR >&2; false' failed with error 'THE ERROR' from dir ... - >>> run_cmd_no_fail('grep foo', input_str='foo') - 'foo' - - >>> run_cmd_no_fail('echo THE ERROR >&2', combine_output=True) - 'THE ERROR' + >>> run_cmd_no_fail('grep foo', input_str=b'foo') == 'foo' + True + >>> run_cmd_no_fail('echo THE ERROR >&2', combine_output=True) == 'THE ERROR' + True """ stat, output, errput = run_cmd(cmd, input_str, from_dir, verbose, arg_stdout, arg_stderr, env, combine_output) if stat != 0: @@ -328,7 +342,8 @@ def check_minimum_python_version(major, minor): >>> check_minimum_python_version(sys.version_info[0], sys.version_info[1]) >>> """ - expect(sys.version_info[0] == major and sys.version_info[1] >= minor, + expect(sys.version_info[0] > major or + (sys.version_info[0] == major and sys.version_info[1] >= minor), "Python {:d}, minor version {:d}+ is required, you have {:d}.{:d}".format(major, minor, sys.version_info[0], sys.version_info[1])) def normalize_case_id(case_id): @@ -856,9 +871,9 @@ def convert_to_string(value, type_str=None, vid=""): """ if value is not None and type(value) is not str: if 
type_str == "char": - expect(type(value) is str, "Wrong type for entry id '{}'".format(vid)) + expect(isinstance(value, six.string_types), "Wrong type for entry id '{}'".format(vid)) elif type_str == "integer": - expect(type(value) is int, "Wrong type for entry id '{}'".format(vid)) + expect(isinstance(value, six.integer_types), "Wrong type for entry id '{}'".format(vid)) value = str(value) elif type_str == "logical": expect(type(value) is bool, "Wrong type for entry id '{}'".format(vid)) @@ -900,9 +915,9 @@ def convert_to_babylonian_time(seconds): >>> convert_to_babylonian_time(3661) '01:01:01' """ - hours = seconds / 3600 + hours = int(seconds / 3600) seconds %= 3600 - minutes = seconds / 60 + minutes = int(seconds / 60) seconds %= 60 return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds) @@ -944,7 +959,7 @@ def compute_total_time(job_cost_map, proc_pool): running_jobs = {} # name -> (procs, est-time, start-time) while len(waiting_jobs) > 0 or len(running_jobs) > 0: launched_jobs = [] - for jobname, data in waiting_jobs.iteritems(): + for jobname, data in waiting_jobs.items(): procs_for_job, time_for_job = data if procs_for_job <= proc_pool: proc_pool -= procs_for_job @@ -955,7 +970,7 @@ def compute_total_time(job_cost_map, proc_pool): del waiting_jobs[launched_job] completed_jobs = [] - for jobname, data in running_jobs.iteritems(): + for jobname, data in running_jobs.items(): procs_for_job, time_for_job, time_started = data if (current_time - time_started) >= time_for_job: proc_pool += procs_for_job @@ -1113,7 +1128,7 @@ def transform_vars(text, case=None, subgroup=None, overrides=None, default=None) if "-q {{ queue }}" in text: text = "" else: - logger.warn("Could not replace variable '{}'".format(variable)) + logger.warning("Could not replace variable '{}'".format(variable)) text = text.replace(whole_match, "") return text @@ -1147,11 +1162,11 @@ def gzip_existing_file(filepath): >>> import tempfile >>> fd, filename = tempfile.mkstemp(text=True) - >>> 
_ = os.write(fd, "Hello World") + >>> _ = os.write(fd, b"Hello World") >>> os.close(fd) >>> gzfile = gzip_existing_file(filename) - >>> gunzip_existing_file(gzfile) - 'Hello World' + >>> gunzip_existing_file(gzfile) == b'Hello World' + True >>> os.remove(gzfile) """ expect(os.path.exists(filepath), "{} does not exists".format(filepath)) @@ -1272,19 +1287,23 @@ def analyze_build_log(comp, log, compiler): if re.search(warn_re, line): warncnt += 1 if re.search(error_re, line): - logger.warn(line) + logger.warning(line) if re.search(undefined_re, line): - logger.warn(line) + logger.warning(line) if warncnt > 0: logger.info("Component {} build complete with {} warnings".format(comp, warncnt)) def is_python_executable(filepath): + first_line = None if os.path.isfile(filepath): - with open(filepath, "r") as f: - first_line = f.readline() - - return first_line.startswith("#!") and "python" in first_line + with open(filepath, "rt") as f: + try: + first_line = f.readline() + except: + pass + + return first_line is not None and first_line.startswith("#!") and "python" in first_line return False def get_umask(): diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index a9b1d5ca6ef..5000bff60df 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -1,4 +1,6 @@ -import os, time, threading, Queue, socket, signal, distutils.spawn, shutil, glob +#pylint: disable=import-error +from six.moves import queue +import os, time, threading, socket, signal, distutils.spawn, shutil, glob import logging import xml.etree.ElementTree as xmlet @@ -7,6 +9,7 @@ from CIME.XML.machines import Machines from CIME.test_status import * + SIGNAL_RECEIVED = False ACME_MAIN_CDASH = "ACME_Climate" CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest" @@ -146,7 +149,7 @@ def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_ti need_to_upload = False - for test_name, test_data in results.iteritems(): + for test_name, test_data 
in results.items(): test_path, test_status = test_data if (test_status not in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS]): @@ -299,7 +302,7 @@ def wait_for_test(test_path, results, wait, check_throughput, check_memory, igno ############################################################################### def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False): ############################################################################### - results = Queue.Queue() + results = queue.Queue() for test_path in test_paths: t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak)) @@ -348,7 +351,7 @@ def wait_for_tests(test_paths, test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak) all_pass = True - for test_name, test_data in sorted(test_results.iteritems()): + for test_name, test_data in sorted(test_results.items()): test_path, test_status = test_data logging.info("Test '{}' finished with status '{}'".format(test_name, test_status)) logging.info(" Path: {}".format(test_path)) diff --git a/scripts/lib/jenkins_generic_job.py b/scripts/lib/jenkins_generic_job.py index c575cd74669..089bb7801b4 100644 --- a/scripts/lib/jenkins_generic_job.py +++ b/scripts/lib/jenkins_generic_job.py @@ -15,7 +15,7 @@ def cleanup_queue(test_root, test_id): with Case(case_dir, read_only=True) as case: jobmap = case.get_job_info() jobkills = [] - for jobname, jobid in jobmap.iteritems(): + for jobname, jobid in jobmap.items(): logging.warning("Found leftover batch job {} ({}) that need to be deleted".format(jobid, jobname)) jobkills.append(jobid) diff --git a/scripts/lib/six.py b/scripts/lib/six.py new file mode 100644 index 00000000000..a0297d7113d --- /dev/null +++ b/scripts/lib/six.py @@ -0,0 +1,890 @@ +# Copyright (c) 2010-2017 Benjamin Peterson +# +# 
Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Utilities for writing code that runs on Python 2 and 3""" + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.11.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
+ class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + 
+ def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", 
"copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", 
"tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", 
"urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes 
= [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + 
MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + 
".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def 
create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = 
operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def 
raise_from(value, from_value): + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + 
fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(type): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + +# Complete the moves implementation. 
+# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/scripts/lib/update_acme_tests.py b/scripts/lib/update_acme_tests.py index 608c00aa422..c4419540e80 100644 --- a/scripts/lib/update_acme_tests.py +++ b/scripts/lib/update_acme_tests.py @@ -1,6 +1,7 @@ import CIME.utils from CIME.utils import expect, convert_to_seconds, parse_test_name from CIME.XML.machines import Machines +import six # Here are the tests belonging to acme suites. Format is # ... 
@@ -183,27 +184,27 @@ def get_test_suite(suite, machine=None, compiler=None): tests = [] for item in tests_raw: test_mod = None - if (isinstance(item, str)): + if (isinstance(item, six.string_types)): test_name = item else: expect(isinstance(item, tuple), "Bad item type for item '{}'".format(str(item))) expect(len(item) in [2, 3], "Expected two or three items in item '{}'".format(str(item))) - expect(isinstance(item[0], str), "Expected string in first field of item '{}'".format(str(item))) - expect(isinstance(item[1], str), "Expected string in second field of item '{}'".format(str(item))) + expect(isinstance(item[0], six.string_types), "Expected string in first field of item '{}'".format(str(item))) + expect(isinstance(item[1], six.string_types), "Expected string in second field of item '{}'".format(str(item))) test_name = item[0] if (len(item) == 2): test_mod = item[1] else: - expect(type(item[2]) in [str, tuple], "Expected string or tuple for third field of item '{}'".format(str(item))) - test_mod_machines = [item[2]] if isinstance(item[2], str) else item[2] + expect(isinstance(item[2], six.string_types + (tuple,)), "Expected string or tuple for third field of item '{}'".format(str(item))) + test_mod_machines = [item[2]] if isinstance(item[2], six.string_types) else item[2] if (machine in test_mod_machines): test_mod = item[1] tests.append(CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler, testmod=test_mod)) if (inherits_from is not None): - inherits_from = [inherits_from] if isinstance(inherits_from, str) else inherits_from + inherits_from = [inherits_from] if isinstance(inherits_from, six.string_types) else inherits_from for inherits in inherits_from: inherited_tests = get_test_suite(inherits, machine, compiler) @@ -216,7 +217,7 @@ def get_test_suite(suite, machine=None, compiler=None): ############################################################################### def get_test_suites(): 
############################################################################### - return _TEST_SUITES.keys() + return list(_TEST_SUITES.keys()) ############################################################################### def infer_machine_name_from_tests(testargs): @@ -322,14 +323,14 @@ def get_recommended_test_time(test_full_name): _, rec_time, tests_raw = _TEST_SUITES[suite] for item in tests_raw: test_mod = None - if (isinstance(item, str)): + if (isinstance(item, six.string_types)): test_name = item else: test_name = item[0] if (len(item) == 2): test_mod = item[1] else: - test_mod_machines = [item[2]] if isinstance(item[2], str) else item[2] + test_mod_machines = [item[2]] if isinstance(item[2], six.string_types) else item[2] if (machine in test_mod_machines): test_mod = item[1] @@ -355,11 +356,13 @@ def sort_by_time(test_one, test_two): """ rec1, rec2 = get_recommended_test_time(test_one), get_recommended_test_time(test_two) if rec1 == rec2: - return cmp(test_one, test_two) + return (test_one > test_two) - (test_one < test_two) else: if rec2 is None: return -1 elif rec1 is None: return 1 else: - return cmp(convert_to_seconds(rec2), convert_to_seconds(rec1)) + a = convert_to_seconds(rec2) + b = convert_to_seconds(rec1) + return (a > b) - (a < b) diff --git a/scripts/manage_pes b/scripts/manage_pes index da00c465c56..c366fe0d550 100755 --- a/scripts/manage_pes +++ b/scripts/manage_pes @@ -250,7 +250,7 @@ class ManagePes(object): self.print_gridnodes([match]) logger.info(" The new values would be") self.print_gridnodes([newmatch]) - override = raw_input(" Do you want to override the match with" + override = input(" Do you want to override the match with" " your pe-layout [yes/no] (default is no)\n") if override.lower() != "y" and override.lower() != "yes": logger.info("Nothing done.") diff --git a/scripts/query_config b/scripts/query_config index 4b2fb3a6094..889c06e6d9c 100755 --- a/scripts/query_config +++ b/scripts/query_config @@ -92,7 +92,7 @@ def 
print_compset(name, files, all_components=False): elif config_file is None or not os.path.isfile(config_file): return - print "\nActive component: {}".format(name) + print("\nActive component: {}".format(name)) # Now parse the compsets file and write out the compset alias and longname as well as the help text # determine component xml content compsets = Compsets(config_file) @@ -144,7 +144,7 @@ def query_component(name, all_components=False): expect(config_exists, "Cannot find config_file {} on disk".format(config_file)) elif all_components and not config_exists: - print "WARNING: Couldn't find config_file {} on disk".format(config_file) + print("WARNING: Couldn't find config_file {} on disk".format(config_file)) return # If name is not a valid argument - exit with error expect(match_found, @@ -285,10 +285,10 @@ class Machines(CIME.XML.machines.Machines): if single_machine and machine_name is None: files = Files() config_file = files.get_value("MACHINES_SPEC_FILE") - print "Machine is not listed in config file: {}".format(config_file) + print("Machine is not listed in config file: {}".format(config_file)) else: # write out machines machines = self.get_nodes(nodename="machine") - print "Machine(s)" + print("Machine(s)") for machine in machines: name = machine.get("MACH") desc = machine.find("DESC") @@ -300,21 +300,21 @@ class Machines(CIME.XML.machines.Machines): current_machine = self.probe_machine_name(warn=False) if not single_machine: name += " (current)" if current_machine and current_machine in name else "" - print " {} : {} ".format(name, desc.text) - print " os ", os_.text - print " compilers ",compilers.text + print(" {} : {} ".format(name, desc.text)) + print(" os ", os_.text) + print(" compilers ",compilers.text) if MAX_MPITASKS_PER_NODE is not None: - print " pes/node ",MAX_MPITASKS_PER_NODE.text + print(" pes/node ",MAX_MPITASKS_PER_NODE.text) if max_tasks_per_node is not None: - print " max_tasks/node ",max_tasks_per_node.text + print(" max_tasks/node 
",max_tasks_per_node.text) elif single_machine and machine_name in name: - print " {} : {} ".format(name, desc.text) - print " os ", os_.text - print " compilers ",compilers.text + print(" {} : {} ".format(name, desc.text)) + print(" os ", os_.text) + print(" compilers ",compilers.text) if MAX_MPITASKS_PER_NODE is not None: - print " pes/node ",MAX_MPITASKS_PER_NODE.text + print(" pes/node ",MAX_MPITASKS_PER_NODE.text) if max_tasks_per_node is not None: - print " max_tasks/node ",max_tasks_per_node.text + print(" max_tasks/node ",max_tasks_per_node.text) def _main_func(description): """ diff --git a/scripts/query_testlists b/scripts/query_testlists index d363df3f564..3f314d868fb 100755 --- a/scripts/query_testlists +++ b/scripts/query_testlists @@ -13,7 +13,7 @@ Usage: All of the above support the various --xml-* arguments for subsetting which tests are included. """ -from __future__ import print_function + from Tools.standard_script_setup import * from CIME.test_utils import get_tests_from_xml, test_to_string from CIME.XML.tests import Tests diff --git a/scripts/tests/list_tests b/scripts/tests/list_tests index d9de526aa85..01b08116753 100755 --- a/scripts/tests/list_tests +++ b/scripts/tests/list_tests @@ -1,5 +1,7 @@ #!/usr/bin/env python - +# This script will print the list of test classes in +# scripts_regression_tests.py +# import unittest DEBUG = False @@ -13,14 +15,15 @@ def list_tests_from(): if len(tests): for atest in tests: if DEBUG: - print atest + print(atest) for btest in atest._tests: btestname = btest.__str__().split() test_classes.append(btestname[1][1:-1].split('.')[1]) - # add this explicitly, not captured by the above - test_classes.append("B_CheckCode") - for ctest in sorted(list(set(test_classes))): - print ctest + # add this explicitly, not captured by the above + test_classes.append("B_CheckCode") + for ctest in sorted(list(set(test_classes))): + print(ctest) + if __name__ == "__main__": # Include the directories diff --git 
a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 7382a0e2c25..4742e1c2b91 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -1,7 +1,7 @@ #!/usr/bin/env python -import io, glob, os, re, shutil, signal, sys, tempfile, \ - threading, time, logging, unittest, getpass, string +import glob, os, re, shutil, signal, sys, tempfile, \ + threading, time, logging, unittest, getpass from xml.etree.ElementTree import ParseError @@ -10,6 +10,8 @@ # Remove all pyc files to ensure we're testing the right things import subprocess subprocess.call('/bin/rm $(find . -name "*.pyc")', shell=True, cwd=LIB_DIR) +from six import assertRaisesRegex +import six from CIME.utils import run_cmd, run_cmd_no_fail, get_lids, get_current_commit import update_acme_tests @@ -239,8 +241,8 @@ def tearDownClass(cls): teardown_root = True for tfile in cls._testdirs: if tfile not in cls._do_teardown: - print "Detected failed test or user request no teardown" - print "Leaving case directory : %s"%tfile + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s"%tfile) teardown_root = False elif do_teardown: shutil.rmtree(tfile) @@ -340,7 +342,7 @@ def test_d_create_clone_new_user(self): run_cmd_assert_result(self, "./xmlchange USER=this_is_not_a_user", from_dir=prevtestdir) - fakeoutputroot = string.replace(cls._testroot, os.environ.get("USER"), "this_is_not_a_user") + fakeoutputroot = cls._testroot.replace(os.environ.get("USER"), "this_is_not_a_user") run_cmd_assert_result(self, "./xmlchange CIME_OUTPUT_ROOT=%s"%fakeoutputroot, from_dir=prevtestdir) @@ -517,8 +519,8 @@ def tearDownClass(cls): for tfile in cls._testdirs: if tfile not in cls._do_teardown: - print "Detected failed test or user request no teardown" - print "Leaving case directory : %s"%tfile + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s"%tfile) elif 
do_teardown: shutil.rmtree(tfile) @@ -768,10 +770,10 @@ def tearDown(self): do_teardown = self._do_teardown and sys.exc_info() == (None, None, None) if (not do_teardown): - print "Detected failed test or user request no teardown" - print "Leaving files:" + print("Detected failed test or user request no teardown") + print("Leaving files:") for file_to_clean in files_to_clean: - print " ", file_to_clean + print(" ", file_to_clean) else: # For batch machines need to avoid race condition as batch system # finishes I/O for the case. @@ -1692,8 +1694,8 @@ def get_macros(macro_maker, build_xml, build_system): """ # Build.write_macros expects file-like objects as input, so # we need to wrap the strings in StringIO objects. - xml = io.StringIO(unicode(build_xml)) - output = io.StringIO() + xml = six.StringIO(str(build_xml)) + output = six.StringIO() output_format = None if build_system == "Makefile": output_format = "make" @@ -1924,21 +1926,21 @@ class G_TestMacrosBasic(unittest.TestCase): def test_script_is_callable(self): """The test script can be called on valid output without dying.""" # This is really more a smoke test of this script than anything else. 
- maker = Compilers(MockMachines("mymachine", "SomeOS"), version="2.0") + maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) test_xml = _wrap_config_build_xml("FALSE") get_macros(maker, test_xml, "Makefile") def test_script_rejects_bad_xml(self): """The macro writer rejects input that's not valid XML.""" - maker = Compilers(MockMachines("mymachine", "SomeOS"), version="2.0") + maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) with self.assertRaises(ParseError): get_macros(maker, "This is not valid XML.", "Makefile") def test_script_rejects_bad_build_system(self): """The macro writer rejects a bad build system string.""" - maker = Compilers(MockMachines("mymachine", "SomeOS"), version="2.0") + maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) bad_string = "argle-bargle." - with self.assertRaisesRegexp( + with assertRaisesRegex(self, SystemExit, "Unrecognized build system provided to write_macros: " + bad_string): get_macros(maker, "This string is irrelevant.", bad_string) @@ -1960,7 +1962,7 @@ class H_TestMakeMacros(unittest.TestCase): test_machine = "mymachine" def setUp(self): - self._maker = Compilers(MockMachines(self.test_machine, self.test_os), version="2.0") + self._maker = Compilers(MockMachines(self.test_machine, self.test_os), version=2.0) def xml_to_tester(self, xml_string): """Helper that directly converts an XML string to a MakefileTester.""" @@ -2040,7 +2042,7 @@ def test_reject_duplicate_defaults(self): """The macro writer dies if given many defaults.""" xml1 = """/path/to/default""" xml2 = """/path/to/other_default""" - with self.assertRaisesRegexp( + with assertRaisesRegex(self, SystemExit, "Variable MPI_PATH is set ambiguously in config_build.xml."): self.xml_to_tester(xml1+xml2) @@ -2049,7 +2051,7 @@ def test_reject_duplicates(self): """The macro writer dies if given many matches for a given configuration.""" xml1 = """/path/to/mpich""" xml2 = """/path/to/mpich2""" - with 
self.assertRaisesRegexp( + with assertRaisesRegex(self, SystemExit, "Variable MPI_PATH is set ambiguously in config_build.xml."): self.xml_to_tester(xml1+xml2) @@ -2058,7 +2060,7 @@ def test_reject_ambiguous(self): """The macro writer dies if given an ambiguous set of matches.""" xml1 = """/path/to/mpich""" xml2 = """/path/to/mpi-debug""" - with self.assertRaisesRegexp( + with assertRaisesRegex(self, SystemExit, "Variable MPI_PATH is set ambiguously in config_build.xml."): self.xml_to_tester(xml1+xml2) @@ -2190,7 +2192,7 @@ def test_env_and_shell_command(self): tester.assert_variable_equals("FFLAGS", "-O2 -fast", env={"OPT_LEVEL": "2"}) xml1 = """-O$SHELL{echo $ENV{OPT_LEVEL}} -fast""" err_msg = "Nesting not allowed.*" - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(xml1) def test_config_variable_insertion(self): @@ -2220,12 +2222,12 @@ def test_config_reject_self_references(self): # references. xml1 = """MPI_LIB_NAME""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(""+xml1+"") xml1 = """${MPI_LIB_NAME}""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(""+xml1+"") def test_config_reject_cyclical_references(self): @@ -2233,12 +2235,12 @@ def test_config_reject_cyclical_references(self): xml1 = """MPI_PATH""" xml2 = """MPI_LIB_NAME""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(""+xml1+xml2+"") xml1 = """${MPI_PATH}""" xml2 = """${MPI_LIB_NAME}""" err_msg = ".* has bad references." 
- with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(""+xml1+xml2+"") def test_variable_insertion_with_machine_specific_setting(self): @@ -2247,14 +2249,14 @@ def test_variable_insertion_with_machine_specific_setting(self): xml2 = """MPI_PATH""".format(self.test_machine) xml3 = """MPI_LIB_NAME""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(xml1+xml2+xml3) xml1 = """something""" xml2 = """MPI_PATH""".format(self.test_machine) xml3 = """${MPI_LIB_NAME}""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(xml1+xml2+xml3) @@ -2337,7 +2339,7 @@ def check_for_pylint(): major = int(pylintver.group(1)) minor = int(pylintver.group(2)) if pylint is None or major < 1 or (major == 1 and minor < 5): - print "pylint version 1.5 or newer not found, pylint tests skipped" + print("pylint version 1.5 or newer not found, pylint tests skipped") return False return True @@ -2433,12 +2435,11 @@ def _main_func(): try: unittest.main(verbosity=2, catchbreak=True) - except SystemExit: - had_fails = sys.exc_info()[1].message - if had_fails: - print "Detected failures, leaving directory:", TEST_ROOT + except SystemExit as e: + if e.__str__() != "False": + print("Detected failures, leaving directory:", TEST_ROOT) else: - print "All pass, removing directory:", TEST_ROOT + print("All pass, removing directory:", TEST_ROOT) if os.path.exists(TEST_ROOT): shutil.rmtree(TEST_ROOT) diff --git a/src/build_scripts/buildlib.internal_components b/src/build_scripts/buildlib.internal_components index 410f7e4b049..5c23073d217 100755 --- a/src/build_scripts/buildlib.internal_components +++ b/src/build_scripts/buildlib.internal_components @@ -23,7 +23,6 @@ def buildlib(caseroot, libroot, bldroot, compname=None): if dir1 
== "cime_config": compname = dir2 else: - print "HERE %s"%dir1 compname = dir1.split('.')[1] build_cime_component_lib(case, compname, libroot, bldroot) diff --git a/src/build_scripts/buildlib.pio b/src/build_scripts/buildlib.pio index 5b6909b0ddf..d8bf64203a8 100755 --- a/src/build_scripts/buildlib.pio +++ b/src/build_scripts/buildlib.pio @@ -137,7 +137,7 @@ def _main_func(description): valid_values += ",pnetcdf" if netcdf4_parallel_found: valid_values += ",netcdf4p,netcdf4c" - logger.warn("Updating valid_values for PIO_TYPENAME: %s", valid_values) + logger.warning("Updating valid_values for PIO_TYPENAME: %s", valid_values) case.set_valid_values("PIO_TYPENAME",valid_values) # nothing means use the general default @@ -148,7 +148,7 @@ def _main_func(description): case.set_valid_values(comp_pio_typename,valid_values) current_value = case.get_value(comp_pio_typename) if current_value not in valid_values: - logger.warn("Resetting PIO_TYPENAME to netcdf for component %s"%comp) + logger.warning("Resetting PIO_TYPENAME to netcdf for component %s"%comp) case.set_value(comp_pio_typename,"netcdf") diff --git a/src/drivers/mct/cime_config/buildnml b/src/drivers/mct/cime_config/buildnml index 8ce75173335..970efa691c0 100755 --- a/src/drivers/mct/cime_config/buildnml +++ b/src/drivers/mct/cime_config/buildnml @@ -99,7 +99,7 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): for comp in comps: ncpl = case.get_value(comp.upper() + '_NCPL') if ncpl is not None: - cpl_dt = basedt / int(ncpl) + cpl_dt = int(basedt / int(ncpl)) totaldt = cpl_dt * int(ncpl) if totaldt != basedt: expect(False, " {} ncpl doesn't divide base dt evenly".format(comp))