                try:
                    os.remove(filename)
                except OSError:
-                    #Skip file not found error.
+                    # Skip file not found error.
                    pass
                elif k == "mode" and v == "simulated":
                    print("Simulated removal of {}".format(filename))
        >>> d['Hello']
        'Universe'
        """
+
    def __init__(self, actions=None, strict=True):
        """
        Initializes a Modder from a list of supported actions.
@@ -150,6 +149,7 @@
Source code for custodian.ansible.interpreter
if __name__=="__main__":importdoctest
+
doctest.testmod()
<
    if job_number == 0:
        backup = True
-        #assume the initial guess is poor,
-        #start with conjugate gradients
+        # assume the initial guess is poor,
+        # start with conjugate gradients
        settings = [{"dict": "INCAR", "action": {"_set": {"IBRION": 2}}},
@@ -110,8 +108,8 @@
Source code for custodian.cli.converge_geometry
                    {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}]
-        #switch to RMM-DIIS once we are near the
-        #local minimum (assumed after 2 runs of CG)
+        # switch to RMM-DIIS once we are near the
+        # local minimum (assumed after 2 runs of CG)
    else:
        settings = [{"dict": "INCAR",
diff --git a/docs/_modules/custodian/cli/converge_kpoints.html b/docs/_modules/custodian/cli/converge_kpoints.html
index b13a54bf..eda57796 100644
--- a/docs/_modules/custodian/cli/converge_kpoints.html
+++ b/docs/_modules/custodian/cli/converge_kpoints.html
@@ -4,7 +4,7 @@
- custodian.cli.converge_kpoints — custodian 2019.8.23 documentation
+ custodian.cli.converge_kpoints — custodian 2019.8.24 documentation
@@ -37,7 +37,7 @@
from custodian.custodian import Custodian
import logging
-
example_yaml="""# This is an example of a Custodian yaml spec file. It shows how you can specify# a double relaxation followed by a static calculation. Minor modifications
diff --git a/docs/_modules/custodian/cli/run_nwchem.html b/docs/_modules/custodian/cli/run_nwchem.html
index c8c0de23..7c03e14f 100644
--- a/docs/_modules/custodian/cli/run_nwchem.html
+++ b/docs/_modules/custodian/cli/run_nwchem.html
@@ -4,7 +4,7 @@
- custodian.cli.run_nwchem — custodian 2019.8.23 documentation
+ custodian.cli.run_nwchem — custodian 2019.8.24 documentation
@@ -37,7 +37,7 @@
"action":{"_set":kpoints.as_dict()}})# lattice vectors with length < 9 will get >1 KPOINTlow_kpoints=Kpoints.gamma_automatic(
- [max(int(18/l),1)forlinstructure.lattice.abc])
+ [max(int(18/l),1)forlinstructure.lattice.abc])settings.extend([{"dict":"INCAR","action":{"_set":{"ISMEAR":0}}},
@@ -284,7 +283,6 @@
Source code for custodian.cli.run_vasp
help="Set to true to turn off auto_npar. Useful for certain machines ""and calculations where you want absolute control.")
-
parser.add_argument("-z","--gzip",dest="gzip",action="store_true",help="Add this option to gzip the final output. Do not gzip if you "
@@ -310,7 +308,7 @@
Source code for custodian.cli.run_vasp
    )
    parser.add_argument(
-        "-me", "--max-errors", dest="max_errors", nargs="?",
+        "-me", "--max-errors", dest="max_errors", nargs="?",
+        default=10, type=int,
        help="Maximum number of errors to allow before quitting")
diff --git a/docs/_modules/custodian/custodian.html b/docs/_modules/custodian/custodian.html
index 0aa53828..3dfae35e 100644
--- a/docs/_modules/custodian/custodian.html
+++ b/docs/_modules/custodian/custodian.html
@@ -4,7 +4,7 @@
- custodian.custodian — custodian 2019.8.23 documentation
+ custodian.custodian — custodian 2019.8.24 documentation
@@ -37,7 +37,7 @@
    # will not be used.
    import sentry_sdk
+
sentry_sdk.init(dsn=os.environ["SENTRY_DSN"])
-
+
    with sentry_sdk.configure_scope() as scope:
-
+
        from getpass import getuser
+
try:scope.user={"username":getuser()}
-        except:
+        except Exception:
            pass
-
-
# Sentry.io is a service to aggregate logs remotely. This is useful
# for Custodian to get statistics on which errors are most common.
@@ -128,17 +124,20 @@
class Job(MSONable):
    """
    Abstract base class defining the interface for a Job.
    """
@@ -827,7 +826,7 @@
Source code for custodian.custodian
    fixed maximum number of times on a single job (i.e. the counter is reset at
    the beginning of each job). If the maximum number is reached the code will
    either raise a MaxCorrectionsPerHandlerError (raise_on_max==True) or stop
-    considering the correction (raise_on_max==False). If max_num_corrections
+    considering the correction (raise_on_max==False). If max_num_corrections
+    is None this option is not considered. These options can be overridden as
+    class attributes of the subclass or as customizable options setting an
+    instance attribute from __init__.
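
    A subclass opts into this behavior through class attributes, as in the
    illustrative sketch below (the handler name and the value 5 are
    hypothetical; only max_num_corrections and raise_on_max come from the
    text above):

        class MyHandler(ErrorHandler):
            # Give up on this handler's fix after 5 corrections per job.
            max_num_corrections = 5
            # Raise MaxCorrectionsPerHandlerError instead of silently
            # no longer considering the correction.
            raise_on_max = True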
@@ -885,7 +884,7 @@
class Validator(MSONable):
    """
    Abstract base class defining the interface for a Validator. A Validator
    differs from an ErrorHandler in that it does not correct a run and is run
diff --git a/docs/_modules/custodian/feff/handlers.html b/docs/_modules/custodian/feff/handlers.html
index 41572ad8..34efe487 100644
--- a/docs/_modules/custodian/feff/handlers.html
+++ b/docs/_modules/custodian/feff/handlers.html
@@ -4,7 +4,7 @@
- custodian.feff.handlers — custodian 2019.8.23 documentation
+ custodian.feff.handlers — custodian 2019.8.24 documentation
@@ -37,7 +37,7 @@
        # Checks output file for errors.
        self.outdata = QCOutput(self.output_file).data
        self.errors = self.outdata.get("errors")
+        self.warnings = self.outdata.get("warnings")
        # If we aren't out of optimization cycles, but we were in the past, reset the history
        if "out_of_opt_cycles" not in self.errors and len(self.opt_error_history) > 0:
            self.opt_error_history = []
@@ -127,12 +128,18 @@
                    self.qcinp.molecule = self.outdata.get("molecule_from_last_geometry")
                    actions.append({"molecule": "molecule_from_last_geometry"})
-            # If already at geom_max_cycles, often can just get convergence by restarting
-            # from the geometry of the last cycle. But we'll also save any structural
+            elif self.qcinp.rem.get("thresh", "10") != "14":
+                self.qcinp.rem["thresh"] = "14"
+                actions.append({"thresh": "14"})
+            # Will need to try and implement this dmax handler below when I have more time
+            # to fix the tests and the general handling procedure.
+            # elif self.qcinp.rem.get("geom_opt_dmax", 300) != 150:
+            #     self.qcinp.rem["geom_opt_dmax"] = 150
+            #     actions.append({"geom_opt_dmax": "150"})
+            # If already at geom_max_cycles, thresh 14, and dmax 150, often can just get convergence
+            # by restarting from the geometry of the last cycle. But we'll also save any structural
            # changes that happened along the way.
            else:
                self.opt_error_history += [self.outdata["structure_change"]]
@@ -169,27 +184,30 @@
Source code for custodian.qchem.handlers
                    self.qcinp.molecule = self.outdata.get("molecule_from_last_geometry")
                    actions.append({"molecule": "molecule_from_last_geometry"})
- elifself.qcinp.rem.get("scf_algorithm","diis").lower()=="diis":
- self.qcinp.rem["scf_algorithm"]="rca_diis"
- actions.append({"scf_algorithm":"rca_diis"})
- ifself.qcinp.rem.get("gen_scfman"):
- self.qcinp.rem["gen_scfman"]=False
- actions.append({"gen_scfman":False})
+ elifself.qcinp.rem.get("thresh","10")!="14":
+ self.qcinp.rem["thresh"]="14"
+ actions.append({"thresh":"14"})else:print("Use a different initial guess? Perhaps a different basis?")
- elif"linear_dependent_basis"inself.errors:
- # DIIS -> RCA_DIIS. If already RCA_DIIS, change basis?
- ifself.qcinp.rem.get("scf_algorithm","diis").lower()=="diis":
- self.qcinp.rem["scf_algorithm"]="rca_diis"
- actions.append({"scf_algorithm":"rca_diis"})
- ifself.qcinp.rem.get("gen_scfman"):
- self.qcinp.rem["gen_scfman"]=False
- actions.append({"gen_scfman":False})
+ elif"premature_end_FileMan_error"inself.errors:
+ ifself.qcinp.rem.get("thresh","10")!="14":
+ self.qcinp.rem["thresh"]="14"
+ actions.append({"thresh":"14"})
+ elifself.qcinp.rem.get("scf_guess_always","none").lower()!="true":
+ self.qcinp.rem["scf_guess_always"]=True
+ actions.append({"scf_guess_always":True})
+ else:
+ print("We're in a bad spot if we get a FileMan error while always generating a new SCF guess...")
+
+ elif"hessian_eigenvalue_error"inself.errors:
+ ifself.qcinp.rem.get("thresh","10")!="14":
+ self.qcinp.rem["thresh"]="14"
+ actions.append({"thresh":"14"})else:
- print("Perhaps use a better basis?")
+ print("Not sure how to fix hessian_eigenvalue_error if thresh is already 14!")elif"failed_to_transform_coords"inself.errors:# Check for symmetry flag in rem. If not False, set to False and rerun.
@@ -211,34 +229,45 @@
Source code for custodian.qchem.handlers
elif"failed_to_read_input"inself.errors:# Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
- actions.append({"rerun job as-is"})
-
- elif"IO_error"inself.errors:
- # Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
- actions.append({"rerun job as-is"})
+ actions.append({"rerun_job_no_changes":True})elif"read_molecule_error"inself.errors:# Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
- actions.append({"rerun job as-is"})
+ actions.append({"rerun_job_no_changes":True})elif"never_called_qchem"inself.errors:# Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
- actions.append({"rerun job as-is"})
+ actions.append({"rerun_job_no_changes":True})
+
+            elif "licensing_error" in self.errors:
+                # Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
+                actions.append({"rerun_job_no_changes": True})

            elif "unknown_error" in self.errors:
-                print("Examine error message by hand.")
-                return {"errors": self.errors, "actions": None}
+                if self.qcinp.rem.get("scf_guess", "none").lower() == "read":
+                    del self.qcinp.rem["scf_guess"]
+                    actions.append({"scf_guess": "deleted"})
+                elif self.qcinp.rem.get("thresh", "10") != "14":
+                    self.qcinp.rem["thresh"] = "14"
+                    actions.append({"thresh": "14"})
+                else:
+                    print("Unknown error. Examine output and log files by hand.")
+                    return {"errors": self.errors, "actions": None}

            else:
                # You should never get here. If correct is being called then errors should have at least one entry,
                # in which case it should have been caught by the if/elifs above.
-                print(
-                    "If you get this message, something has gone terribly wrong!")
+                print("Errors:", self.errors)
+                print("Must have gotten an error which is correctly parsed but not included in the handler. FIX!!!")
                return {"errors": self.errors, "actions": None}

+        if {"molecule": "molecule_from_last_geometry"} in actions and \
+                str(self.qcinp.rem.get("geom_opt_hessian")).lower() == "read":
+            del self.qcinp.rem["geom_opt_hessian"]
+            actions.append({"geom_opt_hessian": "deleted"})

        os.rename(self.input_file, self.input_file + ".last")
        self.qcinp.write_file(self.input_file)
-        return {"errors": self.errors, "actions": actions}
            qclog_file (str): Name of the file to redirect the standard output
                to. None means not to record the standard output.
            suffix (str): String to append to the file in postprocess.
-            scratch_dir (str): QCSCRATCH directory. Defaults to "/dev/shm/qcscratch/".
-            save_scratch (bool): Whether to save scratch directory contents.
+            scratch_dir (str): QCSCRATCH directory. Defaults to current directory.
+            save_scratch (bool): Whether to save basic scratch directory contents.
+                Defaults to False.
            save_name (str): Name of the saved scratch directory. Defaults
-                to "default_save_name".
+                to "saved_scratch".
            backup (bool): Whether to backup the initial input file. If True, the
                input will be copied with a ".orig" appended. Defaults to True.
        """
@@ -128,40 +128,27 @@
Source code for custodian.qchem.jobs
        @property
        def current_command(self):
-            multimode_index = 0
+            multi = {"openmp": "-nt", "mpi": "-np"}
+            if self.multimode not in multi:
+                raise RuntimeError("ERROR: Multimode should only be set to openmp or mpi")
+            command = [multi[self.multimode], str(self.max_cores), self.input_file, self.output_file]
            if self.save_scratch:
-                command = [
-                    "-save", "",
-                    str(self.max_cores), self.input_file, self.output_file,
-                    self.save_name
-                ]
-                multimode_index = 1
-            else:
-                command = [
-                    "", str(self.max_cores), self.input_file, self.output_file
-                ]
-            if self.multimode == 'openmp':
-                command[multimode_index] = "-nt"
-            elif self.multimode == 'mpi':
-                command[multimode_index] = "-np"
-            else:
-                print("ERROR: Multimode should only be set to openmp or mpi")
+                command.append(self.save_name)
            command = self.qchem_command + command
-            return command
+            com_str = ""
+            for part in command:
+                com_str = com_str + " " + part
+            return com_str
                  max_iterations=10,
                  max_molecule_perturb_scale=0.3,
                  check_connectivity=True,
+                  linked=True,
                  **QCJob_kwargs):
        """
        Optimize a structure and calculate vibrational frequencies to check if the
@@ -209,121 +201,272 @@
        self.error_count = Counter()
        # threshold of number of atoms to treat the cell as large.
        self.natoms_large_cell = natoms_large_cell
-        self.errors_subset_to_catch = errors_subset_to_catch or \
-            list(VaspErrorHandler.error_msgs.keys())
+        self.errors_subset_to_catch = errors_subset_to_catch or list(VaspErrorHandler.error_msgs.keys())
        self.logger = logging.getLogger(self.__class__.__name__)
""" Initializes the handler with max drift Args:
- max_drift (float): This defines the max drift. Leaving this at the default of None gets the max_drift from EDFIFFG
+ max_drift (float): This defines the max drift. Leaving this at the default of None gets the max_drift from
+ EDFIFFG """self.max_drift=max_drift
@@ -702,7 +696,7 @@
Source code for custodian.vasp.handlers
try:outcar=Outcar("OUTCAR")
- except:
+ exceptException:# Can't perform check if Outcar not validreturnFalse
@@ -781,14 +775,14 @@
Source code for custodian.vasp.handlers
        # Also disregard if automatic KPOINT generation is used
        if (not vi["INCAR"].get('ISYM', True)) or \
-                vi["KPOINTS"].style == Kpoints.supported_modes.Automatic:
+                vi["KPOINTS"].style == Kpoints.supported_modes.Automatic:
            return False

        try:
            v = Vasprun(self.output_vasprun)
            if v.converged:
                return False
-        except:
+        except Exception:
            pass

        with open(self.output_filename, "r") as f:
            for line in f:
@@ -831,7 +825,7 @@
incar=Incar.from_file("INCAR")incar['MAGMOM']=magmomincar.write_file("INCAR")
- except:
+ exceptException:logger.error('MAGMOM copy from OUTCAR to INCAR failed')# Remove continuation so if a subsequent job is run in
@@ -659,7 +659,7 @@
diff --git a/docs/_sources/_themes/README.rst.txt b/docs/_sources/_themes/README.rst.txt
deleted file mode 100644
index e8179f96..00000000
--- a/docs/_sources/_themes/README.rst.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-krTheme Sphinx Style
-====================
-
-This repository contains sphinx styles Kenneth Reitz uses in most of
-his projects. It is a derivative of Mitsuhiko's themes for Flask and Flask related
-projects. To use this style in your Sphinx documentation, follow
-this guide:
-
-1. put this folder as _themes into your docs folder. Alternatively
- you can also use git submodules to check out the contents there.
-
-2. add this to your conf.py: ::
-
- sys.path.append(os.path.abspath('_themes'))
- html_theme_path = ['_themes']
- html_theme = 'kr'
-
-The following themes exist:
-
-**kr**
- the standard flask documentation theme for large projects
-
-**kr_small**
- small one-page theme. Intended to be used by very small addon libraries.
-
diff --git a/docs/_sources/changelog.rst.txt b/docs/_sources/changelog.rst.txt
deleted file mode 100644
index c975f125..00000000
--- a/docs/_sources/changelog.rst.txt
+++ /dev/null
@@ -1,218 +0,0 @@
-Change Log
-==========
-
-v2017.12.23
------------
-* cstdn command line tool is now official with docs.
-* Fine-grained control of VaspErrorHandler is now possible using
- `errors_subset_to_catch`.
-* Switched to date-based versioning for custodian like pymatgen.
-
-v1.1.1
-------
-* DriftErrorHandler (Shyam)
-
-v1.1.0
-------
-* Improved error handling for Qchem calculations.
-
-v1.0.4
-------
-* Improved handling of non-zero return codes.
-
-v1.0.2
-------
-* Interrupted run feature. (Shyam Dwaraknath)
-
-v1.0.1
-------
-* Pymatgen 4.0.0 compatible release.
-
-v1.0.0
-------
-* Custodian now comes with a "cstdn" script that enables the arbitrary creation
- of simple job sequences using a yaml file, and the running of calculations
- based on these yaml specifications.
-
-v0.8.8
-------
-1. Fix setup.py.
-
-v0.8.5
-------
-1. Refactoring to support pymatgen 3.1.4.
-
-v0.8.2
-------
-1. Made auto_npar optional for double relaxation VASP run.
-
-v0.8.1
-------
-1. Misc bug fixes (minor).
-
-v0.8.0
-------
-1. Major refactoring of Custodian to introduce Validators,
- which are effectively post-Job checking mechanisms that do not perform
- error correction.
-2. **Backwards incompatibility** BadVasprunXMLHandler is now a validator,
- which must be separately imported to be used.
-3. Miscellaneous cleanup of Py3k fixes.
-
-v0.7.6
-------
-1. Custodian is now Python 3 compatible and uses the latest versions of
- pymatgen and monty.
-
-v0.7.5
-------
-1. **Major** Custodian now exits with RuntimeError when max_errors or
- unrecoverable_error is encountered.
-2. Added BadVasprunXMLHandler.
-
-v0.7.4
-------
-1. auto_npar option in VaspJob now properly handles Hessian calculations.
-2. WalltimeHandler now supports termination at electronic step (David
- Waroquiers).
-3. Improved handling of BRMIX fixes.
-
-v0.7.3
-------
-1. Improved backwards compatibility for WallTimeHandler.
-2. Improvements to VaspErrorHandler. No longer catches spurious BRMIX error
- messages when NELECT is specified in INCAR, and pricel and rot_mat errors
- are now fixed with symmetry precision and gamma centered KPOINTS instead.
-3. Improved Qchem error handler (Xiaohui Qu).
-
-v0.7.2
-------
-1. Improved WalltimeHandler (PBSWalltimeHandler is a subset and is now
- deprecated).
-2. New monty required version (>= 0.2.2).
-
-v0.7.1
-------
-1. Much improved qchem error handling (Xiaohui Qu).
-2. New Monty required version (>= 0.2.0).
-
-v0.7.0
-------
-1. **Backwards incompatible with v0.6.3.** Refactoring to move commonly used
-   Python utility functions to the `Monty package `_, which is now a dependency
-   for custodian.
-2. Custodian now requires pymatgen >= 2.9.0 for VASP, Qchem and Nwchem jobs
- and handlers.
-3. converge_kpoints script now has increment mode.
-4. ErrorHandlers now have a new API, where the class variables "is_monitor"
- and "is_terminating" are provided to indicate if a particular handler
- runs in the background during a Job and whether a handler should
- terminate the job. Some errors may not be critical or may need to wait
- for some other event to terminate a job. For example,
- a particular error may require a flag to be set to request a job to
- terminate gracefully once it finishes its current task. The handler to
- set the flag should not terminate the job.
-
-0.6.3
------
-1. Added buffer time option in PBSWalltimeHandler.
-2. Improved Qchem jobs and handlers (Xiaohui Qu).
-3. Vastly improved API docs.
-
-0.6.2
------
-1. Bug fix release to support sub dirs in run folder when using scratch.
-2. Improve handling of walltime in PBSWalltimeHander.
-
-0.6.1
------
-1. Bug fix release to address minor issue with checkpointing.
-2. Checkpointing is now turned off by default.
-
-0.6.0
------
-1. Checkpointing implemented for Custodian. Custodian can now checkpoint all
- files in the current working directory after every successful job. If the
- job is resubmitted, it will restore files and start from the last
- checkpoint. Particularly useful for multi-job runs.
-2. Added PBSWalltimeHandler to handle wall times for PBS Vasp Jobs.
-3. Qchem error handlers and jobs.
-
-0.5.0
------
-1. Added scratch_dir option to Custodian class as well as run_vasp and
- run_nwchem scripts. Many supercomputing clusters have a scratch space
- which have significantly faster IO. This option provides a transparent way
- to specify the jobs to be performed in the scratch. Especially useful for
- jobs which have significant file IO.
-
-0.4.5
------
-1. Fix gzip of output.
-
-0.4.3
------
-1. Added handling for ZBRENT error for VASP.
-2. Minor refactoring to consolidate backup and gzip directory methods.
-
-0.4.2
------
-1. Rudimentary support for Nwchem error handling (by Shyue Ping Ong).
-2. Improved VASP error handling (by Steve Dacek and Will Richards).
-
-0.4.1
------
-1. Added handling of PRICEL error in VASP.
-2. Speed and robustness improvements.
-3. BRIONS error now handled by changing ISYM.
-
-0.4.0
------
-1. Many VASP handlers are now consolidated into a single VaspErrorHandler.
-2. Many more fixes for VASP runs, including the "TOO FEW BANDS",
- "TRIPLE PRODUCT", "DENTET" and "BRIONS" errors.
-3. VaspJob now includes the auto_npar and auto_gamma options, which
- automatically optimizes the NPAR setting to be sqrt(number of cores) as
- per the VASP recommendation for DFT runs and tries to search for a
- gamma-only compiled version of VASP for gamma 1x1x1 runs.
-
-0.3.5
------
-1. Bug fix for incorrect shift error handler in VASP.
-2. More robust fix for unconverged VASP runs (switching from ALGO fast to
- normal).
-3. Expanded documentation.
-
-0.3.4
------
-1. Added support for handlers that monitor a job as it is progressing
-   and terminate it if necessary. Useful for correcting errors that come up
-   but do not cause immediate job failures.
-
-0.3.2
------
-1. Important bug fix for VaspJob and converge_kpoints script.
-
-0.3.0
------
-
-1. Major update to custodian API. Custodian now performs more comprehensive
- logging in a file called custodian.json, which logs all jobs and
- corrections performed.
-
-Version 0.2.6
--------------
-1. Bug fix for run_vasp script for static runs.
-
-Version 0.2.5
--------------
-1. run_vasp script that now provides flexible specification of vasp runs.
-2. Vastly improved error handling for VASP runs.
-3. Improved logging system for custodian.
-4. Improved API for custodian return types during run.
-5. First stable release.
-
-Version 0.2.4
--------------
-
-1. Bug fixes for aflow style runs assimilation.
diff --git a/docs/_sources/custodian.ansible.rst.txt b/docs/_sources/custodian.ansible.rst.txt
deleted file mode 100644
index 5840ac5f..00000000
--- a/docs/_sources/custodian.ansible.rst.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-custodian.ansible package
-=========================
-
-Subpackages
------------
-
-.. toctree::
-
-
-Submodules
-----------
-
-custodian.ansible.actions module
---------------------------------
-
-.. automodule:: custodian.ansible.actions
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.ansible.interpreter module
-------------------------------------
-
-.. automodule:: custodian.ansible.interpreter
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: custodian.ansible
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/_sources/custodian.cli.rst.txt b/docs/_sources/custodian.cli.rst.txt
deleted file mode 100644
index c83b5d9c..00000000
--- a/docs/_sources/custodian.cli.rst.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-custodian.cli package
-=====================
-
-Submodules
-----------
-
-custodian.cli.converge\_geometry module
----------------------------------------
-
-.. automodule:: custodian.cli.converge_geometry
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.cli.converge\_kpoints module
---------------------------------------
-
-.. automodule:: custodian.cli.converge_kpoints
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.cli.cstdn module
---------------------------
-
-.. automodule:: custodian.cli.cstdn
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.cli.run\_nwchem module
---------------------------------
-
-.. automodule:: custodian.cli.run_nwchem
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.cli.run\_vasp module
-------------------------------
-
-.. automodule:: custodian.cli.run_vasp
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: custodian.cli
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/_sources/custodian.feff.rst.txt b/docs/_sources/custodian.feff.rst.txt
deleted file mode 100644
index 753541f9..00000000
--- a/docs/_sources/custodian.feff.rst.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-custodian.feff package
-======================
-
-Subpackages
------------
-
-.. toctree::
-
-
-Submodules
-----------
-
-custodian.feff.handlers module
-------------------------------
-
-.. automodule:: custodian.feff.handlers
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.feff.interpreter module
----------------------------------
-
-.. automodule:: custodian.feff.interpreter
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.feff.jobs module
---------------------------
-
-.. automodule:: custodian.feff.jobs
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: custodian.feff
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/_sources/custodian.nwchem.rst.txt b/docs/_sources/custodian.nwchem.rst.txt
deleted file mode 100644
index be9c01a4..00000000
--- a/docs/_sources/custodian.nwchem.rst.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-custodian.nwchem package
-========================
-
-Subpackages
------------
-
-.. toctree::
-
-
-Submodules
-----------
-
-custodian.nwchem.handlers module
---------------------------------
-
-.. automodule:: custodian.nwchem.handlers
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.nwchem.jobs module
-----------------------------
-
-.. automodule:: custodian.nwchem.jobs
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: custodian.nwchem
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/_sources/custodian.qchem.rst.txt b/docs/_sources/custodian.qchem.rst.txt
deleted file mode 100644
index 6fc3df09..00000000
--- a/docs/_sources/custodian.qchem.rst.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-custodian.qchem package
-=======================
-
-Subpackages
------------
-
-.. toctree::
-
-
-Submodules
-----------
-
-custodian.qchem.handlers module
--------------------------------
-
-.. automodule:: custodian.qchem.handlers
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.qchem.jobs module
----------------------------
-
-.. automodule:: custodian.qchem.jobs
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: custodian.qchem
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/_sources/custodian.rst.txt b/docs/_sources/custodian.rst.txt
deleted file mode 100644
index 97747550..00000000
--- a/docs/_sources/custodian.rst.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-custodian package
-=================
-
-Subpackages
------------
-
-.. toctree::
-
- custodian.ansible
- custodian.cli
- custodian.feff
- custodian.nwchem
- custodian.qchem
- custodian.vasp
-
-Submodules
-----------
-
-custodian.custodian module
---------------------------
-
-.. automodule:: custodian.custodian
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.utils module
-----------------------
-
-.. automodule:: custodian.utils
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: custodian
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/_sources/custodian.vasp.rst.txt b/docs/_sources/custodian.vasp.rst.txt
deleted file mode 100644
index 2d8dbc9b..00000000
--- a/docs/_sources/custodian.vasp.rst.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-custodian.vasp package
-======================
-
-Subpackages
------------
-
-.. toctree::
-
-
-Submodules
-----------
-
-custodian.vasp.handlers module
-------------------------------
-
-.. automodule:: custodian.vasp.handlers
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.vasp.interpreter module
----------------------------------
-
-.. automodule:: custodian.vasp.interpreter
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.vasp.jobs module
---------------------------
-
-.. automodule:: custodian.vasp.jobs
- :members:
- :undoc-members:
- :show-inheritance:
-
-custodian.vasp.validators module
---------------------------------
-
-.. automodule:: custodian.vasp.validators
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: custodian.vasp
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/_sources/index.rst.txt b/docs/_sources/index.rst.txt
deleted file mode 100644
index eaa523f6..00000000
--- a/docs/_sources/index.rst.txt
+++ /dev/null
@@ -1,368 +0,0 @@
-Custodian
-=========
-
-Custodian is a simple, robust and flexible just-in-time (JIT) job management
-framework written in Python. Using custodian, you can create wrappers that
-perform error checking, job management and error recovery. It has a simple
-plugin framework that allows you to develop specific job management workflows
-for different applications.
-
-Error recovery is an important aspect of many *high-throughput* projects that
-generate data on a large scale. When you are running on the order of hundreds
-of thousands of jobs, even an error rate of 1% would mean thousands of errored
-jobs that would be impossible to deal with on a case-by-case basis.
-
-The specific use case for custodian is for long running jobs, with potentially
-random errors. For example, there may be a script that takes several days to
-run on a server, with a 1% chance of some IO error causing the job to fail.
-Using custodian, one can develop a mechanism to gracefully recover from the
-error, and restart the job with modified parameters if necessary.
-
-The current version of Custodian also comes with sub-packages for error
-handling for Vienna Ab Initio Simulation Package (VASP), NwChem and QChem
-calculations.
-
-Change log
-==========
-
-v2019.2.10
-----------
-* Improved slow convergence handling. (@shyamd)
-
-v2019.2.7
----------
-* Improved error logging.
-* Improved handling of frozen jobs and potim errors.
-* Improved Exception handling. (Guido Petretto)
-
-
-:doc:`Older versions `
-
-Getting custodian
-=================
-
-Stable version
---------------
-
-The version at the Python Package Index (PyPI) is always the latest stable
-release that will, hopefully, be relatively bug-free. The easiest way to
-install custodian on any system is to use easy_install or pip, as follows::
-
- easy_install custodian
-
-or::
-
- pip install custodian
-
-Some plugins (e.g., vasp management) require additional setup (please see
-`pymatgen's documentation`_).
-
-Developmental version
----------------------
-
-The bleeding edge developmental version is at the custodian's `Github repo
-`_. The developmental
-version is likely to be more buggy, but may contain new features. The
-Github version includes test files as well for complete unit testing. After
-cloning the source, you can type::
-
- python setup.py install
-
-or to install the package in developmental mode::
-
- python setup.py develop
-
-Requirements
-============
-
-Custodian requires Python 2.7+. There are no other required dependencies.
-
-Optional dependencies
----------------------
-
-Optional libraries that are required if you need certain features:
-
-1. Python Materials Genomics (`pymatgen`_) 2.8.10+: To use the plugin for
- VASP, NwChem and Qchem. Please install using::
-
- pip install pymatgen
-
- For more information, please consult `pymatgen's documentation`_.
-2. nose - For complete unittesting.
-
-Usage
-=====
-
-The main class in the workflow is known as Custodian, which manages a series
-of jobs with a list of error handlers. The general workflow for Custodian is
-presented in the figure below.
-
-.. figure:: _static/Custodian.png
- :width: 500px
- :align: center
- :alt: Custodian workflow
- :figclass: align-center
-
- Overview of the Custodian workflow.
-
-The Custodian class takes in two general inputs - a **list of Jobs** and
-a **list of ErrorHandlers**. **Jobs** should be subclasses of the
-:class:`custodian.custodian.Job` abstract base class and **ErrorHandlers**
-should be subclasses of the :class:`custodian.custodian.ErrorHandler` abstract
-base class. To use custodian, you need to implement concrete implementations
-of these abstract base classes.
-
-Simple example
---------------
-
-A very simple example implementation is given in the custodian_examples.py
-script in the scripts directory. We will now go through the example in detail
-here.
-
-The ExampleJob has the following code.
-
-.. code-block:: python
-
- class ExampleJob(Job):
-
- def __init__(self, jobid, params=None):
- if params is None:
- params = {"initial": 0, "total": 0}
- self.jobid = jobid
- self.params = params
-
- def setup(self):
- self.params["initial"] = 0
- self.params["total"] = 0
-
- def run(self):
- sequence = [random.uniform(0, 1) for i in range(100)]
- self.params["total"] = self.params["initial"] + sum(sequence)
-
- def postprocess(self):
- pass
-
- @property
- def name(self):
- return "ExampleJob{}".format(self.jobid)
-
-
-The ExampleJob simply sums a random sequence of 100 numbers between 0 and
-1, adds it to an initial value and puts the value in 'total' variable. The
-ExampleJob subclasses the Job abstract base class, and implements the necessary
-API comprising just three key methods: **setup(), run(),
-and postprocess()**.
-
-Let us now define an ErrorHandler that will check if the total value is >= 50,
-and if it is not, it will increment the initial value by 1 and rerun the
-ExampleJob again.
-
-.. code-block:: python
-
-
- class ExampleHandler(ErrorHandler):
-
- def __init__(self, params):
- self.params = params
-
- def check(self):
- return self.params["total"] < 50
-
- def correct(self):
- self.params["initial"] += 1
- return {"errors": "total < 50", "actions": "increment by 1"}
-
-As you can see above, the ExampleHandler subclasses the ErrorHandler abstract
-base class, and implements the necessary API comprising just two key
-methods: **check() and correct()**.
-
-The transfer of information between the Job and ErrorHandler is done using
-the params argument in this example, which is not ideal but is sufficient
-for demonstrating the Custodian API. In real world usage,
-a more common transfer of information may involve the Job writing the output
-to a file, and the ErrorHandler checking the contents of those files to
-detect error situations.
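-
-A minimal sketch of such a file-based handler is given below. It is purely
-illustrative: the results.txt filename, its assumed single-number format and
-the threshold of 50 are hypothetical, not part of the custodian API.
-
-.. code-block:: python
-
-    class FileBasedHandler(ErrorHandler):
-
-        def check(self):
-            # Hypothetical output format: the job writes a single number
-            # to results.txt; an error is flagged when it is below 50.
-            with open("results.txt") as f:
-                self.total = float(f.read().strip())
-            return self.total < 50
-
-        def correct(self):
-            # A real handler would rewrite the job's input files here.
-            return {"errors": "total < 50", "actions": "adjusted input"}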
-
-To run the job, one simply needs to supply a list of ExampleJobs and
-ErrorHandlers to a Custodian.
-
-.. code-block:: python
-
- njobs = 100
- params = {"initial": 0, "total": 0}
- c = Custodian([ExampleHandler(params)],
- [ExampleJob(i, params) for i in xrange(njobs)],
- max_errors=njobs)
- c.run()
-
-If you run custodian_example.py in the scripts directory, you will notice that
-a **custodian.json** file was generated, which summarizes the jobs that have
-been run and any corrections performed.
-
-Practical example: Electronic structure calculations
-----------------------------------------------------
-
-A practical example where the Custodian framework is particularly useful is
-in the area of electronic structure calculations. Electronic structure
-calculations tend to be long running and often terminate due to errors,
-random or otherwise. Such errors become a major issue in projects that
-perform such calculations in high throughput, such as the `Materials
-Project`_.
-
-The Custodian package comes with a fairly comprehensive plugin to deal
-with jobs (:mod:`custodian.vasp.jobs`) and errors
-(:mod:`custodian.vasp.handlers`) in electronic structure calculations based
-on the Vienna Ab Initio Simulation Package (VASP). To do this,
-Custodian uses the Python Materials Genomics (`pymatgen`_) package to
-perform analysis and I/O of VASP input and output files.
-
-A simple example of a script using Custodian to run a two-relaxation VASP job
-is as follows:
-
-.. code-block:: python
-
- from custodian.custodian import Custodian
- from custodian.vasp.handlers import VaspErrorHandler, \
- UnconvergedErrorHandler, PoscarErrorHandler, DentetErrorHandler
- from custodian.vasp.jobs import VaspJob
-
- handlers = [VaspErrorHandler(), UnconvergedErrorHandler(),
- PoscarErrorHandler(), DentetErrorHandler()]
- jobs = VaspJob.double_relaxation_run(args.command.split())
- c = Custodian(handlers, jobs, max_errors=10)
- c.run()
-
-The above will gracefully deal with many VASP errors encountered during
-relaxation. For example, it will correct ISMEAR to 0 if there are
-insufficient KPOINTS to use ISMEAR = -5.
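-
-Internally, such fixes are expressed as modder-style actions. The ISMEAR
-correction above, for instance, corresponds to a settings entry of the
-following form (a sketch, mirroring the actions used elsewhere in custodian):
-
-.. code-block:: python
-
-    settings = [{"dict": "INCAR", "action": {"_set": {"ISMEAR": 0}}}]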
-
-Using custodian, you can even set up potentially indefinite jobs,
-e.g. kpoints convergence jobs with a target energy convergence. Please see the
-converge_kpoints script in the scripts directory for an example.
-
-.. versionadded:: 0.4.3
-
- A new package for dealing with NwChem calculations has been added.
- NwChem is an open-source code for performing computational chemistry
- calculations.
-
-cstdn - A yaml-spec controlled job
-==================================
-
-Custodian now comes with a cstdn script, which allows you to do fine-grained
-control of a job using a yaml spec file. Below is an annotated example of how
-you can specify a double VASP relaxation followed by a static calculation.
-Minor modifications would allow very customizable calculations, though this is
-obviously not meant for highly complex workflows. For those, usage of `FireWorks
-`_ is highly recommended.
-
-Sample yaml spec::
-
- # Specifies a list of jobs to run.
-    # Each job is specified by a `jb: <full class path>` with parameters specified
- # via the params dict.
-
- jobs:
- - jb: custodian.vasp.jobs.VaspJob
- params:
- final: False
- suffix: .relax1
- - jb: custodian.vasp.jobs.VaspJob
- params:
- final: False
- suffix: .relax2
- settings_override:
- - {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}
- - jb: custodian.vasp.jobs.VaspJob
- params:
- final: True
- suffix: .static3
- settings_override:
- - {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}
- - {"dict": "INCAR", "action": {"_set": {"NSW": 0}}}
-
-
- # This key specifies parameters common to all jobs.
- # Keys starting with $ are expanded to the environmental values.
- # The example below means the parameter vasp_cmd is set to the value with
- # $PBS_NODEFILE expanded.
-
- jobs_common_params:
- $vasp_cmd: ["mpirun", "-machinefile", "$PBS_NODEFILE", "-np", "24", "vasp"]
-
-
- # Specifies a list of error handlers in the same format as jobs. Similarly,
- # parameters passed to the handler __init__ can be configured the same
- # way as for jobs.
- handlers:
- - hdlr: custodian.vasp.handlers.VaspErrorHandler
- - hdlr: custodian.vasp.handlers.AliasingErrorHandler
- - hdlr: custodian.vasp.handlers.MeshSymmetryErrorHandler
-
- # Specifies a list of error handlers in the same format as jobs.
- validators:
- - vldr: custodian.vasp.validators.VasprunXMLValidator
-
-    # This sets all custodian running parameters.
- custodian_params:
- max_errors: 10
- scratch_dir: /tmp
- gzipped_output: True
- checkpoint: True
-
-You can then run the job using the following command::
-
- cstdn run
-
-API/Reference Docs
-==================
-
-The API docs are generated using Sphinx auto-doc and outline the purpose of all
-modules and classes, and the expected arguments and returned objects for most
-methods. They are available at the link below.
-
-:doc:`custodian API docs `
-
-How to cite custodian
-=====================
-
-If you use custodian in your research, especially the VASP component, please
-consider citing the following work::
-
- Shyue Ping Ong, William Davidson Richards, Anubhav Jain, Geoffroy Hautier,
- Michael Kocher, Shreyas Cholia, Dan Gunter, Vincent Chevrier, Kristin A.
- Persson, Gerbrand Ceder. *Python Materials Genomics (pymatgen) : A Robust,
- Open-Source Python Library for Materials Analysis.* Computational
- Materials Science, 2013, 68, 314–319. `doi:10.1016/j.commatsci.2012.10.028
- `_
-
-License
-=======
-
-Custodian is released under the MIT License. The terms of the license are as
-follows::
-
- The MIT License (MIT)
- Copyright (c) 2011-2012 MIT & LBNL
-
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the "Software")
- , to deal in the Software without restriction, including without limitation
- the rights to use, copy, modify, merge, publish, distribute, sublicense,
- and/or sell copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- DEALINGS IN THE SOFTWARE.
-
-.. _`pymatgen's documentation`: http://pymatgen.org
-.. _`Materials Project`: https://www.materialsproject.org
-.. _`pymatgen`: https://pypi.python.org/pypi/pymatgen
diff --git a/docs/_sources/modules.rst.txt b/docs/_sources/modules.rst.txt
deleted file mode 100644
index c241b4a4..00000000
--- a/docs/_sources/modules.rst.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-custodian
-=========
-
-.. toctree::
- :maxdepth: 6
-
- custodian
diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js
index 92680bf2..584dbda4 100644
--- a/docs/_static/documentation_options.js
+++ b/docs/_static/documentation_options.js
@@ -1,6 +1,6 @@
var DOCUMENTATION_OPTIONS = {
URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
- VERSION: '2019.8.23',
+ VERSION: '2019.8.24',
LANGUAGE: 'None',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
diff --git a/docs/_themes/README.html b/docs/_themes/README.html
index 055d525d..5928d650 100644
--- a/docs/_themes/README.html
+++ b/docs/_themes/README.html
@@ -4,7 +4,7 @@
- krTheme Sphinx Style — custodian 2019.8.23 documentation
+ krTheme Sphinx Style — custodian 2019.8.24 documentation
@@ -37,7 +37,7 @@
Optimize a structure and calculate vibrational frequencies to check if the
structure is in a true minimum. If a frequency is negative, iteratively
perturb the geometry, optimize, and recalculate frequencies until all are
@@ -258,6 +258,11 @@
Corrects for total drift exceeding the force convergence criteria.
Initializes the handler with max drift
-:param max_drift: This defines the max drift. Leaving this at the default of None gets the max_drift from EDFIFFG
-:type max_drift: float
+:param max_drift: This defines the max drift. Leaving this at the default of None gets the max_drift from
+    EDFIFFG
+:type max_drift: float
-# coding: utf-8
-
-from __future__ import unicode_literals, division, print_function
-import os
-import shutil
-
-"""
-This module defines various classes of supported actions. All actions are
-implemented as static methods, but are defined using classes (as opposed to
-modules) so that a set of well-defined actions can be namespaced easily.
-"""
-
-__author__ = "Shyue Ping Ong"
-__copyright__ = "Copyright 2012, The Materials Project"
-__version__ = "0.1"
-__maintainer__ = "Shyue Ping Ong"
-__email__ = "ongsp@ucsd.edu"
-__date__ = "Jun 2, 2012"
-
-
-class DictActions(object):
- """
- Class to implement the supported mongo-like modifications on a dict.
- Supported keywords include the following Mongo-based keywords, with the
- usual meanings (refer to Mongo documentation for information):
-
- _inc
- _set
- _unset
- _push
- _push_all
- _add_to_set (but _each is not supported)
- _pop
- _pull
- _pull_all
- _rename
-
- However, note that "_set" does not support modification of nested dicts
- using the mongo {"a.b":1} notation. This is because mongo does not allow
- keys with "." to be inserted. Instead, nested dict modification is
- supported using a special "->" keyword, e.g. {"a->b": 1}
- """
-
-
-    @staticmethod
-    def add_to_set(input_dict, settings):
-        for k, v in settings.items():
-            (d, key) = get_nested_dict(input_dict, k)
-            if key in d and (not isinstance(d[key], list)):
-                raise ValueError("Keyword {} does not refer to an array."
-                                 .format(k))
-            if key in d and v not in d[key]:
-                d[key].append(v)
-            elif key not in d:
-                d[key] = v
-
-    @staticmethod
-    def pull(input_dict, settings):
-        for k, v in settings.items():
-            (d, key) = get_nested_dict(input_dict, k)
-            if key in d and (not isinstance(d[key], list)):
-                raise ValueError("Keyword {} does not refer to an array."
-                                 .format(k))
-            if key in d:
-                d[key] = [i for i in d[key] if i != v]
-
-    @staticmethod
-    def pull_all(input_dict, settings):
-        for k, v in settings.items():
-            if k in input_dict and (not isinstance(input_dict[k], list)):
-                raise ValueError("Keyword {} does not refer to an array."
-                                 .format(k))
-            for i in v:
-                DictActions.pull(input_dict, {k: i})
-
-    @staticmethod
-    def pop(input_dict, settings):
-        for k, v in settings.items():
-            (d, key) = get_nested_dict(input_dict, k)
-            if key in d and (not isinstance(d[key], list)):
-                raise ValueError("Keyword {} does not refer to an array."
-                                 .format(k))
-            if v == 1:
-                d[key].pop()
-            elif v == -1:
-                d[key].pop(0)
-
-
-class FileActions(object):
-    """
-    Class of supported file actions. For FileActions, the modder class takes in
-    a filename as a string. The filename should preferably be a full path to
-    avoid ambiguity.
-    """
-
-    @staticmethod
-    def file_create(filename, settings):
-        """
-        Creates a file.
-
-        Args:
-            filename (str): Filename.
-            settings (dict): Must be {"content": actual_content}
-        """
-        if len(settings) != 1:
-            raise ValueError("Settings must only contain one item with key "
-                             "'content'.")
-        for k, v in settings.items():
-            if k == "content":
-                with open(filename, 'w') as f:
-                    f.write(v)
-
-    @staticmethod
-    def file_move(filename, settings):
-        """
-        Moves a file. {'_file_move': {'dest': 'new_file_name'}}
-
-        Args:
-            filename (str): Filename.
-            settings (dict): Must be {"dest": path of new file}
-        """
-        if len(settings) != 1:
-            raise ValueError("Settings must only contain one item with key "
-                             "'dest'.")
-        for k, v in settings.items():
-            if k == "dest":
-                shutil.move(filename, v)
-
-    @staticmethod
-    def file_delete(filename, settings):
-        """
-        Deletes a file. {'_file_delete': {'mode': "actual"}}
-
-        Args:
-            filename (str): Filename.
-            settings (dict): Must be {"mode": actual/simulated}. Simulated
-                mode only prints the action without performing it.
-        """
-        if len(settings) != 1:
-            raise ValueError("Settings must only contain one item with key "
-                             "'mode'.")
-        for k, v in settings.items():
-            if k == "mode" and v == "actual":
-                try:
-                    os.remove(filename)
-                except OSError:
-                    # Skip file not found error.
-                    pass
-            elif k == "mode" and v == "simulated":
-                print("Simulated removal of {}".format(filename))
-
-    @staticmethod
-    def file_copy(filename, settings):
-        """
-        Copies a file. {'_file_copy': {'dest': 'new_file_name'}}
-
-        Args:
-            filename (str): Filename.
-            settings (dict): Must be {"dest": path of new file}
-        """
-        for k, v in settings.items():
-            if k.startswith("dest"):
-                shutil.copyfile(filename, v)
-# coding: utf-8
-
-from __future__ import unicode_literals, division
-
-"""
-This module implements a Modder class that performs modifications on objects
-using supported actions.
-"""
-
-
-__author__ = "Shyue Ping Ong"
-__copyright__ = "Copyright 2012, The Materials Project"
-__version__ = "0.1"
-__maintainer__ = "Shyue Ping Ong"
-__email__ = "ongsp@ucsd.edu"
-__date__ = "Jun 1, 2012"
-
-
-import re
-
-from custodian.ansible.actions import DictActions
-
-
-class Modder(object):
- """
- Class to modify a dict/file/any object using a mongo-like language.
- Keywords are mostly adopted from mongo's syntax, but instead of $, an
- underscore precedes action keywords. This is so that the modification can
- be inserted into a mongo db easily.
-
- Allowable actions are supplied as a list of classes as an argument. Refer
-    to the action classes on what the actions do. Action classes are in
-    custodian.ansible.actions.
-
- Examples:
- >>> modder = Modder()
- >>> d = {"Hello": "World"}
- >>> mod = {'_set': {'Hello':'Universe', 'Bye': 'World'}}
- >>> modder.modify(mod, d)
- >>> d['Bye']
- 'World'
- >>> d['Hello']
- 'Universe'
- """
-    def __init__(self, actions=None, strict=True):
- """
- Initializes a Modder from a list of supported actions.
-
- Args:
- actions ([Action]): A sequence of supported actions. See
- :mod:`custodian.ansible.actions`. Default is None,
- which means only DictActions are supported.
- strict (bool): Indicating whether to use strict mode. In non-strict
- mode, unsupported actions are simply ignored without any
- errors raised. In strict mode, if an unsupported action is
- supplied, a ValueError is raised. Defaults to True.
- """
-        self.supported_actions = {}
-        actions = actions if actions is not None else [DictActions]
-        for action in actions:
-            for i in dir(action):
-                if (not re.match('__\w+__', i)) and \
-                        callable(getattr(action, i)):
-                    self.supported_actions["_" + i] = getattr(action, i)
-        self.strict = strict
-
-
-    def modify(self, modification, obj):
- """
- Note that modify makes actual in-place modifications. It does not
- return a copy.
-
- Args:
- modification (dict): Modification must be {action_keyword :
- settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
- obj (dict/str/object): Object to modify depending on actions. For
- example, for DictActions, obj will be a dict to be modified.
- For FileActions, obj will be a string with a full pathname to a
- file.
- """
-        for action, settings in modification.items():
-            if action in self.supported_actions:
-                self.supported_actions[action].__call__(obj, settings)
-            elif self.strict:
-                raise ValueError("{} is not a supported action!"
-                                 .format(action))
-
-
-    def modify_object(self, modification, obj):
- """
- Modify an object that supports pymatgen's as_dict() and from_dict API.
-
- Args:
- modification (dict): Modification must be {action_keyword :
- settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
- obj (object): Object to modify
- """
-        d = obj.as_dict()
-        self.modify(modification, d)
-        return obj.from_dict(d)
-
-
-if__name__=="__main__":
- importargparse
-
- parser=argparse.ArgumentParser(description="""
- converge_geometry performs a geometry optimization. What this script will do
- is run a particular VASP relaxation repeatedly until the geometry
- is converged within the first ionic step. This is a common practice for
- converging molecular geometries in VASP, especially in situations where
- the geometry needs to be precise: such as frequency calculations.
- """,
- epilog="""
- Author: Stephen Dacek
- Version: {}
- Last updated: {}""".format(__version__,__date__))
-
-    parser.add_argument(
-        "-c", "--command", dest="command", nargs="?",
-        default="pvasp", type=str,
-        help="VASP command. Defaults to pvasp. If you are using mpirun, "
-             "set this to something like \"mpirun pvasp\".")
-
-    parser.add_argument(
-        "-z", "--gzip", dest="gzip", action="store_true",
-        help="Add this option to gzip the final output. Do not gzip if you "
-             "are going to perform an additional static run.")
-
-    parser.add_argument(
-        "-mr", "--max_relaxtions", dest="max_relax",
-        default=10, type=int,
-        help="Maximum number of relaxations to allow")
-
-    args = parser.parse_args()
-    do_run(args)
-
-def main():
-    import argparse
-    parser = argparse.ArgumentParser(description="""
- converge_kpoints perform a KPOINTS convergence. What this script will do
- is to run a particular VASP run with increasing multiples of the initial
- KPOINT grid until a target convergence in energy per atom is reached.
- For example, let's say you have vasp input files that has a k-point grid
- of 1x1x1. This script will perform sequence jobs with k-point grids of
- 1x1x1, 2x2x2, 3x3x3, 4x4x4, ... until convergence is achieved. The
- default convergence criteria is 1meV/atom, but this can be set using the
- --target option.
- """,
- epilog="""
- Author: Shyue Ping Ong
- Version: {}
- Last updated: {}""".format(__version__,__date__))
-
-    parser.add_argument(
-        "-c", "--command", dest="command", nargs="?",
-        default="pvasp", type=str,
-        help="VASP command. Defaults to pvasp. If you are using mpirun, "
-             "set this to something like \"mpirun pvasp\".")
-
-    parser.add_argument(
-        "-i", "--increment_mode", dest="mode", nargs="?",
-        default="linear", type=str, choices=["linear", "inc"],
-        help="Mode for increasing kpoints. In linear mode, multiples of "
-             "the existing kpoints are done. E.g., 2x4x2 -> 4x8x4 -> 6x12x6. "
-             "In inc mode, all KPOINTS are incremented by 1 at each stage, "
-             "i.e., 2x4x2 -> 3x5x3 -> 4x6x4. Note that the latter mode does "
-             "not preserve KPOINTS symmetry, though it is probably less "
-             "expensive.")
-
-    parser.add_argument(
-        "-m", "--max_steps", dest="max_steps", nargs="?",
-        default=10, type=int,
-        help="The maximum number of KPOINTS increment steps. This puts an "
-             "upper bound on the largest KPOINT convergence grid attempted.")
-
-    parser.add_argument(
-        "-t", "--target", dest="target", nargs="?",
-        default=0.001, type=float,
-        help="The target convergence in energy per atom. E.g., 1e-3 means "
-             "the KPOINTS will be increased until a convergence of 1meV is "
-             "reached.")
-
-    args = parser.parse_args()
-    do_run(args)
-#!/usr/bin/env python
-# Copyright (c) Materials Virtual Lab.
-# Distributed under the terms of the BSD License.
-
-from __future__ import division, unicode_literals, print_function
-
-import argparse
-
-import sys
-from monty.serialization import loadfn
-
-from custodian.custodian import Custodian
-import logging
-
-
-example_yaml = """
-# This is an example of a Custodian yaml spec file. It shows how you can specify
-# a double relaxation followed by a static calculation. Minor modifications
-# would allow very customizable calculations, though this is obviously not meant
-# for highly complex workflows. For those, you will need to code and usage of
-# FireWorks is highly recommended.
-
-
-# Specifies a list of jobs to run.
-# Each job is specified by a `jb: <full class path>` with parameters specified
-# via the params dict.
-
-jobs:
-- jb: custodian.vasp.jobs.VaspJob
- params:
- final: False
- suffix: .relax1
-- jb: custodian.vasp.jobs.VaspJob
- params:
- final: False
- suffix: .relax2
- settings_override:
- - {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}
-- jb: custodian.vasp.jobs.VaspJob
- params:
- final: True
- suffix: .static3
- settings_override:
- - {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}
- - {"dict": "INCAR", "action": {"_set": {"NSW": 0}}}
-
-
-# This key specifies parameters common to all jobs.
-# Keys starting with $ are expanded to the environmental values.
-# The example below means the parameter vasp_cmd is set to the value with
-# $PBS_NODEFILE expanded.
-
-jobs_common_params:
- $vasp_cmd: ["mpirun", "-machinefile", "$PBS_NODEFILE", "-np", "24", "vasp"]
-
-
-# Specifies a list of error handlers in the same format as jobs.
-handlers:
-- hdlr: custodian.vasp.handlers.VaspErrorHandler
-- hdlr: custodian.vasp.handlers.AliasingErrorHandler
-- hdlr: custodian.vasp.handlers.MeshSymmetryErrorHandler
-
-
-# Specifies a list of error handlers in the same format as jobs.
-validators:
-- vldr: custodian.vasp.validators.VasprunXMLValidator
-
-
-# This sets all custodian running parameters.
-custodian_params:
- max_errors: 10
- scratch_dir: /tmp
- gzipped_output: True
- checkpoint: True
-"""
-
-
-
-def main():
-    import argparse
-    parser = argparse.ArgumentParser(description="""
- run_nwchem is a master script to perform various kinds of Nwchem runs.
- """,
- epilog="""
- Author: Shyue Ping Ong
- Version: {}
- Last updated: {}""".format(__version__,__date__))
-
-    parser.add_argument(
-        "-c", "--command", dest="command", nargs="?",
-        default="nwchem", type=str,
-        help="Nwchem command. Defaults to nwchem. If you are using mpirun, "
-             "set this to something like \"mpirun nwchem\".")
-
-    parser.add_argument(
-        "-s", "--scratch", dest="scratch", nargs="?",
-        default=None, type=str,
-        help="Scratch directory to perform run in. Specify the root scratch "
-             "directory as the code will automatically create a temporary "
-             "subdirectory to run the job.")
-
-    parser.add_argument(
-        "-i", "--infile", dest="infile", nargs="?", default="mol.nw",
-        type=str, help="Input filename.")
-
-    parser.add_argument(
-        "-o", "--output", dest="outfile", nargs="?", default="mol.nwout",
-        type=str, help="Output filename.")
-
-    parser.add_argument(
-        "-z", "--gzip", dest="gzip", action="store_true",
-        help="Add this option to gzip the final output. Do not gzip if you "
-             "are going to perform an additional static run.")
-
-    args = parser.parse_args()
-    do_run(args)
-def main():
-    import argparse
-    parser = argparse.ArgumentParser(description="""
- run_vasp is a master script to perform various kinds of VASP runs.
- """,
- epilog="""
- Author: Shyue Ping Ong
- Version: {}
- Last updated: {}""".format(__version__,__date__))
-
- parser.add_argument(
- "-c","--command",dest="command",nargs="?",
- default="pvasp",type=str,
- help="VASP command. Defaults to pvasp. If you are using mpirun, "
- "set this to something like \"mpirun pvasp\".")
-
- parser.add_argument(
- "--no_auto_npar",action="store_true",
- help="Set to true to turn off auto_npar. Useful for certain machines "
- "and calculations where you want absolute control.")
-
-
- parser.add_argument(
- "-z","--gzip",dest="gzip",action="store_true",
- help="Add this option to gzip the final output. Do not gzip if you "
- "are going to perform an additional static run."
- )
-
- parser.add_argument(
- "-s","--scratch",dest="scratch",nargs="?",
- default=None,type=str,
- help="Scratch directory to perform run in. Specify the root scratch "
- "directory as the code will automatically create a temporary "
- "subdirectory to run the job.")
-
- parser.add_argument(
- "-ks","--kpoint-static",dest="static_kpoint",nargs="?",
- default=1,type=int,
- help="The multiplier to use for the KPOINTS of a static run (if "
- "any). For example, setting this to 2 means that if your "
- "original run was done using a k-point grid of 2x3x3, "
- "the static run will be done with a k-point grid of 4x6x6. This "
- "defaults to 1, i.e., static runs are performed with the same "
- "k-point grid as relaxation runs."
- )
-
- parser.add_argument(
- "-me","--max-errors",dest="max_errors",nargs="?",
- default=10,type=int,
- help="Maximum number of errors to allow before quitting")
-
- parser.add_argument(
- "-hd","--handlers",dest="handlers",nargs="+",
- default=["VaspErrorHandler","MeshSymmetryErrorHandler",
- "UnconvergedErrorHandler","NonConvergingErrorHandler",
- "PotimErrorHandler"],type=str,
- help="The ErrorHandlers to use specified as string class names, "
- "with optional arguments specified as a url-like string. For "
- "example, VaspErrorHandler?output_filename=myfile.out specifies a "
- "VaspErrorHandler with output_name set to myfile.out. Multiple "
- "arguments are joined by a comma. E.g., MyHandler?myfile=a,"
- "data=1. The arguments are deserialized using yaml."
- )
-
- parser.add_argument(
- "-vd","--validators",dest="validators",nargs="+",
- default=["VasprunXMLValidator"],type=str,
- help="The Validators to use specified as string class names, "
- "with optional arguments specified as a url-like string. For "
- "example, VaspErrorHandler?output_filename=myfile.out specifies a "
- "VaspErrorHandler with output_name set to myfile.out. Multiple "
- "arguments are joined by a comma. E.g., MyHandler?myfile=a,"
- "data=1. The arguments are deserialized using yaml."
- )
-
- parser.add_argument(
- "jobs",metavar="jobs",type=str,nargs='+',
- default=["relax","relax"],
- help="Jobs to execute. Only sequences of relax, "
- "quickrelax, static, rampU, full_relax, static_derived, "
- "nonscf_derived, optics_derived are "
- "supported at the moment. For example, \"relax relax static\" "
- "will run a double relaxation followed by a static "
- "run. By default, suffixes are given sequential numbering,"
- "but this can be overrridden by adding a number to the job"
- "type, e.g. relax5 relax6 relax7")
-
- args=parser.parse_args()
- do_run(args)
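-
-# Hypothetical invocation, assuming the console script is installed as
-# "run_vasp"; this performs a double relaxation followed by a static run:
-#
-#     run_vasp -c "mpirun -np 24 pvasp" relax relax static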
-# coding: utf-8
-
-from__future__importunicode_literals,division
-
-importlogging
-importsubprocess
-importsys
-importdatetime
-importtime
-fromglobimportglob
-importtarfile
-importos
-fromabcimportABCMeta,abstractmethod
-fromitertoolsimportislice
-importwarnings
-frompprintimportpformat
-fromastimportliteral_eval
-
-importsix
-
-from.utilsimportget_execution_host_info
-
-frommonty.tempfileimportScratchDir
-frommonty.shutilimportgzip_dir
-frommonty.jsonimportMSONable,MontyEncoder,MontyDecoder
-frommonty.serializationimportloadfn,dumpfn
-
-"""
-This module implements the main Custodian class, which manages a list of jobs
-given a set of error handlers, as well as the abstract base classes for
-ErrorHandlers and Jobs.
-"""
-
-__author__="Shyue Ping Ong, William Davidson Richards"
-__copyright__="Copyright 2012, The Materials Project"
-__version__="0.2"
-__maintainer__="Shyue Ping Ong"
-__email__="ongsp@ucsd.edu"
-__date__="Sep 17 2014"
-
-
-logger=logging.getLogger(__name__)
-
-if"SENTRY_DSN"inos.environ:
- # Sentry.io is a service to aggregate logs remotely, this is useful
- # for Custodian to get statistics on which errors are most common.
- # If you do not have a SENTRY_DSN environment variable set, Sentry
- # will not be used.
-
- importsentry_sdk
- sentry_sdk.init(dsn=os.environ["SENTRY_DSN"])
-
- withsentry_sdk.configure_scope()asscope:
-
- fromgetpassimportgetuser
- try:
- scope.user={"username":getuser()}
- except:
- pass
-
-
-
-# Sentry.io is a service to aggregate logs remotely, this is useful
-# for Custodian to get statistics on which errors are most common.
-# If you do not have a SENTRY_DSN environment variable set, or do
-# not have CUSTODIAN_ERROR_REPORTING_OPT_IN set to True, then
-# Sentry will not be enabled.
-
-SENTRY_DSN=None
-if"SENTRY_DSN"inos.environ:
- SENTRY_DSN=os.environ["SENTRY_DSN"]
-elif"CUSTODIAN_REPORTING_OPT_IN"inos.environ:
- # check for environment variable to automatically set SENTRY_DSN
- # will set for True, true, TRUE, etc.
- ifliteral_eval(os.environ.get("CUSTODIAN_REPORTING_OPT_IN","False").title()):
- SENTRY_DSN="https://0f7291738eb042a3af671df9fc68ae2a@sentry.io/1470881"
-
-ifSENTRY_DSN:
-
- importsentry_sdk
- sentry_sdk.init(dsn=SENTRY_DSN)
-
- withsentry_sdk.configure_scope()asscope:
-
- fromgetpassimportgetuser
- try:
- scope.user={"username":getuser()}
- except Exception:
- pass
-
- importsocket
- scope.set_tag("hostname",socket.gethostname())
-
-
-
[docs]classCustodian(object):
- """
- The Custodian class is the manager for a list of jobs given a list of
- error handlers. The way it works is as follows:
-
- 1. Let's say you have defined a list of jobs as [job1, job2, job3, ...] and
- you have defined a list of possible error handlers as [err1, err2, ...]
- 2. Custodian will run the jobs in the order of job1, job2, ... During each
- job, custodian will monitor for errors using the handlers that have
- is_monitor == True. If an error is detected, corrective measures are
- taken and the particular job is rerun.
- 3. At the end of each individual job, Custodian will run through the list of
- error handlers that have is_monitor == False. If an error is detected,
- corrective measures are taken and the particular job is rerun.
-
- .. attribute: max_errors
-
- Maximum number of errors allowed.
-
- .. attribute: handlers
-
- All error handlers (including monitors). All error handlers are used
- to check for errors at the end of a run.
-
- .. attribute: monitors
-
- Error handlers that are Monitors, i.e., handlers that monitor a job
- as it is being run.
-
- .. attribute: polling_time_step
-
- The length of time in seconds between steps in which a job is
- checked for completion.
-
- .. attribute: monitor_freq
-
- The number of polling steps before monitoring occurs. For example,
- if you have a polling_time_step of 10 seconds and a monitor_freq of
- 30, this means that Custodian uses the monitors to check for errors
- every 30 x 10 = 300 seconds, i.e., 5 minutes.
- """
- LOG_FILE="custodian.json"
-
- def__init__(self,handlers,jobs,validators=None,
- max_errors_per_job=None,
- max_errors=1,polling_time_step=10,monitor_freq=30,
- skip_over_errors=False,scratch_dir=None,
- gzipped_output=False,checkpoint=False,terminate_func=None,
- terminate_on_nonzero_returncode=True):
- """
- Initializes a Custodian from a list of jobs and error handlers.
-
- Args:
- handlers ([ErrorHandler]): Error handlers. In order of priority of
- fixing.
- jobs ([Job]): Sequence of Jobs to be run. Note that this can be
- any sequence or even a generator yielding jobs.
- validators([Validator]): Validators to ensure job success
- max_errors_per_job (int): Maximum number of errors per job allowed
- before exiting. Defaults to None, which means it is set to be
- equal to max_errors.
- max_errors (int): Maximum number of total errors allowed before
- exiting. Defaults to 1.
- polling_time_step (int): The length of time in seconds between
- steps in which a job is checked for completion. Defaults to
- 10 secs.
- monitor_freq (int): The number of polling steps before monitoring
- occurs. For example, if you have a polling_time_step of 10
- seconds and a monitor_freq of 30, this means that Custodian
- uses the monitors to check for errors every 30 x 10 = 300
- seconds, i.e., 5 minutes.
- skip_over_errors (bool): If set to True, custodian will skip over
- error handlers that failed (raised an Exception of some sort).
- Otherwise, custodian will simply exit on unrecoverable errors.
- The former will lead to potentially more robust performance,
- but may make it difficult to improve handlers. The latter
- will allow one to catch potentially bad error handler
- implementations. Defaults to False.
- scratch_dir (str): If this is set, any files in the current
- directory are copied to a temporary directory in a scratch
- space first before any jobs are performed, and moved back to
- the current directory upon completion of all jobs. This is
- useful in some setups where a scratch partition has much
- faster IO. To use this, set scratch_dir=root of directory you
- want to use for runs. There is no need to provide unique
- directory names; we will use python's tempfile creation
- mechanisms. A symbolic link is created during the course of
- the run in the working directory called "scratch_link" as
- users may want to sometimes check the output during the
- course of a run. If this is None (the default), the run is
- performed in the current working directory.
- gzipped_output (bool): Whether to gzip the final output to save
- space. Defaults to False.
- checkpoint (bool): Whether to checkpoint after each successful Job.
- Checkpoints are stored as custodian.chk.#.tar.gz files. Defaults
- to False.
- terminate_func (callable): A function to be called to terminate a
- running job. If None, the default is to call Popen.terminate.
- terminate_on_nonzero_returncode (bool): If True, a non-zero return
- code on any Job will result in a termination. Defaults to True.
- """
- self.max_errors=max_errors
- self.max_errors_per_job=max_errors_per_jobormax_errors
- self.jobs=jobs
- self.handlers=handlers
- self.validators=validatorsor[]
- self.monitors=[hforhinhandlersifh.is_monitor]
- self.polling_time_step=polling_time_step
- self.monitor_freq=monitor_freq
- self.skip_over_errors=skip_over_errors
- self.scratch_dir=scratch_dir
- self.gzipped_output=gzipped_output
- self.checkpoint=checkpoint
- cwd=os.getcwd()
- ifself.checkpoint:
- self.restart,self.run_log=Custodian._load_checkpoint(cwd)
- else:
- self.restart=0
- self.run_log=[]
- self.errors_current_job=0
- self.total_errors=0
- self.terminate_func=terminate_func
- self.terminate_on_nonzero_returncode=terminate_on_nonzero_returncode
- self.finished=False
-
- @staticmethod
- def_load_checkpoint(cwd):
- restart=0
- run_log=[]
- chkpts=glob(os.path.join(cwd,"custodian.chk.*.tar.gz"))
- ifchkpts:
- chkpt=sorted(chkpts,key=lambdac:int(c.split(".")[-3]))[0]
- restart=int(chkpt.split(".")[-3])
- logger.info("Loading from checkpoint file {}...".format(chkpt))
- t=tarfile.open(chkpt)
- t.extractall()
- # Log the corrections to a json file.
- run_log=loadfn(Custodian.LOG_FILE,cls=MontyDecoder)
-
- returnrestart,run_log
-
- @staticmethod
- def_delete_checkpoints(cwd):
- forfinglob(os.path.join(cwd,"custodian.chk.*.tar.gz")):
- os.remove(f)
-
- @staticmethod
- def_save_checkpoint(cwd,index):
- try:
- Custodian._delete_checkpoints(cwd)
- n=os.path.join(cwd,"custodian.chk.{}.tar.gz".format(index))
- withtarfile.open(n,mode="w:gz",compresslevel=3)asf:
- f.add(cwd,arcname='.')
- logger.info("Checkpoint written to {}".format(n))
- exceptExceptionasex:
- logger.info("Checkpointing failed")
- importtraceback
- logger.error(traceback.format_exc())
-
-
[docs]@classmethod
- deffrom_spec(cls,spec):
- """
- Load a Custodian instance where the jobs are specified from a
- structure and a spec dict. This allows simple
- custom job sequences to be constructed quickly via a YAML file.
-
- Args:
- spec (dict): A dict specifying the job sequence. A sample of the dict in
- YAML format for the usual MP workflow is given as follows
-
- ```
- jobs:
- - jb: custodian.vasp.jobs.VaspJob
- params:
- final: False
- suffix: .relax1
- - jb: custodian.vasp.jobs.VaspJob
- params:
- final: True
- suffix: .relax2
- settings_override:
- - {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}
- jobs_common_params:
- vasp_cmd: /opt/vasp
- handlers:
- - hdlr: custodian.vasp.handlers.VaspErrorHandler
- - hdlr: custodian.vasp.handlers.AliasingErrorHandler
- - hdlr: custodian.vasp.handlers.MeshSymmetryErrorHandler
- validators:
- - vldr: custodian.vasp.validators.VasprunXMLValidator
- custodian_params:
- scratch_dir: /tmp
- ```
-
- The `jobs` key is a list of jobs. Each job is
- specified via `jb`: <full class path>, and all parameters are
- specified via `params`, which is a dict.
-
- `jobs_common_params` specifies a common set of parameters that is
- passed to all jobs, e.g., vasp_cmd.
-
- Returns:
- Custodian instance.
- """
-
- dec=MontyDecoder()
-
- defload_class(dotpath):
- modname,classname=dotpath.rsplit(".",1)
- mod=__import__(modname,globals(),locals(),[classname],0)
- returngetattr(mod,classname)
-
- defprocess_params(d):
- decoded={}
- fork,vind.items():
- ifk.startswith("$"):
- ifisinstance(v,list):
- v=[os.path.expandvars(i)foriinv]
- elifisinstance(v,dict):
- v={k2:os.path.expandvars(v2)fork2,v2inv.items()}
- else:
- v=os.path.expandvars(v)
- decoded[k.strip("$")]=dec.process_decoded(v)
- returndecoded
-
- jobs=[]
- common_params=process_params(spec.get("jobs_common_params",{}))
-
- fordinspec["jobs"]:
- cls_=load_class(d["jb"])
- params=process_params(d.get("params",{}))
- params.update(common_params)
- jobs.append(cls_(**params))
-
- handlers=[]
- fordinspec.get("handlers",[]):
- cls_=load_class(d["hdlr"])
- params=process_params(d.get("params",{}))
- handlers.append(cls_(**params))
-
- validators=[]
- fordinspec.get("validators",[]):
- cls_=load_class(d["vldr"])
- params=process_params(d.get("params",{}))
- validators.append(cls_(**params))
-
- custodian_params=process_params(spec.get("custodian_params",{}))
-
- returncls(jobs=jobs,handlers=handlers,validators=validators,
- **custodian_params)
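-
- # Illustrative sketch (not in the original source) of the "$" convention
- # implemented by process_params above: keys prefixed with "$" have
- # environment variables expanded in their values, and the stripped key is
- # then passed to the job constructor. The PBS_NODEFILE value is assumed:
- #
- #     process_params({"$vasp_cmd": ["mpirun", "-machinefile",
- #                                   "$PBS_NODEFILE", "vasp"]})
- #     # -> {"vasp_cmd": ["mpirun", "-machinefile", "/tmp/nodes", "vasp"]}
- #     # (assuming PBS_NODEFILE is set to /tmp/nodes)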
-
-
[docs]defrun(self):
- """
- Runs all jobs.
-
- Returns:
- All errors encountered as a list of lists.
- [[error_dicts for job 1], [error_dicts for job 2], ....]
-
- Raises:
- ValidationError: if a job fails validation
- ReturnCodeError: if the process has a return code different from 0
- NonRecoverableError: if an unrecoverable error occurs
- MaxCorrectionsPerJobError: if max_errors_per_job is reached
- MaxCorrectionsError: if max_errors is reached
- MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
- """
- cwd=os.getcwd()
-
- withScratchDir(self.scratch_dir,create_symbolic_link=True,
- copy_to_current_on_exit=True,
- copy_from_current_on_enter=True)astemp_dir:
- self.total_errors=0
- start=datetime.datetime.now()
- logger.info("Run started at {} in {}.".format(
- start,temp_dir))
- v=sys.version.replace("\n"," ")
- logger.info("Custodian running on Python version {}".format(v))
- logger.info("Hostname: {}, Cluster: {}".format(
- *get_execution_host_info()))
-
- try:
- # skip jobs until the restart
- forjob_n,jobinislice(enumerate(self.jobs,1),
- self.restart,None):
- self._run_job(job_n,job)
- # We do a dump of the run log after each job.
- dumpfn(self.run_log,Custodian.LOG_FILE,cls=MontyEncoder,
- indent=4)
- # Checkpoint after each job so that we can recover from last
- # point and remove old checkpoints
- ifself.checkpoint:
- self.restart=job_n
- Custodian._save_checkpoint(cwd,job_n)
- exceptCustodianErrorasex:
- logger.error(ex.message)
- ifex.raises:
- raise
- finally:
- # Log the corrections to a json file.
- logger.info("Logging to {}...".format(Custodian.LOG_FILE))
- dumpfn(self.run_log,Custodian.LOG_FILE,cls=MontyEncoder,
- indent=4)
- end=datetime.datetime.now()
- logger.info("Run ended at {}.".format(end))
- run_time=end-start
- logger.info("Run completed. Total time taken = {}."
- .format(run_time))
- ifself.gzipped_output:
- gzip_dir(".")
-
- # Cleanup checkpoint files (if any) if run is successful.
- Custodian._delete_checkpoints(cwd)
-
- returnself.run_log
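-
- # Minimal usage sketch for run(); the job and handler choices here are
- # illustrative, and "vasp" is assumed to be an executable on the PATH:
- #
- #     from custodian.vasp.jobs import VaspJob
- #     from custodian.vasp.handlers import VaspErrorHandler
- #
- #     c = Custodian([VaspErrorHandler()], [VaspJob(["vasp"])], max_errors=5)
- #     run_log = c.run()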
-
- def_run_job(self,job_n,job):
- """
- Runs a single job.
-
- Args:
- job_n: job number (1 index)
- job: Custodian job
-
-
- Raises:
- ValidationError: if a job fails validation
- ReturnCodeError: if the process has a return code different from 0
- NonRecoverableError: if an unrecoverable error occurs
- MaxCorrectionsPerJobError: if max_errors_per_job is reached
- MaxCorrectionsError: if max_errors is reached
- MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
- """
- self.run_log.append({"job":job.as_dict(),"corrections":[],
- "handler":None,"validator":None,
- "max_errors":False,"max_errors_per_job":False,
- "max_errors_per_handler":False,
- "nonzero_return_code":False})
- self.errors_current_job=0
- # reset the counters of the number of times a correction has been
- # applied for each handler
- forhinself.handlers:
- h.n_applied_corrections=0
-
- job.setup()
-
- attempt=0
- while(self.total_errors<self.max_errorsand
- self.errors_current_job<self.max_errors_per_job):
- attempt+=1
- logger.info(
- "Starting job no. {} ({}) attempt no. {}. Total errors and "
- "errors in job thus far = {}, {}.".format(
- job_n,job.name,attempt,self.total_errors,
- self.errors_current_job))
-
- p=job.run()
- # Check for errors using the error handlers and perform
- # corrections.
- has_error=False
- zero_return_code=True
-
- # While the job is running, we use the handlers that are
- # monitors to monitor the job.
- ifisinstance(p,subprocess.Popen):
- ifself.monitors:
- n=0
- whileTrue:
- n+=1
- time.sleep(self.polling_time_step)
- ifp.poll()isnotNone:
- break
- terminate=self.terminate_funcorp.terminate
- ifn%self.monitor_freq==0:
- has_error=self._do_check(self.monitors,
- terminate)
- ifterminateisnotNoneandterminate!=p.terminate:
- time.sleep(self.polling_time_step)
- else:
- p.wait()
- ifself.terminate_funcisnotNoneand \
- self.terminate_func!=p.terminate:
- self.terminate_func()
- time.sleep(self.polling_time_step)
-
- zero_return_code=p.returncode==0
-
- logger.info("{}.run has completed. "
- "Checking remaining handlers".format(job.name))
- # Check for errors again, since in some cases non-monitor
- # handlers fix the problems detected by monitors
- # if an error has been found, not all handlers need to run
- ifhas_error:
- self._do_check([hforhinself.handlers
- ifnoth.is_monitor])
- else:
- has_error=self._do_check(self.handlers)
-
- ifhas_error:
- # This makes sure the job is killed cleanly for certain systems.
- job.terminate()
-
- # If there are no errors detected, perform
- # postprocessing and exit.
- ifnothas_error:
- forvinself.validators:
- ifv.check():
- self.run_log[-1]["validator"]=v
- s="Validation failed: {}".format(v.__class__.__name__)
- raiseValidationError(s,True,v)
- ifnotzero_return_code:
- ifself.terminate_on_nonzero_returncode:
- self.run_log[-1]["nonzero_return_code"]=True
- s="Job return code is %d. Terminating..."% \
- p.returncode
- logger.info(s)
- raiseReturnCodeError(s,True)
- else:
- warnings.warn("subprocess returned a non-zero return "
- "code. Check outputs carefully...")
- job.postprocess()
- return
-
- # Check that all errors could be handled
- forxinself.run_log[-1]["corrections"]:
- ifnotx["actions"]andx["handler"].raises_runtime_error:
- self.run_log[-1]["handler"]=x["handler"]
- s="Unrecoverable error for handler: {}".format(x["handler"])
- raiseNonRecoverableError(s,True,x["handler"])
- forxinself.run_log[-1]["corrections"]:
- ifnotx["actions"]:
- self.run_log[-1]["handler"]=x["handler"]
- s="Unrecoverable error for handler: %s"%x["handler"]
- raiseNonRecoverableError(s,False,x["handler"])
-
- ifself.errors_current_job>=self.max_errors_per_job:
- self.run_log[-1]["max_errors_per_job"]=True
- msg="Max errors per job reached: {}.".format(self.max_errors_per_job)
- logger.info(msg)
- raiseMaxCorrectionsPerJobError(msg,True,self.max_errors_per_job,job)
- else:
- self.run_log[-1]["max_errors"]=True
- msg="Max errors reached: {}.".format(self.max_errors)
- logger.info(msg)
- raiseMaxCorrectionsError(msg,True,self.max_errors)
-
-
[docs]defrun_interrupted(self):
- """
- Runs custodian in an interrupted mode, which sets up and
- validates jobs but doesn't run the executable.
-
- Returns:
- number of remaining jobs
-
- Raises:
- ValidationError: if a job fails validation
- ReturnCodeError: if the process has a return code different from 0
- NonRecoverableError: if an unrecoverable error occurs
- MaxCorrectionsPerJobError: if max_errors_per_job is reached
- MaxCorrectionsError: if max_errors is reached
- MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
- """
- start=datetime.datetime.now()
- try:
- cwd=os.getcwd()
- v=sys.version.replace("\n"," ")
- logger.info("Custodian started in singleshot mode at {} in {}."
- .format(start,cwd))
- logger.info("Custodian running on Python version {}".format(v))
-
- # load run log
- ifos.path.exists(Custodian.LOG_FILE):
- self.run_log=loadfn(Custodian.LOG_FILE,cls=MontyDecoder)
-
- iflen(self.run_log)==0:
- # starting up an initial job - setup input and quit
- job_n=0
- job=self.jobs[job_n]
- logger.info("Setting up job no. 1 ({}) ".format(job.name))
- job.setup()
- self.run_log.append({"job":job.as_dict(),"corrections":[],
- 'job_n':job_n})
- returnlen(self.jobs)
- else:
- # Continuing after running calculation
- job_n=self.run_log[-1]['job_n']
- job=self.jobs[job_n]
-
- # If we had to fix errors from a previous run, insert clean log
- # dict
- iflen(self.run_log[-1]['corrections'])>0:
- logger.info("Reran {}.run due to fixable errors".format(
- job.name))
-
- # check error handlers
- logger.info("Checking error handlers for {}.run".format(
- job.name))
- ifself._do_check(self.handlers):
- logger.info("Failed validation based on error handlers")
- # raise an error for an unrecoverable error
- forxinself.run_log[-1]["corrections"]:
- ifnotx["actions"]andx["handler"].raises_runtime_error:
- self.run_log[-1]["handler"]=x["handler"]
- s="Unrecoverable error for handler: {}. " \
- "Raising RuntimeError".format(x["handler"])
- raiseNonRecoverableError(s,True,x["handler"])
- logger.info("Corrected input based on error handlers")
- # Return with more jobs to run if recoverable error caught
- # and corrected for
- returnlen(self.jobs)-job_n
-
- # check validators
- logger.info("Checking validator for {}.run".format(job.name))
- forvinself.validators:
- ifv.check():
- self.run_log[-1]["validator"]=v
- logger.info("Failed validation based on validator")
- s="Validation failed: {}".format(v)
- raiseValidationError(s,True,v)
-
- logger.info("Postprocessing for {}.run".format(job.name))
- job.postprocess()
-
- # IF DONE WITH ALL JOBS - DELETE ALL CHECKPOINTS AND RETURN
- # VALIDATED
- iflen(self.jobs)==(job_n+1):
- self.finished=True
- return0
-
- # Setup next job_n
- job_n+=1
- job=self.jobs[job_n]
- self.run_log.append({"job":job.as_dict(),"corrections":[],
- 'job_n':job_n})
- job.setup()
- returnlen(self.jobs)-job_n
-
- exceptCustodianErrorasex:
- logger.error(ex.message)
- ifex.raises:
- raise
-
- finally:
- # Log the corrections to a json file.
- logger.info("Logging to {}...".format(Custodian.LOG_FILE))
- dumpfn(self.run_log,Custodian.LOG_FILE,cls=MontyEncoder,
- indent=4)
- end=datetime.datetime.now()
- logger.info("Run ended at {}.".format(end))
- run_time=end-start
- logger.info("Run completed. Total time taken = {}."
- .format(run_time))
- ifself.finishedandself.gzipped_output:
- gzip_dir(".")
-
- def_do_check(self,handlers,terminate_func=None):
- """
- Checks the specified handlers. Returns True if and only if errors were caught.
- """
- corrections=[]
- forhinhandlers:
- try:
- ifh.check():
- ifh.max_num_correctionsisnotNone \
- andh.n_applied_corrections>=h.max_num_corrections:
- msg="Maximum number of corrections {} reached " \
- "for handler {}".format(h.max_num_corrections,h)
- ifh.raise_on_max:
- self.run_log[-1]["handler"]=h
- self.run_log[-1]["max_errors_per_handler"]=True
- raiseMaxCorrectionsPerHandlerError(msg,True,h.max_num_corrections,h)
- else:
- logger.warning(msg+" Correction not applied.")
- continue
- ifterminate_funcisnotNoneandh.is_terminating:
- logger.info("Terminating job")
- terminate_func()
- # make sure we don't terminate twice
- terminate_func=None
- d=h.correct()
- logger.error(h.__class__.__name__,extra=d)
- d["handler"]=h
- corrections.append(d)
- h.n_applied_corrections+=1
- exceptException:
- ifnotself.skip_over_errors:
- raise
- else:
- importtraceback
- logger.error("Bad handler %s "%h)
- logger.error(traceback.format_exc())
- corrections.append(
- {"errors":["Bad handler %s "%h],
- "actions":[]})
- self.total_errors+=len(corrections)
- self.errors_current_job+=len(corrections)
- self.run_log[-1]["corrections"].extend(corrections)
- # We do a dump of the run log after each check.
- dumpfn(self.run_log,Custodian.LOG_FILE,cls=MontyEncoder,
- indent=4)
- returnlen(corrections)>0
-
-
-
[docs]classJob(six.with_metaclass(ABCMeta,MSONable)):
- """
- Abstract base class defining the interface for a Job.
- """
-
-
[docs]@abstractmethod
- defsetup(self):
- """
- This method is run before the start of a job. Allows for some
- pre-processing.
- """
- pass
-
-
[docs]@abstractmethod
- defrun(self):
- """
- This method performs the actual work for the job. If parallel error
- checking (monitoring) is desired, this must return a Popen process.
- """
- pass
-
-
[docs]@abstractmethod
- defpostprocess(self):
- """
- This method is called at the end of a job, *after* error detection.
- This allows post-processing, such as cleanup, analysis of results,
- etc.
- """
- pass
-
- @property
- defname(self):
- """
- A nice string name for the job.
- """
- returnself.__class__.__name__
-
-
-
[docs]classErrorHandler(MSONable):
- """
- Abstract base class defining the interface for an ErrorHandler.
- """
-
- is_monitor=False
- """
- This class property indicates whether the error handler is a monitor,
- i.e., a handler that monitors a job as it is running. If a
- monitor-type handler notices an error, the job will be sent a
- termination signal, the error is then corrected,
- and then the job is restarted. This is useful for catching errors
- that occur early in the run but do not cause immediate failure.
- """
-
- is_terminating=True
- """
- Whether this handler terminates a job upon error detection. By
- default, this is True, which means that the current Job will be
- terminated upon error detection, corrections applied,
- and restarted. In some instances, some errors may not need the job to be
- terminated or may need to wait for some other event to terminate a job.
- For example, a particular error may require a flag to be set to request
- a job to terminate gracefully once it finishes its current task. The
- handler that sets the flag should be classified as is_terminating = False
- so that it does not terminate the job.
- """
-
- raises_runtime_error=True
- """
- Whether this handler causes custodian to raise a runtime error if it cannot
- handle the error (i.e. if correct returns a dict with "actions":None, or
- "actions":[])
- """
-
- max_num_corrections=None
- raise_on_max=False
- """
- Whether corrections from this specific handler should be applied only a
- fixed maximum number of times on a single job (i.e. the counter is reset
- at the beginning of each job). If the maximum number is reached, the code
- will either raise a MaxCorrectionsPerHandlerError (raise_on_max==True) or stop
- considering the correction (raise_on_max==False). If max_num_corrections
- is None this option is not considered. These options can be overridden
- as class attributes of the subclass or as customizable options setting
- an instance attribute from __init__.
- """
-
-
[docs]@abstractmethod
- defcheck(self):
- """
- This method is called during the job (for monitors) or at the end of
- the job to check for errors.
-
- Returns:
- (bool) Indicating if errors are detected.
- """
- pass
-
-
[docs]@abstractmethod
- defcorrect(self):
- """
- This method is called at the end of a job when an error is detected.
- It should perform any corrective measures relating to the detected
- error.
-
- Returns:
- (dict) JSON serializable dict that describes the errors and
- actions taken. E.g.
- {"errors": list_of_errors, "actions": list_of_actions_taken}.
- If this is an unfixable error, actions should be set to None.
- """
- pass
-
- @property
- defn_applied_corrections(self):
- """
- The number of times the handler has given a correction and this
- has been applied.
-
- Returns:
- (int): the number of corrections applied.
- """
- try:
- returnself._num_applied_corrections
- exceptAttributeError:
- self._num_applied_corrections=0
- returnself._num_applied_corrections
-
- @n_applied_corrections.setter
- defn_applied_corrections(self,value):
- """
- Setter for the number of corrections applied.
-
- Args:
- value(int): the number of corrections applied
- """
- self._num_applied_corrections=value
-
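-
-# A minimal sketch of a custom handler. Everything here is illustrative
-# (the filename, the error string and the unrecoverable response are
-# assumptions, not part of custodian):
-#
-#     class DiskFullHandler(ErrorHandler):
-#         is_monitor = False
-#
-#         def check(self):
-#             with open("job.out") as f:
-#                 return "No space left on device" in f.read()
-#
-#         def correct(self):
-#             # Unrecoverable: report the error with actions set to None.
-#             return {"errors": ["disk full"], "actions": None}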
-
-
[docs]classValidator(six.with_metaclass(ABCMeta,MSONable)):
- """
- Abstract base class defining the interface for a Validator. A Validator
- differs from an ErrorHandler in that it does not correct a run and is run
- only at the end of a Job. If errors are detected by a Validator, a run is
- immediately terminated.
- """
-
-
[docs]@abstractmethod
- defcheck(self):
- """
- This method is called at the end of a job.
-
- Returns:
- (bool) Indicating if errors are detected.
- """
- pass
-
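-
-# Sketch of a simple validator (the output filename is an assumption). Note
-# that a True return from check() signals a validation *failure*:
-#
-#     class OutputExistsValidator(Validator):
-#         def check(self):
-#             return not os.path.exists("results.json")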
-
-
[docs]classCustodianError(RuntimeError):
- """
- Exception class for Custodian errors.
- """
-
- def__init__(self,message,raises=False):
- """
- Initializes the error with a message.
-
- Args:
- message (str): Message passed to Exception
- raises (bool): Whether this should be raised outside custodian
- """
- super(CustodianError,self).__init__(message)
- self.raises=raises
- self.message=message
-
-
-
[docs]classValidationError(CustodianError):
- """
- Error raised when a validator does not pass the check
- """
-
- def__init__(self,message,raises,validator):
- """
- Args:
- message (str): Message passed to Exception
- raises (bool): Whether this should be raised outside custodian
- validator (Validator): Validator that caused the exception.
- """
- super(ValidationError,self).__init__(message,raises)
- self.validator=validator
-
-
-
[docs]classNonRecoverableError(CustodianError):
- """
- Error raised when a handler found an error but could not fix it
- """
-
- def__init__(self,message,raises,handler):
- """
- Args:
- message (str): Message passed to Exception
- raises (bool): Whether this should be raised outside custodian
- handler (Handler): Handler that caused the exception.
- """
- super(NonRecoverableError,self).__init__(message,raises)
- self.handler=handler
-
-
-
[docs]classReturnCodeError(CustodianError):
- """
- Error raised when the process gave non zero return code
- """
- pass
-
-
-
[docs]classMaxCorrectionsError(CustodianError):
- """
- Error raised when the maximum allowed number of errors is reached
- """
-
- def__init__(self,message,raises,max_errors):
- """
- Args:
- message (str): Message passed to Exception
- raises (bool): Whether this should be raised outside custodian
- max_errors (int): the number of errors reached
- """
- super(MaxCorrectionsError,self).__init__(message,raises)
- self.max_errors=max_errors
-
-
-
[docs]classMaxCorrectionsPerJobError(CustodianError):
- """
- Error raised when the maximum allowed number of errors per job is reached
- """
-
- def__init__(self,message,raises,max_errors_per_job,job):
- """
- Args:
- message (str): Message passed to Exception
- raises (bool): Whether this should be raised outside custodian
- max_errors_per_job (int): the number of errors per job reached
- job (Job): the job that was stopped
- """
- super(MaxCorrectionsPerJobError,self).__init__(message,raises)
- self.max_errors_per_job=max_errors_per_job
- self.job=job
-
-
-
[docs]classMaxCorrectionsPerHandlerError(CustodianError):
- """
- Error raised when the maximum allowed number of errors per handler is reached
- """
-
- def__init__(self,message,raises,max_errors_per_handler,handler):
- """
- Args:
- message (str): Message passed to Exception
- raises (bool): Whether this should be raised outside custodian
- max_errors_per_handler (int): the number of errors per handler reached
- handler (Handler): the handler that caused the exception
- """
- super(MaxCorrectionsPerHandlerError,self).__init__(message,raises)
- self.max_errors_per_handler=max_errors_per_handler
- self.handler=handler
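-
-# All of the above derive from CustodianError, so a driver script can catch
-# them selectively. A short sketch (the Custodian instance "c" is assumed):
-#
-#     try:
-#         c.run()
-#     except ValidationError as exc:
-#         print("Validation failed:", exc.validator)
-#     except MaxCorrectionsError as exc:
-#         print("Error budget exhausted:", exc.max_errors)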
[docs]classUnconvergedErrorHandler(ErrorHandler):
- """
- Correct the unconverged error of FEFF's SCF calculation.
- """
-
- is_monitor=False
-
- def__init__(self,output_filename='log1.dat'):
- """
- Initializes the handler with the output file to check
-
- Args:
- output_filename (str): Filename for the log1.dat file. log1.dat file
- contains the SCF calculation convergence information. Change this only
- if it is different from the default (unlikely).
- """
- self.output_filename=output_filename
-
-
[docs]defcheck(self):
- """
- If the FEFF run does not converge, the check returns True.
- """
- returnself._notconverge_check()
-
- def_notconverge_check(self):
-
- # Process the output file and get converge information
- not_converge_pattern=re.compile("Convergence not reached.*")
- converge_pattern=re.compile('Convergence reached.*')
- for_,lineinenumerate(open(self.output_filename)):
- iflen(not_converge_pattern.findall(line))>0:
- returnTrue
-
- eliflen(converge_pattern.findall(line))>0:
- returnFalse
-
-
[docs]defcorrect(self):
- backup(FEFF_BACKUP_FILES)
- feff_input=FEFFDictSet.from_directory(".")
- scf_values=feff_input.tags.get("SCF")
- nscmt=scf_values[2]
- ca=scf_values[3]
- nmix=scf_values[4]
- actions=[]
-
- # Add RESTART card to PARAMETERS
- if "RESTART" not in feff_input.tags:
- actions.append({"dict":"PARAMETERS",
- "action":{"_set":{"RESTART":[]}}})
-
- ifnscmt<100andca==0.2:
- scf_values[2]=100
- scf_values[4]=3# Set nmix = 3
- actions.append({"dict":"PARAMETERS",
- "action":{"_set":{"SCF":scf_values}}})
- FeffModder().apply_actions(actions)
- return{"errors":["Non-converging job"],"actions":actions}
-
- elifnscmt==100andnmix==3andca>0.01:
- # Reduce the convergence accelerator factor
- scf_values[3]=round(ca/2,2)
- actions.append({"dict":"PARAMETERS",
- "action":{"_set":{"SCF":scf_values}}})
- FeffModder().apply_actions(actions)
- return{"errors":["Non-converging job"],"actions":actions}
-
- elifnmix==3andca==0.01:
- # Set ca = 0.05 and nmix = 5
- scf_values[3]=0.05
- scf_values[4]=5
- actions.append({"dict":"PARAMETERS",
- "action":{"_set":{"SCF":scf_values}}})
- FeffModder().apply_actions(actions)
- return{"errors":["Non-converging job"],"actions":actions}
-
- elifnmix==5andca==0.05:
- # Keep ca = 0.05 and increase nmix to 10
- scf_values[3]=0.05
- scf_values[4]=10
- actions.append({"dict":"PARAMETERS",
- "action":{"_set":{"SCF":scf_values}}})
- FeffModder().apply_actions(actions)
- return{"errors":["Non-converging job"],"actions":actions}
-
- elifnmix==10andca<0.2:
- # loop through ca with nmix = 10
- scf_values[3]=round(ca*2,2)
- actions.append({"dict":"PARAMETERS",
- "action":{"_set":{"SCF":scf_values}}})
- FeffModder().apply_actions(actions)
- return{"errors":["Non-converging job"],"actions":actions}
-
- # Unfixable error. Just return None for actions.
- else:
- return{"errors":["Non-converging job"],"actions":None}
[docs]classFeffModder(Modder):
- def__init__(self,actions=None,strict=True,feffinp=None):
- """
- Initializes a Modder for FeffInput sets
-
- Args:
- actions ([Action]): A sequence of supported actions. See
- :mod:`custodian.ansible.actions`. Default is None,
- which means DictActions and FileActions are supported.
- strict (bool): Indicating whether to use strict mode. In non-strict
- mode, unsupported actions are simply ignored without any
- errors raised. In strict mode, if an unsupported action is
- supplied, a ValueError is raised. Defaults to True.
- feffinp (FEFFInput): A FeffInput object from the current directory.
- Initialized automatically if not passed (but passing it will
- avoid having to reparse the directory).
- """
- self.feffinp=feffinporFEFFDictSet.from_directory('.')
- self.feffinp=self.feffinp.all_input()
- actions=actionsor[FileActions,DictActions]
- super(FeffModder,self).__init__(actions,strict)
-
-
[docs]defapply_actions(self,actions):
- """
- Applies a list of actions to the FEFF Input Set and rewrites modified
- files.
-
- Args:
- actions [dict]: A list of actions of the form {'file': filename,
- 'action': moddermodification} or {'dict': feffinput_key,
- 'action': moddermodification}
- """
- modified=[]
- forainactions:
- if"dict"ina:
- k=a["dict"]
- modified.append(k)
- self.feffinp[k]=self.modify_object(a["action"],self.feffinp[k])
- elif"file"ina:
- self.modify(a["action"],a["file"])
- else:
- raiseValueError("Unrecognized format: {}".format(a))
- ifmodified:
- feff=self.feffinp
- feff_input="\n\n".join(str(feff[k])forkin
- ["HEADER","PARAMETERS","POTENTIALS","ATOMS"]
- ifkinfeff)
- fork,vinsix.iteritems(feff):
- withopen(os.path.join('.',k),"w")asf:
- f.write(str(v))
-
- withopen(os.path.join('.',"feff.inp"),"w")asf:
- f.write(feff_input)
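-
-# Usage sketch for the modder; the action shown mirrors the RESTART action
-# used by UnconvergedErrorHandler above:
-#
-#     modder = FeffModder()
-#     modder.apply_actions([{"dict": "PARAMETERS",
-#                            "action": {"_set": {"RESTART": []}}}])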
[docs]classFeffJob(Job):
- """
- A basic FEFF job that runs whatever is in the directory.
- """
-
- def__init__(self,feff_cmd,output_file="feff.out",
- stderr_file="std_feff_err.txt",backup=True,
- gzipped=False,gzipped_prefix='feff_out'):
- """
- This constructor is used for a standard FEFF initialization
-
- Args:
- feff_cmd (str): the full command for running FEFF
- output_file (str): Name of file to direct standard out to.
- Defaults to "feff.out".
- stderr_file (str): Name of file to direct standard error to.
- Defaults to "std_feff_err.txt".
- backup (bool): Indicating whether to backup the initial input files.
- If True, the feff.inp will be copied with a ".orig" appended.
- Defaults to True.
- gzipped (bool): Whether to gzip the final output. Defaults to False.
- gzipped_prefix (str): prefix to the feff output files archive. Defaults
- to feff_out, which means a series of feff_out.1.tar.gz, feff_out.2.tar.gz, ...
- will be generated.
- """
- self.feff_cmd=feff_cmd
- self.output_file=output_file
- self.stderr_file=stderr_file
- self.backup=backup
- self.gzipped=gzipped
- self.gzipped_prefix=gzipped_prefix
-
-
[docs]defrun(self):
-
- """
- Performs the actual FEFF run.
-
- Returns:
- (subprocess.Popen) Used for monitoring.
- """
- withopen(self.output_file,"w")asf_std, \
- open(self.stderr_file,"w",buffering=1)asf_err:
- # Use line buffering for stderr
- # On TSCC, need to run shell command
- p=subprocess.Popen(self.feff_cmd,stdout=f_std,stderr=f_err,shell=True)
-
- returnp
-
-
[docs]defpostprocess(self):
- """
- Renaming or gzipping all the output as needed
- """
- ifself.gzipped:
- backup("*",prefix=self.gzipped_prefix)
[docs]classNwchemErrorHandler(ErrorHandler):
- """
- Error handler for Nwchem Jobs. Currently tested only for B3LYP DFT jobs
- generated by pymatgen.
- """
-
- def__init__(self,output_filename="mol.nwout"):
- """
- Initializes with an output file name.
-
- Args:
- output_filename (str): This is the file where the stdout for nwchem
- is being redirected. The error messages that are checked are
- present in the stdout. Defaults to "mol.nwout", which is the
- default redirect used by :class:`custodian.nwchem.jobs
- .NwchemJob`.
- """
- self.output_filename=output_filename
-
-
[docs]classNwchemJob(Job):
- """
- A basic Nwchem job.
- """
-
- def__init__(self,nwchem_cmd,input_file="mol.nw",
- output_file="mol.nwout",gzipped=False,
- backup=True,settings_override=None):
- """
- Initializes a basic NwChem job.
-
- Args:
- nwchem_cmd ([str]): Command to run Nwchem as a list of args. For
- example, ["nwchem"].
- input_file (str): Input file to run. Defaults to "mol.nw".
- output_file (str): Name of file to direct standard out to.
- Defaults to "mol.nwout".
- backup (bool): Whether to backup the initial input files. If True,
- the input files will be copied with a ".orig" appended.
- Defaults to True.
- gzipped (bool): Deprecated. Please use the Custodian class's
- gzipped_output option instead.
- settings_override ([dict]):
- An ansible style list of dict to override changes.
- #TODO: Not implemented yet.
- """
- self.nwchem_cmd=nwchem_cmd
- self.input_file=input_file
- self.output_file=output_file
- self.backup=backup
- self.gzipped=gzipped
- self.settings_override=settings_override
-
-
[docs]classQChemErrorHandler(ErrorHandler):
- """
- Master QChemErrorHandler class that handles a number of common errors
- that occur during QChem runs.
- """
-
- is_monitor=False
-
- def__init__(self,
- input_file="mol.qin",
- output_file="mol.qout",
- scf_max_cycles=200,
- geom_max_cycles=200):
- """
- Initializes the error handler from a set of input and output files.
-
- Args:
- input_file (str): Name of the QChem input file.
- output_file (str): Name of the QChem output file.
- scf_max_cycles (int): The max iterations to set to fix SCF failure.
- geom_max_cycles (int): The max iterations to set to fix geometry
- optimization failure.
- """
- self.input_file=input_file
- self.output_file=output_file
- self.scf_max_cycles=scf_max_cycles
- self.geom_max_cycles=geom_max_cycles
- self.outdata=None
- self.errors=[]
- self.opt_error_history=[]
-
-
[docs]defcheck(self):
- # Checks output file for errors.
- self.outdata=QCOutput(self.output_file).data
- self.errors=self.outdata.get("errors")
- # If we aren't out of optimization cycles, but we were in the past, reset the history
- if"out_of_opt_cycles"notinself.errorsandlen(self.opt_error_history)>0:
- self.opt_error_history=[]
- # If we're out of optimization cycles and we have unconnected fragments, no need to handle any errors
- if"out_of_opt_cycles"inself.errorsandself.outdata["structure_change"]=="unconnected_fragments":
- returnFalse
- returnlen(self.errors)>0
-
-
[docs]defcorrect(self):
- backup({self.input_file,self.output_file})
- actions=[]
- self.qcinp=QCInput.from_file(self.input_file)
-
- if"SCF_failed_to_converge"inself.errors:
- # Check number of SCF cycles. If not set or less than scf_max_cycles,
- # increase to that value and rerun. If already set, check if
- # scf_algorithm is unset or set to DIIS, in which case set to GDM.
- # Otherwise, tell user to call SCF error handler and do nothing.
- ifstr(self.qcinp.rem.get("max_scf_cycles"))!=str(
- self.scf_max_cycles):
- self.qcinp.rem["max_scf_cycles"]=self.scf_max_cycles
- actions.append({"max_scf_cycles":self.scf_max_cycles})
- elifself.qcinp.rem.get("scf_algorithm","diis").lower()=="diis":
- self.qcinp.rem["scf_algorithm"]="gdm"
- actions.append({"scf_algorithm":"gdm"})
- elifself.qcinp.rem.get("scf_algorithm","gdm").lower()=="gdm":
- self.qcinp.rem["scf_algorithm"]="diis_gdm"
- actions.append({"scf_algorithm":"diis_gdm"})
- else:
- print(
- "More advanced changes may impact the SCF result. Use the SCF error handler"
- )
-
- elif"out_of_opt_cycles"inself.errors:
- # Check number of opt cycles. If less than geom_max_cycles, increase
- # to that value, set last geom as new starting geom and rerun.
- ifstr(self.qcinp.rem.get(
- "geom_opt_max_cycles"))!=str(self.geom_max_cycles):
- self.qcinp.rem["geom_opt_max_cycles"]=self.geom_max_cycles
- actions.append({"geom_max_cycles:":self.scf_max_cycles})
- iflen(self.outdata.get("energy_trajectory"))>1:
- self.qcinp.molecule=self.outdata.get(
- "molecule_from_last_geometry")
- actions.append({"molecule":"molecule_from_last_geometry"})
- # If already at geom_max_cycles, often can just get convergence by restarting
- # from the geometry of the last cycle. But we'll also save any structural
- # changes that happened along the way.
- else:
- self.opt_error_history+=[self.outdata["structure_change"]]
- iflen(self.opt_error_history)>1:
- ifself.opt_error_history[-1]=="no_change":
- # If no structural changes occurred in two consecutive optimizations,
- # and we still haven't converged, then just exit.
- return{"errors":self.errors,"actions":None,"opt_error_history":self.opt_error_history}
- self.qcinp.molecule=self.outdata.get("molecule_from_last_geometry")
- actions.append({"molecule":"molecule_from_last_geometry"})
-
- elif"unable_to_determine_lamda"inself.errors:
- # Set last geom as new starting geom and rerun. If no opt cycles,
- # use diff SCF strat? Diff initial guess? Change basis?
- iflen(self.outdata.get("energy_trajectory"))>1:
- self.qcinp.molecule=self.outdata.get(
- "molecule_from_last_geometry")
- actions.append({"molecule":"molecule_from_last_geometry"})
- elifself.qcinp.rem.get("scf_algorithm","diis").lower()=="diis":
- self.qcinp.rem["scf_algorithm"]="rca_diis"
- actions.append({"scf_algorithm":"rca_diis"})
- ifself.qcinp.rem.get("gen_scfman"):
- self.qcinp.rem["gen_scfman"]=False
- actions.append({"gen_scfman":False})
- else:
- print(
- "Use a different initial guess? Perhaps a different basis?"
- )
-
- elif"linear_dependent_basis"inself.errors:
- # DIIS -> RCA_DIIS. If already RCA_DIIS, change basis?
- ifself.qcinp.rem.get("scf_algorithm","diis").lower()=="diis":
- self.qcinp.rem["scf_algorithm"]="rca_diis"
- actions.append({"scf_algorithm":"rca_diis"})
- ifself.qcinp.rem.get("gen_scfman"):
- self.qcinp.rem["gen_scfman"]=False
- actions.append({"gen_scfman":False})
- else:
- print("Perhaps use a better basis?")
-
- elif"failed_to_transform_coords"inself.errors:
- # Check for symmetry flag in rem. If not False, set to False and rerun.
- # If already False, increase threshold?
- ifnotself.qcinp.rem.get("sym_ignore")orself.qcinp.rem.get(
- "symmetry"):
- self.qcinp.rem["sym_ignore"]=True
- self.qcinp.rem["symmetry"]=False
- actions.append({"sym_ignore":True})
- actions.append({"symmetry":False})
- else:
- print("Perhaps increase the threshold?")
-
- elif"input_file_error"inself.errors:
- print(
- "Something is wrong with the input file. Examine error message by hand."
- )
- return{"errors":self.errors,"actions":None}
-
- elif"failed_to_read_input"inself.errors:
- # Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
- actions.append({"rerun job as-is"})
-
- elif"IO_error"inself.errors:
- # Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
- actions.append({"rerun job as-is"})
-
- elif"read_molecule_error"inself.errors:
- # Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
- actions.append({"rerun job as-is"})
-
- elif"never_called_qchem"inself.errors:
- # Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
- actions.append({"rerun job as-is"})
-
- elif"unknown_error"inself.errors:
- print("Examine error message by hand.")
- return{"errors":self.errors,"actions":None}
-
- else:
- # You should never get here. If correct is being called then errors should have at least one entry,
- # in which case it should have been caught by the if/elifs above.
- print(
- "If you get this message, something has gone terribly wrong!")
- return{"errors":self.errors,"actions":None}
-
- os.rename(self.input_file,self.input_file+".last")
- self.qcinp.write_file(self.input_file)
- return{"errors":self.errors,"actions":actions}
-
-
-
[docs]classQChemSCFErrorHandler(ErrorHandler):
- """
- QChem ErrorHandler class that addresses SCF non-convergence.
- """
-
- is_monitor=False
-
- def__init__(self,
- input_file="mol.qin",
- output_file="mol.qout",
- rca_gdm_thresh=1.0E-3,
- scf_max_cycles=200):
- """
- Initializes the error handler from a set of input and output files.
-
- Args:
- input_file (str): Name of the QChem input file.
- output_file (str): Name of the QChem output file.
- rca_gdm_thresh (float): The threshold for the prior scf algorithm.
- If last deltaE is larger than the threshold try RCA_DIIS
- first, else, try DIIS_GDM first.
- scf_max_cycles (int): The max iterations to set to fix SCF failure.
- """
- self.input_file = input_file
- self.output_file = output_file
- self.rca_gdm_thresh = rca_gdm_thresh
- self.scf_max_cycles = scf_max_cycles
- self.qcinp = QCInput.from_file(self.input_file)
- self.outdata = None
- self.errors = None
-
-
[docs]classQCJob(Job):
- """
- A basic QChem Job.
- """
-
- def__init__(self,
- qchem_command,
- max_cores,
- multimode="openmp",
- input_file="mol.qin",
- output_file="mol.qout",
- qclog_file="mol.qclog",
- suffix="",
- scratch_dir="/dev/shm/qcscratch/",
- save_scratch=False,
- save_name="default_save_name",
- backup=True):
- """
- Args:
- qchem_command (str): Command to run QChem.
- max_cores (int): Maximum number of cores to parallelize over.
- multimode (str): Parallelization scheme, either openmp or mpi.
- input_file (str): Name of the QChem input file.
- output_file (str): Name of the QChem output file.
- qclog_file (str): Name of the file to redirect the standard output
- to. None means not to record the standard output.
- suffix (str): String to append to the file in postprocess.
- scratch_dir (str): QCSCRATCH directory. Defaults to "/dev/shm/qcscratch/".
- save_scratch (bool): Whether to save scratch directory contents.
- Defaults to False.
- save_name (str): Name of the saved scratch directory. Defaults
- to "default_save_name".
- backup (bool): Whether to backup the initial input file. If True, the
- input will be copied with a ".orig" appended. Defaults to True.
- """
- self.qchem_command=qchem_command.split(" ")
- self.multimode=multimode
- self.input_file=input_file
- self.output_file=output_file
- self.max_cores=max_cores
- self.qclog_file=qclog_file
- self.suffix=suffix
- self.scratch_dir=scratch_dir
- self.save_scratch=save_scratch
- self.save_name=save_name
- self.backup=backup
-
- @property
- defcurrent_command(self):
- multimode_index=0
- ifself.save_scratch:
- command=[
- "-save","",
- str(self.max_cores),self.input_file,self.output_file,
- self.save_name
- ]
- multimode_index=1
- else:
- command=[
- "",str(self.max_cores),self.input_file,self.output_file
- ]
- ifself.multimode=='openmp':
- command[multimode_index]="-nt"
- elifself.multimode=='mpi':
- command[multimode_index]="-np"
- else:
- print("ERROR: Multimode should only be set to openmp or mpi")
- command=self.qchem_command+command
- returncommand
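-
- # Worked example of the command assembly above: for
- # QCJob("qchem", max_cores=32) with the defaults (openmp, no saved
- # scratch), current_command evaluates to
- # ["qchem", "-nt", "32", "mol.qin", "mol.qout"].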
-
-
[docs]defbackup(filenames,prefix="error"):
- """
- Backup files to a tar.gz file. Used, for example, in backing up the
- files of an errored run before performing corrections.
-
- Args:
- filenames ([str]): List of files to backup. Supports wildcards, e.g.,
- *.*.
- prefix (str): prefix to the files. Defaults to error, which means a
- series of error.1.tar.gz, error.2.tar.gz, ... will be generated.
- """
- num=max([0]+[int(f.split(".")[1])
- forfinglob("{}.*.tar.gz".format(prefix))])
- filename="{}.{}.tar.gz".format(prefix,num+1)
- logging.info("Backing up run to {}.".format(filename))
- withtarfile.open(filename,"w:gz")astar:
- forfnameinfilenames:
- forfinglob(fname):
- tar.add(f)
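-
-# Usage sketch (filenames illustrative): the first call below writes
-# error.1.tar.gz, the next error.2.tar.gz, and so on.
-#
-#     backup(["INCAR", "KPOINTS", "POSCAR", "vasp.out"])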
-
-
-
[docs]defget_execution_host_info():
- """
- Tries to return a tuple describing the execution host.
- Doesn't work for all queueing systems
-
- Returns:
- (HOSTNAME, CLUSTER_NAME)
- """
- host = os.environ.get('HOSTNAME', None)
- cluster = os.environ.get('SGE_O_HOST', None)
- if host is None:
- try:
- import socket
- host = host or socket.gethostname()
- except Exception:
- pass
- return host or 'unknown', cluster or 'unknown'
-# coding: utf-8
-
-from__future__importunicode_literals,division
-
-frommonty.os.pathimportzpath
-importos
-importtime
-importdatetime
-importoperator
-importshutil
-importlogging
-fromfunctoolsimportreduce
-fromcollectionsimportCounter
-importre
-
-importnumpyasnp
-
-frommonty.devimportdeprecated
-frommonty.serializationimportloadfn
-
-fromcustodian.custodianimportErrorHandler
-fromcustodian.utilsimportbackup
-frompymatgen.io.vaspimportPoscar,VaspInput,Incar,Kpoints,Vasprun, \
- Oszicar,Outcar
-frompymatgen.transformations.standard_transformationsimport \
- SupercellTransformation
-
-fromcustodian.ansible.interpreterimportModder
-fromcustodian.ansible.actionsimportFileActions
-fromcustodian.vasp.interpreterimportVaspModder
-
-"""
-This module implements specific error handlers for VASP runs. These handlers
-try to detect common errors in VASP runs and attempt to fix them on the fly
-by modifying the input files.
-"""
-
-__author__="Shyue Ping Ong, William Davidson Richards, Anubhav Jain, " \
- "Wei Chen, Stephen Dacek"
-__version__="0.1"
-__maintainer__="Shyue Ping Ong"
-__email__="ongsp@ucsd.edu"
-__status__="Beta"
-__date__="2/4/13"
-
-VASP_BACKUP_FILES={"INCAR","KPOINTS","POSCAR","OUTCAR","CONTCAR",
- "OSZICAR","vasprun.xml","vasp.out","std_err.txt"}
-
-
-
[docs]classVaspErrorHandler(ErrorHandler):
- """
- Master VaspErrorHandler class that handles a number of common errors
- that occur during VASP runs.
- """
-
- is_monitor=True
-
- error_msgs={
- "tet":["Tetrahedron method fails for NKPT<4",
- "Fatal error detecting k-mesh",
- "Fatal error: unable to match k-point",
- "Routine TETIRR needs special values",
- "Tetrahedron method fails (number of k-points < 4)"],
- "inv_rot_mat":["inverse of rotation matrix was not found (increase "
- "SYMPREC)"],
- "brmix":["BRMIX: very serious problems"],
- "subspacematrix":["WARNING: Sub-Space-Matrix is not hermitian in "
- "DAV"],
- "tetirr":["Routine TETIRR needs special values"],
- "incorrect_shift":["Could not get correct shifts"],
- "real_optlay":["REAL_OPTLAY: internal error",
- "REAL_OPT: internal ERROR"],
- "rspher":["ERROR RSPHER"],
- "dentet":["DENTET"],
- "too_few_bands":["TOO FEW BANDS"],
- "triple_product":["ERROR: the triple product of the basis vectors"],
- "rot_matrix":["Found some non-integer element in rotation matrix"],
- "brions":["BRIONS problems: POTIM should be increased"],
- "pricel":["internal error in subroutine PRICEL"],
- "zpotrf":["LAPACK: Routine ZPOTRF failed"],
- "amin":["One of the lattice vectors is very long (>50 A), but AMIN"],
- "zbrent":["ZBRENT: fatal internal in",
- "ZBRENT: fatal error in bracketing"],
- "pssyevx":["ERROR in subspace rotation PSSYEVX"],
- "eddrmm":["WARNING in EDDRMM: call to ZHEGV failed"],
- "edddav":["Error EDDDAV: Call to ZHEGV failed"],
- "grad_not_orth":[
- "EDWAV: internal error, the gradient is not orthogonal"],
- "nicht_konv":["ERROR: SBESSELITER : nicht konvergent"],
- "zheev":["ERROR EDDIAG: Call to routine ZHEEV failed!"],
- "elf_kpar":["ELF: KPAR>1 not implemented"],
- "elf_ncl":["WARNING: ELF not implemented for non collinear case"],
- "rhosyg":["RHOSYG internal error"],
- "posmap":["POSMAP internal error: symmetry equivalent atom not found"],
- "point_group":["Error: point group operation missing"]
- }
-
- def__init__(self,output_filename="vasp.out",natoms_large_cell=100,
- errors_subset_to_catch=None):
- """
- Initializes the handler with the output file to check.
-
- Args:
- output_filename (str): This is the file where the stdout for vasp
- is being redirected. The error messages that are checked are
- present in the stdout. Defaults to "vasp.out", which is the
- default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
- natoms_large_cell (int): Number of atoms threshold to treat cell
- as large. Affects the correction of certain errors. Defaults to
- 100.
- errors_subset_to_catch (list): A subset of errors to catch. The
- default is None, which means all supported errors are detected.
- Use this to catch only a subset of supported errors.
- E.g., ["eddrmm", "zheev"] will only catch the eddrmm and zheev
- errors, and not others. If you wish to exclude only one or
- two of the errors, you can create this list with the following
- lines:
-
- ```
- subset = list(VaspErrorHandler.error_msgs.keys())
- subset.remove("eddrmm")
-
- handler = VaspErrorHandler(errors_subset_to_catch=subset)
- ```
- """
- self.output_filename=output_filename
- self.errors=set()
- self.error_count=Counter()
- # threshold of number of atoms to treat the cell as large.
- self.natoms_large_cell=natoms_large_cell
- self.errors_subset_to_catch=errors_subset_to_catchor \
- list(VaspErrorHandler.error_msgs.keys())
- self.logger=logging.getLogger(self.__class__.__name__)
-
-
[docs]defcheck(self):
- incar=Incar.from_file("INCAR")
- self.errors=set()
- withopen(self.output_filename,"r")asf:
- forlineinf:
- l=line.strip()
- forerr,msgsinVaspErrorHandler.error_msgs.items():
- iferrinself.errors_subset_to_catch:
- formsginmsgs:
- ifl.find(msg)!=-1:
- # This checks if we want to run a charged
- # computation (e.g., defects). If so, we don't
- # want to kill it, because a change in
- # e-density naturally triggers the brmix error.
- iferr=="brmix"and'NELECT'inincar:
- continue
- self.errors.add(err)
- self.logger.error(msg,extra={"incar":incar.as_dict()})
- returnlen(self.errors)>0
-
-
-     def correct(self):
-         backup(VASP_BACKUP_FILES | {self.output_filename})
-         actions = []
-         vi = VaspInput.from_directory(".")
-
-         if self.errors.intersection(["tet", "dentet"]):
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"ISMEAR": 0, "SIGMA": 0.05}}})
-
-         if "inv_rot_mat" in self.errors:
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"SYMPREC": 1e-8}}})
-
-         if "brmix" in self.errors:
-             # If there is not a valid OUTCAR already, increment
-             # the error count to 1 to skip the first fix.
-             if self.error_count['brmix'] == 0:
-                 try:
-                     assert Outcar(zpath(os.path.join(
-                         os.getcwd(), "OUTCAR"))).is_stopped is False
-                 except Exception:
-                     self.error_count['brmix'] += 1
-
-             if self.error_count['brmix'] == 0:
-                 # Valid OUTCAR - simply rerun the job and increment
-                 # the error count for next time.
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ISTART": 1}}})
-                 self.error_count['brmix'] += 1
-
-             elif self.error_count['brmix'] == 1:
-                 # Use Kerker mixing w/ default values for other parameters.
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"IMIX": 1}}})
-                 self.error_count['brmix'] += 1
-
-             elif self.error_count['brmix'] == 2 and vi["KPOINTS"].style \
-                     == Kpoints.supported_modes.Gamma:
-                 actions.append({"dict": "KPOINTS",
-                                 "action": {"_set": {"generation_style":
-                                                     "Monkhorst"}}})
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_unset": {"IMIX": 1}}})
-                 self.error_count['brmix'] += 1
-
-             elif self.error_count['brmix'] in [2, 3] and vi["KPOINTS"].style \
-                     == Kpoints.supported_modes.Monkhorst:
-                 actions.append({"dict": "KPOINTS",
-                                 "action": {"_set": {"generation_style":
-                                                     "Gamma"}}})
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_unset": {"IMIX": 1}}})
-                 self.error_count['brmix'] += 1
-
-                 if vi["KPOINTS"].num_kpts < 1:
-                     all_kpts_even = all(
-                         n % 2 == 0 for n in vi["KPOINTS"].kpts[0])
-                     if all_kpts_even:
-                         new_kpts = (
-                             tuple(n + 1 for n in vi["KPOINTS"].kpts[0]),)
-                         actions.append({"dict": "KPOINTS", "action": {"_set": {
-                             "kpoints": new_kpts
-                         }}})
-
-             else:
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ISYM": 0}}})
-
-                 if vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
-                     actions.append({"dict": "KPOINTS",
-                                     "action": {
-                                         "_set": {"generation_style": "Gamma"}}})
-
-                 # Based on the VASP forum's recommendation, you should delete
-                 # the CHGCAR and WAVECAR when dealing with this error.
-                 if vi["INCAR"].get("ICHARG", 0) < 10:
-                     actions.append({"file": "CHGCAR",
-                                     "action": {
-                                         "_file_delete": {'mode': "actual"}}})
-                     actions.append({"file": "WAVECAR",
-                                     "action": {
-                                         "_file_delete": {'mode': "actual"}}})
-
-         if "zpotrf" in self.errors:
-             # Usually caused by short bond distances. If on the first step,
-             # the volume needs to be increased. Otherwise, it was due to a
-             # step being too big and POTIM should be decreased. If a static
-             # run, try turning off symmetry.
-             try:
-                 oszicar = Oszicar("OSZICAR")
-                 nsteps = len(oszicar.ionic_steps)
-             except Exception:
-                 nsteps = 0
-
-             if nsteps >= 1:
-                 potim = float(vi["INCAR"].get("POTIM", 0.5)) / 2.0
-                 actions.append(
-                     {"dict": "INCAR",
-                      "action": {"_set": {"ISYM": 0, "POTIM": potim}}})
-             elif vi["INCAR"].get("NSW", 0) == 0 \
-                     or vi["INCAR"].get("ISIF", 0) in range(3):
-                 actions.append(
-                     {"dict": "INCAR", "action": {"_set": {"ISYM": 0}}})
-             else:
-                 s = vi["POSCAR"].structure
-                 s.apply_strain(0.2)
-                 actions.append({"dict": "POSCAR",
-                                 "action": {"_set": {"structure": s.as_dict()}}})
-
-             # Based on the VASP forum's recommendation, you should delete
-             # the CHGCAR and WAVECAR when dealing with this error.
-             if vi["INCAR"].get("ICHARG", 0) < 10:
-                 actions.append({"file": "CHGCAR",
-                                 "action": {"_file_delete": {'mode': "actual"}}})
-                 actions.append({"file": "WAVECAR",
-                                 "action": {"_file_delete": {'mode': "actual"}}})
-
-         if self.errors.intersection(["subspacematrix"]):
-             if self.error_count["subspacematrix"] == 0:
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"LREAL": False}}})
-             else:
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"PREC": "Accurate"}}})
-             self.error_count["subspacematrix"] += 1
-
-         if self.errors.intersection(["rspher", "real_optlay", "nicht_konv"]):
-             s = vi["POSCAR"].structure
-             if len(s) < self.natoms_large_cell:
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"LREAL": False}}})
-             else:
-                 # For a large supercell, try the in-between option
-                 # LREAL = True prior to LREAL = False.
-                 if self.error_count['real_optlay'] == 0:
-                     # Use real-space projectors generated by the pot.
-                     actions.append({"dict": "INCAR",
-                                     "action": {"_set": {"LREAL": True}}})
-                 elif self.error_count['real_optlay'] == 1:
-                     actions.append({"dict": "INCAR",
-                                     "action": {"_set": {"LREAL": False}}})
-                 self.error_count['real_optlay'] += 1
-
-         if self.errors.intersection(["tetirr", "incorrect_shift"]):
-             if vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
-                 actions.append({"dict": "KPOINTS",
-                                 "action": {
-                                     "_set": {"generation_style": "Gamma"}}})
-
-         if "rot_matrix" in self.errors:
-             if vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
-                 actions.append({"dict": "KPOINTS",
-                                 "action": {
-                                     "_set": {"generation_style": "Gamma"}}})
-             else:
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ISYM": 0}}})
-
-         if "amin" in self.errors:
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"AMIN": "0.01"}}})
-
-         if "triple_product" in self.errors:
-             s = vi["POSCAR"].structure
-             trans = SupercellTransformation(((1, 0, 0), (0, 0, 1), (0, 1, 0)))
-             new_s = trans.apply_transformation(s)
-             actions.append({"dict": "POSCAR",
-                             "action": {"_set": {"structure": new_s.as_dict()}},
-                             "transformation": trans.as_dict()})
-
-         if "pricel" in self.errors:
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"SYMPREC": 1e-8, "ISYM": 0}}})
-
-         if "brions" in self.errors:
-             potim = float(vi["INCAR"].get("POTIM", 0.5)) + 0.1
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"POTIM": potim}}})
-
-         if "zbrent" in self.errors:
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"IBRION": 1}}})
-             actions.append({"file": "CONTCAR",
-                             "action": {"_file_copy": {"dest": "POSCAR"}}})
-
-         if "too_few_bands" in self.errors:
-             if "NBANDS" in vi["INCAR"]:
-                 nbands = int(vi["INCAR"]["NBANDS"])
-             else:
-                 with open("OUTCAR") as f:
-                     for line in f:
-                         if "NBANDS" in line:
-                             try:
-                                 d = line.split("=")
-                                 nbands = int(d[-1].strip())
-                                 break
-                             except (IndexError, ValueError):
-                                 pass
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"NBANDS": int(1.1 * nbands)}}})
-
-         if "pssyevx" in self.errors:
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"ALGO": "Normal"}}})
-
-         if "eddrmm" in self.errors:
-             # The RMM algorithm is not stable for this calculation.
-             if vi["INCAR"].get("ALGO", "Normal") in ["Fast", "VeryFast"]:
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ALGO": "Normal"}}})
-             else:
-                 potim = float(vi["INCAR"].get("POTIM", 0.5)) / 2.0
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"POTIM": potim}}})
-             if vi["INCAR"].get("ICHARG", 0) < 10:
-                 actions.append({"file": "CHGCAR",
-                                 "action": {"_file_delete": {'mode': "actual"}}})
-                 actions.append({"file": "WAVECAR",
-                                 "action": {"_file_delete": {'mode': "actual"}}})
-
-         if "edddav" in self.errors:
-             if vi["INCAR"].get("ICHARG", 0) < 10:
-                 actions.append({"file": "CHGCAR",
-                                 "action": {"_file_delete": {'mode': "actual"}}})
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"ALGO": "All"}}})
-
-         if "grad_not_orth" in self.errors:
-             if vi["INCAR"].get("ISMEAR", 1) < 0:
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ISMEAR": 0, "SIGMA": 0.05}}})
-
-         if "zheev" in self.errors:
-             if vi["INCAR"].get("ALGO", "Fast").lower() != "exact":
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ALGO": "Exact"}}})
-
-         if "elf_kpar" in self.errors:
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"KPAR": 1}}})
-
-         if "rhosyg" in self.errors:
-             if vi["INCAR"].get("SYMPREC", 1e-4) == 1e-4:
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ISYM": 0}}})
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"SYMPREC": 1e-4}}})
-
-         if "posmap" in self.errors:
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"SYMPREC": 1e-6}}})
-
-         if "point_group" in self.errors:
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"ISYM": 0}}})
-
-         VaspModder(vi=vi).apply_actions(actions)
-         return {"errors": list(self.errors), "actions": actions}
-
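In practice this handler is rarely driven by hand: Custodian polls the run, calls check() each cycle, and applies correct() whenever errors are found. A minimal sketch of wiring it up (the `mpirun`/`vasp_std` command is a placeholder for your own executable, and a complete VASP input set is assumed in the working directory):

```
# Hypothetical driver script; paths and commands are placeholders.
from custodian.custodian import Custodian
from custodian.vasp.handlers import VaspErrorHandler
from custodian.vasp.jobs import VaspJob

handlers = [VaspErrorHandler(output_filename="vasp.out")]
jobs = [VaspJob(["mpirun", "vasp_std"], output_file="vasp.out")]

# Custodian calls handler.check() while polling and applies
# handler.correct() up to max_errors times before giving up.
c = Custodian(handlers, jobs, max_errors=5)
c.run()
```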
-
-
- class LrfCommutatorHandler(ErrorHandler):
-     """
-     Corrects LRF_COMMUTATOR errors by setting LPEAD=True if not already set.
-     Note that switching LPEAD=T can slightly change results versus the
-     default due to numerical evaluation of derivatives.
-     """
-
-     is_monitor = True
-
-     error_msgs = {
-         "lrf_comm": ["LRF_COMMUTATOR internal error"],
-     }
-
-     def __init__(self, output_filename="std_err.txt"):
-         """
-         Initializes the handler with the output file to check.
-
-         Args:
-             output_filename (str): This is the file where the stderr for vasp
-                 is being redirected. The error messages that are checked are
-                 present in the stderr. Defaults to "std_err.txt", which is the
-                 default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
-         """
-         self.output_filename = output_filename
-         self.errors = set()
-         self.error_count = Counter()
-
-
- class StdErrHandler(ErrorHandler):
-     """
-     Master StdErr class that handles a number of common errors
-     that occur during VASP runs with error messages only in
-     the standard error.
-     """
-
-     is_monitor = True
-
-     error_msgs = {
-         "kpoints_trans": ["internal error in GENERATE_KPOINTS_TRANS: "
-                           "number of G-vector changed in star"],
-         "out_of_memory": ["Allocation would exceed memory limit"]
-     }
-
-     def __init__(self, output_filename="std_err.txt"):
-         """
-         Initializes the handler with the output file to check.
-
-         Args:
-             output_filename (str): This is the file where the stderr for vasp
-                 is being redirected. The error messages that are checked are
-                 present in the stderr. Defaults to "std_err.txt", which is the
-                 default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
-         """
-         self.output_filename = output_filename
-         self.errors = set()
-         self.error_count = Counter()
-
-
- class AliasingErrorHandler(ErrorHandler):
-     """
-     Master AliasingErrorHandler class that handles aliasing (FFT grid)
-     errors that occur during VASP runs.
-     """
-
-     is_monitor = True
-
-     error_msgs = {
-         "aliasing": [
-             "WARNING: small aliasing (wrap around) errors must be expected"],
-         "aliasing_incar": ["Your FFT grids (NGX,NGY,NGZ) are not sufficient "
-                            "for an accurate"]
-     }
-
-     def __init__(self, output_filename="vasp.out"):
-         """
-         Initializes the handler with the output file to check.
-
-         Args:
-             output_filename (str): This is the file where the stdout for vasp
-                 is being redirected. The error messages that are checked are
-                 present in the stdout. Defaults to "vasp.out", which is the
-                 default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
-         """
-         self.output_filename = output_filename
-         self.errors = set()
-
-
-     def check(self):
-         incar = Incar.from_file("INCAR")
-         self.errors = set()
-         with open(self.output_filename, "r") as f:
-             for line in f:
-                 l = line.strip()
-                 for err, msgs in AliasingErrorHandler.error_msgs.items():
-                     for msg in msgs:
-                         if l.find(msg) != -1:
-                             # A charged computation (e.g., defects)
-                             # legitimately changes the e-density, so do not
-                             # kill the job on a brmix error if NELECT is set.
-                             if err == "brmix" and 'NELECT' in incar:
-                                 continue
-                             self.errors.add(err)
-         return len(self.errors) > 0
-
-
-     def correct(self):
-         backup(VASP_BACKUP_FILES | {self.output_filename})
-         actions = []
-         vi = VaspInput.from_directory(".")
-
-         if "aliasing" in self.errors:
-             with open("OUTCAR") as f:
-                 grid_adjusted = False
-                 changes_dict = {}
-                 r = re.compile(r".+aliasing errors.*(NG.)\s*to\s*(\d+)")
-                 for line in f:
-                     m = r.match(line)
-                     if m:
-                         changes_dict[m.group(1)] = int(m.group(2))
-                         grid_adjusted = True
-                     # Ensure that all of NGX, NGY, NGZ have been checked.
-                     if grid_adjusted and 'NGZ' in line:
-                         actions.append(
-                             {"dict": "INCAR", "action": {"_set": changes_dict}})
-                         if vi["INCAR"].get("ICHARG", 0) < 10:
-                             actions.extend(
-                                 [{"file": "CHGCAR",
-                                   "action": {"_file_delete": {'mode': "actual"}}},
-                                  {"file": "WAVECAR",
-                                   "action": {"_file_delete": {'mode': "actual"}}}])
-                         break
-
-         if "aliasing_incar" in self.errors:
-             # VASP gives different warnings depending on whether the
-             # aliasing error was caused by user-supplied inputs.
-             d = {k: 1 for k in ['NGX', 'NGY', 'NGZ'] if k in vi['INCAR'].keys()}
-             actions.append({"dict": "INCAR", "action": {"_unset": d}})
-
-             if vi["INCAR"].get("ICHARG", 0) < 10:
-                 actions.extend([{"file": "CHGCAR",
-                                  "action": {
-                                      "_file_delete": {'mode': "actual"}}},
-                                 {"file": "WAVECAR",
-                                  "action": {
-                                      "_file_delete": {'mode': "actual"}}}])
-
-         VaspModder(vi=vi).apply_actions(actions)
-         return {"errors": list(self.errors), "actions": actions}
-
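The NG* corrections above are scraped from OUTCAR lines that report the adjusted grid. A standalone sketch of that parse; the sample line here is fabricated for illustration:

```
import re

# Raw string avoids escape-sequence issues; same pattern as in correct().
r = re.compile(r".+aliasing errors.*(NG.)\s*to\s*(\d+)")

sample = "WARNING: aliasing errors, increase NGX to 96"  # made-up line
m = r.match(sample)
if m:
    # -> {'NGX': 96}, which correct() turns into an INCAR _set action.
    changes = {m.group(1): int(m.group(2))}
```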
-
-
- class DriftErrorHandler(ErrorHandler):
-     """
-     Corrects for total drift exceeding the force convergence criteria.
-     """
-
-     def __init__(self, max_drift=None, to_average=3, enaug_multiply=2):
-         """
-         Initializes the handler with a max drift.
-
-         Args:
-             max_drift (float): This defines the max drift. Leaving this at
-                 the default of None gets the max_drift from EDIFFG.
-             to_average (int): Number of most recent ionic steps over which
-                 the drift is averaged. Defaults to 3.
-             enaug_multiply (float): Factor by which to multiply ENAUG once
-                 the other corrections are exhausted. Defaults to 2.
-         """
-         self.max_drift = max_drift
-         self.to_average = int(to_average)
-         self.enaug_multiply = enaug_multiply
-
-
-     def check(self):
-         incar = Incar.from_file("INCAR")
-         if incar.get("EDIFFG", 0.1) >= 0 or incar.get("NSW", 0) == 0:
-             # Only activate when force-relaxing with ionic steps.
-             # The NSW check prevents accidental effects when running DFPT.
-             return False
-
-         if not self.max_drift:
-             self.max_drift = incar["EDIFFG"] * -1
-
-         try:
-             outcar = Outcar("OUTCAR")
-         except Exception:
-             # Can't perform the check if the OUTCAR is not valid.
-             return False
-
-         if len(outcar.data.get('drift', [])) < self.to_average:
-             # Ensure there are enough steps to get an average drift.
-             return False
-
-         curr_drift = outcar.data.get("drift", [])[::-1][:self.to_average]
-         curr_drift = np.average([np.linalg.norm(d) for d in curr_drift])
-         return curr_drift > self.max_drift
-
-
-     def correct(self):
-         backup(VASP_BACKUP_FILES)
-         actions = []
-         vi = VaspInput.from_directory(".")
-
-         incar = vi["INCAR"]
-         outcar = Outcar("OUTCAR")
-
-         # Move CONTCAR to POSCAR.
-         actions.append({"file": "CONTCAR",
-                         "action": {"_file_copy": {"dest": "POSCAR"}}})
-
-         # First try adding ADDGRID.
-         if not incar.get("ADDGRID", False):
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"ADDGRID": True}}})
-         # Otherwise set PREC to High so ENAUG can be used to control the
-         # augmentation grid size.
-         elif incar.get("PREC", "Accurate").lower() != "high":
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"PREC": "High"}}})
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {
-                                 "ENAUG": incar.get("ENCUT", 520) * 2}}})
-         # PREC is already High and ENAUG is set, so just increase it.
-         else:
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"ENAUG": int(
-                                 incar.get("ENAUG", 1040) * self.enaug_multiply)}}})
-
-         curr_drift = outcar.data.get("drift", [])[::-1][:self.to_average]
-         curr_drift = np.average([np.linalg.norm(d) for d in curr_drift])
-         VaspModder(vi=vi).apply_actions(actions)
-         return {"errors": "Excessive drift {} > {}".format(
-             curr_drift, self.max_drift), "actions": actions}
-
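The drift criterion used by check() and correct() is just the mean Euclidean norm of the last `to_average` drift vectors from the OUTCAR. A toy restatement with fabricated drift values:

```
import numpy as np

# Fabricated drift vectors (eV/A), oldest first, as Outcar would report them.
drift = [[0.01, 0.00, 0.02], [0.03, 0.01, 0.00], [0.02, 0.02, 0.01]]
to_average = 3
max_drift = 0.05

recent = drift[::-1][:to_average]  # newest first, take the last N steps
curr_drift = np.average([np.linalg.norm(d) for d in recent])
exceeds = curr_drift > max_drift   # triggers the correction when True
```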
-
-
- class MeshSymmetryErrorHandler(ErrorHandler):
-     """
-     Corrects the mesh symmetry error in VASP. This error is sometimes
-     non-fatal, so this handler only checks at the end of the run, and
-     if the run has converged, no error is recorded.
-     """
-     is_monitor = False
-
-     def __init__(self, output_filename="vasp.out",
-                  output_vasprun="vasprun.xml"):
-         """
-         Initializes the handler with the output files to check.
-
-         Args:
-             output_filename (str): This is the file where the stdout for vasp
-                 is being redirected. The error messages that are checked are
-                 present in the stdout. Defaults to "vasp.out", which is the
-                 default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
-             output_vasprun (str): Filename for the vasprun.xml file. Change
-                 this only if it is different from the default (unlikely).
-         """
-         self.output_filename = output_filename
-         self.output_vasprun = output_vasprun
-
-
-     def check(self):
-         msg = "Reciprocal lattice and k-lattice belong to different class of" \
-               " lattices."
-
-         vi = VaspInput.from_directory('.')
-         # According to the VASP admins, you can disregard this error
-         # if symmetry is off. Also disregard it if automatic KPOINT
-         # generation is used.
-         if (not vi["INCAR"].get('ISYM', True)) or \
-                 vi["KPOINTS"].style == Kpoints.supported_modes.Automatic:
-             return False
-
-         try:
-             v = Vasprun(self.output_vasprun)
-             if v.converged:
-                 return False
-         except Exception:
-             pass
-         with open(self.output_filename, "r") as f:
-             for line in f:
-                 l = line.strip()
-                 if l.find(msg) != -1:
-                     return True
-         return False
-
-
- class UnconvergedErrorHandler(ErrorHandler):
-     """
-     Check if a run is converged.
-     """
-     is_monitor = False
-
-     def __init__(self, output_filename="vasprun.xml"):
-         """
-         Initializes the handler with the output file to check.
-
-         Args:
-             output_filename (str): Filename for the vasprun.xml file. Change
-                 this only if it is different from the default (unlikely).
-         """
-         self.output_filename = output_filename
-
-
-     def correct(self):
-         v = Vasprun(self.output_filename)
-         actions = []
-         if not v.converged_electronic:
-             # Ladder from VeryFast to Fast to Normal to All. Each step
-             # switches to a more stable but more expensive algorithm.
-             algo = v.incar.get("ALGO", "Normal")
-             if algo == "VeryFast":
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ALGO": "Fast"}}})
-             elif algo == "Fast":
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ALGO": "Normal"}}})
-             elif algo == "Normal":
-                 actions.append({"dict": "INCAR",
-                                 "action": {"_set": {"ALGO": "All"}}})
-             else:
-                 # Try modified mixing parameters as a last resort.
-                 new_settings = {"ISTART": 1,
-                                 "ALGO": "Normal",
-                                 "NELMDL": -6,
-                                 "BMIX": 0.001,
-                                 "AMIX_MAG": 0.8,
-                                 "BMIX_MAG": 0.001}
-
-                 if not all(v.incar.get(k, "") == val
-                            for k, val in new_settings.items()):
-                     actions.append({"dict": "INCAR",
-                                     "action": {"_set": new_settings}})
-
-         elif not v.converged_ionic:
-             # Just continue optimizing and let other handlers fix the ionic
-             # optimizer parameters.
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"IBRION": 1}}})
-             actions.append({"file": "CONTCAR",
-                             "action": {"_file_copy": {"dest": "POSCAR"}}})
-
-         if actions:
-             vi = VaspInput.from_directory(".")
-             backup(VASP_BACKUP_FILES)
-             VaspModder(vi=vi).apply_actions(actions)
-             return {"errors": ["Unconverged"], "actions": actions}
-         else:
-             # Unfixable error. Just return None for actions.
-             return {"errors": ["Unconverged"], "actions": None}
-
-
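The electronic-convergence branch above walks a fixed ladder of algorithms; the same ladder reappears in NonConvergingErrorHandler.correct() further down. An illustrative restatement of just the transition table (the names here are mine, not custodian's):

```
# Illustrative only; the handlers implement this with if/elif chains.
ALGO_LADDER = {"VeryFast": "Fast", "Fast": "Normal", "Normal": "All"}

def next_algo(current):
    # Returns None when the ladder is exhausted and mixing is tried instead.
    return ALGO_LADDER.get(current)
```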
-
- class MaxForceErrorHandler(ErrorHandler):
-     """
-     Checks that the desired force convergence has been achieved. Otherwise
-     restarts the run with a smaller EDIFF. (This is necessary since energy
-     and force convergence criteria cannot be set simultaneously.)
-     """
-     is_monitor = False
-
-     def __init__(self, output_filename="vasprun.xml",
-                  max_force_threshold=0.25):
-         """
-         Args:
-             output_filename (str): Name to look for the vasprun.
-             max_force_threshold (float): Threshold for the max force at which
-                 the run is restarted. (Typically this should be set to the
-                 value that the workflow creator looks for.)
-         """
-         self.output_filename = output_filename
-         self.max_force_threshold = max_force_threshold
-
-
- class PotimErrorHandler(ErrorHandler):
-     """
-     Check if a run has excessively large positive energy changes.
-     This is typically caused by too large a POTIM. Runs typically
-     end up crashing with some other error (e.g. BRMIX) as the geometry
-     gets progressively worse.
-     """
-     is_monitor = True
-
-     def __init__(self, input_filename="POSCAR", output_filename="OSZICAR",
-                  dE_threshold=1):
-         """
-         Initializes the handler with the input and output files to check.
-
-         Args:
-             input_filename (str): This is the POSCAR file that the run
-                 started from. Defaults to "POSCAR". Change
-                 this only if it is different from the default (unlikely).
-             output_filename (str): This is the OSZICAR file. Change
-                 this only if it is different from the default (unlikely).
-             dE_threshold (float): The threshold energy change. Defaults to
-                 1 eV.
-         """
-         self.input_filename = input_filename
-         self.output_filename = output_filename
-         self.dE_threshold = dE_threshold
-
-
- class FrozenJobErrorHandler(ErrorHandler):
-     """
-     Detects an error when the output file has not been updated
-     in timeout seconds. Changes ALGO from Fast to Normal.
-     """
-
-     is_monitor = True
-
-     def __init__(self, output_filename="vasp.out", timeout=21600):
-         """
-         Initializes the handler with the output file to check.
-
-         Args:
-             output_filename (str): This is the file where the stdout for vasp
-                 is being redirected. The error messages that are checked are
-                 present in the stdout. Defaults to "vasp.out", which is the
-                 default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
-             timeout (int): The time in seconds between checks where if there
-                 is no activity on the output file, the run is considered
-                 frozen. Defaults to 21600 seconds, i.e., 6 hours.
-         """
-         self.output_filename = output_filename
-         self.timeout = timeout
-
-
- class NonConvergingErrorHandler(ErrorHandler):
-     """
-     Check if a run is hitting the maximum number of electronic steps at the
-     last nionic_steps ionic steps (default=10). If so, change ALGO from Fast
-     to Normal or kill the job.
-     """
-     is_monitor = True
-
-     def __init__(self, output_filename="OSZICAR", nionic_steps=10):
-         """
-         Initializes the handler with the output file to check.
-
-         Args:
-             output_filename (str): This is the OSZICAR file. Change
-                 this only if it is different from the default (unlikely).
-             nionic_steps (int): The threshold number of ionic steps that
-                 needs to hit the maximum number of electronic steps for the
-                 run to be considered non-converging.
-         """
-         self.output_filename = output_filename
-         self.nionic_steps = nionic_steps
-
-
-     def correct(self):
-         vi = VaspInput.from_directory(".")
-         algo = vi["INCAR"].get("ALGO", "Normal")
-         amix = vi["INCAR"].get("AMIX", 0.4)
-         bmix = vi["INCAR"].get("BMIX", 1.0)
-         amin = vi["INCAR"].get("AMIN", 0.1)
-         actions = []
-         # Ladder from VeryFast to Fast to Normal to All. Each step
-         # switches to a more stable but more expensive algorithm.
-         if algo == "VeryFast":
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"ALGO": "Fast"}}})
-         elif algo == "Fast":
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"ALGO": "Normal"}}})
-         elif algo == "Normal":
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"ALGO": "All"}}})
-         elif amix > 0.1 and bmix > 0.01:
-             # Try linear mixing.
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"AMIX": 0.1, "BMIX": 0.01,
-                                                 "ICHARG": 2}}})
-         elif bmix < 3.0 and amin > 0.01:
-             # Try increasing bmix.
-             actions.append({"dict": "INCAR",
-                             "action": {"_set": {"AMIN": 0.01, "BMIX": 3.0,
-                                                 "ICHARG": 2}}})
-
-         if actions:
-             backup(VASP_BACKUP_FILES)
-             VaspModder(vi=vi).apply_actions(actions)
-             return {"errors": ["Non-converging job"], "actions": actions}
-         # Unfixable error. Just return None for actions.
-         else:
-             return {"errors": ["Non-converging job"], "actions": None}
-
-
- class WalltimeHandler(ErrorHandler):
-     """
-     Check if a run is nearing the walltime. If so, write a STOPCAR with
-     LSTOP or LABORT = .True.. You can specify the walltime in the init
-     (unfortunately necessary for SGE and SLURM systems). If you happen
-     to be running on a PBS system and the PBS_WALLTIME variable is in the
-     run environment, the walltime will be determined automatically if not
-     set.
-     """
-     is_monitor = True
-
-     # The WalltimeHandler should not terminate as we want VASP to terminate
-     # itself naturally with the STOPCAR.
-     is_terminating = False
-
-     # This handler will be unrecoverable, but custodian shouldn't raise an
-     # error.
-     raises_runtime_error = False
-
-     def __init__(self, wall_time=None, buffer_time=300,
-                  electronic_step_stop=False):
-         """
-         Initializes the handler with a buffer time.
-
-         Args:
-             wall_time (int): Total walltime in seconds. If this is None and
-                 the job is running on a PBS system, the handler will attempt
-                 to determine the walltime from the PBS_WALLTIME environment
-                 variable. If the walltime cannot be determined or is not
-                 set, this handler will have no effect.
-             buffer_time (int): The min amount of buffer time in secs at the
-                 end that the STOPCAR will be written. The STOPCAR is written
-                 when the time remaining is < the higher of 3 x the average
-                 time for each ionic step and the buffer time. Defaults to
-                 300 secs, which is the default polling time of Custodian.
-                 This is typically sufficient for the current ionic step to
-                 complete. But if other operations are being performed after
-                 the run has stopped, the buffer time may need to be increased
-                 accordingly.
-             electronic_step_stop (bool): Whether to check for electronic steps
-                 instead of ionic steps (e.g. for static runs on large systems
-                 or static HSE runs, ...). Be careful that results such as
-                 density or wavefunctions might not be converged at the
-                 electronic level. Should be used with LWAVE = .True. to be
-                 useful. If this is True, the STOPCAR is written with
-                 LABORT = .TRUE. instead of LSTOP = .TRUE.
-         """
-         if wall_time is not None:
-             self.wall_time = wall_time
-         elif "PBS_WALLTIME" in os.environ:
-             self.wall_time = int(os.environ["PBS_WALLTIME"])
-         elif "SBATCH_TIMELIMIT" in os.environ:
-             self.wall_time = int(os.environ["SBATCH_TIMELIMIT"])
-         else:
-             self.wall_time = None
-         self.buffer_time = buffer_time
-         # Sets CUSTODIAN_WALLTIME_START as the start time to use for
-         # future jobs in the same batch environment. Can also be
-         # set manually by the user in the batch environment.
-         if "CUSTODIAN_WALLTIME_START" in os.environ:
-             self.start_time = datetime.datetime.strptime(
-                 os.environ["CUSTODIAN_WALLTIME_START"],
-                 "%a %b %d %H:%M:%S %Z %Y")
-         else:
-             self.start_time = datetime.datetime.now()
-             os.environ["CUSTODIAN_WALLTIME_START"] = datetime.datetime.strftime(
-                 self.start_time, "%a %b %d %H:%M:%S UTC %Y")
-
-         self.electronic_step_stop = electronic_step_stop
-         self.electronic_steps_timings = [0]
-         self.prev_check_time = self.start_time
-
-
-     def check(self):
-         if self.wall_time:
-             run_time = datetime.datetime.now() - self.start_time
-             total_secs = run_time.total_seconds()
-             outcar = Outcar("OUTCAR")
-             if not self.electronic_step_stop:
-                 # Determine the max time per ionic step.
-                 outcar.read_pattern({"timings": r"LOOP\+.+real time(.+)"},
-                                     postprocess=float)
-             else:
-                 # Determine the max time per electronic step.
-                 outcar.read_pattern({"timings": r"LOOP:.+real time(.+)"},
-                                     postprocess=float)
-             timings = outcar.data.get("timings", [])
-             time_per_step = np.max(timings) if timings else 0
-
-             # Stop if the remaining time is less than the time for 3 steps
-             # or the buffer time, whichever is larger.
-             time_left = self.wall_time - total_secs
-             if time_left < max(time_per_step * 3, self.buffer_time):
-                 return True
-
-         return False
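The stopping rule is: write a STOPCAR once the remaining walltime drops below the larger of three steps' worth of runtime and the buffer. A toy restatement with fabricated numbers:

```
# Fabricated numbers for illustration.
wall_time = 86400          # total allocation in seconds
total_secs = 86100         # elapsed since CUSTODIAN_WALLTIME_START
time_per_step = 120        # max LOOP+ time seen in OUTCAR
buffer_time = 300

time_left = wall_time - total_secs                             # 300 s
should_stop = time_left < max(3 * time_per_step, buffer_time)  # True here
```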
- class CheckpointHandler(ErrorHandler):
-     """
-     This is not an error handler per se, but rather a checkpointer. Every
-     `interval` seconds, a STOPCAR and CHKPT will be written, which forces
-     VASP to stop at the end of the next ionic step. The files are then
-     copied into a subdir, and the job is restarted. To use this properly,
-     max_errors in Custodian must be set to a very high value, and you
-     probably wouldn't want to use any standard VASP error handlers. The
-     checkpoints will be stored in subdirs chk_#. This should be used in
-     combination with the StoppedRunHandler.
-     """
-     is_monitor = True
-
-     # The CheckpointHandler should not terminate as we want VASP to
-     # terminate itself naturally with the STOPCAR.
-     is_terminating = False
-
-     def __init__(self, interval=3600):
-         """
-         Initializes the handler with an interval.
-
-         Args:
-             interval (int): Interval at which to checkpoint in seconds.
-                 Defaults to 3600 (1 hr).
-         """
-         self.interval = interval
-         self.start_time = datetime.datetime.now()
-         self.chk_counter = 0
-
-     def __str__(self):
-         return "CheckpointHandler with interval %d" % self.interval
-
-
-
- class StoppedRunHandler(ErrorHandler):
-     """
-     This is not an error handler per se, but rather the restart side of
-     the checkpointing scheme: it restarts a job from the files saved by a
-     checkpoint. To use this properly, max_errors in Custodian must be set
-     to a very high value, and you probably wouldn't want to use any
-     standard VASP error handlers. This should be used in combination with
-     the CheckpointHandler.
-     """
-     is_monitor = False
-
-     # The StoppedRunHandler should not terminate as we want VASP to
-     # terminate itself naturally with the STOPCAR.
-     is_terminating = False
-
-     def __init__(self):
-         pass
-
-
- class PositiveEnergyErrorHandler(ErrorHandler):
-     """
-     Check if a run has positive absolute energy.
-     If so, change ALGO from Fast to Normal or kill the job.
-     """
-     is_monitor = True
-
-     def __init__(self, output_filename="OSZICAR"):
-         """
-         Initializes the handler with the output file to check.
-
-         Args:
-             output_filename (str): This is the OSZICAR file. Change
-                 this only if it is different from the default (unlikely).
-         """
-         self.output_filename = output_filename
-
-
- class VaspModder(Modder):
-     def __init__(self, actions=None, strict=True, vi=None):
-         """
-         Initializes a Modder for VaspInput sets.
-
-         Args:
-             actions ([Action]): A sequence of supported actions. See
-                 :mod:`custodian.ansible.actions`. Default is None,
-                 which means DictActions and FileActions are supported.
-             strict (bool): Indicating whether to use strict mode. In
-                 non-strict mode, unsupported actions are simply ignored
-                 without any errors raised. In strict mode, if an unsupported
-                 action is supplied, a ValueError is raised. Defaults to True.
-             vi (VaspInput): A VaspInput object from the current directory.
-                 Initialized automatically if not passed (but passing it will
-                 avoid having to reparse the directory).
-         """
-         self.vi = vi or VaspInput.from_directory('.')
-         actions = actions or [FileActions, DictActions]
-         super(VaspModder, self).__init__(actions, strict)
-
-
-     def apply_actions(self, actions):
-         """
-         Applies a list of actions to the Vasp Input Set and rewrites modified
-         files.
-
-         Args:
-             actions ([dict]): A list of actions of the form {'file': filename,
-                 'action': moddermodification} or {'dict': vaspinput_key,
-                 'action': moddermodification}.
-         """
-         modified = []
-         for a in actions:
-             if "dict" in a:
-                 k = a["dict"]
-                 modified.append(k)
-                 self.vi[k] = self.modify_object(a["action"], self.vi[k])
-             elif "file" in a:
-                 self.modify(a["action"], a["file"])
-             else:
-                 raise ValueError("Unrecognized format: {}".format(a))
-         for f in modified:
-             self.vi[f].write_file(f)
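An action is just an ansible-style dict keyed on either a VASP input object ("dict") or an arbitrary file ("file"). A minimal sketch, assuming the working directory holds a full VASP input set (the import path reflects where VaspModder lives in recent custodian releases):

```
from custodian.vasp.interpreter import VaspModder

# _set rewrites the INCAR; _file_copy stages CONTCAR as the next POSCAR.
actions = [
    {"dict": "INCAR", "action": {"_set": {"ISTART": 1, "ALGO": "Normal"}}},
    {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}},
]
VaspModder().apply_actions(actions)
```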
- class VaspJob(Job):
-     """
-     A basic vasp job: runs whatever is in the current directory, though it
-     can conceivably perform complex processing of the inputs during
-     initialization.
-     """
-
-     def __init__(self, vasp_cmd, output_file="vasp.out",
-                  stderr_file="std_err.txt", suffix="", final=True,
-                  backup=True, auto_npar=False, auto_gamma=True,
-                  settings_override=None, gamma_vasp_cmd=None,
-                  copy_magmom=False, auto_continue=False):
-         """
-         This constructor is necessarily complex due to the need for
-         flexibility. For standard kinds of runs, it's often better to use one
-         of the static constructors. The defaults are usually fine too.
-
-         Args:
-             vasp_cmd (str): Command to run vasp as a list of args. For
-                 example, if you are using mpirun, it can be something like
-                 ["mpirun", "pvasp.5.2.11"]
-             output_file (str): Name of file to direct standard out to.
-                 Defaults to "vasp.out".
-             stderr_file (str): Name of file to direct standard error to.
-                 Defaults to "std_err.txt".
-             suffix (str): A suffix to be appended to the final output. E.g.,
-                 to rename all VASP output from say vasp.out to
-                 vasp.out.relax1, provide ".relax1" as the suffix.
-             final (bool): Indicating whether this is the final vasp job in a
-                 series. Defaults to True.
-             backup (bool): Whether to backup the initial input files. If True,
-                 the INCAR, KPOINTS, POSCAR and POTCAR will be copied with a
-                 ".orig" appended. Defaults to True.
-             auto_npar (bool): Whether to automatically tune NPAR to be sqrt(
-                 number of cores) as recommended by VASP for DFT calculations.
-                 Generally, this results in significant speedups. Defaults to
-                 False. Avoid enabling it for HF, GW and RPA calculations.
-             auto_gamma (bool): Whether to automatically check if run is a
-                 Gamma 1x1x1 run, and whether a Gamma optimized version of
-                 VASP exists with ".gamma" appended to the name of the VASP
-                 executable (typical setup in many systems). If so, run the
-                 gamma optimized version of VASP instead of regular VASP. You
-                 can also specify the gamma vasp command using the
-                 gamma_vasp_cmd argument if the command is named differently.
-             settings_override ([dict]): An ansible style list of dict to
-                 override changes. For example, to set ISTART=1 for subsequent
-                 runs and to copy the CONTCAR to the POSCAR, you will provide::
-
-                     [{"dict": "INCAR", "action": {"_set": {"ISTART": 1}}},
-                      {"file": "CONTCAR",
-                       "action": {"_file_copy": {"dest": "POSCAR"}}}]
-             gamma_vasp_cmd (str): Command for gamma vasp version when
-                 auto_gamma is True. Should follow the list style of
-                 subprocess. Defaults to None, which means ".gamma" is added
-                 to the last argument of the standard vasp_cmd.
-             copy_magmom (bool): Whether to copy the final magmom from the
-                 OUTCAR to the next INCAR. Useful for multi-relaxation runs
-                 where the CHGCAR and WAVECAR are sometimes deleted (due to
-                 changes in fft grid, etc.). Only applies to non-final runs.
-             auto_continue (bool): Whether to automatically continue a run
-                 if a STOPCAR is present. This is very useful if using the
-                 wall-time handler, which will write a read-only STOPCAR to
-                 prevent VASP from deleting it once it finishes.
-         """
-         self.vasp_cmd = vasp_cmd
-         self.output_file = output_file
-         self.stderr_file = stderr_file
-         self.final = final
-         self.backup = backup
-         self.suffix = suffix
-         self.settings_override = settings_override
-         self.auto_npar = auto_npar
-         self.auto_gamma = auto_gamma
-         self.gamma_vasp_cmd = gamma_vasp_cmd
-         self.copy_magmom = copy_magmom
-         self.auto_continue = auto_continue
-
-         if SENTRY_DSN:
-             # If using Sentry logging, add the specific VASP executable to
-             # the scope.
-             from sentry_sdk import configure_scope
-             with configure_scope() as scope:
-                 try:
-                     if isinstance(vasp_cmd, str):
-                         vasp_path = which(vasp_cmd.split(' ')[-1])
-                     elif isinstance(vasp_cmd, list):
-                         vasp_path = which(vasp_cmd[-1])
-                     scope.set_tag("vasp_path", vasp_path)
-                     scope.set_tag("vasp_cmd", vasp_cmd)
-                 except Exception:
-                     logger.error("Failed to detect VASP path: {}".format(
-                         vasp_cmd), exc_info=True)
-                     scope.set_tag("vasp_cmd", vasp_cmd)
-
-
-     def setup(self):
-         """
-         Performs initial setup for VaspJob, including overriding any settings
-         and backing up.
-         """
-         decompress_dir('.')
-
-         if self.backup:
-             for f in VASP_INPUT_FILES:
-                 shutil.copy(f, "{}.orig".format(f))
-
-         if self.auto_npar:
-             try:
-                 incar = Incar.from_file("INCAR")
-                 # Only optimize NPAR for non-HF and non-RPA calculations.
-                 if not (incar.get("LHFCALC") or incar.get("LRPA") or
-                         incar.get("LEPSILON")):
-                     if incar.get("IBRION") in [5, 6, 7, 8]:
-                         # NPAR should not be set for Hessian matrix
-                         # calculations, whether in DFPT or otherwise.
-                         del incar["NPAR"]
-                     else:
-                         import multiprocessing
-                         # Try the SGE environment variable first
-                         # (since multiprocessing counts cores on the current
-                         # machine only).
-                         ncores = os.environ.get('NSLOTS') or \
-                             multiprocessing.cpu_count()
-                         ncores = int(ncores)
-                         for npar in range(int(math.sqrt(ncores)), ncores):
-                             if ncores % npar == 0:
-                                 incar["NPAR"] = npar
-                                 break
-                     incar.write_file("INCAR")
-             except Exception:
-                 pass
-
-         if self.auto_continue:
-             if os.path.exists("continue.json"):
-                 actions = loadfn("continue.json").get("actions")
-                 logger.info("Continuing previous VaspJob. "
-                             "Actions: {}".format(actions))
-                 backup(VASP_BACKUP_FILES, prefix="prev_run")
-                 VaspModder().apply_actions(actions)
-             else:
-                 # Default functionality is to copy CONTCAR to POSCAR and set
-                 # ISTART to 1 in the INCAR, but other actions can be
-                 # specified.
-                 if self.auto_continue is True:
-                     actions = [{"file": "CONTCAR",
-                                 "action": {"_file_copy": {"dest": "POSCAR"}}},
-                                {"dict": "INCAR",
-                                 "action": {"_set": {"ISTART": 1}}}]
-                 else:
-                     actions = self.auto_continue
-                 dumpfn({"actions": actions}, "continue.json")
-
-         if self.settings_override is not None:
-             VaspModder().apply_actions(self.settings_override)
-
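The auto_npar logic above picks the smallest divisor of the core count that is at least sqrt(ncores). A standalone restatement with a fabricated core count:

```
import math

ncores = 16  # fabricated; setup() reads NSLOTS or multiprocessing.cpu_count()
npar = None
for candidate in range(int(math.sqrt(ncores)), ncores):
    if ncores % candidate == 0:
        npar = candidate  # 4 for 16 cores
        break
```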
-
-     def run(self):
-         """
-         Perform the actual VASP run.
-
-         Returns:
-             (subprocess.Popen) Used for monitoring.
-         """
-         cmd = list(self.vasp_cmd)
-         if self.auto_gamma:
-             vi = VaspInput.from_directory(".")
-             kpts = vi["KPOINTS"]
-             if kpts.style == Kpoints.supported_modes.Gamma \
-                     and tuple(kpts.kpts[0]) == (1, 1, 1):
-                 if self.gamma_vasp_cmd is not None and which(
-                         self.gamma_vasp_cmd[-1]):
-                     cmd = self.gamma_vasp_cmd
-                 elif which(cmd[-1] + ".gamma"):
-                     cmd[-1] += ".gamma"
-         logger.info("Running {}".format(" ".join(cmd)))
-         with open(self.output_file, 'w') as f_std, \
-                 open(self.stderr_file, "w", buffering=1) as f_err:
-             # Use line buffering for stderr.
-             p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)
-         return p
-
-
-     def postprocess(self):
-         """
-         Postprocessing includes renaming and gzipping where necessary.
-         Also copies the magmom to the INCAR if necessary.
-         """
-         for f in VASP_OUTPUT_FILES + [self.output_file]:
-             if os.path.exists(f):
-                 if self.final and self.suffix != "":
-                     shutil.move(f, "{}{}".format(f, self.suffix))
-                 elif self.suffix != "":
-                     shutil.copy(f, "{}{}".format(f, self.suffix))
-
-         if self.copy_magmom and not self.final:
-             try:
-                 outcar = Outcar("OUTCAR")
-                 magmom = [m['tot'] for m in outcar.magnetization]
-                 incar = Incar.from_file("INCAR")
-                 incar['MAGMOM'] = magmom
-                 incar.write_file("INCAR")
-             except Exception:
-                 logger.error('MAGMOM copy from OUTCAR to INCAR failed')
-
-         # Remove the continuation file so that a subsequent job run in the
-         # same directory will not restart this job.
-         if os.path.exists("continue.json"):
-             os.remove("continue.json")
-
-
-     @classmethod
-     def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,
-                               half_kpts_first_relax=False,
-                               auto_continue=False):
-         """
-         Returns a list of two jobs corresponding to an AFLOW style double
-         relaxation run.
-
-         Args:
-             vasp_cmd (str): Command to run vasp as a list of args. For
-                 example, if you are using mpirun, it can be something like
-                 ["mpirun", "pvasp.5.2.11"]
-             auto_npar (bool): Whether to automatically tune NPAR to be sqrt(
-                 number of cores) as recommended by VASP for DFT calculations.
-                 Generally, this results in significant speedups. Defaults to
-                 True. Set to False for HF, GW and RPA calculations.
-             ediffg (float): Force convergence criteria for subsequent runs (
-                 ignored for the initial run.)
-             half_kpts_first_relax (bool): Whether to halve the kpoint grid
-                 for the first relaxation. Speeds up difficult convergence
-                 considerably. Defaults to False.
-             auto_continue (bool): Whether to automatically continue a run
-                 if a STOPCAR is present. Defaults to False.
-
-         Returns:
-             List of two jobs corresponding to an AFLOW style run.
-         """
-         incar_update = {"ISTART": 1}
-         if ediffg:
-             incar_update["EDIFFG"] = ediffg
-         settings_override_1 = None
-         settings_override_2 = [
-             {"dict": "INCAR",
-              "action": {"_set": incar_update}},
-             {"file": "CONTCAR",
-              "action": {"_file_copy": {"dest": "POSCAR"}}}]
-         if half_kpts_first_relax and os.path.exists("KPOINTS") and \
-                 os.path.exists("POSCAR"):
-             kpts = Kpoints.from_file("KPOINTS")
-             orig_kpts_dict = kpts.as_dict()
-             # Halve the k-point grid, but keep at least 1 k-point in each
-             # direction.
-             kpts.kpts = np.round(np.maximum(np.array(kpts.kpts) / 2,
-                                             1)).astype(int).tolist()
-             low_kpts_dict = kpts.as_dict()
-             settings_override_1 = [
-                 {"dict": "KPOINTS",
-                  "action": {"_set": low_kpts_dict}}
-             ]
-             settings_override_2.append(
-                 {"dict": "KPOINTS",
-                  "action": {"_set": orig_kpts_dict}}
-             )
-
-         return [VaspJob(vasp_cmd, final=False, suffix=".relax1",
-                         auto_npar=auto_npar, auto_continue=auto_continue,
-                         settings_override=settings_override_1),
-                 VaspJob(vasp_cmd, final=True, backup=False, suffix=".relax2",
-                         auto_npar=auto_npar, auto_continue=auto_continue,
-                         settings_override=settings_override_2)]
-
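As with single jobs, the returned pair is normally handed straight to Custodian. A minimal sketch (the command is a placeholder for your own executable):

```
from custodian.custodian import Custodian
from custodian.vasp.handlers import VaspErrorHandler
from custodian.vasp.jobs import VaspJob

jobs = VaspJob.double_relaxation_run(["mpirun", "vasp_std"],
                                     half_kpts_first_relax=True)
c = Custodian([VaspErrorHandler()], jobs, max_errors=10)
c.run()  # produces vasp.out.relax1 and vasp.out.relax2
```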
-
-     @classmethod
-     def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,
-                         half_kpts_first_relax=False, auto_continue=False):
-         """
-         Returns a list of three jobs to perform an optimization for any
-         metaGGA functional. There is an initial calculation of the
-         GGA wavefunction which is fed into the initial metaGGA optimization
-         to precondition the electronic structure optimizer. The metaGGA
-         optimization is performed using the double relaxation scheme.
-         """
-         incar = Incar.from_file("INCAR")
-         # Defaults to using the SCAN metaGGA.
-         metaGGA = incar.get("METAGGA", "SCAN")
-
-         # Pre-optimize the WAVECAR and structure using regular GGA.
-         pre_opt_settings = [{"dict": "INCAR",
-                              "action": {"_set": {"METAGGA": None,
-                                                  "LWAVE": True,
-                                                  "NSW": 0}}}]
-         jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
-                         final=False, suffix=".precondition",
-                         settings_override=pre_opt_settings)]
-
-         # Finish with a regular double relaxation style run using SCAN.
-         jobs.extend(VaspJob.double_relaxation_run(
-             vasp_cmd, auto_npar=auto_npar, ediffg=ediffg,
-             half_kpts_first_relax=half_kpts_first_relax))
-
-         # Ensure the first relaxation doesn't overwrite the original inputs.
-         jobs[1].backup = False
-
-         # Update the double relaxation job to start from the pre-optimized
-         # run.
-         post_opt_settings = [{"dict": "INCAR",
-                               "action": {"_set": {"METAGGA": metaGGA,
-                                                   "ISTART": 1,
-                                                   "NSW": incar.get("NSW", 99),
-                                                   "LWAVE": incar.get("LWAVE",
-                                                                      False)}}},
-                              {"file": "CONTCAR",
-                               "action": {"_file_copy": {"dest": "POSCAR"}}}]
-         if jobs[1].settings_override:
-             post_opt_settings = jobs[1].settings_override + post_opt_settings
-         jobs[1].settings_override = post_opt_settings
-
-         return jobs
-
-
-     @classmethod
-     def full_opt_run(cls, vasp_cmd, vol_change_tol=0.02,
-                      max_steps=10, ediffg=-0.05, half_kpts_first_relax=False,
-                      **vasp_job_kwargs):
-         """
-         Returns a generator of jobs for a full optimization run. Basically,
-         this runs an infinite series of geometry optimization jobs until the
-         % vol change in a particular optimization is less than
-         vol_change_tol.
-
-         Args:
-             vasp_cmd (str): Command to run vasp as a list of args. For
-                 example, if you are using mpirun, it can be something like
-                 ["mpirun", "pvasp.5.2.11"]
-             vol_change_tol (float): The tolerance at which to stop a run.
-                 Defaults to 0.02, i.e., 2%.
-             max_steps (int): The maximum number of runs. Defaults to 10 (
-                 highly unlikely that this limit is ever reached).
-             ediffg (float): Force convergence criteria for subsequent runs (
-                 ignored for the initial run.)
-             half_kpts_first_relax (bool): Whether to halve the kpoint grid
-                 for the first relaxation. Speeds up difficult convergence
-                 considerably. Defaults to False.
-             \*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See
-                 :class:`custodian.vasp.jobs.VaspJob`.
-
-         Returns:
-             Generator of jobs.
-         """
-         for i in range(max_steps):
-             if i == 0:
-                 settings = None
-                 backup = True
-                 if half_kpts_first_relax and os.path.exists("KPOINTS") and \
-                         os.path.exists("POSCAR"):
-                     kpts = Kpoints.from_file("KPOINTS")
-                     orig_kpts_dict = kpts.as_dict()
-                     kpts.kpts = np.maximum(np.array(kpts.kpts) / 2,
-                                            1).tolist()
-                     low_kpts_dict = kpts.as_dict()
-                     settings = [
-                         {"dict": "KPOINTS",
-                          "action": {"_set": low_kpts_dict}}
-                     ]
-             else:
-                 backup = False
-                 initial = Poscar.from_file("POSCAR").structure
-                 final = Poscar.from_file("CONTCAR").structure
-                 vol_change = (final.volume - initial.volume) / initial.volume
-
-                 logger.info("Vol change = %.1f%%!" % (vol_change * 100))
-                 if abs(vol_change) < vol_change_tol:
-                     logger.info("Stopping optimization!")
-                     break
-                 else:
-                     incar_update = {"ISTART": 1}
-                     if ediffg:
-                         incar_update["EDIFFG"] = ediffg
-                     settings = [
-                         {"dict": "INCAR",
-                          "action": {"_set": incar_update}},
-                         {"file": "CONTCAR",
-                          "action": {"_file_copy": {"dest": "POSCAR"}}}]
-                     if i == 1 and half_kpts_first_relax:
-                         settings.append({"dict": "KPOINTS",
-                                          "action": {"_set": orig_kpts_dict}})
-             logger.info("Generating job = %d!" % (i + 1))
-             yield VaspJob(vasp_cmd, final=False, backup=backup,
-                           suffix=".relax%d" % (i + 1),
-                           settings_override=settings, **vasp_job_kwargs)
-
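Because this is a generator, each job is constructed only after the previous one finishes, which is what lets the volume check decide whether to continue. Custodian accepts the generator directly; a sketch (command is a placeholder):

```
from custodian.custodian import Custodian
from custodian.vasp.handlers import VaspErrorHandler
from custodian.vasp.jobs import VaspJob

# The generator yields .relax1, .relax2, ... until the volume change
# between successive runs falls below vol_change_tol.
jobs = VaspJob.full_opt_run(["mpirun", "vasp_std"], vol_change_tol=0.02)
c = Custodian([VaspErrorHandler()], jobs, max_errors=10)
c.run()
```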
-
-     @classmethod
-     def constrained_opt_run(cls, vasp_cmd, lattice_direction, initial_strain,
-                             atom_relax=True, max_steps=20, algo="bfgs",
-                             **vasp_job_kwargs):
-         """
-         Returns a generator of jobs for a constrained optimization run. A
-         typical use case is when you want to approximate a biaxial strain
-         situation, e.g., you apply a defined strain to the a and b directions
-         of the lattice, but allow the c-direction to relax.
-
-         Some guidelines on the use of this method:
-
-         i.  It is recommended you do not use the Auto kpoint generation. The
-             grid generated via Auto may fluctuate with changes in lattice
-             param, resulting in numerical noise.
-         ii. Make sure your EDIFF/EDIFFG is properly set in your INCAR. The
-             optimization relies on these values to determine convergence.
-
-         Args:
-             vasp_cmd (str): Command to run vasp as a list of args. For
-                 example, if you are using mpirun, it can be something like
-                 ["mpirun", "pvasp.5.2.11"]
-             lattice_direction (str): Which direction to relax. Valid values
-                 are "a", "b" or "c".
-             initial_strain (float): An initial strain to be applied to the
-                 lattice_direction. This can usually be estimated as the
-                 negative of the strain applied in the other two directions.
-                 E.g., if you apply a tensile strain of 0.05 to the a and b
-                 directions, you can use -0.05 as a reasonable first guess
-                 for the initial strain.
-             atom_relax (bool): Whether to relax atomic positions.
-             max_steps (int): The maximum number of runs. Defaults to 20 (
-                 highly unlikely that this limit is ever reached).
-             algo (str): Algorithm to use to find the minimum. Default is
-                 "bfgs", which is fast but can be sensitive to numerical noise
-                 in energy calculations. The alternative is "bisection",
-                 which is more robust but can be a bit slow. The code does
-                 fall back on the bisection when bfgs gives a non-sensical
-                 result, e.g., negative lattice params.
-             \*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See
-                 :class:`custodian.vasp.jobs.VaspJob`.
-
-         Returns:
-             Generator of jobs. At the end of the run, an "EOS.txt" is written
-             which provides a quick look at the E vs lattice parameter.
-         """
-         nsw = 99 if atom_relax else 0
-
-         incar = Incar.from_file("INCAR")
-
-         # Set the energy convergence criteria as the EDIFFG (if present) or
-         # 10 x EDIFF (which itself defaults to 1e-4 if not present).
-         if incar.get("EDIFFG") and incar.get("EDIFFG") > 0:
-             etol = incar["EDIFFG"]
-         else:
-             etol = incar.get("EDIFF", 1e-4) * 10
-
-         if lattice_direction == "a":
-             lattice_index = 0
-         elif lattice_direction == "b":
-             lattice_index = 1
-         else:
-             lattice_index = 2
-
-         energies = {}
-
-         for i in range(max_steps):
-             if i == 0:
-                 settings = [
-                     {"dict": "INCAR",
-                      "action": {"_set": {"ISIF": 2, "NSW": nsw}}}]
-                 structure = Poscar.from_file("POSCAR").structure
-                 x = structure.lattice.abc[lattice_index]
-                 backup = True
-             else:
-                 backup = False
-                 v = Vasprun("vasprun.xml")
-                 structure = v.final_structure
-                 energy = v.final_energy
-                 lattice = structure.lattice
-
-                 x = lattice.abc[lattice_index]
-
-                 energies[x] = energy
-
-                 if i == 1:
-                     x *= (1 + initial_strain)
-                 else:
-                     # Sort the lattice parameters by energies.
-                     min_x = min(energies.keys(), key=lambda e: energies[e])
-                     sorted_x = sorted(energies.keys())
-                     ind = sorted_x.index(min_x)
-                     if ind == 0:
-                         other = ind + 1
-                     elif ind == len(sorted_x) - 1:
-                         other = ind - 1
-                     else:
-                         other = ind + 1 \
-                             if energies[sorted_x[ind + 1]] \
-                             < energies[sorted_x[ind - 1]] \
-                             else ind - 1
-                     if abs(energies[min_x]
-                            - energies[sorted_x[other]]) < etol:
-                         logger.info("Stopping optimization! Final %s = %f"
-                                     % (lattice_direction, min_x))
-                         break
-
-                     if ind == 0 and len(sorted_x) > 2:
-                         # The lowest energy lies below the sampled range.
-                         # Decrease the lattice parameter in the next
-                         # iteration to find a minimum. This applies only
-                         # when there are at least 3 values.
-                         x = sorted_x[0] - abs(sorted_x[1] - sorted_x[0])
-                         logger.info("Lowest energy lies below bounds. "
-                                     "Setting %s = %f." % (lattice_direction,
-                                                           x))
-                     elif ind == len(sorted_x) - 1 and len(sorted_x) > 2:
-                         # The lowest energy lies above the sampled range.
-                         # Increase the lattice parameter in the next
-                         # iteration to find a minimum. This applies only
-                         # when there are at least 3 values.
-                         x = sorted_x[-1] + abs(sorted_x[-1] - sorted_x[-2])
-                         logger.info("Lowest energy lies above bounds. "
-                                     "Setting %s = %f." % (lattice_direction,
-                                                           x))
-                     else:
-                         if algo.lower() == "bfgs" and len(sorted_x) >= 4:
-                             try:
-                                 # If there are at least 4 data points, do a
-                                 # quadratic fit to accelerate convergence.
-                                 x1 = list(energies.keys())
-                                 y1 = [energies[j] for j in x1]
-                                 z1 = np.polyfit(x1, y1, 2)
-                                 pp = np.poly1d(z1)
-                                 from scipy.optimize import minimize
-                                 result = minimize(
-                                     pp, min_x,
-                                     bounds=[(sorted_x[0], sorted_x[-1])])
-                                 if (not result.success) or result.x[0] < 0:
-                                     raise ValueError(
-                                         "Negative lattice constant!")
-                                 x = result.x[0]
-                                 logger.info("BFGS minimized %s = %f."
-                                             % (lattice_direction, x))
-                             except ValueError as ex:
-                                 # Fall back on the bisection algo if bfgs
-                                 # fails.
-                                 logger.info(str(ex))
-                                 x = (min_x + sorted_x[other]) / 2
-                                 logger.info("Falling back on bisection "
-                                             "%s = %f." % (lattice_direction,
-                                                           x))
-                         else:
-                             x = (min_x + sorted_x[other]) / 2
-                             logger.info("Bisection %s = %f."
-                                         % (lattice_direction, x))
-
-                 lattice = lattice.matrix
-                 lattice[lattice_index] = lattice[lattice_index] / \
-                     np.linalg.norm(lattice[lattice_index]) * x
-
-                 s = Structure(lattice, structure.species,
-                               structure.frac_coords)
-                 fname = "POSCAR.%f" % x
-                 s.to(filename=fname)
-
-                 incar_update = {"ISTART": 1, "NSW": nsw, "ISIF": 2}
-
-                 settings = [
-                     {"dict": "INCAR",
-                      "action": {"_set": incar_update}},
-                     {"file": fname,
-                      "action": {"_file_copy": {"dest": "POSCAR"}}}]
-
-             logger.info("Generating job = %d with parameter %f!" % (i + 1, x))
-             yield VaspJob(vasp_cmd, final=False, backup=backup,
-                           suffix=".static.%f" % x,
-                           settings_override=settings, **vasp_job_kwargs)
-
-         with open("EOS.txt", "wt") as f:
-             f.write("# %s energy\n" % lattice_direction)
-             for k in sorted(energies.keys()):
-                 f.write("%f %f\n" % (k, energies[k]))
-
-
- class VaspNEBJob(Job):
-     """
-     A NEB vasp job, especially for CI-NEB running on PBS clusters.
-     The class is added for the purpose of handling a different folder
-     arrangement in NEB calculations.
-     """
-
-     def __init__(self, vasp_cmd,
-                  output_file="neb_vasp.out", stderr_file="neb_std_err.txt",
-                  suffix="", final=True, backup=True, auto_npar=True,
-                  half_kpts=False, auto_gamma=True, auto_continue=False,
-                  gamma_vasp_cmd=None, settings_override=None):
-         """
-         This constructor is a simplified version of VaspJob, which satisfies
-         the need for flexibility. For standard kinds of runs, it's often
-         better to use one of the static constructors. The defaults are
-         usually fine too.
-
-         Args:
-             vasp_cmd (str): Command to run vasp as a list of args. For
-                 example, if you are using mpirun, it can be something like
-                 ["mpirun", "pvasp.5.2.11"]
-             output_file (str): Name of file to direct standard out to.
-                 Defaults to "neb_vasp.out".
-             stderr_file (str): Name of file to direct standard error to.
-                 Defaults to "neb_std_err.txt".
-             suffix (str): A suffix to be appended to the final output. E.g.,
-                 to rename all VASP output from say vasp.out to
-                 vasp.out.relax1, provide ".relax1" as the suffix.
-             final (bool): Indicating whether this is the final vasp job in a
-                 series. Defaults to True.
-             backup (bool): Whether to backup the initial input files. If
-                 True, the INCAR, KPOINTS, POSCAR and POTCAR will be copied
-                 with a ".orig" appended. Defaults to True.
-             auto_npar (bool): Whether to automatically tune NPAR to be sqrt(
-                 number of cores) as recommended by VASP for DFT calculations.
-                 Generally, this results in significant speedups. Defaults to
-                 True. Set to False for HF, GW and RPA calculations.
-             half_kpts (bool): Whether to halve the kpoint grid for NEB.
-                 Speeds up convergence considerably. Defaults to False.
-             auto_gamma (bool): Whether to automatically check if run is a
-                 Gamma 1x1x1 run, and whether a Gamma optimized version of
-                 VASP exists with ".gamma" appended to the name of the VASP
-                 executable (typical setup in many systems). If so, run the
-                 gamma optimized version of VASP instead of regular VASP. You
-                 can also specify the gamma vasp command using the
-                 gamma_vasp_cmd argument if the command is named differently.
-             auto_continue (bool): Whether to automatically continue a run
-                 if a STOPCAR is present. This is very useful if using the
-                 wall-time handler, which will write a read-only STOPCAR to
-                 prevent VASP from deleting it once it finishes.
-             gamma_vasp_cmd (str): Command for gamma vasp version when
-                 auto_gamma is True. Should follow the list style of
-                 subprocess. Defaults to None, which means ".gamma" is added
-                 to the last argument of the standard vasp_cmd.
-             settings_override ([dict]): An ansible style list of dict to
-                 override changes. For example, to set ISTART=1 for subsequent
-                 runs and to copy the CONTCAR to the POSCAR, you will provide::
-
-                     [{"dict": "INCAR", "action": {"_set": {"ISTART": 1}}},
-                      {"file": "CONTCAR",
-                       "action": {"_file_copy": {"dest": "POSCAR"}}}]
-         """
-         self.vasp_cmd = vasp_cmd
-         self.output_file = output_file
-         self.stderr_file = stderr_file
-         self.final = final
-         self.backup = backup
-         self.suffix = suffix
-         self.auto_npar = auto_npar
-         self.half_kpts = half_kpts
-         self.auto_gamma = auto_gamma
-         self.gamma_vasp_cmd = gamma_vasp_cmd
-         self.auto_continue = auto_continue
-         self.settings_override = settings_override
-         self.neb_dirs = []  # 00, 01, etc.
-         self.neb_sub = []   # 01, 02, etc.
-
-         for path in os.listdir("."):
-             if os.path.isdir(path) and path.isdigit():
-                 self.neb_dirs.append(path)
-         self.neb_dirs = sorted(self.neb_dirs)
-         self.neb_sub = self.neb_dirs[1:-1]
-
-
-     def run(self):
-         """
-         Perform the actual VASP run.
-
-         Returns:
-             (subprocess.Popen) Used for monitoring.
-         """
-         cmd = list(self.vasp_cmd)
-         if self.auto_gamma:
-             kpts = Kpoints.from_file("KPOINTS")
-             if kpts.style == Kpoints.supported_modes.Gamma \
-                     and tuple(kpts.kpts[0]) == (1, 1, 1):
-                 if self.gamma_vasp_cmd is not None and which(
-                         self.gamma_vasp_cmd[-1]):
-                     cmd = self.gamma_vasp_cmd
-                 elif which(cmd[-1] + ".gamma"):
-                     cmd[-1] += ".gamma"
-         logger.info("Running {}".format(" ".join(cmd)))
-         with open(self.output_file, 'w') as f_std, \
-                 open(self.stderr_file, "w", buffering=1) as f_err:
-             # Use line buffering for stderr.
-             p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)
-         return p
-
-
-     def postprocess(self):
-         """
-         Postprocessing includes renaming and gzipping where necessary.
-         """
-         # Add the suffix to all sub_dir/{items}.
-         for path in self.neb_dirs:
-             for f in VASP_NEB_OUTPUT_SUB_FILES:
-                 f = os.path.join(path, f)
-                 if os.path.exists(f):
-                     if self.final and self.suffix != "":
-                         shutil.move(f, "{}{}".format(f, self.suffix))
-                     elif self.suffix != "":
-                         shutil.copy(f, "{}{}".format(f, self.suffix))
-
-         # Add the suffix to all output files.
-         for f in VASP_NEB_OUTPUT_FILES + [self.output_file]:
-             if os.path.exists(f):
-                 if self.final and self.suffix != "":
-                     shutil.move(f, "{}{}".format(f, self.suffix))
-                 elif self.suffix != "":
-                     shutil.copy(f, "{}{}".format(f, self.suffix))
-
-
-
- class GenerateVaspInputJob(Job):
-
-     def __init__(self, input_set, contcar_only=True, **kwargs):
-         """
-         Generates a VASP input based on an existing directory. This is
-         typically used to modify the VASP input files before the next
-         VaspJob.
-
-         Args:
-             input_set (str): Full path to the input set. E.g.,
-                 "pymatgen.io.vasp.sets.MPNonSCFSet".
-             contcar_only (bool): If True (default), only CONTCAR structures
-                 are used as input to the input set.
-         """
-         self.input_set = input_set
-         self.contcar_only = contcar_only
-         self.kwargs = kwargs
-
-
- class VasprunXMLValidator(Validator):
-     """
-     Checks that a valid vasprun.xml was generated.
-     """
-
-     def __init__(self, output_file="vasp.out", stderr_file="std_err.txt"):
-         """
-         Args:
-             output_file (str): Name of the file VASP standard output is
-                 directed to. Defaults to "vasp.out".
-             stderr_file (str): Name of the file VASP standard error is
-                 directed to. Defaults to "std_err.txt".
-         """
-         self.output_file = output_file
-         self.stderr_file = stderr_file
-         self.logger = logging.getLogger(self.__class__.__name__)
-
-
- class VaspFilesValidator(Validator):
-     """
-     Checks for the existence of some of the files that VASP
-     normally creates upon running.
-     """
-
-     def __init__(self):
-         pass
-
-
- def check_broken_chgcar(chgcar):
-     """
-     Checks a Chgcar object for signs of data corruption.
-     Returns True if the CHGCAR looks broken.
-     """
-     chgcar_data = chgcar.data['total']
-     if (chgcar_data < 0).sum() > 100:
-         # A significant number of the values are negative.
-         return True
-
-     diff = chgcar_data[:-1, :-1, :-1] - chgcar_data[1:, 1:, 1:]
-     if diff.max() / (chgcar_data.max() - chgcar_data.min()) > 0.95:
-         # A single diagonal finite difference spans more than 95% of the
-         # entire value range, which suggests a discontinuity.
-         return True
-
-     return False
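A minimal sketch of running the check against an actual charge density file (assumes a CHGCAR in the working directory):

```
from pymatgen.io.vasp.outputs import Chgcar

chgcar = Chgcar.from_file("CHGCAR")
if check_broken_chgcar(chgcar):
    print("CHGCAR looks corrupted; delete it before restarting.")
```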