diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a07e542d053..03340740354 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -33,7 +33,7 @@ variables: status_pending: <<: *notification_job_definition stage: prepare - script: bash maintainer/gh_post_status.sh pending + script: sh maintainer/gh_post_status.sh pending style: <<: *global_job_definition @@ -42,7 +42,7 @@ style: before_script: - git submodule deinit . script: - - maintainer/CI/fix_style.sh + - sh maintainer/CI/fix_style.sh tags: - docker - linux @@ -515,7 +515,7 @@ check_with_odd_no_of_processors: script: - cd ${CI_PROJECT_DIR}/build - cmake -DTEST_NP=3 . - - make -t && make check_python_parallel_odd + - make -t && make check_unit_tests && make check_python_parallel_odd tags: - docker - linux @@ -572,19 +572,19 @@ deploy_tutorials: status_success: <<: *notification_job_definition stage: result - script: bash maintainer/gh_post_status.sh success + script: sh maintainer/gh_post_status.sh success when: on_success status_failure: <<: *notification_job_definition stage: result - script: bash maintainer/gh_post_status.sh failure + script: sh maintainer/gh_post_status.sh failure when: on_failure notify_success: <<: *notification_job_definition stage: result - script: bash maintainer/gh_close_issue.sh + script: sh maintainer/gh_close_issue.sh when: on_success only: - python @@ -592,7 +592,7 @@ notify_success: notify_failure: <<: *notification_job_definition stage: result - script: bash maintainer/gh_create_issue.sh + script: sh maintainer/gh_create_issue.sh when: on_failure only: - python diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ebd3e7c07b1..c61e17f2ca3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,6 +9,7 @@ repos: language: system always_run: false files: '.*\.(cpp|hpp|cu|cuh)' + exclude: '^libs/' args: ["-i", "-style=file"] - id: autopep8 @@ -17,8 +18,8 @@ repos: language: system always_run: false files: '.*\.(py|pyx|pxd)' - exclude: '\.pylintrc|.*.\.py\.in' - args: ["--ignore=E266,E701,W291,W293", "--in-place", "--aggressive"] + exclude: '\.pylintrc|.*.\.py\.in|^libs/' + args: ["--ignore=E266,E402,E701,W291,W293", "--in-place", "--aggressive"] - id: cmake-format name: cmake-format @@ -26,6 +27,7 @@ repos: language: system always_run: false files: 'CMakeLists.txt' + exclude: '^libs/h5xx/|^libs/Random123-1\.09/' args: ["-i"] - id: ex-flags diff --git a/AUTHORS b/AUTHORS index 72f4763c3be..deb04f2def8 100644 --- a/AUTHORS +++ b/AUTHORS @@ -10,13 +10,14 @@ The following people have contributed to ESPResSo: Core developers --------------- -Florian Weik Rudolf Weeber Kai Szuttor Jean-Noël Grad +Alexander Reinauer Former Core Developers ---------------------- +Florian Weik Georg Rempfer Stefan Kesselheim Bernward Mann (deceased 2006) @@ -57,6 +58,7 @@ Frank Mühlbach Gizem Inci Gregoria Illya Hamid Zaheri +Hao Lu Henri Menke Igor Pasichnyk Ingo Tischler @@ -103,6 +105,7 @@ Pedro Ojeda Pedro Sanchez Peter Košovan Pierre de Buyl +Riccardo Frenner Robert Kaufmann Robin Bardakcioglu Sam Foley diff --git a/CMakeLists.txt b/CMakeLists.txt index 6257c7a7707..4fb692ed262 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -306,29 +306,6 @@ endif(WITH_VALGRIND_INSTRUMENTATION) find_package(MPI 3.0 REQUIRED) -# CMake < 3.9 -if(NOT TARGET MPI::MPI_CXX) - add_library(MPI::MPI_CXX IMPORTED INTERFACE) - - # Workaround for https://gitlab.kitware.com/cmake/cmake/issues/18349 - foreach(_MPI_FLAG ${MPI_CXX_COMPILE_FLAGS}) - set_property(TARGET MPI::MPI_CXX PROPERTY INTERFACE_COMPILE_OPTIONS - 
${_MPI_FLAG}) - endforeach() - - set_property(TARGET MPI::MPI_CXX PROPERTY INTERFACE_INCLUDE_DIRECTORIES - "${MPI_CXX_INCLUDE_PATH}") - - # Workaround for https://gitlab.kitware.com/cmake/cmake/issues/18349 - foreach(_MPI_FLAG ${MPI_CXX_LINK_FLAGS}) - set_property(TARGET MPI::MPI_CXX PROPERTY INTERFACE_LINK_LIBRARIES - ${_MPI_FLAG}) - endforeach() - - set_property(TARGET MPI::MPI_CXX PROPERTY INTERFACE_LINK_LIBRARIES - ${MPI_CXX_LIBRARIES}) -endif() - # ############################################################################## # Boost # ############################################################################## @@ -473,16 +450,14 @@ endif() if(WITH_TESTS) enable_testing() - if(Boost_UNIT_TEST_FRAMEWORK_FOUND) - set(WITH_UNIT_TESTS ON) - endif(Boost_UNIT_TEST_FRAMEWORK_FOUND) add_custom_target(check) - if(WITH_PYTHON) - add_subdirectory(testsuite) - endif(WITH_PYTHON) set(CTEST_ARGS "" CACHE STRING "Extra arguments to give to ctest calls (separated by semicolons)") + set(TEST_NP "4" CACHE STRING "Maximal number of MPI ranks to use per test") + if(WITH_PYTHON) + add_subdirectory(testsuite) + endif(WITH_PYTHON) endif(WITH_TESTS) if(WITH_BENCHMARKS) diff --git a/cmake/FindCUDACompilerNVCC.cmake b/cmake/FindCUDACompilerNVCC.cmake index 1a77224d765..930518e3dcb 100644 --- a/cmake/FindCUDACompilerNVCC.cmake +++ b/cmake/FindCUDACompilerNVCC.cmake @@ -63,7 +63,6 @@ function(find_gpu_library) cmake_parse_arguments(LIBRARY "REQUIRED" "NAMES;VARNAME" "" ${ARGN}) list(APPEND LIBRARY_PATHS ${CUDA_TOOLKIT_ROOT_DIR}/lib64 ${CUDA_TOOLKIT_ROOT_DIR}/lib - ${CUDA_TOOLKIT_ROOT_DIR}/compat /usr/local/nvidia/lib /usr/lib/x86_64-linux-gnu) if(LIBRARY_REQUIRED) find_library(${LIBRARY_VARNAME} NAMES ${LIBRARY_NAMES} PATHS ${LIBRARY_PATHS} NO_DEFAULT_PATH REQUIRED) diff --git a/cmake/unit_test.cmake b/cmake/unit_test.cmake index 6fc459276b9..014ad52c49c 100644 --- a/cmake/unit_test.cmake +++ b/cmake/unit_test.cmake @@ -1,12 +1,3 @@ -if(NOT DEFINED TEST_NP) - include(ProcessorCount) - processorcount(NP) - math(EXPR TEST_NP "${NP}/2 + 1") - if(${TEST_NP} GREATER 4) - set(TEST_NP 4) - endif() -endif() - if(EXISTS ${MPIEXEC}) # OpenMPI 2.0 and higher checks the number of processes against the number of # CPUs diff --git a/doc/sphinx/advanced_methods.rst b/doc/sphinx/advanced_methods.rst index 138f0eca130..95a7f5ec087 100644 --- a/doc/sphinx/advanced_methods.rst +++ b/doc/sphinx/advanced_methods.rst @@ -1894,7 +1894,7 @@ The call of ``add_reaction`` define the insertion :math:`\mathrm{\emptyset \to t Multiple reactions for the insertions of different types can be added to the same ``WidomInsertion`` instance. Measuring the excess chemical potential using the insertion method is done via calling ``widom.measure_excess_chemical_potential(0)``. If another particle insertion is defined, then the excess chemical potential for this insertion can be measured by calling ``widom.measure_excess_chemical_potential(1)``. -Be aware that the implemented method only works for the canonical ensemble. If the numbers of particles fluctuate (i.e. in a semi grand canonical simulation) one has to adapt the formulas from which the excess chemical potential is calculated! This is not implemented. Also in a isobaric-isothermal simulation (NPT) the corresponding formulas for the excess chemical potentials need to be adapted. This is not implemented. +Be aware that the implemented method only works for the canonical ensemble. If the numbers of particles fluctuate (i.e. 
in a semi grand canonical simulation) one has to adapt the formulas from which the excess chemical potential is calculated! This is not implemented. Also in a isobaric-isothermal simulation (NpT) the corresponding formulas for the excess chemical potentials need to be adapted. This is not implemented. The implementation can also deal with the simultaneous insertion of multiple particles and can therefore measure the change of excess free energy of multiple particles like e.g.: diff --git a/doc/sphinx/electrostatics.rst b/doc/sphinx/electrostatics.rst index b4417a44b03..d39e7db170e 100644 --- a/doc/sphinx/electrostatics.rst +++ b/doc/sphinx/electrostatics.rst @@ -318,7 +318,7 @@ MMM1D is used with:: where the prefactor :math:`C` is defined in Eqn. :eq:`coulomb_prefactor`. MMM1D Coulomb method for systems with periodicity (0 0 1). Needs the -nsquared cell system (see section :ref:`Cellsystems`). The first form sets parameters +N-squared cell system (see section :ref:`Cellsystems`). The first form sets parameters manually. The switch radius determines at which xy-distance the force calculation switches from the near to the far formula. The Bessel cutoff does not need to be specified as it is automatically determined from the @@ -336,7 +336,7 @@ MMM1D on GPU :class:`espressomd.electrostatics.MMM1DGPU` MMM1D is also available in a GPU implementation. Unlike its CPU -counterpart, it does not need the nsquared cell system. +counterpart, it does not need the N-squared cell system. :: diff --git a/doc/sphinx/installation.rst b/doc/sphinx/installation.rst index 59a1458e2f7..3d4949fe3f1 100644 --- a/doc/sphinx/installation.rst +++ b/doc/sphinx/installation.rst @@ -352,9 +352,9 @@ General features In addition, there are switches that enable additional features in the integrator or thermostat: -- ``NPT`` Enables an on-the-fly NPT integration scheme. +- ``NPT`` Enables an on-the-fly NpT integration scheme. - .. seealso:: :ref:`Isotropic NPT thermostat` + .. seealso:: :ref:`Isotropic NpT thermostat` - ``ENGINE`` diff --git a/doc/sphinx/introduction.rst b/doc/sphinx/introduction.rst index a633689d304..d955488f03b 100644 --- a/doc/sphinx/introduction.rst +++ b/doc/sphinx/introduction.rst @@ -448,7 +448,7 @@ report so to the developers using the instructions in :ref:`Contributing`. +--------------------------------+------------------------+------------------+------------+ | Langevin Thermostat | Core | Core | Yes | +--------------------------------+------------------------+------------------+------------+ -| Isotropic NPT | Experimental | None | Yes | +| Isotropic NpT | Experimental | None | Yes | +--------------------------------+------------------------+------------------+------------+ | Quaternion Integrator | Core | Good | Yes | +--------------------------------+------------------------+------------------+------------+ diff --git a/doc/sphinx/running.rst b/doc/sphinx/running.rst index 83911aa0f97..5d94a2d98af 100644 --- a/doc/sphinx/running.rst +++ b/doc/sphinx/running.rst @@ -3,20 +3,80 @@ Running the simulation ====================== +.. _Particle integration and propagation: + +Particle integration and propagation +------------------------------------ + +The main integration scheme of |es| is the velocity Verlet algorithm. +A steepest descent algorithm is used to minimize the system. 
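+
+As a quick orientation, a minimal sketch of this workflow could look as follows
+(the box size, particle positions and parameter values are placeholders only,
+and the default ``LENNARD_JONES`` feature is assumed)::
+
+    import espressomd
+
+    system = espressomd.System(box_l=[10, 10, 10])
+    system.time_step = 0.01
+    system.cell_system.skin = 0.4
+    system.non_bonded_inter[0, 0].lennard_jones.set_params(
+        epsilon=1.0, sigma=1.0, cutoff=2**(1 / 6), shift="auto")
+    system.part.add(pos=[4.9, 5.0, 5.0])
+    system.part.add(pos=[5.1, 5.0, 5.0])
+    # relax the initial overlap with steepest descent ...
+    system.integrator.set_steepest_descent(f_max=0, gamma=0.1, max_displacement=0.01)
+    system.integrator.run(100)
+    # ... then switch to the velocity Verlet integrator for the production run
+    system.integrator.set_vv()
+    system.integrator.run(1000)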
+
+Additional integration schemes are available, which can be coupled to
+thermostats to enable Langevin dynamics, Brownian dynamics, Stokesian dynamics,
+dissipative particle dynamics, and simulations in the NpT ensemble.
+
+.. _Rotational degrees of freedom and particle anisotropy:
+
+Rotational degrees of freedom and particle anisotropy
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When the feature ``ROTATION`` is compiled in, particles not only have a position, but also an orientation that changes with an angular velocity. A torque on a particle leads to a change in angular velocity depending on the particle's rotational inertia. The property :attr:`espressomd.particle_data.ParticleHandle.rinertia` has to be specified as the three eigenvalues of the particle's rotational inertia tensor.
+
+The rotational degrees of freedom are also integrated using a velocity Verlet scheme.
+The implementation is based on a quaternion representation of the particle orientation and is described in :cite:`omelyan98`, with the quaternion components indexed according to the formalism :math:`q = a + b\mathbf{i} + c\mathbf{j} + d\mathbf{k}` :cite:`allen2017`.
+
+When the Langevin thermostat is enabled, the rotational degrees of freedom are also thermalized.
+
+Whether or not rotational degrees of freedom are propagated is controlled on a per-particle and per-axis level, where the axes are the Cartesian axes of the particle in its body-fixed frame.
+It is important to note that starting from version 4.0 and unlike in earlier versions of |es|, the particles' rotation is disabled by default.
+In this way, just compiling in the ``ROTATION`` feature no longer changes the physics of the system.
+
+The rotation of a particle is controlled via the :attr:`espressomd.particle_data.ParticleHandle.rotation` property. E.g., the following code adds a particle with rotation enabled on the x axis::
+
+    import espressomd
+    system = espressomd.System(box_l=[1, 1, 1])
+    system.part.add(pos=(0, 0, 0), rotation=(1, 0, 0))
+
+Notes:
+
+* The orientation of a particle is stored as a quaternion in the :attr:`espressomd.particle_data.ParticleHandle.quat` property. For a value of (1,0,0,0), the body and space frames coincide.
+* The space-frame direction of the particle's z-axis in its body frame is accessible through the :attr:`espressomd.particle_data.ParticleHandle.director` property.
+* Any other vector can be converted from the body-fixed to the space-fixed frame using the :meth:`espressomd.particle_data.ParticleHandle.convert_vector_body_to_space` method.
+* When ``DIPOLES`` is compiled in, the particle's dipole moment is always co-aligned with the z-axis in the body-fixed frame.
+* Changing the particle's dipole moment or director will re-orient the particle such that its z-axis in the space frame is aligned parallel to the given vector. No guarantees are made for the other two axes after setting the director or the dipole moment.
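+
+For illustration, the following minimal sketch reads the director and converts a
+body-frame vector to the space frame (all values are placeholders; the
+``ROTATION`` and ``DIPOLES`` features are assumed to be compiled in)::
+
+    import espressomd
+
+    system = espressomd.System(box_l=[10, 10, 10])
+    system.part.add(id=0, pos=[0, 0, 0], rotation=[1, 1, 1], quat=[1, 0, 0, 0])
+    p = system.part[0]
+    print(p.director)                                 # body-frame z-axis in space coordinates
+    print(p.convert_vector_body_to_space([0, 0, 1]))  # the same axis, converted explicitly
+    p.dip = [0, 0, 1.5]                               # re-orients the particle so its director points along +z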
+ + +The following particle properties are related to rotation: + +* :attr:`espressomd.particle_data.ParticleHandle.dip` +* :attr:`espressomd.particle_data.ParticleHandle.director` +* :attr:`espressomd.particle_data.ParticleHandle.ext_torque` +* :attr:`espressomd.particle_data.ParticleHandle.gamma_rot` +* :attr:`espressomd.particle_data.ParticleHandle.gamma_rot` +* :attr:`espressomd.particle_data.ParticleHandle.omega_body` +* :attr:`espressomd.particle_data.ParticleHandle.omega_lab` +* :attr:`espressomd.particle_data.ParticleHandle.quat` +* :attr:`espressomd.particle_data.ParticleHandle.rinertia` +* :attr:`espressomd.particle_data.ParticleHandle.rotation` +* :attr:`espressomd.particle_data.ParticleHandle.torque_lab` + + +.. _Integrators: + +Integrators +----------- + To run the integrator call the method -:meth:`espressomd.integrate.Integrator.run`:: +:meth:`system.integrate.run() `:: system.integrator.run(number_of_steps, recalc_forces=False, reuse_forces=False) -where ``number_of_steps`` is the number of time steps the integrator -should perform. The two main integration schemes of |es| are the Velocity Verlet algorithm -and an adaption of the Velocity Verlet algorithm to simulate an NpT ensemble. -A steepest descent implementation is also available. +where ``number_of_steps`` is the number of time steps the integrator should perform. .. _Velocity Verlet Algorithm: Velocity Verlet algorithm -------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^ :meth:`espressomd.integrate.IntegratorHandle.set_vv` @@ -47,7 +107,7 @@ For numerical integration, this equation is discretized to the following steps ( .. math:: v(t+dt) = v(t+dt/2) + \frac{F(x(t+dt),t+dt)}{m} dt/2 -Note that this implementation of the Velocity Verlet algorithm reuses +Note that this implementation of the velocity Verlet algorithm reuses forces in step 1. That is, they are computed once in step 3, but used twice, in step 4 and in step 1 of the next iteration. In the first time step after setting up, there are no forces present yet. Therefore, |es| has @@ -77,10 +137,10 @@ would like to recompute the forces, despite the fact that they are already correctly calculated. To this aim, the option ``recalc_forces`` can be used to enforce force recalculation. -.. _Isotropic NPT integrator: +.. _Isotropic NpT integrator: -Isotropic NPT integrator ------------------------- +Isotropic NpT integrator +^^^^^^^^^^^^^^^^^^^^^^^^ :meth:`espressomd.integrate.IntegratorHandle.set_isotropic_npt` @@ -185,55 +245,10 @@ Notes: * The particle forces :math:`F` include interactions as well as a friction (:math:`\gamma^0`) and noise term (:math:`\sqrt{k_B T \gamma^0 dt} \overline{\eta}`) analogous to the terms in the :ref:`Langevin thermostat`. * The particle forces are only calculated in step 5 and then reused in step 1 of the next iteration. See :ref:`Velocity Verlet Algorithm` for the implications of that. -.. _Rotational degrees of freedom and particle anisotropy: - -Rotational degrees of freedom and particle anisotropy ------------------------------------------------------ - -When the feature ``ROTATION`` is compiled in, particles not only have a position, but also an orientation that changes with an angular velocity. A torque on a particle leads to a change in angular velocity depending on the particles rotational inertia. The property :attr:`espressomd.particle_data.ParticleHandle.rinertia` has to be specified as the three eigenvalues of the particles rotational inertia tensor. 
- -The rotational degrees of freedom are also integrated using a velocity Verlet scheme. -The implementation is based on a quaternion representation of the particle orientation and described in :cite:`omelyan98` with quaternion components indexing made according to the formalism :math:`q = a + b\mathbf{i} + c\mathbf{j} + d\mathbf{k}` :cite:`allen2017`. - -When the Langevin thermostat is enabled, the rotational degrees of freedom are also thermalized. - -Whether or not rotational degrees of freedom are propagated, is controlled on a per-particle and per-axis level, where the axes are the Cartesian axes of the particle in its body-fixed frame. -It is important to note that starting from version 4.0 and unlike in earlier versions of |es|, the particles' rotation is disabled by default. -In this way, just compiling in the ``ROTATION`` feature no longer changes the physics of the system. - -The rotation of a particle is controlled via the :attr:`espressomd.particle_data.ParticleHandle.rotation` property. E.g., the following code adds a particle with rotation enabled on the x axis:: - - import espressomd - system = espressomd.System(box_l=[1, 1, 1]) - system.part.add(pos=(0, 0, 0), rotation=(1, 0, 0)) - -Notes: - -* The orientation of a particle is stored as a quaternion in the :attr:`espressomd.particle_data.ParticleHandle.quat` property. For a value of (1,0,0,0), the body and space frames coincide. -* The space-frame direction of the particle's z-axis in its body frame is accessible through the :attr:`espressomd.particle_data.ParticleHandle.director` property. -* Any other vector can be converted from body to space fixed frame using the :meth:`espressomd.particle_data.ParticleHandle.convert_vector_body_to_space` method. -* When ``DIPOLES`` are compiled in, the particles dipole moment is always co-aligned with the z-axis in the body-fixed frame. -* Changing the particles dipole moment and director will re-orient the particle such that its z-axis in space frame is aligned parallel to the given vector. No guarantees are made for the other two axes after setting the director or the dipole moment. - - -The following particle properties are related to rotation: - -* :attr:`espressomd.particle_data.ParticleHandle.dip` -* :attr:`espressomd.particle_data.ParticleHandle.director` -* :attr:`espressomd.particle_data.ParticleHandle.ext_torque` -* :attr:`espressomd.particle_data.ParticleHandle.gamma_rot` -* :attr:`espressomd.particle_data.ParticleHandle.gamma_rot` -* :attr:`espressomd.particle_data.ParticleHandle.omega_body` -* :attr:`espressomd.particle_data.ParticleHandle.omega_lab` -* :attr:`espressomd.particle_data.ParticleHandle.quat` -* :attr:`espressomd.particle_data.ParticleHandle.rinertia` -* :attr:`espressomd.particle_data.ParticleHandle.rotation` -* :attr:`espressomd.particle_data.ParticleHandle.torque_lab` - .. _Steepest descent: Steepest descent ----------------- +^^^^^^^^^^^^^^^^ :meth:`espressomd.integrate.IntegratorHandle.set_steepest_descent` @@ -265,7 +280,7 @@ Usage example:: system.integrator.set_vv() # to switch back to velocity Verlet Using a custom convergence criterion -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +"""""""""""""""""""""""""""""""""""" The ``f_max`` parameter can be set to zero to prevent the integrator from halting when a specific force/torque is reached. 
The integration can then @@ -309,11 +324,18 @@ The correct forces need to be re-calculated after running the integration:: system.integrator.run(0, recalc_forces=True) # re-calculate forces from virtual sites system.integrator.set_vv() +.. _Brownian Dynamics: + +Brownian Dynamics +^^^^^^^^^^^^^^^^^ + +Brownian Dynamics integrator :cite:`schlick2010`. +See details in :ref:`Brownian thermostat`. .. _Stokesian Dynamics: Stokesian Dynamics ------------------- +^^^^^^^^^^^^^^^^^^ .. note:: @@ -365,7 +387,7 @@ thermalize the system, the SD thermostat needs to be activated (see .. _Important_SD: Important -~~~~~~~~~ +""""""""" The particles must be prevented from overlapping. It is mathematically allowed for the particles to overlap to a certain degree. However, once the distance @@ -381,3 +403,314 @@ The near field (so-called lubrication) correction is planned. For now, Stokesian Dynamics provides a good approximation of the hydrodynamics in dilute systems where the average distance between particles is several sphere diameters. + + +.. _Thermostats: + +Thermostats +----------- + +To add a thermostat, call the appropriate setter:: + + system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=41) + +The different thermostats available in |es| will be described in the following +subsections. + +You may combine different thermostats at your own risk by turning them on +one by one. The list of active thermostats can be cleared at any time with +:py:meth:`system.thermostat.turn_off() `. +Not all combinations of thermostats are allowed, though (see +:py:func:`espressomd.thermostat.AssertThermostatType` for details). +Some integrators only work with a specific thermostat and throw an +error otherwise. Note that there is only one temperature for all +thermostats, although for some thermostats like the Langevin thermostat, +particles can be assigned individual temperatures. + +Since |es| does not enforce a particular unit system, it cannot know about +the current value of the Boltzmann constant. Therefore, when specifying +the temperature of a thermostat, you actually do not define the +temperature, but the value of the thermal energy :math:`k_B T` in the +current unit system (see the discussion on units, Section :ref:`On units`). + +All thermostats have a ``seed`` argument that controls the state of the random +number generator (Philox Counter-based RNG). This seed is required on first +activation of a thermostat, unless stated otherwise. It can be omitted in +subsequent calls of the method that activates the same thermostat. The random +sequence also depends on the thermostats counters that are +incremented after each integration step. + +.. _Langevin thermostat: + +Langevin thermostat +^^^^^^^^^^^^^^^^^^^ + +In order to activate the Langevin thermostat the member function +:py:meth:`~espressomd.thermostat.Thermostat.set_langevin` of the thermostat +class :class:`espressomd.thermostat.Thermostat` has to be invoked. +Best explained in an example:: + + import espressomd + system = espressomd.System(box_l=[1, 1, 1]) + system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=41) + +As explained before the temperature is set as thermal energy :math:`k_\mathrm{B} T`. + +The Langevin thermostat is based on an extension of Newton's equation of motion to + +.. math:: m_i \dot{v}_i(t) = f_i(\{x_j\},v_i,t) - \gamma v_i(t) + \sqrt{2\gamma k_B T} \eta_i(t). + +Here, :math:`f_i` are all deterministic forces from interactions, +:math:`\gamma` the bare friction coefficient and :math:`\eta` a random, "thermal" force. 
+The friction term accounts for dissipation in a surrounding fluid whereas
+the random force mimics collisions of the particle with solvent molecules
+at temperature :math:`T` and satisfies
+
+.. math:: <\eta(t)> = 0 , <\eta^\alpha_i(t)\eta^\beta_j(t')> = \delta_{\alpha\beta} \delta_{ij}\delta(t-t')
+
+(:math:`<\cdot>` denotes the ensemble average and :math:`\alpha,\beta` are spatial coordinates).
+
+In the |es| implementation of the Langevin thermostat,
+the additional terms only enter in the force calculation.
+This reduces the accuracy of the velocity Verlet integrator
+by one order in :math:`dt` because forces are now velocity-dependent.
+
+The random process :math:`\eta(t)` is discretized by drawing an uncorrelated random number
+:math:`\overline{\eta}` for each component of all the particle forces.
+The distribution of :math:`\overline{\eta}` is uniform and satisfies
+
+.. math:: <\overline{\eta}> = 0 , <\overline{\eta}\overline{\eta}> = 1/dt
+
+If the feature ``ROTATION`` is compiled in, the rotational degrees of freedom are
+also coupled to the thermostat. If only the first two arguments are
+specified, then the friction coefficient for the rotation is set to the
+same value as that for the translation.
+A separate rotational friction coefficient can be set via
+``gamma_rotate``. The two options allow one to switch the translational and rotational
+thermalization on or off separately, maintaining the frictional behavior. This
+can be useful, for instance, in high Péclet number active matter systems, where
+one wants to thermalize only the rotational degrees of freedom while
+translational degrees of freedom are affected by the self-propulsion.
+
+The keywords ``gamma`` and ``gamma_rotate`` can be specified as a scalar,
+or, with the feature ``PARTICLE_ANISOTROPY`` compiled in, as the three eigenvalues
+of the respective friction coefficient tensor. This enables the simulation of
+the anisotropic diffusion of anisotropic colloids (rods, etc.).
+
+Using the Langevin thermostat, it is possible to set a temperature and a
+friction coefficient for every particle individually via the feature
+``THERMOSTAT_PER_PARTICLE``. Consult the reference of the ``part`` command
+(chapter :ref:`Setting up particles`) for information on how to achieve this.
+
+.. _Brownian thermostat:
+
+Brownian thermostat
+^^^^^^^^^^^^^^^^^^^
+
+"Brownian thermostat" is the formal name of the thermostat enabling the
+Brownian Dynamics feature (see :cite:`schlick2010`), which implies
+a propagation scheme involving the systematic and thermal parts of the
+classical Ermak-McCammon Brownian Dynamics (see :cite:`ermak78a`).
+Currently it is implemented without
+hydrodynamic interactions, i.e.
+with a diagonal diffusion tensor.
+The hydrodynamic interactions feature will be available later
+as a part of the present Brownian Dynamics or
+implemented separately within the Stokesian Dynamics.
+
+In order to activate the Brownian thermostat, the member function
+:py:attr:`~espressomd.thermostat.Thermostat.set_brownian` of the thermostat
+class :class:`espressomd.thermostat.Thermostat` has to be invoked.
+The system integrator should also be changed.
+Best explained in an example::
+
+    import espressomd
+    system = espressomd.System(box_l=[1, 1, 1])
+    system.thermostat.set_brownian(kT=1.0, gamma=1.0, seed=41)
+    system.integrator.set_brownian_dynamics()
+
+where ``gamma`` (hereinafter :math:`\gamma`) is a viscous friction coefficient.
+In terms of the Python interface and setup, the Brownian thermostat is very
+similar to the :ref:`Langevin thermostat`. The feature
+``THERMOSTAT_PER_PARTICLE`` is used to control the per-particle
+temperature and the friction coefficient setup. The major differences are
+its internal integrator implementation and other temporal constraints.
+The integrator is still a symplectic velocity Verlet-like one.
+It is implemented via a viscous drag part and a random walk of both the position and
+velocity. Due to the nature of the Brownian Dynamics method, its time step :math:`\Delta t`
+should be large enough compared to the relaxation time
+:math:`m/\gamma` where :math:`m` is the particle mass.
+This requirement is just a conceptual one,
+without specific technical restrictions in the implementation.
+Note that, for all the similarities between Langevin and Brownian Dynamics,
+the corresponding temporal constraint for the Langevin thermostat is the opposite one.
+In Brownian Dynamics, the velocity is restarted from zero at every step.
+Formally, the velocity from the previous step is fully dissipated at the beginning
+of the :math:`\Delta t` interval and contributes neither to the velocity at the end
+of the interval nor to the positional random walk.
+Another temporal constraint,
+valid for both Langevin and Brownian Dynamics, is that conservative forces
+should not change significantly over the :math:`\Delta t` interval.
+
+The viscous terminal velocity :math:`\Delta v` and the corresponding positional
+step :math:`\Delta r` are fully driven by the conservative forces :math:`F`:
+
+.. math:: \Delta r = \frac{F \cdot \Delta t}{\gamma}
+
+.. math:: \Delta v = \frac{F}{\gamma}
+
+The positional random walk variance of each coordinate :math:`\sigma_p^2`
+corresponds to a diffusion within the Wiener process:
+
+.. math:: \sigma_p^2 = 2 \frac{kT}{\gamma} \cdot \Delta t
+
+Each velocity component random walk variance :math:`\sigma_v^2` is defined by the heat
+component:
+
+.. math:: \sigma_v^2 = \frac{kT}{m}
+
+Note: the velocity random walk is propagated from zero at each step.
+
+Rotational motion is implemented similarly.
+Note: the rotational Brownian Dynamics implementation is only compatible with particles
+whose moment of inertia tensor is isotropic. Otherwise, the viscous terminal angular velocity
+is not defined, i.e. it has no constant direction over time.
+
+.. _Isotropic NpT thermostat:
+
+Isotropic NpT thermostat
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+This feature allows one to simulate an (on average) homogeneous and isotropic system in the NpT ensemble.
+In order to use this feature, ``NPT`` has to be defined in :file:`myconfig.hpp`.
+Activate the NpT thermostat with the command :py:meth:`~espressomd.thermostat.Thermostat.set_npt`
+and set up the integrator for the NpT ensemble with :py:meth:`~espressomd.integrate.IntegratorHandle.set_isotropic_npt`.
+
+For example::
+
+    import espressomd
+
+    system = espressomd.System(box_l=[1, 1, 1])
+    system.thermostat.set_npt(kT=1.0, gamma0=1.0, gammav=1.0, seed=41)
+    system.integrator.set_isotropic_npt(ext_pressure=1.0, piston=1.0)
+
+For an explanation of the algorithm involved, see :ref:`Isotropic NpT integrator`.
+
+Be aware that this feature is neither properly examined for all systems
+nor is it maintained regularly. If you use it and notice strange
+behavior, please contribute to solving the problem.
+
+.. _Dissipative Particle Dynamics (DPD):
+
+Dissipative Particle Dynamics (DPD)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The DPD thermostat adds friction and noise to the particle
+dynamics like the :ref:`Langevin thermostat`, but these
+are not applied to every particle individually but instead
+encoded in a dissipative interaction between particles :cite:`soddeman03a`.
+
+To realize a complete DPD fluid model in |es|, three parts are needed:
+the DPD thermostat, which controls the temperature, a dissipative interaction
+between the particles that make up the fluid, see :ref:`DPD interaction`,
+and a repulsive conservative force, see :ref:`Hat interaction`.
+
+The temperature is set via
+:py:meth:`espressomd.thermostat.Thermostat.set_dpd`
+which takes ``kT`` and ``seed`` as arguments.
+
+The friction coefficients and cutoff are controlled via the
+:ref:`DPD interaction` on a per type-pair basis.
+
+The friction (dissipative) and noise (random) terms are coupled via the
+fluctuation-dissipation theorem. The friction term is a function of the
+relative velocity of particle pairs. The DPD thermostat is better for
+dynamics than the Langevin thermostat, since it mimics hydrodynamics in
+the system.
+
+As a conservative force any interaction potential can be used,
+see :ref:`Isotropic non-bonded interactions`. A common choice is
+a force ramp which is implemented as :ref:`Hat interaction`.
+
+A complete example of setting up a DPD fluid and running it
+to sample the equation of state can be found in :file:`/samples/dpd.py`.
+
+When using a Lennard-Jones interaction, :math:`{r_\mathrm{cut}} =
+2^{\frac{1}{6}} \sigma` is a good value to choose, so that the
+thermostat acts on the relative velocities between nearest neighbor
+particles. Larger cutoffs including next nearest neighbors or even more
+are unphysical.
+
+Boundary conditions for DPD can be introduced by adding the boundary
+as a particle constraint, and setting a velocity and a type on it, see
+:class:`espressomd.constraints.Constraint`. Then a
+:ref:`DPD interaction` with the type can be defined, which acts as a
+boundary condition.
+
+.. _LB thermostat:
+
+Lattice-Boltzmann thermostat
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :ref:`Lattice-Boltzmann` thermostat acts similarly to the :ref:`Langevin thermostat` in that the governing equation for particles is
+
+.. math:: m_i \dot{v}_i(t) = f_i(\{x_j\},v_i,t) - \gamma (v_i(t)-u(x_i(t),t)) + \sqrt{2\gamma k_B T} \eta_i(t).
+
+where :math:`u(x,t)` is the fluid velocity at position :math:`x` and time :math:`t`.
+To preserve momentum, an equal and opposite friction force and random force act on the fluid.
+
+Numerically, the fluid velocity is determined from the lattice-Boltzmann node velocities
+by interpolating as described in :ref:`Interpolating velocities`.
+The backcoupling of friction forces and noise to the fluid is also done by distributing those forces amongst the nearest LB nodes.
+Details for both the interpolation and the force distribution can be found in :cite:`ahlrichs99` and :cite:`duenweg08a`.
+
+The LB fluid can be used to thermalize particles, while also including their hydrodynamic interactions.
+The LB thermostat expects an instance of either :class:`espressomd.lb.LBFluid` or :class:`espressomd.lb.LBFluidGPU`.
+Temperature is set via the ``kT`` argument of the LB fluid.
+
+The magnitude of the frictional coupling can be adjusted by the
+parameter ``gamma``. 
To enable the LB thermostat, use:: + + import espressomd + import espressomd.lb + system = espressomd.System(box_l=[1, 1, 1]) + lbf = espressomd.lb.LBFluid(agrid=1, dens=1, visc=1, tau=0.01) + system.actors.add(lbf) + system.thermostat.set_lb(LB_fluid=lbf, seed=123, gamma=1.5) + +No other thermostatting mechanism is necessary +then. Please switch off any other thermostat before starting the LB +thermostatting mechanism. + +The LBM implementation provides a fully thermalized LB fluid, all +nonconserved modes, including the pressure tensor, fluctuate correctly +according to the given temperature and the relaxation parameters. All +fluctuations can be switched off by setting the temperature to 0. + +.. note:: Coupling between LB and MD only happens if the LB thermostat is set with a :math:`\gamma \ge 0.0`. + +.. _Stokesian thermostat: + +Stokesian thermostat +^^^^^^^^^^^^^^^^^^^^ + +.. note:: + + Requires ``STOKESIAN_DYNAMICS`` external feature, enabled with + ``-DWITH_STOKESIAN_DYNAMICS=ON``. + +In order to thermalize a Stokesian Dynamics simulation, the SD thermostat +needs to be activated via:: + + import espressomd + system = espressomd.System(box_l=[1.0, 1.0, 1.0]) + system.periodicity = [False, False, False] + system.time_step = 0.01 + system.cell_system.skin = 0.4 + system.part.add(pos=[0, 0, 0], rotation=[1, 0, 0], ext_force=[0, 0, -1]) + system.thermostat.set_stokesian(kT=1.0, seed=43) + system.integrator.set_stokesian_dynamics(viscosity=1.0, radii={0: 1.0}) + system.integrator.run(100) + +where ``kT`` denotes the desired temperature of the system, and ``seed`` the +seed for the random number generator. diff --git a/doc/sphinx/system_setup.rst b/doc/sphinx/system_setup.rst index ab25ecdc66b..59b070115c5 100644 --- a/doc/sphinx/system_setup.rst +++ b/doc/sphinx/system_setup.rst @@ -145,7 +145,7 @@ range should be much smaller than the total system size, leaving out all interactions between non-adjacent cells can mean a tremendous speed-up. Moreover, since for constant interaction range, the number of particles in a cell depends only on the density. The number of interactions is -therefore of the order N instead of order :math:`N^2` if one has to +therefore of the order :math:`N` instead of order :math:`N^2` if one has to calculate all pair interactions. .. _N-squared: @@ -154,7 +154,7 @@ N-squared ~~~~~~~~~ Invoking :py:meth:`~espressomd.cellsystem.CellSystem.set_n_square` -selects the very primitive nsquared cellsystem, which calculates +selects the very primitive N-squared cellsystem, which calculates the interactions for all particle pairs. Therefore it loops over all particles, giving an unfavorable computation time scaling of :math:`N^2`. However, algorithms like MMM1D or the plain Coulomb @@ -165,7 +165,7 @@ interactions. :: system.cell_system.set_n_square() -In a multiple processor environment, the nsquared cellsystem uses a +In a multiple processor environment, the N-squared cellsystem uses a simple particle balancing scheme to have a nearly equal number of particles per CPU, :math:`n` nodes have :math:`m` particles, and :math:`p-n` nodes have :math:`m+1` particles, such that @@ -184,318 +184,9 @@ this node is twice as high. For 3 processors, the interactions are 0-0, 1-1, 2-2, 0-1, 1-2, 0-2. Of these interactions, node 0 treats 0-0 and 0-2, node 1 treats 1-1 and 0-1, and node 2 treats 2-2 and 1-2. 
-Therefore it is highly recommended that you use nsquared only with an +Therefore it is highly recommended that you use N-squared only with an odd number of nodes, if with multiple processors at all. -.. _Thermostats: - -Thermostats ------------ - -The thermostat can be controlled by the class :class:`espressomd.thermostat.Thermostat`. -The different thermostats available in |es| will be described in the following -subsections. - -You may combine different thermostats at your own risk by turning them on -one by one. The list of active thermostats can be cleared at any time with -:py:meth:`system.thermostat.turn_off() `. -Not all combinations of thermostats are allowed, though (see -:py:func:`espressomd.thermostat.AssertThermostatType` for details). -Some integrators only work with a specific thermostat and throw an -error otherwise. Note that there is only one temperature for all -thermostats, although for some thermostats like the Langevin thermostat, -particles can be assigned individual temperatures. - -Since |es| does not enforce a particular unit system, it cannot know about -the current value of the Boltzmann constant. Therefore, when specifying -the temperature of a thermostat, you actually do not define the -temperature, but the value of the thermal energy :math:`k_B T` in the -current unit system (see the discussion on units, Section :ref:`On units`). - -All thermostats have a ``seed`` argument that controls the state of the random -number generator (Philox Counter-based RNG). This seed is required on first -activation of a thermostat, unless stated otherwise. It can be omitted in -subsequent calls of the method that activates the same thermostat. The random -sequence also depends on the thermostats counters that are -incremented after each integration step. - -.. _Langevin thermostat: - -Langevin thermostat -~~~~~~~~~~~~~~~~~~~ - -In order to activate the Langevin thermostat the member function -:py:meth:`~espressomd.thermostat.Thermostat.set_langevin` of the thermostat -class :class:`espressomd.thermostat.Thermostat` has to be invoked. -Best explained in an example:: - - import espressomd - system = espressomd.System(box_l=[1, 1, 1]) - system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=41) - -As explained before the temperature is set as thermal energy :math:`k_\mathrm{B} T`. - -The Langevin thermostat is based on an extension of Newton's equation of motion to - -.. math:: m_i \dot{v}_i(t) = f_i(\{x_j\},v_i,t) - \gamma v_i(t) + \sqrt{2\gamma k_B T} \eta_i(t). - -Here, :math:`f_i` are all deterministic forces from interactions, -:math:`\gamma` the bare friction coefficient and :math:`\eta` a random, "thermal" force. -The friction term accounts for dissipation in a surrounding fluid whereas -the random force mimics collisions of the particle with solvent molecules -at temperature :math:`T` and satisfies - -.. math:: <\eta(t)> = 0 , <\eta^\alpha_i(t)\eta^\beta_j(t')> = \delta_{\alpha\beta} \delta_{ij}\delta(t-t') - -(:math:`<\cdot>` denotes the ensemble average and :math:`\alpha,\beta` are spatial coordinates). - -In the |es| implementation of the Langevin thermostat, -the additional terms only enter in the force calculation. -This reduces the accuracy of the Velocity Verlet integrator -by one order in :math:`dt` because forces are now velocity-dependent. - -The random process :math:`\eta(t)` is discretized by drawing an uncorrelated random number -:math:`\overline{\eta}` for each component of all the particle forces. 
-The distribution of :math:`\overline{\eta}` is uniform and satisfies - -.. math:: <\overline{\eta}> = 0 , <\overline{\eta}\overline{\eta}> = 1/dt - -If the feature ``ROTATION`` is compiled in, the rotational degrees of freedom are -also coupled to the thermostat. If only the first two arguments are -specified then the friction coefficient for the rotation is set to the -same value as that for the translation. -A separate rotational friction coefficient can be set by inputting -``gamma_rotate``. The two options allow one to switch the translational and rotational -thermalization on or off separately, maintaining the frictional behavior. This -can be useful, for instance, in high Péclet number active matter systems, where -one only wants to thermalize only the rotational degrees of freedom and -translational motion is affected by the self-propulsion. - -The keywords ``gamma`` and ``gamma_rotate`` can be specified as a scalar, -or, with feature ``PARTICLE_ANISOTROPY`` compiled in, as the three eigenvalues -of the respective friction coefficient tensor. This is enables the simulation of -the anisotropic diffusion of anisotropic colloids (rods, etc.). - -Using the Langevin thermostat, it is possible to set a -friction coefficient for every particle individually via the feature -``THERMOSTAT_PER_PARTICLE``. Consult the reference of the ``part`` command -(chapter :ref:`Setting up particles`) for information on how to achieve this. - -.. _LB thermostat: - -Lattice-Boltzmann thermostat -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The :ref:`Lattice-Boltzmann` thermostat acts similar to the :ref:`Langevin thermostat` in that the governing equation for particles is - -.. math:: m_i \dot{v}_i(t) = f_i(\{x_j\},v_i,t) - \gamma (v_i(t)-u(x_i(t),t)) + \sqrt{2\gamma k_B T} \eta_i(t). - -where :math:`u(x,t)` is the fluid velocity at position :math:`x` and time :math:`t`. -To preserve momentum, an equal and opposite friction force and random force act on the fluid. - -Numerically the fluid velocity is determined from the lattice-Boltzmann node velocities -by interpolating as described in :ref:`Interpolating velocities`. -The backcoupling of friction forces and noise to the fluid is also done by distributing those forces amongst the nearest LB nodes. -Details for both the interpolation and the force distribution can be found in :cite:`ahlrichs99` and :cite:`duenweg08a`. - -The LB fluid can be used to thermalize particles, while also including their hydrodynamic interactions. -The LB thermostat expects an instance of either :class:`espressomd.lb.LBFluid` or :class:`espressomd.lb.LBFluidGPU`. -Temperature is set via the ``kT`` argument of the LB fluid. - -The magnitude of the frictional coupling can be adjusted by the -parameter ``gamma``. To enable the LB thermostat, use:: - - import espressomd - import espressomd.lb - system = espressomd.System(box_l=[1, 1, 1]) - lbf = espressomd.lb.LBFluid(agrid=1, dens=1, visc=1, tau=0.01) - system.actors.add(lbf) - system.thermostat.set_lb(LB_fluid=lbf, seed=123, gamma=1.5) - -No other thermostatting mechanism is necessary -then. Please switch off any other thermostat before starting the LB -thermostatting mechanism. - -The LBM implementation provides a fully thermalized LB fluid, all -nonconserved modes, including the pressure tensor, fluctuate correctly -according to the given temperature and the relaxation parameters. All -fluctuations can be switched off by setting the temperature to 0. - -.. 
note:: Coupling between LB and MD only happens if the LB thermostat is set with a :math:`\gamma \ge 0.0`. - - -.. _Dissipative Particle Dynamics (DPD): - -Dissipative Particle Dynamics (DPD) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The DPD thermostat adds friction and noise to the particle -dynamics like the :ref:`Langevin thermostat`, but these -are not applied to every particle individually but instead -encoded in a dissipative interaction between particles :cite:`soddeman03a`. - -To realize a complete DPD fluid model in |es|, three parts are needed: -the DPD thermostat, which controls the temperate, a dissipative interaction -between the particles that make up the fluid, see :ref:`DPD interaction`, -and a repulsive conservative force, see :ref:`Hat interaction`. - -The temperature is set via -:py:meth:`espressomd.thermostat.Thermostat.set_dpd` -which takes ``kT`` and ``seed`` as arguments. - -The friction coefficients and cutoff are controlled via the -:ref:`DPD interaction` on a per type-pair basis. For details -see there. - -The friction (dissipative) and noise (random) term are coupled via the -fluctuation-dissipation theorem. The friction term is a function of the -relative velocity of particle pairs. The DPD thermostat is better for -dynamics than the Langevin thermostat, since it mimics hydrodynamics in -the system. - -As a conservative force any interaction potential can be used, -see :ref:`Isotropic non-bonded interactions`. A common choice is -a force ramp which is implemented as :ref:`Hat interaction`. - -A complete example of setting up a DPD fluid and running it -to sample the equation of state can be found in :file:`/samples/dpd.py`. - -When using a Lennard-Jones interaction, :math:`{r_\mathrm{cut}} = -2^{\frac{1}{6}} \sigma` is a good value to choose, so that the -thermostat acts on the relative velocities between nearest neighbor -particles. Larger cutoffs including next nearest neighbors or even more -are unphysical. - -Boundary conditions for DPD can be introduced by adding the boundary -as a particle constraint, and setting a velocity and a type on it, see -:class:`espressomd.constraints.Constraint`. Then a -:ref:`DPD interaction` with the type can be defined, which acts as a -boundary condition. - -.. _Isotropic NPT thermostat: - -Isotropic NPT thermostat -~~~~~~~~~~~~~~~~~~~~~~~~ - -This feature allows to simulate an (on average) homogeneous and isotropic system in the NPT ensemble. -In order to use this feature, ``NPT`` has to be defined in the :file:`myconfig.hpp`. -Activate the NPT thermostat with the command :py:meth:`~espressomd.thermostat.Thermostat.set_npt` -and setup the integrator for the NPT ensemble with :py:meth:`~espressomd.integrate.IntegratorHandle.set_isotropic_npt`. - -For example:: - - import espressomd - - system = espressomd.System(box_l=[1, 1, 1]) - system.thermostat.set_npt(kT=1.0, gamma0=1.0, gammav=1.0, seed=41) - system.integrator.set_isotropic_npt(ext_pressure=1.0, piston=1.0) - -For an explanation of the algorithm involved, see :ref:`Isotropic NPT integrator`. - -Be aware that this feature is neither properly examined for all systems -nor is it maintained regularly. If you use it and notice strange -behavior, please contribute to solving the problem. - -.. 
_Brownian thermostat: - -Brownian thermostat -~~~~~~~~~~~~~~~~~~~ - -Brownian thermostat is a formal name of a thermostat enabling the -Brownian Dynamics feature (see :cite:`schlick2010`) which implies -a propagation scheme involving systematic and thermal parts of the -classical Ermak-McCammom's (see :cite:`ermak78a`) -Brownian Dynamics. Currently it is implemented without -hydrodynamic interactions, i.e. -with a diagonal diffusion tensor. -The hydrodynamic interactions feature will be available later -as a part of the present Brownian Dynamics or -implemented separately within the Stokesian Dynamics. - -In order to activate the Brownian thermostat, the member function -:py:attr:`~espressomd.thermostat.Thermostat.set_brownian` of the thermostat -class :class:`espressomd.thermostat.Thermostat` has to be invoked. -The system integrator should be also changed. -Best explained in an example:: - - import espressomd - system = espressomd.System(box_l=[1, 1, 1]) - system.thermostat.set_brownian(kT=1.0, gamma=1.0, seed=41) - system.integrator.set_brownian_dynamics() - -where ``gamma`` (hereinafter :math:`\gamma`) is a viscous friction coefficient. -In terms of the Python interface and setup, the Brownian thermostat is very -similar to the :ref:`Langevin thermostat`. The feature -``THERMOSTAT_PER_PARTICLE`` is used to control the per-particle -friction coefficient setup. The major differences are -its internal integrator implementation and other temporal constraints. -The integrator is still a symplectic Velocity Verlet-like one. -It is implemented via a viscous drag part and a random walk of both the position and -velocity. Due to a nature of the Brownian Dynamics method, its time step :math:`\Delta t` -should be large enough compared to the relaxation time -:math:`m/\gamma` where :math:`m` is the particle mass. -This requirement is just a conceptual one -without specific implementation technical restrictions. -Note that with all similarities of -Langevin and Brownian Dynamics, the Langevin thermostat temporal constraint -is opposite. A velocity is restarting from zero at every step. -Formally, the previous step velocity at the beginning of the the :math:`\Delta t` interval -is dissipated further -and does not contribute to the end one as well as to the positional random walk. -Another temporal constraint -which is valid for both Langevin and Brownian Dynamics: conservative forces -should not change significantly over the :math:`\Delta t` interval. - -The viscous terminal velocity :math:`\Delta v` and corresponding positional -step :math:`\Delta r` are fully driven by conservative forces :math:`F`: - -.. math:: \Delta r = \frac{F \cdot \Delta t}{\gamma} - -.. math:: \Delta v = \frac{F}{\gamma} - -A positional random walk variance of each coordinate :math:`\sigma_p^2` -corresponds to a diffusion within the Wiener process: - -.. math:: \sigma_p^2 = 2 \frac{kT}{\gamma} \cdot \Delta t - -Each velocity component random walk variance :math:`\sigma_v^2` is defined by the heat -component: - -.. math:: \sigma_v^2 = \frac{kT}{m} - -Note that the velocity random walk is propagated from zero at each step. - -A rotational motion is implemented similarly. -Note: the rotational Brownian dynamics implementation is compatible with particles which have -the isotropic moment of inertia tensor only. Otherwise, the viscous terminal angular velocity -is not defined, i.e. it has no constant direction over the time. - -.. _Stokesian thermostat: - -Stokesian thermostat -~~~~~~~~~~~~~~~~~~~~ - -.. 
note:: - - Requires ``STOKESIAN_DYNAMICS`` external feature, enabled with - ``-DWITH_STOKESIAN_DYNAMICS=ON``. - -In order to thermalize a Stokesian Dynamics simulation, the SD thermostat -needs to be activated via:: - - import espressomd - system = espressomd.System(box_l=[1.0, 1.0, 1.0]) - system.periodicity = [False, False, False] - system.time_step = 0.01 - system.cell_system.skin = 0.4 - system.part.add(pos=[0, 0, 0], rotation=[1, 0, 0], ext_force=[0, 0, -1]) - system.thermostat.set_stokesian(kT=1.0, seed=43) - system.integrator.set_stokesian_dynamics(viscosity=1.0, radii={0: 1.0}) - system.integrator.run(100) - -where ``kT`` denotes the desired temperature of the system, and ``seed`` the -seed for the random number generator. - .. _CUDA: diff --git a/doc/tutorials/charged_system/charged_system-1.ipynb b/doc/tutorials/charged_system/charged_system-1.ipynb index 485150305b3..d743b8731db 100644 --- a/doc/tutorials/charged_system/charged_system-1.ipynb +++ b/doc/tutorials/charged_system/charged_system-1.ipynb @@ -506,7 +506,7 @@ " * resets the system clock\n", "\n", "**Hints:**\n", - "* The relevant parts of the documentation can be found here: [Thermostats](http://espressomd.org/html/doc/system_setup.html#thermostats), [Particle List](http://espressomd.org/html/doc/espressomd.html#espressomd.particle_data.ParticleList),\n", + "* The relevant parts of the documentation can be found here: [Thermostats](http://espressomd.org/html/doc/running.html#thermostats), [ParticleList](http://espressomd.org/html/doc/espressomd.html#espressomd.particle_data.ParticleList),\n", "[AutoUpdateAccumulators](http://espressomd.org/html/doc/espressomd.html#espressomd.accumulators.AutoUpdateAccumulators),\n", "[System properties](http://espressomd.org/html/doc/espressomd.html#module-espressomd.system)" ] diff --git a/doc/tutorials/convert.py b/doc/tutorials/convert.py index b714a3c216e..a7477763a50 100644 --- a/doc/tutorials/convert.py +++ b/doc/tutorials/convert.py @@ -61,7 +61,7 @@ def add_cell_from_script(nb, filepath): if m and all(x.startswith('#') for x in m.group(0).strip().split('\n')): code = re.sub('^(#\n)+', '', code.replace(m.group(0), ''), re.M) # strip first component in relative paths - code = re.sub('(?<=[\'\"])\.\./', './', code) + code = re.sub(r'(?<=[\'\"])\.\./', './', code) # create new cells filename = os.path.relpath(os.path.realpath(filepath)) if len(filename) > len(filepath): diff --git a/maintainer/CI/build_cmake.sh b/maintainer/CI/build_cmake.sh index 929f22b10fd..bdd690b296d 100755 --- a/maintainer/CI/build_cmake.sh +++ b/maintainer/CI/build_cmake.sh @@ -114,7 +114,7 @@ if [ "${with_coverage}" = true ] || [ "${with_coverage_python}" = true ] ; then bash <(curl -s https://codecov.io/env) 1>/dev/null 2>&1 fi -cmake_params="-DCMAKE_BUILD_TYPE=${build_type} -DWARNINGS_ARE_ERRORS=ON -DTEST_NP:INT=${check_procs} ${cmake_params}" +cmake_params="-DCMAKE_BUILD_TYPE=${build_type} -DWARNINGS_ARE_ERRORS=ON -DCTEST_ARGS=-j${check_procs} ${cmake_params}" cmake_params="${cmake_params} -DCMAKE_INSTALL_PREFIX=/tmp/espresso-unit-tests" cmake_params="${cmake_params} -DTEST_TIMEOUT=${test_timeout}" diff --git a/maintainer/CI/doc_warnings.sh b/maintainer/CI/doc_warnings.sh index 75030e53914..e615939cd5e 100755 --- a/maintainer/CI/doc_warnings.sh +++ b/maintainer/CI/doc_warnings.sh @@ -108,7 +108,7 @@ if [ "${?}" = "0" ]; then fi if [ "${CI}" != "" ]; then - "${srcdir}/maintainer/gh_post_docs_warnings.py" sphinx ${n_warnings} doc_warnings.log || exit 1 + "${srcdir}/maintainer/gh_post_docs_warnings.py" 
sphinx "${n_warnings}" doc_warnings.log || exit 1 fi if [ "${n_warnings}" = "0" ]; then diff --git a/maintainer/CI/dox_warnings.sh b/maintainer/CI/dox_warnings.sh index b60ca18fc5b..c3ca43b4ddd 100755 --- a/maintainer/CI/dox_warnings.sh +++ b/maintainer/CI/dox_warnings.sh @@ -34,7 +34,7 @@ for supported_version in 1.8.11 1.8.13 1.8.17; do done if [ "${dox_version_supported}" = false ]; then echo "Doxygen version ${dox_version} not fully supported" >&2 - if [ ! -z "${CI}" ]; then + if [ -n "${CI}" ]; then exit 1 fi echo "Proceeding anyway" @@ -89,7 +89,7 @@ rm -f dox_warnings_summary.log # print summary cat dox_warnings_summary.log -n_warnings=$(cat dox_warnings_summary.log | wc -l) +n_warnings=$(wc -l < dox_warnings_summary.log) if [ "${CI}" != "" ]; then "${srcdir}/maintainer/gh_post_docs_warnings.py" doxygen "${n_warnings}" dox_warnings_summary.log || exit 2 diff --git a/maintainer/CI/fix_style.sh b/maintainer/CI/fix_style.sh index 6e98cc51e45..a882c1c5d5c 100755 --- a/maintainer/CI/fix_style.sh +++ b/maintainer/CI/fix_style.sh @@ -16,7 +16,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -cd "$(git rev-parse --show-toplevel)" +cd "$(git rev-parse --show-toplevel)" || exit 1 if ! git diff-index --quiet HEAD -- && [ "${1}" != "-f" ]; then echo "Warning, your working tree is not clean. Please commit your changes." @@ -30,24 +30,28 @@ pre_commit_return_code="${?}" git diff-index --quiet HEAD -- git_diff_return_code="${?}" -if [ "${pre_commit_return_code}" = 1 ]; then - if [ "${git_diff_return_code}" = 1 ]; then - if [ "${CI}" != "" ]; then - git --no-pager diff > style.patch - maintainer/gh_post_style_patch.py || exit 1 - echo "Failed style check. Download ${CI_JOB_URL}/artifacts/raw/style.patch to see which changes are necessary." >&2 - else: - echo "Failed style check." >&2 - fi + +pylint_errors=0 +if [ -f "pylint.log" ]; then + pylint_errors=$(grep -Pc '^[a-z]+/.+?.py:[0-9]+:[0-9]+: [CRWEF][0-9]+:' "pylint.log") +fi + +if [ -n "${CI}" ]; then + git --no-pager diff > style.patch + maintainer/gh_post_style_patch.py || exit 1 + maintainer/gh_post_pylint.py "${pylint_errors}" pylint.log || exit 1 + if [ "${git_diff_return_code}" != 0 ]; then + echo "Failed style check. Download ${CI_JOB_URL}/artifacts/raw/style.patch to see which changes are necessary." >&2 + fi + if [ "${pylint_errors}" != 0 ]; then + echo "Failed pylint check: ${pylint_errors} errors" >&2 + fi +else + if [ "${git_diff_return_code}" != 0 ]; then + echo "Failed style check." >&2 fi - if [ -f "pylint.log" ]; then - if [ "${CI}" != "" ]; then - errors=$(grep -Pc '^[a-z]+/.+?.py:[0-9]+:[0-9]+: [CRWEF][0-9]+:' pylint.log) - maintainer/gh_post_pylint.py "${errors}" pylint.log || exit 1 - echo "Failed pylint check: ${errors} errors" >&2 - else - echo "Failed pylint check." >&2 - fi + if [ "${pylint_errors}" != 0 ]; then + echo "Failed pylint check." >&2 fi fi diff --git a/maintainer/add_missing_headers.sh b/maintainer/add_missing_headers.sh index 5111dd7f200..1023a6d6027 100755 --- a/maintainer/add_missing_headers.sh +++ b/maintainer/add_missing_headers.sh @@ -19,7 +19,7 @@ # Add copyright header to C++, CUDA, Doxygen, Python and shell files. 
-cd "$(git rev-parse --show-toplevel)" +cd "$(git rev-parse --show-toplevel)" || exit 1 # Get files without headers all_files=$(maintainer/files_with_header.sh) diff --git a/maintainer/benchmarks/CMakeLists.txt b/maintainer/benchmarks/CMakeLists.txt index eb15f156e0e..5a6c5907652 100644 --- a/maintainer/benchmarks/CMakeLists.txt +++ b/maintainer/benchmarks/CMakeLists.txt @@ -1,8 +1,5 @@ -if(NOT DEFINED TEST_NP) - include(ProcessorCount) - processorcount(NP) - math(EXPR TEST_NP "${NP}/2 + 1") -endif() +include(ProcessorCount) +processorcount(NP) if(EXISTS ${MPIEXEC}) # OpenMPI 3.0 and higher checks the number of processes against the number of @@ -49,27 +46,14 @@ function(PYTHON_BENCHMARK) endif() # parallel schemes if(EXISTS ${MPIEXEC} AND ${BENCHMARK_RUN_WITH_MPI}) - set(BENCHMARK_CONFIGURATIONS "0") - if(${NP} GREATER 0 AND ${BENCHMARK_MAX_NUM_PROC} GREATER 0 - AND ${BENCHMARK_MIN_NUM_PROC} LESS 2) - list(APPEND BENCHMARK_CONFIGURATIONS 1) - endif() - if(${NP} GREATER 1 AND ${BENCHMARK_MAX_NUM_PROC} GREATER 1 - AND ${BENCHMARK_MIN_NUM_PROC} LESS 3) - list(APPEND BENCHMARK_CONFIGURATIONS 2) - endif() - if(${NP} GREATER 3 AND ${BENCHMARK_MAX_NUM_PROC} GREATER 3 - AND ${BENCHMARK_MIN_NUM_PROC} LESS 5) - list(APPEND BENCHMARK_CONFIGURATIONS 4) - endif() - if(${NP} GREATER 7 AND ${BENCHMARK_MAX_NUM_PROC} GREATER 7 - AND ${BENCHMARK_MIN_NUM_PROC} LESS 9) - list(APPEND BENCHMARK_CONFIGURATIONS 8) - endif() - if(${NP} GREATER 15 AND ${BENCHMARK_MAX_NUM_PROC} GREATER 15 - AND ${BENCHMARK_MIN_NUM_PROC} LESS 17) - list(APPEND BENCHMARK_CONFIGURATIONS 16) - endif() + set(BENCHMARK_CONFIGURATIONS "sentinel") + foreach(nproc 1 2 4 8 16) + if(${BENCHMARK_MAX_NUM_PROC} GREATER_EQUAL ${nproc} + AND ${BENCHMARK_MIN_NUM_PROC} LESS_EQUAL ${nproc} + AND ${NP} GREATER_EQUAL ${nproc}) + list(APPEND BENCHMARK_CONFIGURATIONS ${nproc}) + endif() + endforeach(nproc) list(REMOVE_AT BENCHMARK_CONFIGURATIONS 0) foreach(nproc IN LISTS BENCHMARK_CONFIGURATIONS) set(BENCHMARK_TEST_NAME benchmark__${BENCHMARK_NAME}__parallel_${nproc}) diff --git a/maintainer/benchmarks/suite.sh b/maintainer/benchmarks/suite.sh index b419742773a..10d2efefd91 100644 --- a/maintainer/benchmarks/suite.sh +++ b/maintainer/benchmarks/suite.sh @@ -49,7 +49,7 @@ mkdir "${build_dir}" cd "${build_dir}" # check for unstaged changes -if [ ! -z "$(git status --porcelain -- ${directories})" ]; then +if [ -n "$(git status --porcelain -- ${directories})" ]; then echo "fatal: you have unstaged changes, please commit or stash them:" git diff-index --name-only HEAD -- ${directories} exit 1 diff --git a/maintainer/find_potentially_missing_authors.sh b/maintainer/find_potentially_missing_authors.sh index 58bb32393f7..7e6a046438d 100755 --- a/maintainer/find_potentially_missing_authors.sh +++ b/maintainer/find_potentially_missing_authors.sh @@ -21,7 +21,7 @@ # based on git commits. The output has to be checked manually, because # users have committed under different spellings and abbreviations. -cd "$(git rev-parse --show-toplevel)" +cd "$(git rev-parse --show-toplevel)" || exit 1 git shortlog -s | cut -f 2 | diff --git a/maintainer/gh_post.py b/maintainer/gh_post.py new file mode 100644 index 00000000000..79829dcd239 --- /dev/null +++ b/maintainer/gh_post.py @@ -0,0 +1,52 @@ +# +# Copyright (C) 2018-2020 The ESPResSo project +# +# This file is part of ESPResSo. 
+# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +import requests + +if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'): + raise RuntimeError("Not a pull request.") + +PR = os.environ['CI_COMMIT_REF_NAME'][3:] +API = 'https://api.github.com' +URL = f'{API}/repos/espressomd/espresso/issues/{PR}/comments' +HEADERS = {'Authorization': 'token ' + os.environ['GITHUB_TOKEN']} +CI_JOB_URL = os.environ['CI_JOB_URL'] +LOGIN = 'espresso-ci' + + +def delete_comments_by_token(token): + """ + Delete posts made by the espresso-ci bot containing the string in ``token``. + """ + comments = requests.get(URL, headers=HEADERS) + comments.raise_for_status() + for comment in comments.json(): + if comment['user']['login'] == LOGIN and token in comment['body']: + print(f"deleting {comment['url']}") + response = requests.delete(comment['url'], headers=HEADERS) + response.raise_for_status() + + +def post_message(message): + """ + Post a message using the espresso-ci bot. + """ + response = requests.post(URL, headers=HEADERS, json={'body': message}) + response.raise_for_status() diff --git a/maintainer/gh_post_docs_warnings.py b/maintainer/gh_post_docs_warnings.py index 684b353d1a3..11b6314d280 100755 --- a/maintainer/gh_post_docs_warnings.py +++ b/maintainer/gh_post_docs_warnings.py @@ -18,33 +18,24 @@ # along with this program. If not, see . # -import re import os -import sys -import requests -if not os.environ['CI_COMMIT_REF_NAME'].startswith('PR-'): +if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'): + print("Not a pull request. 
Exiting now.") exit(0) -PR = os.environ['CI_COMMIT_REF_NAME'][3:] -URL = 'https://api.github.com/repos/espressomd/espresso/issues/' + \ - PR + '/comments' -HEADERS = {'Authorization': 'token ' + os.environ['GITHUB_TOKEN']} -SIZELIMIT = 5000 +import re +import sys +import gh_post doc_type, has_warnings, filepath_warnings = sys.argv[-3:] has_warnings = has_warnings != '0' prefix = {'sphinx': 'doc', 'doxygen': 'dox'}[doc_type] TOKEN_ESPRESSO_CI = prefix + '_warnings.sh' +SIZELIMIT = 5000 -# Delete all existing comments -comments = requests.get(URL, headers=HEADERS) -comments.raise_for_status() -for comment in comments.json(): - if comment['user']['login'] == 'espresso-ci' and \ - TOKEN_ESPRESSO_CI in comment['body']: - response = requests.delete(comment['url'], headers=HEADERS) - response.raise_for_status() +# Delete obsolete posts +gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI) # If documentation raised warnings, post a new comment if has_warnings: @@ -66,16 +57,13 @@ comment += line + '\n' comment = comment.rstrip() + '\n' + backticks + '\n' comment += ( - '\nThis list was truncated, check the [container logfile]' - '({}) for the complete list.\n'.format(os.environ['CI_JOB_URL'])) + f'\nThis list was truncated, check the [container logfile]' + f'({gh_post.CI_JOB_URL}) for the complete list.\n') else: comment += warnings.rstrip() + '\n' + backticks + '\n' comment += ( - '\nYou can generate these warnings with `make -t; make {}; ' - '../maintainer/CI/{}_warnings.sh` using the maxset config. This is ' - 'the same command that I have executed to generate the log above.' - .format(doc_type, prefix)) + f'\nYou can generate these warnings with `make -t; make {doc_type}; ' + f'../maintainer/CI/{prefix}_warnings.sh` using the maxset config. This ' + f'is the same command that I have executed to generate the log above.') assert TOKEN_ESPRESSO_CI in comment - - response = requests.post(URL, headers=HEADERS, json={'body': comment}) - response.raise_for_status() + gh_post.post_message(comment) diff --git a/maintainer/gh_post_pylint.py b/maintainer/gh_post_pylint.py index 3426e951b7d..99d193763d7 100755 --- a/maintainer/gh_post_pylint.py +++ b/maintainer/gh_post_pylint.py @@ -17,31 +17,23 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import re import os -import sys -import requests -if not os.environ['CI_COMMIT_REF_NAME'].startswith('PR-'): +if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'): + print("Not a pull request. 
Exiting now.") exit(0) -PR = os.environ['CI_COMMIT_REF_NAME'][3:] -URL = 'https://api.github.com/repos/espressomd/espresso/issues/' + \ - PR + '/comments' -HEADERS = {'Authorization': 'token ' + os.environ['GITHUB_TOKEN']} +import re +import sys +import gh_post + SIZELIMIT = 5000 TOKEN_ESPRESSO_CI = 'Pylint summary' n_warnings, filepath_warnings = sys.argv[-2:] -# Delete older pylint messages -comments = requests.get(URL, headers=HEADERS) -comments.raise_for_status() -for comment in comments.json(): - if comment['user']['login'] == 'espresso-ci' and \ - TOKEN_ESPRESSO_CI in comment['body']: - response = requests.delete(comment['url'], headers=HEADERS) - response.raise_for_status() +# Delete obsolete posts +gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI) # If pylint raised errors, post a new comment if n_warnings != '0': @@ -60,8 +52,8 @@ comment += line + '\n' comment = comment.rstrip() + '\n' + backticks + '\n' comment += ( - '\nThis list was truncated, check the [container logfile]' - '({}) for the complete list.\n'.format(os.environ['CI_JOB_URL'])) + f'\nThis list was truncated, check the [container logfile]' + f'({gh_post.CI_JOB_URL}) for the complete list.\n') else: comment += warnings.rstrip() + '\n' + backticks + '\n' comment += ( @@ -69,6 +61,4 @@ 'This is the same command that I have executed to generate the log above.' ) assert TOKEN_ESPRESSO_CI in comment - - response = requests.post(URL, headers=HEADERS, json={'body': comment}) - response.raise_for_status() + gh_post.post_message(comment) diff --git a/maintainer/gh_post_status.sh b/maintainer/gh_post_status.sh index a0e00c79b00..214e08be9bd 100755 --- a/maintainer/gh_post_status.sh +++ b/maintainer/gh_post_status.sh @@ -16,7 +16,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -[ "${#}" -eq 1 ] || exit -1 +[ "${#}" -eq 1 ] || exit 1 GIT_COMMIT=$(git rev-parse HEAD) URL="https://gitlab.icp.uni-stuttgart.de/espressomd/espresso/pipelines/${CI_PIPELINE_ID}" diff --git a/maintainer/gh_post_style_patch.py b/maintainer/gh_post_style_patch.py index 9cfbd69446c..dd1fc9035af 100755 --- a/maintainer/gh_post_style_patch.py +++ b/maintainer/gh_post_style_patch.py @@ -18,27 +18,19 @@ # along with this program. If not, see . import os -import requests -import subprocess -if not os.environ['CI_COMMIT_REF_NAME'].startswith('PR-'): +if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'): + print("Not a pull request. Exiting now.") exit(0) -PR = os.environ['CI_COMMIT_REF_NAME'][3:] -URL = 'https://api.github.com/repos/espressomd/espresso/issues/' + \ - PR + '/comments' -HEADERS = {'Authorization': 'token ' + os.environ['GITHUB_TOKEN']} +import subprocess +import gh_post + SIZELIMIT = 10000 TOKEN_ESPRESSO_CI = 'style.patch' -# Delete all existing comments -comments = requests.get(URL, headers=HEADERS) -comments.raise_for_status() -for comment in comments.json(): - if comment['user']['login'] == 'espresso-ci' and \ - TOKEN_ESPRESSO_CI in comment['body']: - response = requests.delete(comment['url'], headers=HEADERS) - response.raise_for_status() +# Delete obsolete posts +gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI) MESSAGE = '''Your pull request does not meet our code formatting \ rules. 
{header}, please do one of the following: @@ -72,10 +64,8 @@ comment += 'To apply these changes' else: comment = 'To fix this' - message = MESSAGE.format(header=comment, url=os.environ['CI_JOB_URL']) + comment = MESSAGE.format(header=comment, url=gh_post.CI_JOB_URL) if patch: - assert TOKEN_ESPRESSO_CI in message - response = requests.post(URL, headers=HEADERS, - json={'body': message}) - response.raise_for_status() + assert TOKEN_ESPRESSO_CI in comment + gh_post.post_message(comment) diff --git a/maintainer/update_header.sh b/maintainer/update_header.sh index 28d1104007f..6aaa2aeda3e 100755 --- a/maintainer/update_header.sh +++ b/maintainer/update_header.sh @@ -30,7 +30,7 @@ # To review the diff: # $> git diff --word-diff-regex=. -U0 | grep -Po 'Copyright.+' | sort | uniq -cd "$(git rev-parse --show-toplevel)" +cd "$(git rev-parse --show-toplevel)" || exit 1 files=$(maintainer/files_with_header.sh) num_files=$(echo ${files} | wc -w) diff --git a/samples/gibbs_ensemble_socket.py b/samples/gibbs_ensemble_socket.py index 2c3c0a76003..f0ac722e4ad 100644 --- a/samples/gibbs_ensemble_socket.py +++ b/samples/gibbs_ensemble_socket.py @@ -299,7 +299,6 @@ def check_exchange_particle(boxes, inner_potential, rand_box): return True - # init socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) diff --git a/samples/minimal-polymer.py b/samples/minimal-polymer.py index cc1253c851d..40aa550996e 100644 --- a/samples/minimal-polymer.py +++ b/samples/minimal-polymer.py @@ -47,8 +47,8 @@ beads_per_chain=50, bond_length=1.0, seed=3210) +previous_part = None for pos in positions[0]: - previous_part = None part = system.part.add(pos=pos) if previous_part: part.add_bond((fene, previous_part)) diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index f56d4b158de..cec59fab8cb 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -104,9 +104,9 @@ add_subdirectory(object-in-fluid) add_subdirectory(observables) add_subdirectory(virtual_sites) -if(WITH_UNIT_TESTS) +if(WITH_TESTS) add_subdirectory(unit_tests) -endif(WITH_UNIT_TESTS) +endif(WITH_TESTS) if(STOKESIAN_DYNAMICS) add_subdirectory(stokesian_dynamics) diff --git a/src/core/DomainDecomposition.cpp b/src/core/DomainDecomposition.cpp index 89a14f784f1..4604899ae4b 100644 --- a/src/core/DomainDecomposition.cpp +++ b/src/core/DomainDecomposition.cpp @@ -286,7 +286,7 @@ int DomainDecomposition::calc_processor_min_num_cells() const { void DomainDecomposition::create_cell_grid(double range) { auto const cart_info = Utils::Mpi::cart_get<3>(m_comm); - int i, n_local_cells, new_cells; + int n_local_cells; double cell_range[3]; /* initialize */ @@ -308,10 +308,10 @@ void DomainDecomposition::create_cell_grid(double range) { } else { /* Calculate initial cell grid */ double volume = m_local_box.length()[0]; - for (i = 1; i < 3; i++) + for (int i = 1; i < 3; i++) volume *= m_local_box.length()[i]; double scale = pow(DomainDecomposition::max_num_cells / volume, 1. 
/ 3.); - for (i = 0; i < 3; i++) { + for (int i = 0; i < 3; i++) { /* this is at least 1 */ cell_grid[i] = (int)ceil(m_local_box.length()[i] * scale); cell_range[i] = m_local_box.length()[i] / cell_grid[i]; @@ -344,7 +344,7 @@ void DomainDecomposition::create_cell_grid(double range) { int min_ind = 0; double min_size = cell_range[0]; - for (i = 1; i < 3; i++) { + for (int i = 1; i < 3; i++) { if (cell_grid[i] > 1 && cell_range[i] < min_size) { min_ind = i; min_size = cell_range[i]; @@ -372,8 +372,8 @@ void DomainDecomposition::create_cell_grid(double range) { auto const node_pos = cart_info.coords; /* now set all dependent variables */ - new_cells = 1; - for (i = 0; i < 3; i++) { + int new_cells = 1; + for (int i = 0; i < 3; i++) { ghost_cell_grid[i] = cell_grid[i] + 2; new_cells *= ghost_cell_grid[i]; cell_size[i] = m_local_box.length()[i] / (double)cell_grid[i]; diff --git a/src/core/bonded_interactions/CMakeLists.txt b/src/core/bonded_interactions/CMakeLists.txt index 364998ba22b..acff1e20ca1 100644 --- a/src/core/bonded_interactions/CMakeLists.txt +++ b/src/core/bonded_interactions/CMakeLists.txt @@ -11,5 +11,4 @@ target_sources( ${CMAKE_CURRENT_SOURCE_DIR}/fene.cpp ${CMAKE_CURRENT_SOURCE_DIR}/harmonic.cpp ${CMAKE_CURRENT_SOURCE_DIR}/quartic.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/thermalized_bond.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/umbrella.cpp) + ${CMAKE_CURRENT_SOURCE_DIR}/thermalized_bond.cpp) diff --git a/src/core/bonded_interactions/bonded_interaction_data.cpp b/src/core/bonded_interactions/bonded_interaction_data.cpp index 996f246cef8..ed749a7e3aa 100644 --- a/src/core/bonded_interactions/bonded_interaction_data.cpp +++ b/src/core/bonded_interactions/bonded_interaction_data.cpp @@ -68,8 +68,6 @@ auto cutoff(int type, Bond_parameters const &bp) { return bp.ibmVolConsParameters.cutoff(); case BONDED_IA_IBM_TRIBEND: return bp.ibm_tribend.cutoff(); - case BONDED_IA_UMBRELLA: - return bp.umbrella.cutoff(); case BONDED_IA_THERMALIZED_DIST: return bp.thermalized_bond.cutoff(); default: diff --git a/src/core/bonded_interactions/bonded_interaction_data.hpp b/src/core/bonded_interactions/bonded_interaction_data.hpp index 1157c9af55e..c4577e89cef 100644 --- a/src/core/bonded_interactions/bonded_interaction_data.hpp +++ b/src/core/bonded_interactions/bonded_interaction_data.hpp @@ -27,7 +27,6 @@ #include #include -#include #include /** Type codes of bonded interactions. */ @@ -76,8 +75,6 @@ enum BondedInteraction : int { BONDED_IA_IBM_VOLUME_CONSERVATION, /** Type of bonded interaction is bending force (immersed boundary). */ BONDED_IA_IBM_TRIBEND, - /** Type of bonded interaction is umbrella. */ - BONDED_IA_UMBRELLA, /** Type of bonded interaction is thermalized distance bond. 
*/ BONDED_IA_THERMALIZED_DIST, }; @@ -178,20 +175,6 @@ struct Thermalized_bond_parameters { double cutoff() const { return r_cut; } }; -/** Parameters for harmonic dumbbell bond Potential */ -struct Harmonic_dumbbell_bond_parameters { - /** spring constant */ - double k1; - /** rotation constant */ - double k2; - /** equilibrium bond length */ - double r; - /** cutoff bond length */ - double r_cut; - - double cutoff() const { return r_cut; } -}; - /** Parameters for quartic bond Potential */ struct Quartic_bond_parameters { double k0, k1; @@ -277,15 +260,6 @@ struct Tabulated_bond_parameters { } }; -/** Parameters for umbrella potential */ -struct Umbrella_bond_parameters { - double k; - int dir; - double r; - - double cutoff() const { return std::numeric_limits::infinity(); } -}; - /** Parameters for the rigid_bond/SHAKE/RATTLE ALGORITHM */ struct Rigid_bond_parameters { /** Square of the length of Constrained Bond */ @@ -372,7 +346,6 @@ union Bond_parameters { Angle_cossquare_bond_parameters angle_cossquare; Dihedral_bond_parameters dihedral; Tabulated_bond_parameters tab; - Umbrella_bond_parameters umbrella; Thermalized_bond_parameters thermalized_bond; Rigid_bond_parameters rigid_bond; IBM_Triel_Parameters ibm_triel; diff --git a/src/core/bonded_interactions/thermalized_bond.cpp b/src/core/bonded_interactions/thermalized_bond.cpp index ca10469e751..8960d391a16 100644 --- a/src/core/bonded_interactions/thermalized_bond.cpp +++ b/src/core/bonded_interactions/thermalized_bond.cpp @@ -79,14 +79,3 @@ void thermalized_bond_init() { } } } - -void thermalized_bond_update_params(double pref_scale) { - - for (auto &bonded_ia_param : bonded_ia_params) { - if (bonded_ia_param.type == BONDED_IA_THERMALIZED_DIST) { - Thermalized_bond_parameters &t = bonded_ia_param.p.thermalized_bond; - t.pref2_com *= pref_scale; - t.pref2_dist *= pref_scale; - } - } -} diff --git a/src/core/bonded_interactions/thermalized_bond.hpp b/src/core/bonded_interactions/thermalized_bond.hpp index 5caaf6e5d7b..35001dd5963 100644 --- a/src/core/bonded_interactions/thermalized_bond.hpp +++ b/src/core/bonded_interactions/thermalized_bond.hpp @@ -52,7 +52,6 @@ int thermalized_bond_set_params(int bond_type, double temp_com, double gamma_com, double temp_distance, double gamma_distance, double r_cut); -void thermalized_bond_update_params(double pref_scale); void thermalized_bond_init(); /** Separately thermalizes the com and distance of a particle pair. diff --git a/src/core/bonded_interactions/umbrella.cpp b/src/core/bonded_interactions/umbrella.cpp deleted file mode 100644 index e001a0ef075..00000000000 --- a/src/core/bonded_interactions/umbrella.cpp +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (C) 2010-2019 The ESPResSo project - * Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010 - * Max-Planck-Institute for Polymer Research, Theory Group - * - * This file is part of ESPResSo. - * - * ESPResSo is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * ESPResSo is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -/** \file - * - * Implementation of \ref umbrella.hpp - */ - -#include "config.hpp" - -#include "umbrella.hpp" - -#include "interactions.hpp" - -#include - -int umbrella_set_params(int bond_type, double k, int dir, double r) { - if (bond_type < 0) - return ES_ERROR; - - make_bond_type_exist(bond_type); - - bonded_ia_params[bond_type].p.umbrella.k = k; - bonded_ia_params[bond_type].p.umbrella.dir = dir; - bonded_ia_params[bond_type].p.umbrella.r = r; - bonded_ia_params[bond_type].type = BONDED_IA_UMBRELLA; - bonded_ia_params[bond_type].num = 1; - - /* broadcast interaction parameters */ - mpi_bcast_ia_params(bond_type, -1); - - return ES_OK; -} diff --git a/src/core/bonded_interactions/umbrella.hpp b/src/core/bonded_interactions/umbrella.hpp deleted file mode 100644 index 57fab960746..00000000000 --- a/src/core/bonded_interactions/umbrella.hpp +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) 2010-2019 The ESPResSo project - * Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010 - * Max-Planck-Institute for Polymer Research, Theory Group - * - * This file is part of ESPResSo. - * - * ESPResSo is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * ESPResSo is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#ifndef umbrella_H -#define umbrella_H - -/** \file - * Routines to calculate the umbrella potential (harmonic interaction in only - * one Cartesian direction) between particle pairs. Useful for umbrella - * sampling simulations. - * - * Implementation in \ref umbrella.cpp. - */ - -#include "config.hpp" - -#include "bonded_interactions/bonded_interaction_data.hpp" -#include "nonbonded_interactions/nonbonded_interaction_data.hpp" - -#include -#include - -#include - -/** Set the parameters of an umbrella bond - * - * @retval ES_OK on success - * @retval ES_ERROR on error - */ -int umbrella_set_params(int bond_type, double k, int dir, double r); - -/** Resultant force due to an umbrella potential */ -inline double umbrella_force_r(double k, int dir, double r, double distn) { - return -k * (distn - r); -} - -/** Compute the umbrella bond length force. - * @param[in] ia_params Bonded parameters for the pair interaction. - * @param[in] d %Distance between the particles. - */ -inline boost::optional -umbrella_pair_force(Bonded_ia_parameters const &ia_params, - Utils::Vector3d const &d) { - auto const distn = d[ia_params.p.umbrella.dir]; - auto const fac = -ia_params.p.umbrella.k * (distn - ia_params.p.umbrella.r); - Utils::Vector3d force{}; - force[ia_params.p.umbrella.dir] = fac; - - return force; -} - -/** Compute the umbrella bond length energy. - * @param[in] ia_params Bonded parameters for the pair interaction. - * @param[in] d %Distance between the particles. 
- */ -inline boost::optional -umbrella_pair_energy(Bonded_ia_parameters const &ia_params, - Utils::Vector3d const &d) { - auto const distn = d[ia_params.p.umbrella.dir]; - return 0.5 * ia_params.p.umbrella.k * - Utils::sqr(distn - ia_params.p.umbrella.r); -} - -#endif diff --git a/src/core/cells.cpp b/src/core/cells.cpp index 3ec3554b5fc..4281e6a6ebe 100644 --- a/src/core/cells.cpp +++ b/src/core/cells.cpp @@ -112,9 +112,9 @@ REGISTER_CALLBACK(get_pairs_of_types_local) namespace detail { void search_distance_sanity_check(double const distance) { - /** get_pairs_filtered() finds pairs via the non_bonded_loop. The maximum - *finding range is therefore limited by the decomposition that is used - **/ + /* get_pairs_filtered() finds pairs via the non_bonded_loop. The maximum + * finding range is therefore limited by the decomposition that is used. + */ auto range = *boost::min_element(cell_structure.max_range()); if (distance > range) { runtimeErrorMsg() << "pair search distance " << distance diff --git a/src/core/energy_inline.hpp b/src/core/energy_inline.hpp index 8422eff781a..2a9da8a74a8 100644 --- a/src/core/energy_inline.hpp +++ b/src/core/energy_inline.hpp @@ -37,7 +37,6 @@ #include "bonded_interactions/fene.hpp" #include "bonded_interactions/harmonic.hpp" #include "bonded_interactions/quartic.hpp" -#include "bonded_interactions/umbrella.hpp" #include "nonbonded_interactions/bmhtf-nacl.hpp" #include "nonbonded_interactions/buckingham.hpp" #include "nonbonded_interactions/gaussian.hpp" @@ -242,8 +241,6 @@ calc_bonded_energy(Bonded_ia_parameters const &iaparams, Particle const &p1, #endif case BONDED_IA_TABULATED_DISTANCE: return tab_bond_energy(iaparams, dx); - case BONDED_IA_UMBRELLA: - return umbrella_pair_energy(iaparams, dx); case BONDED_IA_VIRTUAL_BOND: return boost::optional(0); default: diff --git a/src/core/forces_inline.hpp b/src/core/forces_inline.hpp index 6f1dee902d1..50292f92592 100644 --- a/src/core/forces_inline.hpp +++ b/src/core/forces_inline.hpp @@ -35,7 +35,6 @@ #include "bonded_interactions/harmonic.hpp" #include "bonded_interactions/quartic.hpp" #include "bonded_interactions/thermalized_bond.hpp" -#include "bonded_interactions/umbrella.hpp" #include "immersed_boundary/ibm_tribend.hpp" #include "immersed_boundary/ibm_triel.hpp" #include "nonbonded_interactions/bmhtf-nacl.hpp" @@ -333,8 +332,6 @@ calc_bond_pair_force(Particle const &p1, Particle const &p2, #endif case BONDED_IA_TABULATED_DISTANCE: return tab_bond_force(iaparams, dx); - case BONDED_IA_UMBRELLA: - return umbrella_pair_force(iaparams, dx); case BONDED_IA_VIRTUAL_BOND: case BONDED_IA_RIGID_BOND: return Utils::Vector3d{}; diff --git a/src/core/grid_based_algorithms/halo.cpp b/src/core/grid_based_algorithms/halo.cpp index be6686c9837..a7055e684da 100644 --- a/src/core/grid_based_algorithms/halo.cpp +++ b/src/core/grid_based_algorithms/halo.cpp @@ -90,34 +90,27 @@ void halo_create_field_hvector(int vblocks, int vstride, int vskip, } } -void halo_free_fieldtype(Fieldtype *const ftype) { - if ((*ftype)->count > 0) { - free((*ftype)->lengths); - (*ftype)->lengths = nullptr; - } - free(*ftype); -} - /** Set halo region to a given value * @param[out] dest pointer to the halo buffer * @param value integer value to write into the halo buffer * @param type halo field layout description */ void halo_dtset(char *dest, int value, Fieldtype type) { - int vblocks = type->vblocks; - int vstride = type->vstride; - int vskip = type->vskip; - int count = type->count; - int *lens = type->lengths; - int *disps = type->disps; - 
int extent = type->extent; + auto const vblocks = type->vblocks; + auto const vstride = type->vstride; + auto const vskip = type->vskip; + auto const count = type->count; + int const *const lens = type->lengths; + int const *const disps = type->disps; + auto const extent = type->extent; + auto const block_size = static_cast(vskip) * static_cast(extent); for (int i = 0; i < vblocks; i++) { for (int j = 0; j < vstride; j++) { for (int k = 0; k < count; k++) memset(dest + disps[k], value, lens[k]); } - dest += vskip * extent; + dest += block_size; } } @@ -126,18 +119,18 @@ void halo_dtcopy(char *r_buffer, char *s_buffer, int count, Fieldtype type); void halo_copy_vector(char *r_buffer, char *s_buffer, int count, Fieldtype type, bool vflag) { - int vblocks = type->vblocks; - int vstride = type->vstride; - int vskip = type->vskip; - int extent = type->extent; + auto const vblocks = type->vblocks; + auto const vstride = type->vstride; + auto const extent = type->extent; + auto block_size = static_cast(type->vskip); if (vflag) { - vskip *= type->subtype->extent; + block_size *= static_cast(type->subtype->extent); } for (int i = 0; i < count; i++, s_buffer += extent, r_buffer += extent) { char *dest = r_buffer, *src = s_buffer; - for (int j = 0; j < vblocks; j++, dest += vskip, src += vskip) { + for (int j = 0; j < vblocks; j++, dest += block_size, src += block_size) { halo_dtcopy(dest, src, vstride, type->subtype); } } @@ -185,7 +178,7 @@ void prepare_halo_communication(HaloCommunicator *const hc, hc->num = num; hc->halo_info.resize(num); - int extent = fieldtype->extent; + auto const extent = static_cast(fieldtype->extent); auto const node_neighbors = calc_node_neighbors(comm_cart); @@ -210,12 +203,12 @@ void prepare_halo_communication(HaloCommunicator *const hc, if (lr == 0) { /* send to left, recv from right */ - hinfo->s_offset = extent * stride * 1; - hinfo->r_offset = extent * stride * (grid[dir] + 1); + hinfo->s_offset = extent * static_cast(stride * 1); + hinfo->r_offset = extent * static_cast(stride * (grid[dir] + 1)); } else { /* send to right, recv from left */ - hinfo->s_offset = extent * stride * grid[dir]; - hinfo->r_offset = extent * stride * 0; + hinfo->s_offset = extent * static_cast(stride * grid[dir]); + hinfo->r_offset = extent * static_cast(stride * 0); } hinfo->source_node = node_neighbors[2 * dir + 1 - lr]; diff --git a/src/core/grid_based_algorithms/halo.hpp b/src/core/grid_based_algorithms/halo.hpp index 3eb683c96e8..193ee2c6ea4 100644 --- a/src/core/grid_based_algorithms/halo.hpp +++ b/src/core/grid_based_algorithms/halo.hpp @@ -83,9 +83,6 @@ typedef struct { int source_node; /**< index of processor which sends halo data */ int dest_node; /**< index of processor receiving halo data */ - // void *send_buffer; /**< pointer to data being sent */ - // void *recv_buffer; /**< pointer to data being received */ - unsigned long s_offset; /**< offset for send buffer */ unsigned long r_offset; /**< offset for receive buffer */ @@ -117,11 +114,6 @@ void halo_create_field_vector(int vblocks, int vstride, int vskip, void halo_create_field_hvector(int vblocks, int vstride, int vskip, Fieldtype oldtype, Fieldtype *newtype); -/** Frees a fieldtype - * @param ftype pointer to the type to be freed - */ -void halo_free_fieldtype(Fieldtype *ftype); - /** Preparation of the halo parallelization scheme. 
Sets up the * necessary data structures for \ref halo_communication * @param[in,out] hc halo communicator being created diff --git a/src/core/grid_based_algorithms/lattice.cpp b/src/core/grid_based_algorithms/lattice.cpp index c5205cd961b..d0f38453658 100644 --- a/src/core/grid_based_algorithms/lattice.cpp +++ b/src/core/grid_based_algorithms/lattice.cpp @@ -32,6 +32,7 @@ #include #include +#include #include #include #include @@ -112,13 +113,14 @@ void Lattice::map_position_to_lattice(const Utils::Vector3d &pos, delta[dir] = 1.0 - delta[3 + dir]; } node_index[0] = Utils::get_linear_index(ind, halo_grid); - node_index[1] = node_index[0] + 1; - node_index[2] = node_index[0] + halo_grid[0]; - node_index[3] = node_index[0] + halo_grid[0] + 1; - node_index[4] = node_index[0] + halo_grid[0] * halo_grid[1]; - node_index[5] = node_index[4] + 1; - node_index[6] = node_index[4] + halo_grid[0]; - node_index[7] = node_index[4] + halo_grid[0] + 1; + node_index[1] = node_index[0] + 1u; + node_index[2] = node_index[0] + static_cast(halo_grid[0]); + node_index[3] = node_index[0] + static_cast(halo_grid[0]) + 1u; + node_index[4] = node_index[0] + static_cast(halo_grid[0]) * + static_cast(halo_grid[1]); + node_index[5] = node_index[4] + 1u; + node_index[6] = node_index[4] + static_cast(halo_grid[0]); + node_index[7] = node_index[4] + static_cast(halo_grid[0]) + 1u; } Utils::Vector3i Lattice::local_index(Utils::Vector3i const &global_index) const diff --git a/src/core/grid_based_algorithms/lb.cpp b/src/core/grid_based_algorithms/lb.cpp index 818061f5fee..3ff9e68fd9a 100644 --- a/src/core/grid_based_algorithms/lb.cpp +++ b/src/core/grid_based_algorithms/lb.cpp @@ -59,6 +59,7 @@ #include #include #include +#include #include #include #include @@ -883,7 +884,8 @@ auto lb_next_offsets(const Lattice &lb_lattice, std::array const &c) { const Utils::Vector3 strides = { {1, lb_lattice.halo_grid[0], - lb_lattice.halo_grid[0] * lb_lattice.halo_grid[1]}}; + static_cast(lb_lattice.halo_grid[0]) * + static_cast(lb_lattice.halo_grid[1])}}; std::array offsets; boost::transform(c, offsets.begin(), diff --git a/src/core/particle_data.hpp b/src/core/particle_data.hpp index 4806370d527..8fab4251da9 100644 --- a/src/core/particle_data.hpp +++ b/src/core/particle_data.hpp @@ -71,9 +71,9 @@ void clear_particle_node(); /** * @brief Get particle data. * - * @param part the identity of the particle to fetch - * @return Pointer to copy of particle if it exists, - * nullptr otherwise; + * @param part the identity of the particle to fetch + * @return Pointer to copy of particle if it exists, + * nullptr otherwise; */ const Particle &get_particle_data(int part); diff --git a/src/python/espressomd/drude_helpers.py b/src/python/espressomd/drude_helpers.py index 52f588b1e42..21ed4b08e91 100644 --- a/src/python/espressomd/drude_helpers.py +++ b/src/python/espressomd/drude_helpers.py @@ -206,7 +206,7 @@ def setup_and_add_drude_exclusion_bonds(system, verbose=False): # All Drude types need... for td in drude_type_list: - #...exclusions with core + # ...exclusions with core qd = drude_dict[td]["q"] # Drude charge qc = drude_dict[td]["qc"] # Core charge subtr_sr_bond = interactions.BondedCoulombSRBond( @@ -254,9 +254,9 @@ def setup_intramol_exclusion_bonds(system, mol_drude_types, mol_core_types, for td in mol_drude_types: drude_dict[td]["subtr_sr_bonds_intramol"] = {} - #... sr exclusion bond with other partial core charges... + # ... sr exclusion bond with other partial core charges... 
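The overflow-safe index arithmetic in ``Lattice::map_position_to_lattice`` above enumerates the eight corners of the cell enclosing a position. A short Python sketch of that corner-index layout (row-major, x fastest), with names chosen here for illustration::

    # linear indices of the 8 corners of a cell in a halo grid
    def corner_indices(ind, halo_grid):
        gx, gy, _ = halo_grid
        base = ind[0] + gx * (ind[1] + gy * ind[2])
        return [base + dx + gx * dy + gx * gy * dz
                for dz in (0, 1) for dy in (0, 1) for dx in (0, 1)]

    print(corner_indices([1, 1, 1], [4, 4, 4]))  # [21, 22, 25, 26, 37, 38, 41, 42]
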
for tc, qp in zip(mol_core_types, mol_core_partial_charges): - #...excluding the Drude core partner + # ...excluding the Drude core partner if drude_dict[td]["core_type"] != tc: qd = drude_dict[td]["q"] # Drude charge subtr_sr_bond = interactions.BondedCoulombSRBond( diff --git a/src/python/espressomd/interactions.pxd b/src/python/espressomd/interactions.pxd index 765fa702529..6ca8b3a912d 100644 --- a/src/python/espressomd/interactions.pxd +++ b/src/python/espressomd/interactions.pxd @@ -304,30 +304,16 @@ IF TABULATED: double min, double max, vector[double] energy, vector[double] force) -IF ROTATION: - cdef extern from "bonded_interactions/bonded_interaction_data.hpp": - #* Parameters for the harmonic dumbbell bond potential */ - cdef struct Harmonic_dumbbell_bond_parameters: - double k1 - double k2 - double r - double r_cut -ELSE: - cdef struct Harmonic_dumbbell_bond_parameters: - double k1 - double k2 - double r - double r_cut cdef extern from "bonded_interactions/bonded_interaction_data.hpp": - #* Parameters for n-body tabulated potential (n=2,3,4). */ + # Parameters for n-body tabulated potential (n=2,3,4). cdef struct Tabulated_bond_parameters: int type TabulatedPotential * pot IF ELECTROSTATICS: cdef extern from "bonded_interactions/bonded_interaction_data.hpp": - #* Parameters for Bonded Coulomb p3m sr */ + # Parameters for Bonded Coulomb p3m sr cdef struct Bonded_coulomb_sr_bond_parameters: double q1q2 ELSE: @@ -342,14 +328,14 @@ cdef extern from "bonded_interactions/bonded_interaction_data.hpp": double drmax2 double drmax2i - #* Parameters for oif_global_forces */ + # Parameters for oif_global_forces cdef struct Oif_global_forces_bond_parameters: double A0_g double ka_g double V0 double kv - #* Parameters for oif_local_forces */ + # Parameters for oif_local_forces cdef struct Oif_local_forces_bond_parameters: double r0 double ks @@ -361,13 +347,13 @@ cdef extern from "bonded_interactions/bonded_interaction_data.hpp": double kal double kvisc - #* Parameters for harmonic bond Potential */ + # Parameters for harmonic bond Potential cdef struct Harmonic_bond_parameters: double k double r double r_cut - #* Parameters for thermalized bond */ + # Parameters for thermalized bond cdef struct Thermalized_bond_parameters: double temp_com double gamma_com @@ -375,35 +361,35 @@ cdef extern from "bonded_interactions/bonded_interaction_data.hpp": double gamma_distance double r_cut - #* Parameters for Bonded Coulomb */ + # Parameters for Bonded Coulomb cdef struct Bonded_coulomb_bond_parameters: double prefactor - #* Parameters for three body angular potential (bond_angle_harmonic). + # Parameters for three body angular potential (bond_angle_harmonic). cdef struct Angle_harmonic_bond_parameters: double bend double phi0 - #* Parameters for three body angular potential (bond_angle_cosine). + # Parameters for three body angular potential (bond_angle_cosine). cdef struct Angle_cosine_bond_parameters: double bend double phi0 double cos_phi0 double sin_phi0 - #* Parameters for three body angular potential (bond_angle_cossquare). + # Parameters for three body angular potential (bond_angle_cossquare). cdef struct Angle_cossquare_bond_parameters: double bend double phi0 double cos_phi0 - #* Parameters for four body angular potential (dihedral-angle potentials). */ + # Parameters for four body angular potential (dihedral-angle potentials). cdef struct Dihedral_bond_parameters: double mult double bend double phase - #* Parameters for n-body overlapped potential (n=2,3,4). 
*/ + # Parameters for n-body overlapped potential (n=2,3,4). cdef struct Overlap_bond_parameters: char * filename int type @@ -413,30 +399,26 @@ cdef extern from "bonded_interactions/bonded_interaction_data.hpp": double * para_b double * para_c - #* Parameters for one-directional harmonic potential */ - cdef struct Umbrella_bond_parameters: - double k - int dir - double r - - #* Parameters for subt-LJ potential */ + # Parameters for subt-LJ potential cdef struct Subt_lj_bond_parameters: double k double r double r2 - #* Parameters for the rigid_bond/SHAKE/RATTLE ALGORITHM */ + # Parameters for the rigid_bond/SHAKE/RATTLE ALGORITHM cdef struct Rigid_bond_parameters: - #*Length of rigid bond/Constrained Bond*/ + # Length of rigid bond/Constrained Bond # double d - #*Square of the length of Constrained Bond*/ + # Square of the length of Constrained Bond double d2 - #*Positional Tolerance/Accuracy value for termination of RATTLE/SHAKE iterations during position corrections*/ + # Positional Tolerance/Accuracy value for termination of RATTLE/SHAKE + # iterations during position corrections double p_tol - #*Velocity Tolerance/Accuracy for termination of RATTLE/SHAKE iterations during velocity corrections */ + # Velocity Tolerance/Accuracy for termination of RATTLE/SHAKE + # iterations during velocity corrections double v_tol - #* Parameters for IBM Triel */ + # Parameters for IBM Triel cdef cppclass tElasticLaw: pass @@ -455,24 +437,25 @@ cdef extern from "bonded_interactions/bonded_interaction_data.hpp": double k1 double k2 - #* Parameters for IBM Tribend */ + # Parameters for IBM Tribend cdef struct IBM_Tribend_Parameters: double kb double theta0 - #* Parameters for IBM VolCons */ + # Parameters for IBM VolCons cdef struct IBM_VolCons_Parameters: int softID double kappaV double volRef - #* Parameters for Quartic */ + # Parameters for Quartic cdef struct Quartic_bond_parameters: double k0, k1 double r double r_cut - #* Union in which to store the parameters of an individual bonded interaction */ + # Union in which to store the parameters of an individual bonded + # interaction cdef union Bond_parameters: Fene_bond_parameters fene Oif_global_forces_bond_parameters oif_global_forces @@ -496,7 +479,7 @@ cdef extern from "bonded_interactions/bonded_interaction_data.hpp": cdef struct Bonded_ia_parameters: int type int num - #* union to store the different bonded interaction parameters. */ + # union to store the different bonded interaction parameters. Bond_parameters p vector[Bonded_ia_parameters] bonded_ia_params @@ -588,6 +571,5 @@ cdef extern from "bonded_interactions/bonded_interaction_data.hpp": BONDED_IA_IBM_TRIEL, BONDED_IA_IBM_TRIBEND, BONDED_IA_IBM_VOLUME_CONSERVATION, - BONDED_IA_UMBRELLA, BONDED_IA_THERMALIZED_DIST BONDED_IA_QUARTIC diff --git a/src/python/espressomd/particle_data.pyx b/src/python/espressomd/particle_data.pyx index 2d17fa4a8f8..3fa32c849a5 100644 --- a/src/python/espressomd/particle_data.pyx +++ b/src/python/espressomd/particle_data.pyx @@ -450,7 +450,7 @@ cdef class ParticleHandle: When using particle dipoles, the dipole moment is co-aligned with the particle director. Setting the director thus modifies the dipole moment orientation (:attr:`espressomd.particle_data.ParticleHandle.dip`) - and vice verca. + and vice versa. See also :ref:`Rotational degrees of freedom and particle anisotropy`. 
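A short usage sketch of the director/dipole co-alignment described above, assuming a build with the ``ROTATION`` and ``DIPOLES`` features::

    import espressomd

    system = espressomd.System(box_l=[10.0, 10.0, 10.0])
    p = system.part.add(pos=[5.0, 5.0, 5.0], rotation=[True, True, True], dipm=1.3)
    p.director = [0.0, 0.0, 1.0]
    print(p.dip)       # approximately [0, 0, 1.3]: the dipole follows the director
    p.dip = [1.0, 0.0, 0.0]
    print(p.director)  # approximately [1, 0, 0]: the director follows the dipole
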
director : (3,) array_like of :obj:`float` diff --git a/src/shapes/CMakeLists.txt b/src/shapes/CMakeLists.txt index 8fe0fcd05bf..d8592bc3e15 100644 --- a/src/shapes/CMakeLists.txt +++ b/src/shapes/CMakeLists.txt @@ -12,6 +12,6 @@ target_include_directories( install(TARGETS EspressoShapes LIBRARY DESTINATION ${PYTHON_INSTDIR}/espressomd) -if(WITH_UNIT_TESTS) +if(WITH_TESTS) add_subdirectory(unit_tests) -endif(WITH_UNIT_TESTS) +endif(WITH_TESTS) diff --git a/src/utils/include/utils/Accumulator.hpp b/src/utils/include/utils/Accumulator.hpp index f805282f462..5292ed81771 100644 --- a/src/utils/include/utils/Accumulator.hpp +++ b/src/utils/include/utils/Accumulator.hpp @@ -20,6 +20,7 @@ #define CORE_UTILS_ACCUMULATOR #include +#include #include #include diff --git a/src/utils/include/utils/math/bspline.hpp b/src/utils/include/utils/math/bspline.hpp index 1de6289bd53..42426ef9b82 100644 --- a/src/utils/include/utils/math/bspline.hpp +++ b/src/utils/include/utils/math/bspline.hpp @@ -26,6 +26,7 @@ #include namespace Utils { +/** @brief Formula of the B-spline. */ template DEVICE_QUALIFIER auto bspline(int i, T x) -> std::enable_if_t<(order > 0) && (order <= 7), T> { @@ -36,15 +37,14 @@ DEVICE_QUALIFIER auto bspline(int i, T x) switch (order) { case 1: return 1.0; - case 2: { + case 2: switch (i) { case 0: return 0.5 - x; case 1: return 0.5 + x; } - } - case 3: { + case 3: switch (i) { case 0: return 0.5 * sqr(0.5 - x); @@ -53,8 +53,7 @@ DEVICE_QUALIFIER auto bspline(int i, T x) case 2: return 0.5 * sqr(0.5 + x); } - - case 4: { + case 4: switch (i) { case 0: return (1.0 + x * (-6.0 + x * (12.0 - x * 8.0))) / 48.0; @@ -65,8 +64,7 @@ DEVICE_QUALIFIER auto bspline(int i, T x) case 3: return (1.0 + x * (6.0 + x * (12.0 + x * 8.0))) / 48.0; } - } - case 5: { + case 5: switch (i) { case 0: return (1.0 + x * (-8.0 + x * (24.0 + x * (-32.0 + x * 16.0)))) / 384.0; @@ -79,8 +77,7 @@ DEVICE_QUALIFIER auto bspline(int i, T x) case 4: return (1.0 + x * (8.0 + x * (24.0 + x * (32.0 + x * 16.0)))) / 384.0; } - } - case 6: { + case 6: switch (i) { case 0: return (1.0 + @@ -111,8 +108,7 @@ DEVICE_QUALIFIER auto bspline(int i, T x) x * (10.0 + x * (40.0 + x * (80.0 + x * (80.0 + x * 32.0))))) / 3840.0; } - } - case 7: { + case 7: switch (i) { case 0: return (1.0 + @@ -158,13 +154,18 @@ DEVICE_QUALIFIER auto bspline(int i, T x) 46080.0; } } - } - } DEVICE_THROW(std::runtime_error("Internal interpolation error.")); return T{}; } +/** + * @brief Calculate B-splines. + * @param i knot number, using 0-based indexing + * @param x position in the range (-0.5, 0.5) + * @param k order of the B-spline, using 1-based indexing, i.e. a + * B-spline of order @p k is a polynomial of degree k-1 + */ template auto bspline(int i, T x, int k) { switch (k) { case 1: @@ -186,6 +187,7 @@ template auto bspline(int i, T x, int k) { return 0.0; } +/** @brief Derivative of the B-spline. */ template inline T bspline_d(int i, T x) { static_assert(order <= 7, ""); DEVICE_ASSERT(i < order); diff --git a/src/utils/include/utils/math/triangle_functions.hpp b/src/utils/include/utils/math/triangle_functions.hpp index f9721da6e85..5e2e3ff80b2 100644 --- a/src/utils/include/utils/math/triangle_functions.hpp +++ b/src/utils/include/utils/math/triangle_functions.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ -#ifndef UTILS_MATH_TRIANGEL_FUNCTIONS_HPP -#define UTILS_MATH_TRIANGEL_FUNCTIONS_HPP +#ifndef UTILS_MATH_TRIANGLE_FUNCTIONS_HPP +#define UTILS_MATH_TRIANGLE_FUNCTIONS_HPP #include "utils/Vector.hpp" #include "utils/constants.hpp" @@ -32,7 +32,7 @@ namespace Utils { * * The sign convention is such that P1P2, P1P3 and * the normal form a right-handed system. - * The normal vector is not normalized, e.g. its length + * The normal vector is not normalized, i.e. its length * is arbitrary. */ inline Vector3d get_n_triangle(const Vector3d &P1, const Vector3d &P2, @@ -51,32 +51,32 @@ inline double area_triangle(const Vector3d &P1, const Vector3d &P2, return 0.5 * get_n_triangle(P1, P2, P3).norm(); } -/** @brief Returns the angle between two triangles in 3D space - - -Returns the angle between two triangles in 3D space given by points P1P2P3 and -P2P3P4. Note, that the common edge is given as the second and the third -argument. Here, the angle can have values from 0 to 2 * PI, depending on the -orientation of the two triangles. So the angle can be convex or concave. For -each triangle, an inward direction has been defined as the direction of one of -the two normal vectors. Particularly, for triangle P1P2P3 it is the vector N1 = -P2P1 x P2P3 and for triangle P2P3P4 it is N2 = P2P3 x P2P4. The method first -computes the angle between N1 and N2, which gives always value between 0 and PI -and then it checks whether this value must be corrected to a value between PI -and 2 * PI. - -As an example, consider 4 points A,B,C,D in space given by coordinates A = -[1,1,1], B = [2,1,1], C = [1,2,1], D = [1,1,2]. We want to determine the angle -between triangles ABC and ACD. In case that the orientations of the triangle ABC -is [0,0,1] and orientation of ACD is [1,0,0], then the resulting angle must be -PI/2.0. To get correct result, note that the common edge is AC, and one must -call the method as angle_btw_triangles(B,A,C,D). With this call we have ensured -that N1 = AB x AC (which coincides with [0,0,1]) and N2 = AC x AD (which -coincides with [1,0,0]). Alternatively, if the orientations of the two triangles -were the opposite, the correct call would be angle_btw_triangles(B,C,A,D) so -that N1 = CB x CA and N2 = CA x CD. - -*/ +/** + * @brief Computes the angle between two triangles in 3D space + * + * Returns the angle between two triangles in 3D space given by points P1P2P3 + * and P2P3P4. Note, that the common edge is given as the second and the third + * argument. Here, the angle can have values from 0 to 2 * PI, depending on the + * orientation of the two triangles. So the angle can be convex or concave. For + * each triangle, an inward direction has been defined as the direction of one + * of the two normal vectors. Particularly, for triangle P1P2P3 it is the vector + * N1 = P2P1 x P2P3 and for triangle P2P3P4 it is N2 = P2P3 x P2P4. The method + * first computes the angle between N1 and N2, which gives always value between + * 0 and PI and then it checks whether this value must be corrected to a value + * between PI and 2 * PI. + * + * As an example, consider 4 points A,B,C,D in space given by coordinates + * A = [1,1,1], B = [2,1,1], C = [1,2,1], D = [1,1,2]. We want to determine + * the angle between triangles ABC and ACD. In case the orientation of the + * triangle ABC is [0,0,1] and orientation of ACD is [1,0,0], the resulting + * angle must be PI/2.0. To get correct results, note that the common edge is + * AC, and one must call the method as angle_btw_triangles(B,A,C,D). 
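The worked example in this docstring (points A, B, C, D and the call ``angle_btw_triangles(B,A,C,D)``) can be checked numerically. A NumPy sketch of the stated convention, leaving out the correction for reflex angles (greater than pi)::

    import numpy as np

    def angle_btw_triangles(P1, P2, P3, P4):
        P1, P2, P3, P4 = map(np.asarray, (P1, P2, P3, P4))
        n1 = np.cross(P1 - P2, P3 - P2)  # N1 = P2P1 x P2P3
        n2 = np.cross(P3 - P2, P4 - P2)  # N2 = P2P3 x P2P4
        cosine = n1 @ n2 / (np.linalg.norm(n1) * np.linalg.norm(n2))
        return np.pi - np.arccos(np.clip(cosine, -1.0, 1.0))

    A, B, C, D = [1, 1, 1], [2, 1, 1], [1, 2, 1], [1, 1, 2]
    print(angle_btw_triangles(B, A, C, D))  # approximately pi/2, as in the example
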
+ * With this call we have ensured that N1 = AB x AC (which coincides with + * [0,0,1]) and N2 = AC x AD (which coincides with [1,0,0]). Alternatively, + * if the orientations of the two triangles were the opposite, the correct + * call would be angle_btw_triangles(B,C,A,D) so that N1 = CB x CA + * and N2 = CA x CD. + */ inline double angle_btw_triangles(const Vector3d &P1, const Vector3d &P2, const Vector3d &P3, const Vector3d &P4) { auto const normal1 = get_n_triangle(P2, P1, P3); @@ -90,10 +90,9 @@ inline double angle_btw_triangles(const Vector3d &P1, const Vector3d &P2, auto const phi = Utils::pi() - std::acos(cosine); // Now we need to determine, if the angle between two triangles is less than - // Pi or more than Pi. To do this, we check - // if the point P4 lies in the halfspace given by triangle P1P2P3 and the - // normal to this triangle. If yes, we have - // angle less than Pi, if not, we have angle more than Pi. + // Pi or greater than Pi. To do this, we check if the point P4 lies in the + // halfspace given by triangle P1P2P3 and the normal to this triangle. If yes, + // we have an angle less than Pi, otherwise we have an angle greater than Pi. // General equation of the plane is n_x*x + n_y*y + n_z*z + d = 0 where // (n_x,n_y,n_z) is the normal to the plane. // Point P1 lies in the plane, therefore d = -(n_x*P1_x + n_y*P1_y + n_z*P1_z) diff --git a/src/utils/include/utils/quaternion.hpp b/src/utils/include/utils/quaternion.hpp index 6dd066d208c..f2ac8b84a28 100644 --- a/src/utils/include/utils/quaternion.hpp +++ b/src/utils/include/utils/quaternion.hpp @@ -115,12 +115,12 @@ template struct quat_traits> { static inline scalar_type read_element_idx(std::size_t i, quat_type const &q) { - assert(i < 4 and i >= 0); + assert(i < 4); return q[i]; } static inline scalar_type &write_element_idx(std::size_t i, quat_type &q) { - assert(i < 4 and i >= 0); + assert(i < 4); return q[i]; } }; diff --git a/src/utils/tests/CMakeLists.txt b/src/utils/tests/CMakeLists.txt index 273a53e62a2..a15e40a12f8 100644 --- a/src/utils/tests/CMakeLists.txt +++ b/src/utils/tests/CMakeLists.txt @@ -8,7 +8,8 @@ unit_test(NAME NumeratedContainer_test SRC NumeratedContainer_test.cpp DEPENDS unit_test(NAME keys_test SRC keys_test.cpp DEPENDS EspressoUtils) unit_test(NAME Cache_test SRC Cache_test.cpp DEPENDS EspressoUtils) unit_test(NAME histogram SRC histogram.cpp DEPENDS EspressoUtils) -unit_test(NAME accumulator SRC accumulator.cpp DEPENDS EspressoUtils) +unit_test(NAME accumulator SRC accumulator.cpp DEPENDS EspressoUtils + Boost::serialization) unit_test(NAME int_pow SRC int_pow_test.cpp DEPENDS EspressoUtils) unit_test(NAME sgn SRC sgn_test.cpp DEPENDS EspressoUtils) unit_test(NAME AS_erfc_part SRC AS_erfc_part_test.cpp DEPENDS EspressoUtils) @@ -18,9 +19,12 @@ unit_test(NAME permute_ifield_test SRC permute_ifield_test.cpp DEPENDS EspressoUtils) unit_test(NAME vec_rotate SRC vec_rotate_test.cpp DEPENDS EspressoUtils) unit_test(NAME tensor_product SRC tensor_product_test.cpp DEPENDS EspressoUtils) +unit_test(NAME linear_interpolation SRC linear_interpolation_test.cpp DEPENDS + EspressoUtils) unit_test(NAME interpolation_gradient SRC interpolation_gradient_test.cpp DEPENDS EspressoUtils) unit_test(NAME interpolation SRC interpolation_test.cpp DEPENDS EspressoUtils) +unit_test(NAME bspline_test SRC bspline_test.cpp DEPENDS EspressoUtils) unit_test(NAME Span_test SRC Span_test.cpp DEPENDS EspressoUtils) unit_test(NAME matrix_vector_product SRC matrix_vector_product.cpp DEPENDS EspressoUtils) @@ -29,7 +33,8 @@ 
unit_test(NAME tuple_test SRC tuple_test.cpp DEPENDS EspressoUtils) unit_test(NAME Array_test SRC Array_test.cpp DEPENDS Boost::serialization EspressoUtils) unit_test(NAME contains_test SRC contains_test.cpp DEPENDS EspressoUtils) -unit_test(NAME Counter_test SRC Counter_test.cpp DEPENDS EspressoUtils) +unit_test(NAME Counter_test SRC Counter_test.cpp DEPENDS EspressoUtils + Boost::serialization) unit_test(NAME RunningAverage_test SRC RunningAverage_test.cpp DEPENDS EspressoUtils) unit_test(NAME for_each_pair_test SRC for_each_pair_test.cpp DEPENDS @@ -57,6 +62,8 @@ unit_test(NAME integral_parameter_test SRC integral_parameter_test.cpp DEPENDS unit_test(NAME flatten_test SRC flatten_test.cpp DEPENDS EspressoUtils) unit_test(NAME pack_test SRC pack_test.cpp DEPENDS Boost::serialization EspressoUtils) +unit_test(NAME unordered_map_test SRC unordered_map_test.cpp DEPENDS + Boost::serialization EspressoUtils) unit_test(NAME u32_to_u64_test SRC u32_to_u64_test.cpp DEPENDS EspressoUtils NUM_PROC 1) unit_test(NAME gather_buffer_test SRC gather_buffer_test.cpp DEPENDS @@ -71,4 +78,5 @@ unit_test(NAME all_gatherv_test SRC all_gatherv_test.cpp DEPENDS EspressoUtils Boost::mpi MPI::MPI_CXX) unit_test(NAME sendrecv_test SRC sendrecv_test.cpp DEPENDS EspressoUtils Boost::mpi MPI::MPI_CXX EspressoUtils NUM_PROC 3) -unit_test(NAME matrix_test SRC matrix_test.cpp DEPENDS EspressoUtils NUM_PROC 1) +unit_test(NAME matrix_test SRC matrix_test.cpp DEPENDS EspressoUtils + Boost::serialization NUM_PROC 1) diff --git a/src/utils/tests/Counter_test.cpp b/src/utils/tests/Counter_test.cpp index d6f9a87395f..b78e3b8b2b3 100644 --- a/src/utils/tests/Counter_test.cpp +++ b/src/utils/tests/Counter_test.cpp @@ -21,9 +21,15 @@ #define BOOST_TEST_MODULE Utils::Counter test #define BOOST_TEST_DYN_LINK -#include "utils/Counter.hpp" #include +#include "utils/Counter.hpp" + +#include +#include + +#include + using Utils::Counter; BOOST_AUTO_TEST_CASE(ctor) { @@ -52,3 +58,19 @@ BOOST_AUTO_TEST_CASE(increment) { BOOST_CHECK_EQUAL(c.value(), 6); BOOST_CHECK_EQUAL(c.initial_value(), 5); } + +BOOST_AUTO_TEST_CASE(serialization) { + auto c = Counter(5); + c.increment(); + + std::stringstream stream; + boost::archive::text_oarchive out_ar(stream); + out_ar << c; + + boost::archive::text_iarchive in_ar(stream); + Counter c_deserialized; + in_ar >> c_deserialized; + + BOOST_CHECK_EQUAL(c_deserialized.value(), 6); + BOOST_CHECK_EQUAL(c_deserialized.initial_value(), 5); +} diff --git a/src/utils/tests/accumulator.cpp b/src/utils/tests/accumulator.cpp index 363746b9a0a..2d8f0c6c9be 100644 --- a/src/utils/tests/accumulator.cpp +++ b/src/utils/tests/accumulator.cpp @@ -22,22 +22,44 @@ #include "utils/Accumulator.hpp" +#include +#include + #include #include +#include #include BOOST_AUTO_TEST_CASE(accumulator) { + auto const test_data1 = std::vector{{0.0, 1.0, 2.0, 3.0}}; + auto const test_data2 = std::vector{{1.5, 3.5, 3.5, 4.5}}; + auto const test_mean = std::vector{{0.75, 2.25, 2.75, 3.75}}; + auto const test_var = std::vector{{1.125, 3.125, 1.125, 1.125}}; + auto const test_stderr = std::vector{{0.75, 1.25, 0.75, 0.75}}; + + // check statistics auto acc = Utils::Accumulator(4); - auto test_data1 = std::vector{{0.0, 1.0, 2.0, 3.0}}; - auto test_data2 = std::vector{{1.5, 3.5, 3.5, 4.5}}; acc(test_data1); BOOST_CHECK(acc.mean() == test_data1); BOOST_CHECK(acc.variance() == std::vector(4, std::numeric_limits::max())); acc(test_data2); - BOOST_CHECK((acc.mean() == std::vector{{0.75, 2.25, 2.75, 3.75}})); - BOOST_CHECK( - (acc.variance() == 
std::vector{{1.125, 3.125, 1.125, 1.125}})); - BOOST_CHECK( - (acc.std_error() == std::vector{{0.75, 1.25, 0.75, 0.75}})); + BOOST_CHECK((acc.mean() == test_mean)); + BOOST_CHECK((acc.variance() == test_var)); + BOOST_CHECK((acc.std_error() == test_stderr)); + BOOST_CHECK_THROW(acc(std::vector{{1, 2, 3, 4, 5}}), + std::runtime_error); + + // check serialization + std::stringstream stream; + boost::archive::text_oarchive out_ar(stream); + out_ar << acc; + + auto acc_deserialized = Utils::Accumulator(4); + boost::archive::text_iarchive in_ar(stream); + in_ar >> acc_deserialized; + + BOOST_CHECK((acc_deserialized.mean() == test_mean)); + BOOST_CHECK((acc_deserialized.variance() == test_var)); + BOOST_CHECK((acc_deserialized.std_error() == test_stderr)); } diff --git a/src/utils/tests/bspline_test.cpp b/src/utils/tests/bspline_test.cpp new file mode 100644 index 00000000000..606d424092a --- /dev/null +++ b/src/utils/tests/bspline_test.cpp @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2020 The ESPResSo project + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#define BOOST_TEST_MODULE bspline test +#define BOOST_TEST_DYN_LINK +#include + +#include + +#include + +#include + +template +using integer_list = boost::mpl::list...>; +using test_bspline_orders = integer_list; + +BOOST_AUTO_TEST_CASE_TEMPLATE(bspline_normalization, T, test_bspline_orders) { + // check that B-splines are normalized + constexpr auto order = T::value; + constexpr auto tol = 1e-10; + constexpr std::array x_values{-0.49999, 0.25, 0., 0.25, 0.49999}; + + for (auto const x : x_values) { + double sum = 0; + for (int i = 0; i < order; ++i) { + sum += Utils::bspline(i, x); + } + BOOST_CHECK_SMALL(sum - 1., tol); + } +} + +BOOST_AUTO_TEST_CASE_TEMPLATE(bspline_symmetry, T, test_bspline_orders) { + // check that B-splines are symmetric + constexpr auto order = T::value; + constexpr auto order_mid = (order % 2 == 0) ? 
+      order / 2 : (order + 1) / 2;
+  constexpr auto tol = 1e-10;
+  constexpr std::array x_values{-0.49999, 0.25, 0.1};
+
+  for (int i = 0; i < order_mid; ++i) {
+    for (auto const x : x_values) {
+      auto const b_left = Utils::bspline<order>(i, x);
+      auto const b_right = Utils::bspline<order>(order - i - 1, -x);
+      BOOST_CHECK_SMALL(b_left - b_right, tol);
+    }
+  }
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(bspline_derivatives, T, test_bspline_orders) {
+  // check that B-spline derivatives are correct
+  constexpr auto order = T::value;
+  constexpr auto tol = 1e-8;
+  constexpr std::array x_values{-0.49999, 0.25, 0., 0.25, 0.49999};
+
+  // approximate a derivative using the two-point central difference formula
+  auto bspline_d_approx = [](int i, double x, int order) {
+    using Utils::bspline;
+    constexpr auto h = 1e-6;
+    return (bspline(i, x + h / 2, order) - bspline(i, x - h / 2, order)) / h;
+  };
+
+  for (int i = 0; i < order; ++i) {
+    for (auto const x : x_values) {
+      auto const d_val = Utils::bspline_d<order>(i, x);
+      auto const d_ref = bspline_d_approx(i, x, order);
+      BOOST_CHECK_SMALL(d_val - d_ref, tol);
+    }
+  }
+}
diff --git a/src/utils/tests/histogram.cpp b/src/utils/tests/histogram.cpp
index 6b6bc4f163e..1ec922ae09f 100644
--- a/src/utils/tests/histogram.cpp
+++ b/src/utils/tests/histogram.cpp
@@ -21,6 +21,7 @@
 #include <boost/test/unit_test.hpp>
 
 #include "utils/Histogram.hpp"
+#include "utils/constants.hpp"
 
 #include
 #include
@@ -59,3 +60,41 @@ BOOST_AUTO_TEST_CASE(histogram) {
   BOOST_CHECK_THROW(hist.update(std::vector<double>{{1.0, 5.0, 3.0}}),
                     std::invalid_argument);
 }
+
+BOOST_AUTO_TEST_CASE(cylindrical_histogram) {
+  constexpr auto pi = Utils::pi();
+  std::array<size_t, 3> n_bins{{10, 10, 10}};
+  std::array<std::pair<double, double>, 3> limits{{std::make_pair(0.0, 2.0),
+                                                   std::make_pair(0.0, 2 * pi),
+                                                   std::make_pair(0.0, 10.0)}};
+  size_t n_dims_data = 3;
+  auto hist =
+      Utils::CylindricalHistogram<double, 3>(n_bins, n_dims_data, limits);
+  // Check getters.
+  BOOST_CHECK(hist.get_limits() == limits);
+  BOOST_CHECK(hist.get_n_bins() == n_bins);
+  BOOST_CHECK((hist.get_bin_sizes() ==
+               std::array<double, 3>{{2.0 / 10.0, 2 * pi / 10.0, 1.0}}));
+  // Check that histogram is initialized to zero.
+  BOOST_CHECK(hist.get_histogram() ==
+              std::vector<double>(n_dims_data * 1000, 0.0));
+  // Check that histogram still empty if data is out of bounds.
+  hist.update(std::vector<double>{{1.0, 3 * pi, 1.0}});
+  BOOST_CHECK(hist.get_histogram() ==
+              std::vector<double>(n_dims_data * 1000, 0.0));
+  // Check if putting in data at the first bin is set correctly.
+  hist.update(
+      std::vector<double>{{limits[0].first, limits[1].first, limits[2].first}});
+  BOOST_CHECK((hist.get_histogram())[0] == 1.0);
+  BOOST_CHECK((hist.get_histogram())[1] == 1.0);
+  BOOST_CHECK((hist.get_histogram())[2] == 1.0);
+  // Check if weights are correctly set.
+  hist.update(
+      std::vector<double>{{limits[0].first, limits[1].first, limits[2].first}},
+      std::vector<double>{{10.0, 10.0, 10.0}});
+  BOOST_CHECK((hist.get_histogram())[0] == 11.0);
+  BOOST_CHECK((hist.get_histogram())[1] == 11.0);
+  BOOST_CHECK((hist.get_histogram())[2] == 11.0);
+  BOOST_CHECK_THROW(hist.update(std::vector<double>{{1.0, pi}}),
+                    std::invalid_argument);
+}
diff --git a/src/utils/tests/linear_interpolation_test.cpp b/src/utils/tests/linear_interpolation_test.cpp
new file mode 100644
index 00000000000..ee89744d14d
--- /dev/null
+++ b/src/utils/tests/linear_interpolation_test.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2020 The ESPResSo project
+ *
+ * This file is part of ESPResSo.
+ *
+ * ESPResSo is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * ESPResSo is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#define BOOST_TEST_MODULE Utils::linear_interpolation test
+#define BOOST_TEST_DYN_LINK
+#include <boost/test/unit_test.hpp>
+
+#include "utils/linear_interpolation.hpp"
+
+#include <vector>
+
+BOOST_AUTO_TEST_CASE(interpolation) {
+  using Utils::linear_interpolation;
+  // tabulated values for f(x) = x^2
+  auto const table = std::vector<double>{{1., 4., 9.}};
+  constexpr auto tol = 1e-12;
+  BOOST_CHECK_SMALL(linear_interpolation(table, 1., 1., 1.) - 1., tol);
+  BOOST_CHECK_SMALL(linear_interpolation(table, 1., 1., 2.) - 4., tol);
+  BOOST_CHECK_SMALL(linear_interpolation(table, 1., 1., 1.5) - 2.5, tol);
+  BOOST_CHECK_SMALL(linear_interpolation(table, 1., 1., 3. - 1e-12) - 9.,
+                    1e-10);
+}
diff --git a/src/utils/tests/matrix_test.cpp b/src/utils/tests/matrix_test.cpp
index 182e6e6697e..8030490d2ba 100644
--- a/src/utils/tests/matrix_test.cpp
+++ b/src/utils/tests/matrix_test.cpp
@@ -18,11 +18,17 @@
 #define BOOST_TEST_MODULE Utils::Matrix test
 #define BOOST_TEST_DYN_LINK
-#include
+#include
 #include
+
 #include
 #include
+#include <boost/archive/text_iarchive.hpp>
+#include <boost/archive/text_oarchive.hpp>
+
+#include <sstream>
+
 
 BOOST_AUTO_TEST_CASE(matrix) {
   Utils::Matrix<int, 2, 2> mat2{{8, 2}, {3, 4}};
   BOOST_CHECK((mat2(0, 0) == 8));
@@ -54,6 +60,23 @@ BOOST_AUTO_TEST_CASE(identity_matrix) {
   BOOST_CHECK((id_mat(1, 0) == 0));
 }
 
+BOOST_AUTO_TEST_CASE(matrix_serialization) {
+  Utils::Matrix<int, 2, 2> mat2{{8, 2}, {3, 4}};
+
+  std::stringstream stream;
+  boost::archive::text_oarchive out_ar(stream);
+  out_ar << mat2;
+
+  Utils::Matrix<int, 2, 2> mat2_deserialized;
+  boost::archive::text_iarchive in_ar(stream);
+  in_ar >> mat2_deserialized;
+
+  BOOST_CHECK((mat2_deserialized(0, 0) == 8));
+  BOOST_CHECK((mat2_deserialized(1, 0) == 3));
+  BOOST_CHECK((mat2_deserialized(0, 1) == 2));
+  BOOST_CHECK((mat2_deserialized(1, 1) == 4));
+}
+
 BOOST_AUTO_TEST_CASE(type_deduction) {
   static_assert(
       std::is_same,
diff --git a/src/utils/tests/unordered_map_test.cpp b/src/utils/tests/unordered_map_test.cpp
new file mode 100644
index 00000000000..7be7c218fb0
--- /dev/null
+++ b/src/utils/tests/unordered_map_test.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2020 The ESPResSo project
+ *
+ * This file is part of ESPResSo.
+ *
+ * ESPResSo is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * ESPResSo is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#define BOOST_TEST_MODULE unordered_map test
+#define BOOST_TEST_DYN_LINK
+#include <boost/test/unit_test.hpp>
+
+#include <utils/serialization/unordered_map.hpp>
+
+#include <boost/archive/text_iarchive.hpp>
+#include <boost/archive/text_oarchive.hpp>
+
+#include <sstream>
+#include <unordered_map>
+
+BOOST_AUTO_TEST_CASE(serialization) {
+
+  std::unordered_map<int, char> const map_original{{65, 'A'}, {66, 'B'}};
+
+  std::stringstream stream;
+  boost::archive::text_oarchive out_ar(stream);
+  out_ar << map_original;
+
+  std::unordered_map<int, char> map_deserialized;
+  boost::archive::text_iarchive in_ar(stream);
+  in_ar >> map_deserialized;
+
+  BOOST_CHECK_EQUAL(map_deserialized.size(), 2);
+  BOOST_CHECK_EQUAL(map_deserialized.count(65), 1);
+  BOOST_CHECK_EQUAL(map_deserialized.count(66), 1);
+  BOOST_CHECK_EQUAL(map_deserialized.at(65), 'A');
+  BOOST_CHECK_EQUAL(map_deserialized.at(66), 'B');
+}
diff --git a/testsuite/python/CMakeLists.txt b/testsuite/python/CMakeLists.txt
index 70b85fac535..ec8ff4c9dd6 100644
--- a/testsuite/python/CMakeLists.txt
+++ b/testsuite/python/CMakeLists.txt
@@ -16,7 +16,7 @@ function(PYTHON_TEST)
     set(TEST_MAX_NUM_PROC ${TEST_NP})
   endif()
 
-  if("${TEST_MAX_NUM_PROC}" GREATER ${TEST_NP})
+  if(${TEST_MAX_NUM_PROC} GREATER ${TEST_NP})
    set(TEST_NUM_PROC ${TEST_NP})
   else()
    set(TEST_NUM_PROC ${TEST_MAX_NUM_PROC})
@@ -40,9 +40,9 @@ function(PYTHON_TEST)
     set_tests_properties(${TEST_NAME} PROPERTIES RESOURCE_LOCK GPU)
   endif()
 
-  if(${TEST_MAX_NUM_PROC} LESS 2)
+  if(${TEST_MAX_NUM_PROC} EQUAL 1)
     set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${TEST_LABELS}")
-  elseif(${TEST_MAX_NUM_PROC} LESS 3)
+  elseif(${TEST_MAX_NUM_PROC} EQUAL 2)
     set_tests_properties(${TEST_NAME} PROPERTIES LABELS
                          "${TEST_LABELS};parallel")
   else()
@@ -252,23 +252,22 @@ add_custom_target(
 
 add_custom_target(
   check_python_parallel_odd
-  COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT} -j${TEST_NP} -L
-          parallel_odd ${CTEST_ARGS} --output-on-failure)
+  COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT} -L parallel_odd
+          ${CTEST_ARGS} --output-on-failure)
 add_dependencies(check_python_parallel_odd pypresso python_test_data)
 add_custom_target(
-  check_python_gpu
-  COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT} -j${TEST_NP} -L gpu
-          ${CTEST_ARGS} --output-on-failure)
+  check_python_gpu COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT} -L
+                           gpu ${CTEST_ARGS} --output-on-failure)
 add_dependencies(check_python_gpu pypresso python_test_data)
 add_custom_target(
   check_python_skip_long
-  COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT} -j${TEST_NP} -LE
-          long ${CTEST_ARGS} --output-on-failure)
+  COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT} -LE long
+          ${CTEST_ARGS} --output-on-failure)
 add_dependencies(check_python_skip_long pypresso python_test_data)
 add_custom_target(
   check_python COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT}
-                       -j${TEST_NP} ${CTEST_ARGS} --output-on-failure)
+                       ${CTEST_ARGS} --output-on-failure)
 add_dependencies(check_python pypresso python_test_data)
 
 add_dependencies(check check_python)
diff --git a/testsuite/python/domain_decomposition.py b/testsuite/python/domain_decomposition.py
index 6c629009a09..d0af21976c1 100644
--- a/testsuite/python/domain_decomposition.py
+++ b/testsuite/python/domain_decomposition.py
@@ -23,13 +23,15 @@ class DomainDecomposition(ut.TestCase):
     system = espressomd.System(box_l=[50.0, 50.0, 50.0])
+    original_node_grid = tuple(system.cell_system.node_grid)
 
     def setUp(self):
         self.system.part.clear()
         self.system.cell_system.set_domain_decomposition(
             use_verlet_lists=False)
+        self.system.cell_system.node_grid = self.original_node_grid
 
-    def test_resort(self):
+    def check_resort(self):
         n_part = 2351
 
         # Add the particles on node 0, so that they have to be resorted
@@ -51,6 +53,16 @@ def test_resort(self):
         # is still in a valid state after the particle exchange
         self.assertEqual(sum(self.system.part[:].type), n_part)
 
+    def test_resort(self):
+        self.check_resort()
+
+    @ut.skipIf(system.cell_system.get_state()["n_nodes"] != 4,
+               "Skipping test: only runs for n_nodes >= 4")
+    def test_resort_alternating(self):
+        # check particle resorting when the left and right cells are different
+        self.system.cell_system.node_grid = [4, 1, 1]
+        self.check_resort()
+
     def test_position_rounding(self):
         """This places a particle on the box boundary,
            with parameters that could cause problems with
diff --git a/testsuite/python/elc_vs_analytic.py b/testsuite/python/elc_vs_analytic.py
index 935da2d102d..8f186608061 100644
--- a/testsuite/python/elc_vs_analytic.py
+++ b/testsuite/python/elc_vs_analytic.py
@@ -35,7 +35,7 @@ class ELC_vs_analytic(ut.TestCase):
     delta_mid_bot = 39. / 41.
     distance = 1.
-    number_samples = 25
+    number_samples = 6 if '@WITH_COVERAGE@' == 'ON' else 12
     minimum_distance_to_wall = 0.1
     zPos = np.linspace(
         minimum_distance_to_wall,
@@ -46,7 +46,9 @@ class ELC_vs_analytic(ut.TestCase):
 
     def test_elc(self):
         """
-        Testing ELC against the analytic solution for an infinite large simulation box with dielectric contrast on the bottom of the box, which can be calculated analytically with image charges.
+        Testing ELC against the analytic solution for an infinitely large
+        simulation box with dielectric contrast on the bottom of the box,
+        which can be calculated analytically with image charges.
         """
         self.system.part.add(id=1, pos=self.system.box_l / 2., q=self.q[0])
         self.system.part.add(id=2, pos=self.system.box_l / 2. + [0, 0, self.distance],
diff --git a/testsuite/python/lb_poiseuille_cylinder.py b/testsuite/python/lb_poiseuille_cylinder.py
index f0dee7984a7..981c0c23e28 100644
--- a/testsuite/python/lb_poiseuille_cylinder.py
+++ b/testsuite/python/lb_poiseuille_cylinder.py
@@ -89,6 +89,9 @@ def prepare(self):
         accuracy.
         """
 
+        # disable periodicity except in the flow direction
+        self.system.periodicity = np.logical_not(self.params['axis'])
+
         local_lb_params = LB_PARAMS.copy()
         local_lb_params['ext_force_density'] = np.array(
             self.params['axis']) * EXT_FORCE
diff --git a/testsuite/scripts/benchmarks/CMakeLists.txt b/testsuite/scripts/benchmarks/CMakeLists.txt
index f0ed182ca12..37ea519e169 100644
--- a/testsuite/scripts/benchmarks/CMakeLists.txt
+++ b/testsuite/scripts/benchmarks/CMakeLists.txt
@@ -26,6 +26,6 @@ benchmark_test(FILE test_p3m.py)
 
 add_custom_target(
   check_benchmarks COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT}
-                           -j${TEST_NP} ${CTEST_ARGS} --output-on-failure)
+                           ${CTEST_ARGS} --output-on-failure)
 
 add_dependencies(check_benchmarks pypresso local_benchmarks)
diff --git a/testsuite/scripts/importlib_wrapper.py b/testsuite/scripts/importlib_wrapper.py
index aee693f8f85..bcb7cff7736 100644
--- a/testsuite/scripts/importlib_wrapper.py
+++ b/testsuite/scripts/importlib_wrapper.py
@@ -440,14 +440,14 @@ def protect_ipython_magics(code):
     formatted comment. This is necessary whenever the code must be parsed
     through ``ast``, because magic commands are not valid Python statements.
     """
-    return re.sub("^(%+)(?=[a-z])", "#_IPYTHON_MAGIC_\g<1>", code, flags=re.M)
+    return re.sub("^(%+)(?=[a-z])", r"#_IPYTHON_MAGIC_\g<1>", code, flags=re.M)
 
 
 def deprotect_ipython_magics(code):
     """
     Reverse the action of :func:`protect_ipython_magics`.
""" - return re.sub("^#_IPYTHON_MAGIC_(%+)(?=[a-z])", "\g<1>", code, flags=re.M) + return re.sub("^#_IPYTHON_MAGIC_(%+)(?=[a-z])", r"\g<1>", code, flags=re.M) class GetMatplotlibPyplot(ast.NodeVisitor): diff --git a/testsuite/scripts/samples/CMakeLists.txt b/testsuite/scripts/samples/CMakeLists.txt index 816db7ec58a..382e83b8007 100644 --- a/testsuite/scripts/samples/CMakeLists.txt +++ b/testsuite/scripts/samples/CMakeLists.txt @@ -85,6 +85,6 @@ sample_test(FILE test_immersed_boundary.py) add_custom_target( check_samples COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT} - -j${TEST_NP} ${CTEST_ARGS} --output-on-failure) + ${CTEST_ARGS} --output-on-failure) add_dependencies(check_samples pypresso local_samples) diff --git a/testsuite/scripts/tutorials/CMakeLists.txt b/testsuite/scripts/tutorials/CMakeLists.txt index 03a862c1906..ab580f4e633 100644 --- a/testsuite/scripts/tutorials/CMakeLists.txt +++ b/testsuite/scripts/tutorials/CMakeLists.txt @@ -46,6 +46,6 @@ tutorial_test(FILE test_constant_pH__interactions.py) add_custom_target( check_tutorials COMMAND ${CMAKE_CTEST_COMMAND} --timeout ${TEST_TIMEOUT} - -j${TEST_NP} ${CTEST_ARGS} --output-on-failure) + ${CTEST_ARGS} --output-on-failure) add_dependencies(check_tutorials pypresso local_tutorials)