diff --git a/doc/tutorials/12-constant_pH/12-constant_pH.ipynb b/doc/tutorials/12-constant_pH/12-constant_pH.ipynb
index ea99de6d7ed..04897ca01b4 100644
--- a/doc/tutorials/12-constant_pH/12-constant_pH.ipynb
+++ b/doc/tutorials/12-constant_pH/12-constant_pH.ipynb
@@ -451,7 +451,7 @@
    "source": [
     "The simulation results for the non-interacting case very well compare with the analytical solution of Henderson-Hasselbalch equation. There are only minor deviations, and the estimated errors are small too. This situation will change when we introduce interactions.\n",
     "\n",
-    "It is useful to check whether the estimated errors are consistent with the assumptions that were used to obtain them. To do this, we follow [Janke2000] to estimate the number of uncorrelated samples per block, and check whether each block contains a sufficient number of uncorrelated samples (we choose 10 uncorrelated samples per block as the threshold value).\n",
+    "It is useful to check whether the estimated errors are consistent with the assumptions that were used to obtain them. To do this, we follow [Janke2002](https://www.physik.uni-leipzig.de/~janke/Paper/nic10_423_2002.pdf) to estimate the number of uncorrelated samples per block, and check whether each block contains a sufficient number of uncorrelated samples (we choose 10 uncorrelated samples per block as the threshold value).\n",
     "\n",
     "Intentionally, we make our simulation slightly too short, so that it does not produce enough uncorrelated samples. We encourage the reader to vary the number of blocks or the number of samples to see how the estimated error changes with these parameters."
    ]
   },
diff --git a/src/core/accumulators/Correlator.cpp b/src/core/accumulators/Correlator.cpp
index b4c406f4a9e..389505203e5 100644
--- a/src/core/accumulators/Correlator.cpp
+++ b/src/core/accumulators/Correlator.cpp
@@ -28,6 +28,8 @@
 #include
 #include

+#include <utils/math/sqr.hpp>
+
 #include

 namespace {
@@ -100,7 +102,6 @@ std::vector<double> tensor_product(std::vector<double> const &A,
       *(C_it++) = a * b;
     }
   }
-  assert(C_it == C.end());

   return C;
 }
@@ -118,7 +119,7 @@ std::vector<double> square_distance_componentwise(std::vector<double> const &A,

   std::transform(
       A.begin(), A.end(), B.begin(), C.begin(),
-      [](double a, double b) -> double { return (a - b) * (a - b); });
+      [](double a, double b) -> double { return Utils::sqr(a - b); });

   return C;
 }
@@ -130,7 +131,7 @@ std::vector<double> fcs_acf(std::vector<double> const &A,
                             Utils::Vector3d const &wsquare) {
   if (A.size() != B.size()) {
     throw std::runtime_error(
-        "Error in fcs acf: The vector sizes do not match.");
+        "Error in fcs_acf: The vector sizes do not match.");
   }

   auto const C_size = A.size() / 3;
@@ -145,7 +146,7 @@ std::vector<double> fcs_acf(std::vector<double> const &A,
       auto const &a = A[3 * i + j];
       auto const &b = B[3 * i + j];

-      C[i] -= (a - b) * (a - b) / wsquare[j];
+      C[i] -= Utils::sqr(a - b) / wsquare[j];
     }
   }

@@ -155,42 +156,6 @@ std::vector<double> fcs_acf(std::vector<double> const &A,
   return C;
 }

-int Correlator::get_correlation_time(double *correlation_time) {
-  // We calculate the correlation time for each m_dim_corr by normalizing the
-  // correlation, integrating it and finding out where C(tau)=tau
-  for (unsigned j = 0; j < m_dim_corr; j++) {
-    correlation_time[j] = 0.;
-  }
-
-  // here we still have to fix the stuff a bit!
-  for (unsigned j = 0; j < m_dim_corr; j++) {
-    double C_tau = 1 * m_dt;
-    bool ok_flag = false;
-    for (unsigned k = 1; k < m_n_result - 1; k++) {
-      if (n_sweeps[k] == 0)
-        break;
-      C_tau += (result[k][j] / static_cast<double>(n_sweeps[k]) -
-                A_accumulated_average[j] * B_accumulated_average[j] / n_data /
-                    n_data) /
-               (result[0][j] / n_sweeps[0]) * m_dt * (tau[k] - tau[k - 1]);
-
-      if (exp(-tau[k] * m_dt / C_tau) + 2 * sqrt(tau[k] * m_dt / n_data) >
-          exp(-tau[k - 1] * m_dt / C_tau) +
-              2 * sqrt(tau[k - 1] * m_dt / n_data)) {
-        correlation_time[j] = C_tau * (1 + static_cast<double>(2 * tau[k] + 1) /
-                                               static_cast<double>(n_data));
-        ok_flag = true;
-        break;
-      }
-    }
-    if (!ok_flag) {
-      correlation_time[j] = -1;
-    }
-  }
-
-  return 0;
-}
-
 void Correlator::initialize() {
   hierarchy_depth = 0;
   // Class members are assigned via the initializer list
@@ -260,9 +225,8 @@ void Correlator::initialize() {
       m_correlation_args[2] <= 0) {
     throw std::runtime_error("missing parameter for fcs_acf: w_x w_y w_z");
   }
-  m_correlation_args[0] = m_correlation_args[0] * m_correlation_args[0];
-  m_correlation_args[1] = m_correlation_args[1] * m_correlation_args[1];
-  m_correlation_args[2] = m_correlation_args[2] * m_correlation_args[2];
+  m_correlation_args =
+      Utils::hadamard_product(m_correlation_args, m_correlation_args);
   if (dim_A % 3)
     throw std::runtime_error("dimA must be divisible by 3 for fcs_acf");
   m_dim_corr = static_cast<unsigned int>(dim_A) / 3;
@@ -447,7 +411,6 @@ int Correlator::finalize() {
   // We must now go through the hierarchy and make sure there is space for the
   // new datapoint. For every hierarchy level we have to decide if it is
   // necessary to move something
-  int highest_level_to_compress;

   // mark the correlation as finalized
   finalized = true;
@@ -461,10 +424,9 @@ int Correlator::finalize() {

     while (vals_ll) {
       // Check, if we will want to push the value from the lowest level
+      int highest_level_to_compress = -1;
       if (vals_ll % 2) {
         highest_level_to_compress = ll;
-      } else {
-        highest_level_to_compress = -1;
       }

       int i = ll + 1; // lowest level for which we have to check for compression
@@ -534,14 +496,11 @@ std::vector<double> Correlator::get_correlation() {
   res.resize(m_n_result * cols);

   for (int i = 0; i < m_n_result; i++) {
-    res[cols * i + 0] = tau[i] * m_dt;
-    res[cols * i + 1] = n_sweeps[i];
+    auto const index = cols * i;
+    res[index + 0] = tau[i] * m_dt;
+    res[index + 1] = n_sweeps[i];
     for (int k = 0; k < m_dim_corr; k++) {
-      if (n_sweeps[i] > 0) {
-        res[cols * i + 2 + k] = result[i][k] / n_sweeps[i];
-      } else {
-        res[cols * i + 2 + k] = 0;
-      }
+      res[index + 2 + k] = (n_sweeps[i] > 0) ? result[i][k] / n_sweeps[i] : 0;
     }
   }
   return res;
diff --git a/src/core/accumulators/Correlator.hpp b/src/core/accumulators/Correlator.hpp
index 35c8fd0d51b..51e22047be1 100644
--- a/src/core/accumulators/Correlator.hpp
+++ b/src/core/accumulators/Correlator.hpp
@@ -180,13 +180,6 @@ class Correlator : public AccumulatorBase {
    */
   int finalize();

-  /** Return an estimate of the integrated correlation time
-   *
-   *  We calculate the correlation time for each dim_corr by normalizing the
-   *  correlation, integrating it and finding out where C(tau)=tau;
-   */
-  int get_correlation_time(double *correlation_time);
-
   /** Return correlation result */
   std::vector<double> get_correlation();

diff --git a/src/python/espressomd/accumulators.py b/src/python/espressomd/accumulators.py
index 7036a731da8..ed0f0dd8b5e 100644
--- a/src/python/espressomd/accumulators.py
+++ b/src/python/espressomd/accumulators.py
@@ -130,16 +130,6 @@ class Correlator(ScriptInterfaceHelper):
            :math:`B`, i.e., :math:`C_{i \\cdot l_B + j} = A_i B_j`
            with :math:`l_B` the length of :math:`B`.

-          * ``"complex_conjugate_product"``: assuming that the observables
-            consist of a complex and real part
-            :math:`A=(A_x+iA_y)`, and :math:`B=(B_x+iB_y)`, this
-            operation computes the result :math:`C=(C_x+iC_y)`, as:
-
-            .. math::
-
-                C_x = A_xB_x + A_yB_y\\\\
-                C_y = A_yB_x - A_xB_y
-
          * ``"fcs_acf"``: Fluorescence Correlation Spectroscopy (FCS)
            autocorrelation function, i.e.,
@@ -152,7 +142,8 @@ class Correlator(ScriptInterfaceHelper):
                    - \\frac{\\Delta z_i^2(\\tau)}{w_z^2}
                \\right)
                \\right>

-            where
+            where :math:`N` is the average number of fluorophores in the
+            illumination area,

            .. math::

@@ -170,17 +161,29 @@ class Correlator(ScriptInterfaceHelper):
                    \\frac{2z^2}{w_z^2} \\right).

            The values of :math:`w_x`, :math:`w_y`, and :math:`w_z`
-            are passed to the correlator as ``args``
+            are passed to the correlator as ``args``. The correlator calculates

-            The above equations are a
-            generalization of the formula presented by Hoefling
-            et. al. :cite:`hofling11a`. For more information, see
-            references therein. Per each 3 dimensions of the
+            .. math::
+
+                C_i(\\tau) =
+                \\exp \\left(
+                    - \\frac{\\Delta x_i^2(\\tau)}{w_x^2}
+                    - \\frac{\\Delta y_i^2(\\tau)}{w_y^2}
+                    - \\frac{\\Delta z_i^2(\\tau)}{w_z^2}
+                \\right)
+
+            For every 3 dimensions of the
            observable, one dimension of the correlation output is
            produced. If ``"fcs_acf"`` is used with other observables than
            :class:`espressomd.observables.ParticlePositions`, the physical
            meaning of the result is unclear.
+
+            The above equations are a
+            generalization of the formula presented by Hoefling
+            et al. :cite:`hofling11a`. For more information, see
+            references therein.
+
        delta_N : :obj:`int`
            Number of timesteps between subsequent samples for the auto update mechanism.

@@ -195,10 +198,14 @@ class Correlator(ScriptInterfaceHelper):
        tau_lin : :obj:`int`
            The number of data-points for which the results are linearly spaced
            in ``tau``. This is a parameter of the multiple tau correlator. If you
-            want to use it, make sure that you know how it works. By default, it
-            is set equal to ``tau_max`` which results in the trivial linear
-            correlator. By setting ``tau_lin < tau_max`` the multiple
-            tau correlator is switched on. In many cases, ``tau_lin=16`` is a
+            want to use it, make sure that you know how it works. ``tau_lin`` must
+            be divisible by 2. By setting ``tau_lin`` such that
+            ``tau_max >= dt * delta_N * tau_lin``, the
+            multiple tau correlator is used, otherwise the trivial linear
+            correlator is used. By setting ``tau_lin = 1``, the value will be
+            overridden by ``tau_lin = ceil(tau_max / (dt * delta_N))``, which
+            will result in either the multiple or linear tau correlator.
+            In many cases, ``tau_lin=16`` is a
            good choice but this may strongly depend on the observables you are
            correlating. For more information, we recommend to read
            ref. :cite:`ramirez10a` or to perform your own tests.
diff --git a/testsuite/python/correlation.py b/testsuite/python/correlation.py
index 1d81c82b272..65f24226896 100644
--- a/testsuite/python/correlation.py
+++ b/testsuite/python/correlation.py
@@ -34,7 +34,7 @@ class CorrelatorTest(ut.TestCase):
     def tearDown(self):
         self.system.part.clear()

-    def calc_tau(self, time_step, tau_lin, length):
+    def calc_tau(self, time_step, tau_lin, length, delta_N=1):
         tau = []
         for i in range(tau_lin):
             tau.append(i)
@@ -44,7 +44,7 @@ def calc_tau(self, time_step, tau_lin, length):
         for i in range(0, tau_lin, 2):
             tau.append(p + factor * i)
             factor *= 2
-        return time_step * np.array(tau[:length])
+        return time_step * np.array(tau[:length]) * delta_N

     def test_square_distance_componentwise(self):
         s = self.system
@@ -62,8 +62,8 @@ def test_square_distance_componentwise(self):
         corr = acc.result()

         # Check pickling
-        acc_unpickeled = pickle.loads(pickle.dumps(acc))
-        np.testing.assert_array_equal(corr, acc_unpickeled.result())
+        acc_unpickled = pickle.loads(pickle.dumps(acc))
+        np.testing.assert_array_equal(corr, acc_unpickled.result())

         tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
         np.testing.assert_array_almost_equal(corr[:, 0], tau)
@@ -90,8 +90,8 @@ def test_tensor_product(self):
         corr = acc.result()

         # Check pickling
-        acc_unpickeled = pickle.loads(pickle.dumps(acc))
-        np.testing.assert_array_equal(corr, acc_unpickeled.result())
+        acc_unpickled = pickle.loads(pickle.dumps(acc))
+        np.testing.assert_array_equal(corr, acc_unpickled.result())

         tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
         np.testing.assert_array_almost_equal(corr[:, 0], tau)
@@ -115,8 +115,8 @@ def test_componentwise_product(self):
         corr = acc.result()

         # Check pickling
-        acc_unpickeled = pickle.loads(pickle.dumps(acc))
-        np.testing.assert_array_equal(corr, acc_unpickeled.result())
+        acc_unpickled = pickle.loads(pickle.dumps(acc))
+        np.testing.assert_array_equal(corr, acc_unpickled.result())

         tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
         np.testing.assert_array_almost_equal(corr[:, 0], tau)
@@ -140,14 +140,42 @@ def test_scalar_product(self):
         corr = acc.result()

         # Check pickling
-        acc_unpickeled = pickle.loads(pickle.dumps(acc))
-        np.testing.assert_array_equal(corr, acc_unpickeled.result())
+        acc_unpickled = pickle.loads(pickle.dumps(acc))
+        np.testing.assert_array_equal(corr, acc_unpickled.result())

         tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
         np.testing.assert_array_almost_equal(corr[:, 0], tau)
         for i in range(corr.shape[0]):
             np.testing.assert_array_almost_equal(corr[i, 2:], np.sum(v**2))

+    def test_fcs(self):
+        s = self.system
+        v = np.array([1, 2, 3])
+        s.part.add(id=0, pos=(0, 0, 0), v=v)
+
+        w = np.array([3, 2, 1])
+        obs = espressomd.observables.ParticlePositions(ids=(0,))
+        acc = espressomd.accumulators.Correlator(
+            obs1=obs, tau_lin=10, tau_max=2.0, delta_N=1,
+            corr_operation="fcs_acf", args=w)
+
+        s.integrator.run(100)
+        s.auto_update_accumulators.add(acc)
+        s.integrator.run(2000)
+
+        corr = acc.result()
+
+        # Check pickling
+        acc_unpickled = pickle.loads(pickle.dumps(acc))
+        np.testing.assert_array_equal(corr, acc_unpickled.result())
+
+        tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
+        np.testing.assert_array_almost_equal(corr[:, 0], tau)
+        for i in range(corr.shape[0]):
+            np.testing.assert_array_almost_equal(
+                corr[i, 2:],
+                np.exp(-np.linalg.norm(v / w * tau[i])**2), decimal=10)
+
     def test_correlator_interface(self):
         # test setters and getters
         obs = espressomd.observables.ParticleVelocities(ids=(0,))
@@ -164,6 +192,36 @@ def test_correlator_interface(self):
         self.assertEqual(acc.delta_N, 2)
         # check corr_operation
         self.assertEqual(acc.corr_operation, "scalar_product")
+        # check linear tau correlator and multiple tau correlator
+        dt = self.system.time_step
+        for tau_lin in (10, 20):
+            for delta_N in (1, 2, 10):
+                tau_max = dt * delta_N * tau_lin
+                # linear, multiple and default (=multiple) tau correlator
+                acc_lin = espressomd.accumulators.Correlator(
+                    obs1=obs, tau_lin=tau_lin, tau_max=0.99 * tau_max,
+                    delta_N=delta_N, corr_operation="scalar_product")
+                acc_mul = espressomd.accumulators.Correlator(
+                    obs1=obs, tau_lin=tau_lin, tau_max=1.0 * tau_max,
+                    delta_N=delta_N, corr_operation="scalar_product")
+                acc_def = espressomd.accumulators.Correlator(
+                    obs1=obs, tau_lin=1, tau_max=tau_max,
+                    delta_N=delta_N, corr_operation="scalar_product")
+                corr_lin = acc_lin.result()
+                corr_mul = acc_mul.result()
+                corr_def = acc_def.result()
+                # check tau
+                time_lin = dt * delta_N * np.arange(len(corr_lin))
+                time_mul = self.calc_tau(dt, tau_lin, len(corr_mul), delta_N)
+                np.testing.assert_array_almost_equal(corr_lin[:, 0], time_lin)
+                np.testing.assert_array_almost_equal(corr_mul[:, 0], time_mul)
+                np.testing.assert_array_almost_equal(corr_def[:, 0], time_mul)
+                self.assertEqual(acc_def.tau_lin, tau_lin)
+                # check pickling
+                acc_lin_unpickled = pickle.loads(pickle.dumps(acc_lin))
+                acc_mul_unpickled = pickle.loads(pickle.dumps(acc_mul))
+                np.testing.assert_array_equal(corr_lin, acc_lin_unpickled.result())
+                np.testing.assert_array_equal(corr_mul, acc_mul_unpickled.result())

 if __name__ == "__main__":
diff --git a/testsuite/python/time_series.py b/testsuite/python/time_series.py
index 96ef12ef93d..9524c63ca77 100644
--- a/testsuite/python/time_series.py
+++ b/testsuite/python/time_series.py
@@ -56,9 +56,8 @@ def test_time_series(self):
         time_series = acc.time_series()

         # Check pickling
-        acc_unpickeled = pickle.loads(pickle.dumps(acc))
-        np.testing.assert_array_equal(
-            time_series, acc_unpickeled.time_series())
+        acc_unpickled = pickle.loads(pickle.dumps(acc))
+        np.testing.assert_array_equal(time_series, acc_unpickled.time_series())

         for result, expected in zip(time_series, positions):
             np.testing.assert_array_equal(result, expected)
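
Usage note (not part of the patch): the revised ``tau_lin`` semantics documented in ``accumulators.py`` are easiest to see in a complete script. The sketch below mirrors the API exercised by the tests above (``Correlator``, ``auto_update_accumulators``, ``result()``); the box size, time step, skin, and run length are illustrative values, not taken from the patch.

import espressomd
import espressomd.observables
import espressomd.accumulators

# Minimal system: one ballistic particle, as in the tests above.
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
system.time_step = 0.01
system.cell_system.skin = 0.4
system.part.add(pos=[5.0, 5.0, 5.0], v=[1.0, 2.0, 3.0])

obs = espressomd.observables.ParticleVelocities(ids=(0,))
# tau_max >= dt * delta_N * tau_lin selects the multiple tau correlator;
# tau_lin=1 would derive tau_lin as ceil(tau_max / (dt * delta_N)) instead.
acc = espressomd.accumulators.Correlator(
    obs1=obs, tau_lin=16, tau_max=10.0, delta_N=1,
    corr_operation="scalar_product")
system.auto_update_accumulators.add(acc)
system.integrator.run(10000)

# Column 0: lag time tau, column 1: number of samples per lag,
# columns 2+: correlation values (a constant v.v here, no thermostat).
corr = acc.result()
print(corr[:8])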