accumulators: Calculate the correlation shape
Use the shape(s) of the correlated observable(s) to deduce the shape
of the correlation output. Remove the time lags and sample sizes from
the correlation matrix and provide them via the new methods
`correlation_lags()` and `correlation_sizes()` instead.
jngrad committed Aug 3, 2020
1 parent 45e5a4b commit acfa0b4
Showing 16 changed files with 183 additions and 103 deletions.
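
For scripts that consumed the old combined output, the pattern used throughout the diffs below reconstructs it from the new methods. A minimal sketch of the migration, assuming a finalized correlator `c_pos` whose correlation has three components per time lag (the variable names are illustrative):

    import numpy as np

    # Before this commit, result() returned a 2d array whose columns were
    # [lag time, number of samples, corr_1, ..., corr_n].
    # Now result() holds only the correlation values, shaped like the
    # correlated observable; lags and sample counts have their own getters.
    lags = c_pos.correlation_lags()    # lag times tau
    sizes = c_pos.correlation_sizes()  # number of samples per lag
    corr = c_pos.result()              # correlation values only

    # Rebuild the pre-change column layout for file output
    np.savetxt("msd.dat", np.column_stack((lags, sizes, corr.reshape([-1, 3]))))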
4 changes: 2 additions & 2 deletions doc/tutorials/01-lennard_jones/01-lennard_jones.ipynb
@@ -549,9 +549,9 @@
"source": [
"fig3 = plt.figure(num=None, figsize=(10, 6), dpi=80, facecolor='w', edgecolor='k')\n",
"fig3.set_tight_layout(False)\n",
"lag_time = msd[:, 0]\n",
"lag_time = msd_corr.correlation_lags()\n",
"for i in range(0, N_PART, 30):\n",
" msd_particle_i = msd[:, 2+i*3] + msd[:, 3+i*3] + msd[:, 4+i*3]\n",
" msd_particle_i = msd[:, i, 0] + msd[:, i, 1] + msd[:, i, 2]\n",
" plt.plot(lag_time, msd_particle_i,\n",
" 'o-', linewidth=2, label=\"particle id =\" + str(i))\n",
"plt.xlabel(r'Lag time $\\tau$ [$\\delta t$]', fontsize=20)\n",
@@ -87,6 +87,7 @@
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import logging\n",
"import sys\n",
"\n",
@@ -138,7 +139,9 @@
" for i in range(LOOPS):\n",
" system.integrator.run(STEPS)\n",
" correlator.finalize()\n",
" msd_results.append(correlator.result())\n",
" msd_results.append(np.column_stack((correlator.correlation_lags(),\n",
" correlator.correlation_sizes(),\n",
" correlator.result().reshape([-1, 3]))))\n",
"\n",
"logging.info(\"Sampling finished.\")"
]
@@ -147,8 +147,8 @@
"correlator.finalize()\n",
"corrdata = correlator.result()\n",
"\n",
"tau = corrdata[:, 0]\n",
"msd = corrdata[:, 2] + corrdata[:, 3] + corrdata[:, 4]\n",
"tau = correlator.correlation_lags()\n",
"msd = corrdata[:, 0] + corrdata[:, 1] + corrdata[:, 2]\n",
"\n",
"rh = system.analysis.calc_rh(chain_start=0, number_of_chains=1, chain_length=N_MONOMERS)[0]"
]
@@ -119,8 +119,8 @@
corrdata = correlator.result()

rh_results[index] = np.average(rhs)
tau = corrdata[1:, 0]
msd = corrdata[1:, 2] + corrdata[1:, 3] + corrdata[1:, 4]
tau = correlator.correlation_lags()[1:]
msd = corrdata[1:, 0] + corrdata[1:, 1] + corrdata[1:, 2]
np.save('msd_{}'.format(N), np.c_[tau, msd])

np.save('rh.npy', rh_results)
5 changes: 4 additions & 1 deletion doc/tutorials/06-active_matter/06-active_matter.ipynb
@@ -385,7 +385,10 @@
"# Finalize the correlator and write to disk\n",
"system.auto_update_accumulators.remove(msd)\n",
"msd.finalize()\n",
"numpy.savetxt(\"output.dat\", msd.result())\n",
"numpy.savetxt(\"output.dat\",\n",
" numpy.column_stack((msd.correlation_lags(),\n",
" msd.correlation_sizes(),\n",
" msd.result().reshape([-1, 3]))))\n",
"```\n",
"\n",
"Here, the observable `pos_id` is set to the particle positions of the\n",
15 changes: 12 additions & 3 deletions doc/tutorials/06-active_matter/solutions/enhanced_diffusion.py
@@ -119,12 +119,21 @@
# Finalize the accumulators and write to disk
system.auto_update_accumulators.remove(msd)
msd.finalize()
np.savetxt("{}/msd_{}_{}.dat".format(outdir, vel, run), msd.result())
np.savetxt("{}/msd_{}_{}.dat".format(outdir, vel, run),
np.column_stack((msd.correlation_lags(),
msd.correlation_sizes(),
msd.result().reshape([-1, 3]))))

system.auto_update_accumulators.remove(vacf)
vacf.finalize()
np.savetxt("{}/vacf_{}_{}.dat".format(outdir, vel, run), vacf.result())
np.savetxt("{}/vacf_{}_{}.dat".format(outdir, vel, run),
np.column_stack((vacf.correlation_lags(),
vacf.correlation_sizes(),
vacf.result().reshape([-1, 1]))))

system.auto_update_accumulators.remove(avacf)
avacf.finalize()
np.savetxt("{}/avacf_{}_{}.dat".format(outdir, vel, run), avacf.result())
np.savetxt("{}/avacf_{}_{}.dat".format(outdir, vel, run),
np.column_stack((avacf.correlation_lags(),
avacf.correlation_sizes(),
avacf.result().reshape([-1, 1]))))
15 changes: 9 additions & 6 deletions samples/diffusion_coefficient.py
@@ -55,27 +55,30 @@
c_pos.finalize()
c_vel.finalize()

np.savetxt("msd.dat", c_pos.result())
np.savetxt("vacf.dat", c_vel.result())
msd = np.column_stack((c_pos.correlation_lags(),
c_pos.correlation_sizes(),
c_pos.result().reshape([-1, 3])))
vacf = np.column_stack((c_vel.correlation_lags(),
c_vel.correlation_sizes(),
c_vel.result().reshape([-1, 1])))
np.savetxt("msd.dat", msd)
np.savetxt("vacf.dat", vacf)

# Integral of vacf via Green-Kubo
# D= 1/3 int_0^infty <v(t_0)v(t_0+t)> dt

vacf = c_vel.result()
# Integrate with trapezoidal rule
I = np.trapz(vacf[:, 2], vacf[:, 0])
ratio = 1. / 3. * I / (kT / gamma)
print("Ratio of measured and expected diffusion coefficients from Green-Kubo:",
ratio)

# Check MSD
msd = c_pos.result()


def expected_msd(x):
return 2. * kT / gamma * x


# Check MSD
print("Ratio of expected and measured msd")
print("#time ratio_x ratio_y ratio_z")
for i in range(4, msd.shape[0], 4):
8 changes: 6 additions & 2 deletions samples/observables_correlators.py
@@ -69,6 +69,10 @@

# Finalize the correlation calculation and write the results to a file
c.finalize()
np.savetxt("res.dat", c.result())
np.savetxt("res.dat", np.column_stack((c.correlation_lags(),
c.correlation_sizes(),
c.result().reshape([-1, 3]))))
fcs.finalize()
np.savetxt("fcs.dat", fcs.result())
np.savetxt("fcs.dat", np.column_stack((fcs.correlation_lags(),
fcs.correlation_sizes(),
fcs.result().reshape([-1, 1]))))
1 change: 1 addition & 0 deletions src/core/accumulators/AccumulatorBase.hpp
@@ -19,6 +19,7 @@
#ifndef CORE_ACCUMULATORS_ACCUMULATORBASE
#define CORE_ACCUMULATORS_ACCUMULATORBASE

#include <cstddef>
#include <vector>

namespace Accumulators {
52 changes: 43 additions & 9 deletions src/core/accumulators/Correlator.cpp
@@ -25,11 +25,13 @@
#include <boost/archive/binary_oarchive.hpp>
#include <boost/iostreams/device/array.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/range/algorithm/transform.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>

#include <utils/math/sqr.hpp>

#include <algorithm>
#include <limits>

namespace {
@@ -208,14 +210,27 @@ void Correlator::initialize() {
}
if (corr_operation_name == "componentwise_product") {
m_dim_corr = dim_A;
m_shape = A_obs->shape();
corr_operation = &componentwise_product;
m_correlation_args = Utils::Vector3d{0, 0, 0};
} else if (corr_operation_name == "tensor_product") {
m_dim_corr = dim_A * dim_B;
auto const shape_A = A_obs->shape();
auto const shape_B = B_obs->shape();
if (!(shape_A.size() == 1 || (shape_A.size() == 2 && shape_A[0] == 1))) {
throw std::runtime_error(
"tensor_product requires vectors, but observable A is a matrix");
}
if (!(shape_B.size() == 1 || (shape_B.size() == 2 && shape_B[0] == 1))) {
throw std::runtime_error(
"tensor_product requires vectors, but observable B is a matrix");
}
m_shape = {dim_A, dim_B};
corr_operation = &tensor_product;
m_correlation_args = Utils::Vector3d{0, 0, 0};
} else if (corr_operation_name == "square_distance_componentwise") {
m_dim_corr = dim_A;
m_shape = A_obs->shape();
corr_operation = &square_distance_componentwise;
m_correlation_args = Utils::Vector3d{0, 0, 0};
} else if (corr_operation_name == "fcs_acf") {
@@ -230,9 +245,25 @@
if (dim_A % 3)
throw std::runtime_error("dimA must be divisible by 3 for fcs_acf");
m_dim_corr = dim_A / 3;
m_shape = A_obs->shape();
if (m_shape.back() != 3)
throw std::runtime_error(
"the last dimension of dimA must be 3 for fcs_acf");
m_shape.pop_back();
corr_operation = &fcs_acf;
} else if (corr_operation_name == "scalar_product") {
m_dim_corr = 1;
auto const shape_A = A_obs->shape();
auto const shape_B = B_obs->shape();
if (!(shape_A.size() == 1 || (shape_A.size() == 2 && shape_A[0] == 1))) {
throw std::runtime_error(
"scalar_product requires vectors, but observable A is a matrix");
}
if (!(shape_B.size() == 1 || (shape_B.size() == 2 && shape_B[0] == 1))) {
throw std::runtime_error(
"scalar_product requires vectors, but observable B is a matrix");
}
m_shape = {1};
corr_operation = &scalar_product;
m_correlation_args = Utils::Vector3d{0, 0, 0};
} else {
@@ -489,29 +520,31 @@ int Correlator::finalize() {
}

std::vector<double> Correlator::get_correlation() {
std::vector<double> res;

// time + n_sweeps + corr_1...corr_n
size_t const cols = 2 + m_dim_corr;
res.resize(m_n_result * cols);
std::vector<double> res(m_n_result * m_dim_corr);

for (size_t i = 0; i < m_n_result; i++) {
auto const index = cols * i;
res[index + 0] = tau[i] * m_dt;
res[index + 1] = n_sweeps[i];
auto const index = m_dim_corr * i;
for (size_t k = 0; k < m_dim_corr; k++) {
res[index + 2 + k] = (n_sweeps[i] > 0) ? result[i][k] / n_sweeps[i] : 0;
res[index + k] = (n_sweeps[i] > 0) ? result[i][k] / n_sweeps[i] : 0;
}
}
return res;
}

std::vector<double> Correlator::get_correlation_lags() const {
std::vector<double> res(m_n_result);
boost::transform(tau, res.begin(),
[dt = m_dt](auto const &a) { return a * dt; });
return res;
}

std::string Correlator::get_internal_state() const {
std::stringstream ss;
boost::archive::binary_oarchive oa(ss);

oa << t;
oa << m_n_result;
oa << m_shape;
oa << A;
oa << B;
oa << result;
@@ -534,6 +567,7 @@ void Correlator::set_internal_state(std::string const &state) {

ia >> t;
ia >> m_n_result;
ia >> m_shape;
ia >> A;
ia >> B;
ia >> result;
13 changes: 10 additions & 3 deletions src/core/accumulators/Correlator.hpp
@@ -183,8 +183,14 @@ class Correlator : public AccumulatorBase {
/** Return correlation result */
std::vector<double> get_correlation();
std::vector<size_t> shape() const override {
return {m_n_result, 2 + m_dim_corr};
std::vector<size_t> shape = m_shape;
shape.insert(shape.begin(), m_n_result);
return shape;
}
std::vector<int> get_correlation_sweeps() const {
return std::vector<int>(n_sweeps.begin(), n_sweeps.end());
}
std::vector<double> get_correlation_lags() const;

int tau_lin() const { return m_tau_lin; }
double tau_max() const { return m_tau_max; }
@@ -250,8 +256,9 @@

double m_last_update;

size_t dim_A; ///< dimensionality of A
size_t dim_B; ///< dimensionality of B
size_t dim_A; ///< dimensionality of A
size_t dim_B; ///< dimensionality of B
std::vector<size_t> m_shape; ///< dimensionality of the correlation

using correlation_operation_type = std::vector<double> (*)(
std::vector<double> const &, std::vector<double> const &,
23 changes: 13 additions & 10 deletions src/python/espressomd/accumulators.py
@@ -259,20 +259,23 @@ class Correlator(ScriptInterfaceHelper):

def result(self):
"""
Returns
-------
numpy.ndarray
The result of the correlation function as a 2d-array.
The first column contains the values of the lag time tau.
The second column contains the number of values used to
perform the averaging of the correlation. Further columns contain
the values of the correlation function. The number of these columns
is the dimension of the output of the correlation operation.
Get correlation.
"""
return np.array(self.call_method(
"get_correlation")).reshape(self.shape())

def correlation_lags(self):
"""
Get time lags of the correlation.
"""
return np.array(self.call_method("correlation_lags"))

def correlation_sizes(self):
"""
Get sample sizes for each time lag.
"""
return np.array(self.call_method("correlation_sizes"), dtype=int)


@script_interface_register
class AutoUpdateAccumulators(ScriptObjectRegistry):
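
As an illustrative aside on the Python API documented above: result() now keeps the shape of the correlated observable, with the number of recorded time lags prepended, so per-particle components can be indexed directly. A short sketch assuming an MSD correlator `msd_corr` built from a `ParticlePositions` observable over `N_PART` particles (names borrowed from the tutorial diff above):

    corr = msd_corr.result()              # shape (n_lags, N_PART, 3)
    lags = msd_corr.correlation_lags()    # shape (n_lags,)
    sizes = msd_corr.correlation_sizes()  # shape (n_lags,)

    # MSD of particle 0: sum of the x, y and z components
    msd_particle_0 = corr[:, 0, 0] + corr[:, 0, 1] + corr[:, 0, 2]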
4 changes: 4 additions & 0 deletions src/script_interface/accumulators/Correlator.hpp
@@ -84,6 +84,10 @@ class Correlator : public AccumulatorBase {
correlator()->finalize();
if (method == "get_correlation")
return correlator()->get_correlation();
if (method == "correlation_lags")
return correlator()->get_correlation_lags();
if (method == "correlation_sizes")
return correlator()->get_correlation_sweeps();

return AccumulatorBase::call_method(method, parameters);
}
3 changes: 2 additions & 1 deletion testsuite/python/brownian_dynamics.py
@@ -211,14 +211,15 @@ def test_msd_global_temp(self):

# Check MSD
msd = c_pos.result()
tau = c_pos.correlation_lags()
system.auto_update_accumulators.clear()

def expected_msd(x):
return 2. * kT / gamma * x

for i in range(2, 6):
np.testing.assert_allclose(
msd[i, 2:5], expected_msd(msd[i, 0]), rtol=0.02)
msd[i], expected_msd(tau[i]), rtol=0.02)

@utx.skipIfMissingFeatures("VIRTUAL_SITES")
def test_07__virtual(self):