diff --git a/CMakeLists.txt b/CMakeLists.txt index e5272ae2de..451392bd88 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -394,6 +394,7 @@ set(CORE_SOURCE src/Format.cpp src/Iteration.cpp src/IterationEncoding.cpp + src/LoadStoreChunk.cpp src/Mesh.cpp src/ParticlePatches.cpp src/ParticleSpecies.cpp @@ -405,6 +406,7 @@ set(CORE_SOURCE src/version.cpp src/auxiliary/Date.cpp src/auxiliary/Filesystem.cpp + src/auxiliary/Future.cpp src/auxiliary/JSON.cpp src/auxiliary/Mpi.cpp src/backend/Attributable.cpp diff --git a/include/openPMD/Dataset.hpp b/include/openPMD/Dataset.hpp index d79380105a..f44faebc7f 100644 --- a/include/openPMD/Dataset.hpp +++ b/include/openPMD/Dataset.hpp @@ -34,6 +34,12 @@ namespace openPMD using Extent = std::vector; using Offset = std::vector; +struct MemorySelection +{ + Offset offset; + Extent extent; +}; + class Dataset { friend class RecordComponent; diff --git a/include/openPMD/Datatype.hpp b/include/openPMD/Datatype.hpp index c18b7cd82b..ce870a80a0 100644 --- a/include/openPMD/Datatype.hpp +++ b/include/openPMD/Datatype.hpp @@ -321,7 +321,8 @@ template inline constexpr Datatype determineDatatype(T &&val) { (void)val; // don't need this, it only has a name for Doxygen - using T_stripped = std::remove_cv_t>; + using T_stripped = + std::remove_extent_t>>; if constexpr (auxiliary::IsPointer_v) { return determineDatatype>(); diff --git a/include/openPMD/IO/ADIOS/ADIOS2File.hpp b/include/openPMD/IO/ADIOS/ADIOS2File.hpp index 5d519a85ad..e44e3ea054 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2File.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2File.hpp @@ -20,11 +20,13 @@ */ #pragma once +#include "openPMD/Dataset.hpp" #include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/IOTask.hpp" #include "openPMD/IO/InvalidatableFile.hpp" #include "openPMD/config.hpp" +#include #if openPMD_HAVE_ADIOS2 #include @@ -106,6 +108,7 @@ struct BufferedUniquePtrPut std::string name; Offset offset; Extent extent; + std::optional memorySelection; UniquePtrWithLambda data; Datatype dtype = Datatype::UNDEFINED; diff --git a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp index fa300da472..a948706cd8 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp @@ -20,9 +20,11 @@ */ #pragma once +#include "openPMD/Dataset.hpp" #include "openPMD/Error.hpp" #include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/ADIOS/ADIOS2FilePosition.hpp" +#include "openPMD/IO/ADIOS/macros.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/AbstractIOHandlerImpl.hpp" #include "openPMD/IO/AbstractIOHandlerImplCommon.hpp" @@ -430,6 +432,7 @@ class ADIOS2IOHandlerImpl adios2::Variable verifyDataset( Offset const &offset, Extent const &extent, + std::optional const &memorySelection, adios2::IO &IO, std::string const &varName) { @@ -511,6 +514,18 @@ class ADIOS2IOHandlerImpl var.SetSelection( {adios2::Dims(offset.begin(), offset.end()), adios2::Dims(extent.begin(), extent.end())}); + + if (memorySelection.has_value()) + { + var.SetMemorySelection( + {adios2::Dims( + memorySelection->offset.begin(), + memorySelection->offset.end()), + adios2::Dims( + memorySelection->extent.begin(), + memorySelection->extent.end())}); + } + return var; } @@ -518,6 +533,7 @@ class ADIOS2IOHandlerImpl { bool noGroupBased = false; bool blosc2bp5 = false; + bool memorySelection = false; } printedWarningsAlready; }; // ADIOS2IOHandlerImpl diff --git 
a/include/openPMD/IO/ADIOS/macros.hpp b/include/openPMD/IO/ADIOS/macros.hpp index 8618573713..8bd7ad9f62 100644 --- a/include/openPMD/IO/ADIOS/macros.hpp +++ b/include/openPMD/IO/ADIOS/macros.hpp @@ -34,6 +34,26 @@ #define openPMD_HAVE_ADIOS2_BP5 0 #endif +namespace detail +{ +template +struct CanTheMemorySelectionBeReset +{ + static constexpr bool value = false; +}; + +template +struct CanTheMemorySelectionBeReset< + Variable, + decltype(std::declval().SetMemorySelection())> +{ + static constexpr bool value = true; +}; +} // namespace detail + +constexpr bool CanTheMemorySelectionBeReset = + detail::CanTheMemorySelectionBeReset>::value; + #else #define openPMD_HAS_ADIOS_2_8 0 diff --git a/include/openPMD/IO/IOTask.hpp b/include/openPMD/IO/IOTask.hpp index 5589db6923..0e218df599 100644 --- a/include/openPMD/IO/IOTask.hpp +++ b/include/openPMD/IO/IOTask.hpp @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -450,6 +451,7 @@ struct OPENPMDAPI_EXPORT Parameter Extent extent = {}; Offset offset = {}; + std::optional memorySelection = std::nullopt; Datatype dtype = Datatype::UNDEFINED; auxiliary::WriteBuffer data; }; diff --git a/include/openPMD/LoadStoreChunk.hpp b/include/openPMD/LoadStoreChunk.hpp new file mode 100644 index 0000000000..e40ec18c91 --- /dev/null +++ b/include/openPMD/LoadStoreChunk.hpp @@ -0,0 +1,290 @@ +#pragma once + +#include "openPMD/Dataset.hpp" +#include "openPMD/auxiliary/Future.hpp" +#include "openPMD/auxiliary/ShareRawInternal.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" + +#include +#include +#include +#include + +namespace openPMD +{ +class RecordComponent; +template +class ConfigureStoreChunkFromBuffer; +template +class ConfigureLoadStoreFromBuffer; +template +class DynamicMemoryView; + +namespace internal +{ + struct LoadStoreConfig + { + Offset offset; + Extent extent; + }; + struct LoadStoreConfigWithBuffer + { + Offset offset; + Extent extent; + std::optional memorySelection; + }; + +} // namespace internal + +namespace auxiliary::detail +{ + using shared_ptr_dataset_types = + map_variant::type; +} // namespace auxiliary::detail + +namespace compose +{ + template + class ConfigureLoadStore; + template + class ConfigureLoadStoreFromBuffer; +} // namespace compose + +enum class EnqueuePolicy +{ + Defer, + Immediate +}; + +namespace core +{ + /* + * Actual data members of `ConfigureLoadStore<>` and methods that don't + * depend on the ChildClass template parameter. By extracting the members to + * this struct, we can pass them around between different instances of the + * class template. Numbers of method instantiations can be reduced. + */ + class ConfigureLoadStore + { + template + friend class compose::ConfigureLoadStore; + template + friend class compose::ConfigureLoadStoreFromBuffer; + + protected: + ConfigureLoadStore(RecordComponent &); + RecordComponent &m_rc; + + std::optional m_offset; + std::optional m_extent; + + [[nodiscard]] auto dim() const -> uint8_t; + auto storeChunkConfig() -> internal::LoadStoreConfig; + + public: + auto getOffset() -> Offset const &; + auto getExtent() -> Extent const &; + /* + * If the type is non-const, then the return type should be + * ConfigureLoadStoreFromBuffer<>, ... + */ + template + struct shared_ptr_return_type_impl + { + using type = ConfigureLoadStoreFromBuffer>; + }; + /* + * ..., but if it is a const type, Load operations make no sense, so the + * return type should be ConfigureStoreChunkFromBuffer<>. 
+ */ + template + struct shared_ptr_return_type_impl + { + using type = + ConfigureStoreChunkFromBuffer>; + }; + + template + using shared_ptr_return_type = + typename shared_ptr_return_type_impl>::type; + + /* + * As loading into unique pointer types makes no sense, the case is + * simpler for unique pointers. Just remove the array extents here. + */ + template + using unique_ptr_return_type = ConfigureStoreChunkFromBuffer< + UniquePtrWithLambda>>; + + // @todo rvalue references..? + template + auto withSharedPtr(std::shared_ptr) -> shared_ptr_return_type; + template + auto withUniquePtr(UniquePtrWithLambda) -> unique_ptr_return_type; + template + auto withUniquePtr(std::unique_ptr) + -> unique_ptr_return_type; + template + auto withRawPtr(T *data) -> shared_ptr_return_type; + template + auto withContiguousContainer(T_ContiguousContainer &data) + -> std::enable_if_t< + auxiliary::IsContiguousContainer_v, + shared_ptr_return_type< + typename T_ContiguousContainer::value_type>>; + + template + [[nodiscard]] auto enqueueStore() -> DynamicMemoryView; + // definition for this one is in RecordComponent.tpp since it needs the + // definition of class RecordComponent. + template + [[nodiscard]] auto enqueueStore(F &&createBuffer) + -> DynamicMemoryView; + + template + [[nodiscard]] auto enqueueLoad() + -> auxiliary::DeferredComputation>; + + template + [[nodiscard]] auto load(EnqueuePolicy) -> std::shared_ptr; + + [[nodiscard]] auto enqueueLoadVariant() + -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types>; + + [[nodiscard]] auto loadVariant(EnqueuePolicy) + -> auxiliary::detail::shared_ptr_dataset_types; + }; + + template + class ConfigureStoreChunkFromBuffer : public ConfigureLoadStore + { + public: + Ptr_Type m_buffer; + std::optional m_mem_select; + + ConfigureStoreChunkFromBuffer(Ptr_Type buffer, ConfigureLoadStore &&); + + auto storeChunkConfig() -> internal::LoadStoreConfigWithBuffer; + + auto enqueueStore() -> auxiliary::DeferredComputation; + + auto store(EnqueuePolicy) -> void; + + /** This intentionally shadows the parent class's enqueueLoad methods in + * order to show a compile error when using enqueueLoad() on an object + * of this class. The parent method can still be accessed through + * as_parent() if needed. + */ + template + auto enqueueLoad() + { + static_assert( + auxiliary::dependent_false_v, + "Cannot load chunk data into a buffer that is const or a " + "unique_ptr."); + } + + template + auto load(EnqueuePolicy) + { + static_assert( + auxiliary::dependent_false_v, + "Cannot load chunk data into a buffer that is const or a " + "unique_ptr."); + } + }; + + template + class ConfigureLoadStoreFromBuffer + : public ConfigureStoreChunkFromBuffer + { + public: + using ConfigureStoreChunkFromBuffer< + Ptr_Type>::ConfigureStoreChunkFromBuffer; + + auto enqueueLoad() -> auxiliary::DeferredComputation; + + auto load(EnqueuePolicy) -> void; + }; +} // namespace core + +namespace compose +{ + /** Basic configuration for a Load/Store operation. + * + * @tparam ChildClass CRT pattern. + * The purpose is that in child classes `return *this` should return + * an instance of the child class, not of ConfigureLoadStore. + * Instantiate with void when using without subclass. + */ + template + class ConfigureLoadStore + { + public: + auto offset(Offset) -> ChildClass &; + auto extent(Extent) -> ChildClass &; + }; + + /** Configuration for a Store operation with a buffer type. 
+ * + * This class does intentionally not support Load operations since there are + * pointer types (const pointers, unique pointers) where Load operations + * make no sense. See the \ref ConfigureLoadStoreFromBuffer class template + * for both Load/Store operations. + * + * @tparam Ptr_Type The type of pointer used internally. + * @tparam ChildClass CRT pattern. + * The purpose is that in child classes `return *this` should return + * an instance of the child class, not of + * ConfigureStoreChunkFromBuffer. Instantiate with void when using without + * subclass. + */ + template + class ConfigureStoreChunkFromBuffer + { + public: + auto memorySelection(MemorySelection) -> ChildClass &; + }; +} // namespace compose + +class ConfigureLoadStore + : public core::ConfigureLoadStore + , public compose::ConfigureLoadStore +{ + friend class RecordComponent; + friend class core::ConfigureLoadStore; + + ConfigureLoadStore(RecordComponent &rc); + ConfigureLoadStore(core::ConfigureLoadStore &&); +}; + +template +class ConfigureStoreChunkFromBuffer + : public core::ConfigureStoreChunkFromBuffer + , public compose::ConfigureLoadStore< + ConfigureStoreChunkFromBuffer> + , public compose::ConfigureStoreChunkFromBuffer< + ConfigureStoreChunkFromBuffer> +{ + friend class core::ConfigureLoadStore; + + using core::ConfigureStoreChunkFromBuffer< + Ptr_Type>::ConfigureStoreChunkFromBuffer; +}; + +template +class ConfigureLoadStoreFromBuffer + : public core::ConfigureLoadStoreFromBuffer + , public compose::ConfigureLoadStore> + , public compose::ConfigureStoreChunkFromBuffer< + ConfigureLoadStoreFromBuffer> +{ + friend class ConfigureLoadStoreCore; + + using core::ConfigureLoadStoreFromBuffer< + Ptr_Type>::ConfigureLoadStoreFromBuffer; +}; +} // namespace openPMD + +#include "openPMD/LoadStoreChunk.tpp" diff --git a/include/openPMD/LoadStoreChunk.tpp b/include/openPMD/LoadStoreChunk.tpp new file mode 100644 index 0000000000..c3c104cebd --- /dev/null +++ b/include/openPMD/LoadStoreChunk.tpp @@ -0,0 +1,64 @@ +#pragma once + +#include "openPMD/LoadStoreChunk.hpp" + +namespace openPMD::core +{ +template +auto ConfigureLoadStore::withSharedPtr(std::shared_ptr data) + -> shared_ptr_return_type +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return shared_ptr_return_type( + std::static_pointer_cast>(std::move(data)), + {std::move(*this)}); +} +template +auto ConfigureLoadStore::withUniquePtr(UniquePtrWithLambda data) + -> unique_ptr_return_type + +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return unique_ptr_return_type( + std::move(data).template static_cast_>(), + {std::move(*this)}); +} +template +auto ConfigureLoadStore::withRawPtr(T *data) -> shared_ptr_return_type +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return shared_ptr_return_type( + auxiliary::shareRaw(data), {std::move(*this)}); +} + +template +auto ConfigureLoadStore::withUniquePtr(std::unique_ptr data) + -> unique_ptr_return_type +{ + return withUniquePtr(UniquePtrWithLambda(std::move(data))); +} +template +auto ConfigureLoadStore::withContiguousContainer(T_ContiguousContainer &data) + -> std::enable_if_t< + auxiliary::IsContiguousContainer_v, + shared_ptr_return_type> +{ + if (!m_extent.has_value() && dim() == 1) + { + m_extent = Extent{data.size()}; + } + return withRawPtr(data.data()); +} +} // namespace openPMD::core diff --git a/include/openPMD/RecordComponent.hpp 
b/include/openPMD/RecordComponent.hpp index ee29a6d7fa..c2ffee50c5 100644 --- a/include/openPMD/RecordComponent.hpp +++ b/include/openPMD/RecordComponent.hpp @@ -22,6 +22,7 @@ #include "openPMD/Dataset.hpp" #include "openPMD/Datatype.hpp" +#include "openPMD/LoadStoreChunk.hpp" #include "openPMD/auxiliary/ShareRaw.hpp" #include "openPMD/auxiliary/TypeTraits.hpp" #include "openPMD/auxiliary/UniquePtr.hpp" @@ -114,6 +115,17 @@ namespace internal class BaseRecordData; } // namespace internal +namespace core +{ + class ConfigureLoadStore; + template + class ConfigureLoadStoreFromBuffer; + template + class ConfigureStoreChunkFromBuffer; + struct VisitorEnqueueLoadVariant; + struct VisitorLoadVariant; +} // namespace core + template class BaseRecord; @@ -135,6 +147,13 @@ class RecordComponent : public BaseRecordComponent friend class MeshRecordComponent; template friend T &internal::makeOwning(T &self, Series); + friend class core::ConfigureLoadStore; + template + friend class core::ConfigureLoadStoreFromBuffer; + template + friend class core::ConfigureStoreChunkFromBuffer; + friend struct core::VisitorEnqueueLoadVariant; + friend struct core::VisitorLoadVariant; public: enum class Allocation @@ -229,8 +248,8 @@ class RecordComponent : public BaseRecordComponent template std::shared_ptr loadChunk(Offset = {0u}, Extent = {-1u}); - using shared_ptr_dataset_types = auxiliary::detail:: - map_variant::type; + using shared_ptr_dataset_types = + auxiliary::detail::shared_ptr_dataset_types; /** std::variant-based version of allocating loadChunk(Offset, Extent) * @@ -268,25 +287,6 @@ class RecordComponent : public BaseRecordComponent template void loadChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Load a chunk of data into pre-allocated memory, array version. - * - * @param data Preallocated, contiguous buffer, large enough to load the - * the requested data into it. - * The shared pointer must own and manage the buffer. - * Optimizations might be implemented based on this - * assumption (e.g. skipping the operation if the backend - * is the unique owner). - * The array-based overload helps avoid having to manually - * specify the delete[] destructor (C++17 feature). - * @param offset Offset within the dataset. Set to {0u} for full selection. - * @param extent Extent within the dataset, counted from the offset. - * Set to {-1u} for full selection. - * If offset is non-zero and extent is {-1u} the leftover - * extent in the record component will be selected. - */ - template - void loadChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Load a chunk of data into pre-allocated memory, raw pointer version. * * @param data Preallocated, contiguous buffer, large enough to load the @@ -305,6 +305,8 @@ class RecordComponent : public BaseRecordComponent template void loadChunkRaw(T *data, Offset offset, Extent extent); + ConfigureLoadStore prepareLoadStore(); + /** Store a chunk of data from a chunk of memory. * * @param data Preallocated, contiguous buffer, large enough to read the @@ -326,18 +328,6 @@ class RecordComponent : public BaseRecordComponent template void storeChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Store a chunk of data from a chunk of memory, array version. - * - * @param data Preallocated, contiguous buffer, large enough to read the - * the specified data from it. - * The array-based overload helps avoid having to manually - * specify the delete[] destructor (C++17 feature). - * @param offset Offset within the dataset. 
- * @param extent Extent within the dataset, counted from the offset. - */ - template - void storeChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Store a chunk of data from a chunk of memory, unique pointer version. * * @param data Preallocated, contiguous buffer, large enough to read the @@ -499,8 +489,22 @@ class RecordComponent : public BaseRecordComponent */ RecordComponent &makeEmpty(Dataset d); - void storeChunk( - auxiliary::WriteBuffer buffer, Datatype datatype, Offset o, Extent e); + void storeChunk_impl( + auxiliary::WriteBuffer buffer, + Datatype datatype, + internal::LoadStoreConfigWithBuffer); + + template + DynamicMemoryView storeChunkSpan_impl(internal::LoadStoreConfig); + template + DynamicMemoryView storeChunkSpanCreateBuffer_impl( + internal::LoadStoreConfig, F &&createBuffer); + + template + void + loadChunk_impl(std::shared_ptr, internal::LoadStoreConfigWithBuffer); + template + std::shared_ptr loadChunkAllocate_impl(internal::LoadStoreConfig); // clang-format off OPENPMD_protected diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp index 7beaae8b9d..e6b8c1a6e7 100644 --- a/include/openPMD/RecordComponent.tpp +++ b/include/openPMD/RecordComponent.tpp @@ -23,6 +23,7 @@ #include "openPMD/Datatype.hpp" #include "openPMD/Error.hpp" +#include "openPMD/LoadStoreChunk.hpp" #include "openPMD/RecordComponent.hpp" #include "openPMD/Span.hpp" #include "openPMD/auxiliary/Memory.hpp" @@ -31,6 +32,7 @@ #include "openPMD/auxiliary/UniquePtr.hpp" #include +#include #include namespace openPMD @@ -60,37 +62,54 @@ template inline std::shared_ptr RecordComponent::loadChunk(Offset o, Extent e) { uint8_t dim = getDimensionality(); + auto operation = prepareLoadStore(); // default arguments // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = o; - if (o.size() == 1u && o.at(0) == 0u && dim > 1u) - offset = Offset(dim, 0u); + if (o.size() != 1u || o.at(0) != 0u || dim <= 1u) + { + operation.offset(std::move(o)); + } // extent = {-1u}: take full size - Extent extent(dim, 1u); - if (e.size() == 1u && e.at(0) == -1u) + if (e.size() != 1u || e.at(0) != -1u) { - extent = getExtent(); - for (uint8_t i = 0u; i < dim; ++i) - extent[i] -= offset[i]; + operation.extent(std::move(e)); } - else - extent = e; - uint64_t numPoints = 1u; - for (auto const &dimensionSize : extent) - numPoints *= dimensionSize; + return operation.load(EnqueuePolicy::Defer); +} + +template +inline std::shared_ptr +RecordComponent::loadChunkAllocate_impl(internal::LoadStoreConfig cfg) +{ + static_assert(!std::is_same_v, "EVIL"); + auto [o, e] = std::move(cfg); + + size_t numPoints = 1; + for (auto val : e) + { + numPoints *= val; + } #if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ (defined(__apple_build_version__) && __clang_major__ < 14) auto newData = std::shared_ptr(new T[numPoints], [](T *p) { delete[] p; }); - loadChunk(newData, offset, extent); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withSharedPtr(newData) + .load(EnqueuePolicy::Defer); return newData; #else auto newData = std::shared_ptr(new T[numPoints]); - loadChunk(newData, offset, extent); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withSharedPtr(newData) + .load(EnqueuePolicy::Defer); return std::static_pointer_cast(std::move(newData)); #endif } @@ -117,10 +136,41 @@ namespace detail }; } // namespace detail -template -inline void -RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) +template +inline void 
RecordComponent::loadChunk( + std::shared_ptr data, Offset o, Extent e) { + static_assert(!std::is_same_v, "EVIL"); + uint8_t dim = getDimensionality(); + auto operation = prepareLoadStore(); + + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + if (o.size() != 1u || o.at(0) != 0u || dim <= 1u) + { + operation.offset(std::move(o)); + } + + // extent = {-1u}: take full size + if (e.size() != 1u || e.at(0) != -1u) + { + operation.extent(std::move(e)); + } + + operation.withSharedPtr(std::move(data)).load(EnqueuePolicy::Defer); +} + +template +inline void RecordComponent::loadChunk_impl( + std::shared_ptr data, + internal::LoadStoreConfigWithBuffer cfg) +{ + if (cfg.memorySelection.has_value()) + { + throw error::WrongAPIUsage( + "Unsupported: Memory selections in chunk loading."); + } + using T = std::remove_cv_t>; Datatype dtype = determineDatatype(data); /* * For constant components, we implement type conversion, so there is @@ -145,24 +195,8 @@ RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) throw std::runtime_error(err_msg); } - uint8_t dim = getDimensionality(); - - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = o; - if (o.size() == 1u && o.at(0) == 0u && dim > 1u) - offset = Offset(dim, 0u); - - // extent = {-1u}: take full size - Extent extent(dim, 1u); - if (e.size() == 1u && e.at(0) == -1u) - { - extent = getExtent(); - for (uint8_t i = 0u; i < dim; ++i) - extent[i] -= offset[i]; - } - else - extent = e; + auto dim = getDimensionality(); + auto [offset, extent, memorySelection] = std::move(cfg); if (extent.size() != dim || offset.size() != dim) { @@ -181,9 +215,6 @@ RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) "Chunk does not reside inside dataset (Dimension on index " + std::to_string(i) + ". 
DS: " + std::to_string(dse[i]) + " - Chunk: " + std::to_string(offset[i] + extent[i]) + ")"); - if (!data) - throw std::runtime_error( - "Unallocated pointer passed during chunk loading."); auto &rc = get(); if (constant()) @@ -198,7 +229,7 @@ RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) if (val.has_value()) { - T *raw_ptr = data.get(); + auto raw_ptr = static_cast(data.get()); std::fill(raw_ptr, raw_ptr + numPoints, *val); } else @@ -223,77 +254,57 @@ RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) } } -template -inline void RecordComponent::loadChunk( - std::shared_ptr ptr, Offset offset, Extent extent) -{ - loadChunk( - std::static_pointer_cast(std::move(ptr)), - std::move(offset), - std::move(extent)); -} - template inline void RecordComponent::loadChunkRaw(T *ptr, Offset offset, Extent extent) { - loadChunk(auxiliary::shareRaw(ptr), std::move(offset), std::move(extent)); + prepareLoadStore() + .offset(std::move(offset)) + .extent(std::move(extent)) + .withRawPtr(ptr) + .load(EnqueuePolicy::Defer); } template inline void RecordComponent::storeChunk(std::shared_ptr data, Offset o, Extent e) { - if (!data) - throw std::runtime_error( - "Unallocated pointer passed during chunk store."); - Datatype dtype = determineDatatype(data); - - /* std::static_pointer_cast correctly reference-counts the pointer */ - storeChunk( - auxiliary::WriteBuffer(std::static_pointer_cast(data)), - dtype, - std::move(o), - std::move(e)); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withSharedPtr(std::move(data)) + .store(EnqueuePolicy::Defer); } template inline void RecordComponent::storeChunk(UniquePtrWithLambda data, Offset o, Extent e) { - if (!data) - throw std::runtime_error( - "Unallocated pointer passed during chunk store."); - Datatype dtype = determineDatatype<>(data); - - storeChunk( - auxiliary::WriteBuffer{std::move(data).template static_cast_()}, - dtype, - std::move(o), - std::move(e)); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withUniquePtr(std::move(data)) + .store(EnqueuePolicy::Defer); } template inline void RecordComponent::storeChunk(std::unique_ptr data, Offset o, Extent e) { - storeChunk( - UniquePtrWithLambda(std::move(data)), std::move(o), std::move(e)); -} - -template -inline void -RecordComponent::storeChunk(std::shared_ptr data, Offset o, Extent e) -{ - storeChunk( - std::static_pointer_cast(std::move(data)), - std::move(o), - std::move(e)); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withUniquePtr(std::move(data)) + .store(EnqueuePolicy::Defer); } template void RecordComponent::storeChunkRaw(T *ptr, Offset offset, Extent extent) { - storeChunk(auxiliary::shareRaw(ptr), std::move(offset), std::move(extent)); + prepareLoadStore() + .offset(std::move(offset)) + .extent(std::move(extent)) + .withRawPtr(ptr) + .store(EnqueuePolicy::Defer); } template @@ -301,39 +312,48 @@ inline typename std::enable_if_t< auxiliary::IsContiguousContainer_v> RecordComponent::storeChunk(T_ContiguousContainer &data, Offset o, Extent e) { - uint8_t dim = getDimensionality(); + auto storeChunkConfig = prepareLoadStore(); - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = o; - if (o.size() == 1u && o.at(0) == 0u) + auto joined_dim = joinedDimension(); + if (!joined_dim.has_value() && (o.size() != 1 || o.at(0) != 0u)) { - if (joinedDimension().has_value()) - { - offset.clear(); - } - else if (dim > 1u) - { - offset = Offset(dim, 0u); - } + 
storeChunkConfig.offset(std::move(o)); + } + if (e.size() != 1 || e.at(0) != -1u) + { + storeChunkConfig.extent(std::move(e)); } - // extent = {-1u}: take full size - Extent extent(dim, 1u); - // avoid outsmarting the user: - // - stdlib data container implement 1D -> 1D chunk to write - if (e.size() == 1u && e.at(0) == -1u && dim == 1u) - extent.at(0) = data.size(); - else - extent = e; - - storeChunk(auxiliary::shareRaw(data.data()), offset, extent); + std::move(storeChunkConfig) + .withContiguousContainer(data) + .store(EnqueuePolicy::Defer); } template inline DynamicMemoryView RecordComponent::storeChunk(Offset o, Extent e, F &&createBuffer) { + return prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .enqueueStore(std::forward(createBuffer)); +} + +template +inline DynamicMemoryView +RecordComponent::storeChunk(Offset offset, Extent extent) +{ + return prepareLoadStore() + .offset(std::move(offset)) + .extent(std::move(extent)) + .enqueueStore(); +} + +template +inline DynamicMemoryView RecordComponent::storeChunkSpanCreateBuffer_impl( + internal::LoadStoreConfig cfg, F &&createBuffer) +{ + auto [o, e] = std::move(cfg); verifyChunk(o, e); /* @@ -389,20 +409,6 @@ RecordComponent::storeChunk(Offset o, Extent e, F &&createBuffer) return DynamicMemoryView{std::move(getBufferView), size, *this}; } -template -inline DynamicMemoryView -RecordComponent::storeChunk(Offset offset, Extent extent) -{ - return storeChunk(std::move(offset), std::move(extent), [](size_t size) { -#if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ - (defined(__apple_build_version__) && __clang_major__ < 14) - return std::shared_ptr{new T[size], [](auto *ptr) { delete[] ptr; }}; -#else - return std::shared_ptr< T[] >{ new T[ size ] }; -#endif - }); -} - namespace detail { template @@ -440,4 +446,13 @@ void RecordComponent::verifyChunk(Offset const &o, Extent const &e) const { verifyChunk(determineDatatype(), o, e); } + +// definitions for LoadStoreChunk.hpp +template +auto core::ConfigureLoadStore::enqueueStore(F &&createBuffer) + -> DynamicMemoryView +{ + return m_rc.storeChunkSpanCreateBuffer_impl( + storeChunkConfig(), std::forward(createBuffer)); +} } // namespace openPMD diff --git a/include/openPMD/auxiliary/Future.hpp b/include/openPMD/auxiliary/Future.hpp new file mode 100644 index 0000000000..d40f7372fb --- /dev/null +++ b/include/openPMD/auxiliary/Future.hpp @@ -0,0 +1,23 @@ +#pragma once + +#include "openPMD/auxiliary/TypeTraits.hpp" + +#include + +namespace openPMD::auxiliary +{ +template +class DeferredComputation +{ + using task_type = std::function; + task_type m_task; + bool m_valid = false; + +public: + DeferredComputation(task_type); + + auto get() -> T; + + [[nodiscard]] auto valid() const noexcept -> bool; +}; +} // namespace openPMD::auxiliary diff --git a/include/openPMD/auxiliary/Memory.hpp b/include/openPMD/auxiliary/Memory.hpp index 66ebb5fd34..62750784bc 100644 --- a/include/openPMD/auxiliary/Memory.hpp +++ b/include/openPMD/auxiliary/Memory.hpp @@ -180,9 +180,10 @@ namespace auxiliary WriteBuffer() : m_buffer(UniquePtrWithLambda()) {} - template - explicit WriteBuffer(Args &&...args) - : m_buffer(std::forward(args)...) 
+ WriteBuffer(std::shared_ptr ptr) : m_buffer(std::move(ptr)) + {} + + WriteBuffer(UniquePtrWithLambda ptr) : m_buffer(std::move(ptr)) {} WriteBuffer(WriteBuffer &&) = default; @@ -196,7 +197,7 @@ namespace auxiliary return *this; } - WriteBuffer const &operator=(UniquePtrWithLambda ptr) + WriteBuffer const &operator=(UniquePtrWithLambda ptr) { m_buffer = std::move(ptr); return *this; diff --git a/include/openPMD/auxiliary/TypeTraits.hpp b/include/openPMD/auxiliary/TypeTraits.hpp index 526746de89..5a6cbb13bb 100644 --- a/include/openPMD/auxiliary/TypeTraits.hpp +++ b/include/openPMD/auxiliary/TypeTraits.hpp @@ -196,5 +196,4 @@ namespace detail using type = std::variant<>; }; } // namespace detail - } // namespace openPMD::auxiliary diff --git a/include/openPMD/auxiliary/UniquePtr.hpp b/include/openPMD/auxiliary/UniquePtr.hpp index 6a64763b13..622a2b4ac2 100644 --- a/include/openPMD/auxiliary/UniquePtr.hpp +++ b/include/openPMD/auxiliary/UniquePtr.hpp @@ -170,10 +170,11 @@ template UniquePtrWithLambda UniquePtrWithLambda::static_cast_() && { using other_type = std::remove_extent_t; + auto original_ptr = this->release(); return UniquePtrWithLambda{ - static_cast(this->release()), - [deleter = std::move(this->get_deleter())](other_type *ptr) { - deleter(static_cast(ptr)); + static_cast(original_ptr), + [deleter = std::move(this->get_deleter()), original_ptr](other_type *) { + deleter(original_ptr); }}; } } // namespace openPMD diff --git a/include/openPMD/benchmark/mpi/MPIBenchmark.hpp b/include/openPMD/benchmark/mpi/MPIBenchmark.hpp index 3d6d78c7e4..8eadec41db 100644 --- a/include/openPMD/benchmark/mpi/MPIBenchmark.hpp +++ b/include/openPMD/benchmark/mpi/MPIBenchmark.hpp @@ -239,7 +239,7 @@ MPIBenchmark::runBenchmark(int rootThread) } for (Datatype dt : datatypes) { - switchType>(dt, exec, res, rootThread); + switchDatasetType>(dt, exec, res, rootThread); } return res; diff --git a/src/IO/ADIOS/ADIOS2File.cpp b/src/IO/ADIOS/ADIOS2File.cpp index 82dd33f70b..cd21ff0010 100644 --- a/src/IO/ADIOS/ADIOS2File.cpp +++ b/src/IO/ADIOS/ADIOS2File.cpp @@ -22,11 +22,13 @@ #include "openPMD/IO/ADIOS/ADIOS2File.hpp" #include "openPMD/Error.hpp" #include "openPMD/IO/ADIOS/ADIOS2IOHandler.hpp" +#include "openPMD/IO/ADIOS/macros.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IterationEncoding.hpp" #include "openPMD/auxiliary/Environment.hpp" #include "openPMD/auxiliary/StringManip.hpp" +#include #include #if openPMD_USE_VERIFY @@ -60,8 +62,8 @@ void DatasetReader::call( adios2::Engine &engine, std::string const &fileName) { - adios2::Variable var = - impl->verifyDataset(bp.param.offset, bp.param.extent, IO, bp.name); + adios2::Variable var = impl->verifyDataset( + bp.param.offset, bp.param.extent, std::nullopt, IO, bp.name); if (!var) { throw std::runtime_error( @@ -75,6 +77,12 @@ void DatasetReader::call( template inline constexpr bool always_false_v = false; +static constexpr char const *warningMemorySelection = + "[Warning] Using a version of ADIOS2 that cannot reset memory selections " + "on a variable, once specified. When using memory selections, then please " + "specify it explicitly on all storeChunk() calls. 
Further info: " + "https://github.com/ornladios/ADIOS2/pull/4169."; + template void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) { @@ -90,9 +98,26 @@ void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) auto ptr = static_cast(arg.get()); adios2::Variable var = ba.m_impl->verifyDataset( - bp.param.offset, bp.param.extent, ba.m_IO, bp.name); + bp.param.offset, + bp.param.extent, + bp.param.memorySelection, + ba.m_IO, + bp.name); ba.getEngine().Put(var, ptr); + if (bp.param.memorySelection.has_value()) + { + if constexpr (CanTheMemorySelectionBeReset) + { + var.SetMemorySelection(); + } + else if (!ba.m_impl->printedWarningsAlready.memorySelection) + { + std::cerr << warningMemorySelection << std::endl; + ba.m_impl->printedWarningsAlready.memorySelection = + true; + } + } } else if constexpr (std::is_same_v< ptr_type, @@ -102,6 +127,7 @@ void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) bput.name = std::move(bp.name); bput.offset = std::move(bp.param.offset); bput.extent = std::move(bp.param.extent); + bput.memorySelection = std::move(bp.param.memorySelection); /* * Note: Moving is required here since it's a unique_ptr. * std::forward<>() would theoretically work, but it @@ -148,8 +174,24 @@ struct RunUniquePtrPut { auto ptr = static_cast(bufferedPut.data.get()); adios2::Variable var = ba.m_impl->verifyDataset( - bufferedPut.offset, bufferedPut.extent, ba.m_IO, bufferedPut.name); + bufferedPut.offset, + bufferedPut.extent, + bufferedPut.memorySelection, + ba.m_IO, + bufferedPut.name); ba.getEngine().Put(var, ptr); + if (bufferedPut.memorySelection.has_value()) + { + if constexpr (CanTheMemorySelectionBeReset) + { + var.SetMemorySelection(); + } + else if (!ba.m_impl->printedWarningsAlready.memorySelection) + { + std::cerr << warningMemorySelection << std::endl; + ba.m_impl->printedWarningsAlready.memorySelection = true; + } + } } static constexpr char const *errorMsg = "RunUniquePtrPut"; diff --git a/src/IO/ADIOS/ADIOS2IOHandler.cpp b/src/IO/ADIOS/ADIOS2IOHandler.cpp index 0f1bbe3eeb..baa8bb0e41 100644 --- a/src/IO/ADIOS/ADIOS2IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS2IOHandler.cpp @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -1112,7 +1113,7 @@ namespace detail auto &IO = ba.m_IO; auto &engine = ba.getEngine(); adios2::Variable variable = impl->verifyDataset( - params.offset, params.extent, IO, varName); + params.offset, params.extent, std::nullopt, IO, varName); adios2::Dims offset(params.offset.begin(), params.offset.end()); adios2::Dims extent(params.extent.begin(), params.extent.end()); variable.SetSelection({std::move(offset), std::move(extent)}); diff --git a/src/IO/AbstractIOHandlerImpl.cpp b/src/IO/AbstractIOHandlerImpl.cpp index 7675cc3e07..7769f14c81 100644 --- a/src/IO/AbstractIOHandlerImpl.cpp +++ b/src/IO/AbstractIOHandlerImpl.cpp @@ -286,7 +286,26 @@ std::future AbstractIOHandlerImpl::flush() i.writable->parent, "->", i.writable, - "] WRITE_DATASET"); + "] WRITE_DATASET: ", + [&]() { + std::stringstream stream; + stream << "offset: " << vec_as_string(parameter.offset) + << " extent: " << vec_as_string(parameter.extent) + << " mem-selection: "; + if (parameter.memorySelection.has_value()) + { + stream << vec_as_string( + parameter.memorySelection->offset) + << "--" + << vec_as_string( + parameter.memorySelection->extent); + } + else + { + stream << "NONE"; + } + return stream.str(); + }); writeDataset(i.writable, parameter); break; } diff --git a/src/IO/HDF5/HDF5IOHandler.cpp 
b/src/IO/HDF5/HDF5IOHandler.cpp index 874768adbe..ea9c16e169 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -1533,6 +1533,13 @@ void HDF5IOHandlerImpl::writeDataset( "[HDF5] Writing into a dataset in a file opened as read only is " "not possible."); + if (parameters.memorySelection.has_value()) + { + throw error::OperationUnsupportedInBackend( + "HDF5", + "Non-contiguous memory selections not supported in HDF5 backend."); + } + auto res = getFile(writable); File file = res ? res.value() : getFile(writable->parent).value(); diff --git a/src/IO/JSON/JSONIOHandlerImpl.cpp b/src/IO/JSON/JSONIOHandlerImpl.cpp index a432737188..c3537ae3db 100644 --- a/src/IO/JSON/JSONIOHandlerImpl.cpp +++ b/src/IO/JSON/JSONIOHandlerImpl.cpp @@ -1246,6 +1246,13 @@ void JSONIOHandlerImpl::writeDataset( access::write(m_handler->m_backendAccess), "[JSON] Cannot write data in read-only mode."); + if (parameters.memorySelection.has_value()) + { + throw error::OperationUnsupportedInBackend( + "JSON", + "Non-contiguous memory selections not supported in JSON backend."); + } + auto pos = setAndGetFilePosition(writable); auto file = refreshFileFromParent(writable); auto &j = obtainJsonContents(writable); diff --git a/src/LoadStoreChunk.cpp b/src/LoadStoreChunk.cpp new file mode 100644 index 0000000000..8c60138636 --- /dev/null +++ b/src/LoadStoreChunk.cpp @@ -0,0 +1,369 @@ + + +#include "openPMD/LoadStoreChunk.hpp" +#include "openPMD/Datatype.hpp" +#include "openPMD/RecordComponent.hpp" +#include "openPMD/Span.hpp" +#include "openPMD/auxiliary/Memory.hpp" +#include "openPMD/auxiliary/ShareRawInternal.hpp" +#include "openPMD/auxiliary/TypeTraits.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" + +// comment to keep clang-format from reordering +#include "openPMD/DatatypeMacros.hpp" + +#include +#include +#include +#include + +namespace openPMD +{ +namespace +{ + template + auto asWriteBuffer(std::shared_ptr &&ptr) -> auxiliary::WriteBuffer + { + /* std::static_pointer_cast correctly reference-counts the pointer */ + return auxiliary::WriteBuffer( + std::static_pointer_cast(std::move(ptr))); + } + template + auto asWriteBuffer(UniquePtrWithLambda &&ptr) -> auxiliary::WriteBuffer + { + return auxiliary::WriteBuffer( + std::move(ptr).template static_cast_()); + } + + /* + * There is no backend support currently for const unique pointers. + * We support these mostly for providing a clean API to users that have such + * pointers and want to store from them, but there will be no + * backend-specific optimizations for such buffers as there are for + * non-const unique pointers. 
+ */ + template + auto asWriteBuffer(UniquePtrWithLambda &&ptr) + -> auxiliary::WriteBuffer + { + auto raw_ptr = ptr.release(); + return asWriteBuffer(std::shared_ptr{ + raw_ptr, + [deleter = std::move(ptr.get_deleter())](auto const *delete_me) { + deleter(delete_me); + }}); + } +} // namespace + +namespace core +{ + ConfigureLoadStore::ConfigureLoadStore(RecordComponent &rc) : m_rc(rc) + {} + + auto ConfigureLoadStore::dim() const -> uint8_t + { + return m_rc.getDimensionality(); + } + + auto ConfigureLoadStore::storeChunkConfig() -> internal::LoadStoreConfig + { + return internal::LoadStoreConfig{getOffset(), getExtent()}; + } + + auto ConfigureLoadStore::getOffset() -> Offset const & + { + if (!m_offset.has_value()) + { + if (m_rc.joinedDimension().has_value()) + { + m_offset = std::make_optional(); + } + else + { + m_offset = std::make_optional(dim(), 0); + } + } + return *m_offset; + } + + auto ConfigureLoadStore::getExtent() -> Extent const & + { + if (!m_extent.has_value()) + { + m_extent = std::make_optional(m_rc.getExtent()); + if (m_offset.has_value()) + { + auto it_o = m_offset->begin(); + auto end_o = m_offset->end(); + auto it_e = m_extent->begin(); + auto end_e = m_extent->end(); + for (; it_o != end_o && it_e != end_e; ++it_e, ++it_o) + { + *it_e -= *it_o; + } + } + } + return *m_extent; + } + + template + auto ConfigureLoadStore::enqueueStore() -> DynamicMemoryView + { + return m_rc.storeChunkSpan_impl(storeChunkConfig()); + } + + template + auto ConfigureLoadStore::enqueueLoad() + -> auxiliary::DeferredComputation> + { + auto res = m_rc.loadChunkAllocate_impl(storeChunkConfig()); + return auxiliary::DeferredComputation>( + [res_lambda = std::move(res), rc = m_rc]() mutable { + rc.seriesFlush(); + return res_lambda; + }); + } + + template + auto ConfigureLoadStore::load(EnqueuePolicy ep) -> std::shared_ptr + { + auto res = m_rc.loadChunkAllocate_impl(storeChunkConfig()); + switch (ep) + { + case EnqueuePolicy::Defer: + break; + case EnqueuePolicy::Immediate: + m_rc.seriesFlush(); + break; + } + return res; + } + + struct VisitorEnqueueLoadVariant + { + template + static auto call(RecordComponent &rc, internal::LoadStoreConfig cfg) + -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types> + { + auto res = rc.loadChunkAllocate_impl(std::move(cfg)); + return auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types>( + + [res_lambda = std::move(res), rc_lambda = rc]() mutable + -> auxiliary::detail::shared_ptr_dataset_types { + std::cout << "Flushing Series from Future" << std::endl; + rc_lambda.seriesFlush(); + std::cout << "Flushed Series from Future" << std::endl; + return res_lambda; + }); + } + }; + + auto ConfigureLoadStore::enqueueLoadVariant() + -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types> + { + return m_rc.visit(this->storeChunkConfig()); + } + + struct VisitorLoadVariant + { + template + static auto call(RecordComponent &rc, internal::LoadStoreConfig cfg) + -> auxiliary::detail::shared_ptr_dataset_types + { + return rc.loadChunkAllocate_impl(std::move(cfg)); + } + }; + + auto ConfigureLoadStore::loadVariant(EnqueuePolicy ep) + -> auxiliary::detail::shared_ptr_dataset_types + { + auto res = m_rc.visit(this->storeChunkConfig()); + switch (ep) + { + case EnqueuePolicy::Defer: + break; + case EnqueuePolicy::Immediate: + m_rc.seriesFlush(); + break; + } + return res; + } + + template + ConfigureStoreChunkFromBuffer::ConfigureStoreChunkFromBuffer( + Ptr_Type buffer, ConfigureLoadStore 
&&core) + : ConfigureLoadStore(std::move(core)), m_buffer(std::move(buffer)) + {} + + template + auto ConfigureStoreChunkFromBuffer::storeChunkConfig() + -> internal::LoadStoreConfigWithBuffer + { + return internal::LoadStoreConfigWithBuffer{ + this->getOffset(), this->getExtent(), m_mem_select}; + } + + template + auto ConfigureStoreChunkFromBuffer::enqueueStore() + -> auxiliary::DeferredComputation + { + this->m_rc.storeChunk_impl( + asWriteBuffer(std::move(m_buffer)), + determineDatatype>(), + storeChunkConfig()); + return auxiliary::DeferredComputation( + [rc_lambda = m_rc]() mutable -> void { rc_lambda.seriesFlush(); }); + } + + template + auto ConfigureStoreChunkFromBuffer::store(EnqueuePolicy ep) + -> void + { + this->m_rc.storeChunk_impl( + asWriteBuffer(std::move(m_buffer)), + determineDatatype>(), + storeChunkConfig()); + switch (ep) + { + case EnqueuePolicy::Defer: + break; + case EnqueuePolicy::Immediate: + m_rc.seriesFlush(); + break; + } + } + + template + auto ConfigureLoadStoreFromBuffer::enqueueLoad() + -> auxiliary::DeferredComputation + { + static_assert( + std::is_same_v< + Ptr_Type, + std::shared_ptr< + std::remove_cv_t>>, + "ConfigureLoadStoreFromBuffer must be instantiated with a " + "non-const " + "shared_ptr type."); + this->m_rc.loadChunk_impl( + std::move(this->m_buffer), this->storeChunkConfig()); + return auxiliary::DeferredComputation( + [rc_lambda = this->m_rc]() mutable -> void { + rc_lambda.seriesFlush(); + }); + } + + template + auto ConfigureLoadStoreFromBuffer::load(EnqueuePolicy ep) -> void + { + this->m_rc.loadChunk_impl( + std::move(this->m_buffer), this->storeChunkConfig()); + switch (ep) + { + + case EnqueuePolicy::Defer: + break; + case EnqueuePolicy::Immediate: + this->m_rc.seriesFlush(); + break; + } + } +} // namespace core + +namespace compose +{ + template + auto ConfigureLoadStore::extent(Extent extent) -> ChildClass & + { + static_cast(this)->m_extent = + std::make_optional(std::move(extent)); + return *static_cast(this); + } + + template + auto ConfigureLoadStore::offset(Offset offset) -> ChildClass & + { + static_cast(this)->m_offset = + std::make_optional(std::move(offset)); + return *static_cast(this); + } + + template + auto ConfigureStoreChunkFromBuffer::memorySelection( + MemorySelection sel) -> ChildClass & + { + static_cast(this)->m_mem_select = + std::make_optional(std::move(sel)); + return *static_cast(this); + } +} // namespace compose + +template class compose::ConfigureLoadStore; + +/* clang-format would destroy the NOLINT comments */ +// clang-format off +#define INSTANTIATE_METHOD_TEMPLATES(dtype) \ + template auto core::ConfigureLoadStore::enqueueStore() \ + -> DynamicMemoryView; \ + template auto core::ConfigureLoadStore::enqueueLoad() \ + /* NOLINTNEXTLINE(bugprone-macro-parentheses) */ \ + -> auxiliary::DeferredComputation>; \ + template auto core::ConfigureLoadStore::load(EnqueuePolicy) \ + ->std::shared_ptr; +// clang-format on + +OPENPMD_FOREACH_DATASET_DATATYPE(INSTANTIATE_METHOD_TEMPLATES) + +#undef INSTANTIATE_METHOD_TEMPLATES + +/* clang-format would destroy the NOLINT comments */ +// clang-format off +#define INSTANTIATE_HALF(pointer_type) \ + template class ConfigureStoreChunkFromBuffer; \ + template class core::ConfigureStoreChunkFromBuffer; \ + template class compose::ConfigureLoadStore< \ + /* NOLINTNEXTLINE(bugprone-macro-parentheses) */ \ + ConfigureStoreChunkFromBuffer>; \ + template class compose::ConfigureStoreChunkFromBuffer< \ + /* NOLINTNEXTLINE(bugprone-macro-parentheses) */ \ + 
ConfigureStoreChunkFromBuffer>; +// clang-format on + +/* clang-format would destroy the NOLINT comments */ +// clang-format off +#define INSTANTIATE_FULL(pointer_type) \ + INSTANTIATE_HALF(pointer_type) \ + template class ConfigureLoadStoreFromBuffer; \ + template class core::ConfigureLoadStoreFromBuffer; \ + template class compose::ConfigureLoadStore< \ + /* NOLINTNEXTLINE(bugprone-macro-parentheses) */ \ + ConfigureLoadStoreFromBuffer>; \ + template class compose::ConfigureStoreChunkFromBuffer< \ + /* NOLINTNEXTLINE(bugprone-macro-parentheses) */ \ + ConfigureLoadStoreFromBuffer>; +// clang-format on + +#define INSTANTIATE_STORE_CHUNK_FROM_BUFFER(dtype) \ + INSTANTIATE_FULL(std::shared_ptr) \ + INSTANTIATE_HALF(std::shared_ptr) \ + INSTANTIATE_HALF(UniquePtrWithLambda) \ + INSTANTIATE_HALF(UniquePtrWithLambda) +// /* NOLINTNEXTLINE(bugprone-macro-parentheses) */ + +OPENPMD_FOREACH_DATASET_DATATYPE(INSTANTIATE_STORE_CHUNK_FROM_BUFFER) + +#undef INSTANTIATE_STORE_CHUNK_FROM_BUFFER +#undef INSTANTIATE_METHOD_TEMPLATES +#undef INSTANTIATE_FULL +#undef INSTANTIATE_HALF + +ConfigureLoadStore::ConfigureLoadStore(RecordComponent &rc) + : core::ConfigureLoadStore{rc} +{} +ConfigureLoadStore::ConfigureLoadStore(core::ConfigureLoadStore &&core) + : core::ConfigureLoadStore{std::move(core)} +{} +} // namespace openPMD diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index fc17909fc6..24ecebc97d 100644 --- a/src/RecordComponent.cpp +++ b/src/RecordComponent.cpp @@ -23,11 +23,15 @@ #include "openPMD/DatatypeHelpers.hpp" #include "openPMD/Error.hpp" #include "openPMD/IO/Format.hpp" +#include "openPMD/LoadStoreChunk.hpp" #include "openPMD/Series.hpp" #include "openPMD/auxiliary/Memory.hpp" #include "openPMD/backend/Attributable.hpp" #include "openPMD/backend/BaseRecord.hpp" +// comment +#include "openPMD/DatatypeMacros.hpp" + #include #include #include @@ -62,6 +66,42 @@ RecordComponent::RecordComponent() : BaseRecordComponent(NoInit()) setData(std::make_shared()); } +ConfigureLoadStore RecordComponent::prepareLoadStore() +{ + return ConfigureLoadStore{*this}; +} + +namespace +{ +#if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ + (defined(__apple_build_version__) && __clang_major__ < 14) + template + auto createSpanBufferFallback(size_t size) -> std::shared_ptr + { + return std::shared_ptr{new T[size], [](auto *ptr) { delete[] ptr; }}; + } +#else + template + auto createSpanBufferFallback(size_t size) -> std::shared_ptr + { + return std::shared_ptr{new T[size]}; + } +#endif +} // namespace + +template +DynamicMemoryView +RecordComponent::storeChunkSpan_impl(internal::LoadStoreConfig cfg) +{ + return storeChunkSpanCreateBuffer_impl( + std::move(cfg), &createSpanBufferFallback); +} +#define OPENPMD_INSTANTIATE(dtype) \ + template DynamicMemoryView RecordComponent::storeChunkSpan_impl( \ + internal::LoadStoreConfig cfg); +OPENPMD_FOREACH_DATASET_DATATYPE(OPENPMD_INSTANTIATE) +#undef OPENPMD_INSTANTIATE + RecordComponent::RecordComponent(NoInit) : BaseRecordComponent(NoInit()) {} @@ -464,14 +504,18 @@ void RecordComponent::readBase(bool require_unit_si) } } -void RecordComponent::storeChunk( - auxiliary::WriteBuffer buffer, Datatype dtype, Offset o, Extent e) +void RecordComponent::storeChunk_impl( + auxiliary::WriteBuffer buffer, + Datatype dtype, + internal::LoadStoreConfigWithBuffer cfg) { + auto [o, e, memorySelection] = std::move(cfg); verifyChunk(dtype, o, e); Parameter dWrite; dWrite.offset = std::move(o); dWrite.extent = std::move(e); + dWrite.memorySelection = 
memorySelection; dWrite.dtype = dtype; /* std::static_pointer_cast correctly reference-counts the pointer */ dWrite.data = std::move(buffer); diff --git a/src/auxiliary/Future.cpp b/src/auxiliary/Future.cpp new file mode 100644 index 0000000000..944fe63504 --- /dev/null +++ b/src/auxiliary/Future.cpp @@ -0,0 +1,53 @@ +#include "openPMD/auxiliary/Future.hpp" +#include "openPMD/LoadStoreChunk.hpp" + +#include +#include +#include + +// comment + +#include "openPMD/DatatypeMacros.hpp" + +namespace openPMD::auxiliary +{ + +template +DeferredComputation::DeferredComputation(task_type task) + : m_task([wrapped_task = std::move(task), this]() { + if (!this->m_valid) + { + throw std::runtime_error( + "[DeferredComputation] No valid state. Probably already " + "computed."); + } + this->m_valid = false; + return std::move(wrapped_task)(); + }) + , m_valid(true) +{} + +template +auto DeferredComputation::get() -> T +{ + return m_task(); +} + +template +auto DeferredComputation::valid() const noexcept -> bool +{ + return m_valid; +} + +template class DeferredComputation; +template class DeferredComputation; +// clang-format off +#define INSTANTIATE_FUTURE(dtype) \ + /* NOLINTNEXTLINE(bugprone-macro-parentheses) */ \ + template class DeferredComputation>; +OPENPMD_FOREACH_DATASET_DATATYPE(INSTANTIATE_FUTURE) +#undef INSTANTIATE_FUTURE +// clang-format on +} // namespace openPMD::auxiliary + +#include "openPMD/UndefDatatypeMacros.hpp" diff --git a/src/binding/python/RecordComponent.cpp b/src/binding/python/RecordComponent.cpp index 5645053f0e..44eaae3111 100644 --- a/src/binding/python/RecordComponent.cpp +++ b/src/binding/python/RecordComponent.cpp @@ -600,14 +600,6 @@ struct StoreChunkSpan static constexpr char const *errorMsg = "RecordComponent.store_chunk()"; }; - -template <> -PythonDynamicMemoryView StoreChunkSpan::call( - RecordComponent &, Offset const &, Extent const &) -{ - throw std::runtime_error( - "[RecordComponent.store_chunk()] Only PODs allowed."); -} } // namespace inline PythonDynamicMemoryView store_chunk_span( @@ -628,7 +620,7 @@ inline PythonDynamicMemoryView store_chunk_span( std::begin(shape), [&maskIt](std::uint64_t) { return !*(maskIt++); }); - return switchNonVectorType( + return switchDatasetType( r.getDatatype(), r, offset, extent); } @@ -698,7 +690,7 @@ void load_chunk( } } - switchNonVectorType( + switchDatasetType( r.getDatatype(), r, buffer, buffer_info, offset, extent); } diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index 519a8749b2..ccd23a4208 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -429,13 +429,42 @@ void available_chunks_test(std::string const &file_ending) } )END"; - std::vector data{2, 4, 6, 8}; + std::vector xdata{2, 4, 6, 8}; + std::vector ydata{0, 0, 0, 0, 0, // + 0, 1, 2, 3, 0, // + 0, 4, 5, 6, 0, // + 0, 7, 8, 9, 0, // + 0, 0, 0, 0, 0}; + std::vector ydata_firstandlastrow{-1, -1, -1}; { Series write(name, Access::CREATE, MPI_COMM_WORLD, parameters.str()); Iteration it0 = write.iterations[0]; auto E_x = it0.meshes["E"]["x"]; E_x.resetDataset({Datatype::INT, {mpi_size, 4}}); - E_x.storeChunk(data, {mpi_rank, 0}, {1, 4}); + E_x.storeChunk(xdata, {mpi_rank, 0}, {1, 4}); + auto E_y = it0.meshes["E"]["y"]; + E_y.resetDataset({Datatype::INT, {5, 3ul * mpi_size}}); + E_y.prepareLoadStore() + .withContiguousContainer(ydata_firstandlastrow) + .offset({0, 3ul * mpi_rank}) + .extent({1, 3}) + .enqueueStore(); + E_y.prepareLoadStore() + .offset({1, 3ul * mpi_rank}) + .extent({3, 3}) + .withContiguousContainer(ydata) + 
.memorySelection({{1, 1}, {5, 5}}) + .enqueueStore(); + // if condition checks if this PR is available in ADIOS2: + // https://github.com/ornladios/ADIOS2/pull/4169 + if constexpr (CanTheMemorySelectionBeReset) + { + E_y.prepareLoadStore() + .withContiguousContainer(ydata_firstandlastrow) + .offset({4, 3ul * mpi_rank}) + .extent({1, 3}) + .enqueueStore(); + } it0.close(); } @@ -467,6 +496,42 @@ void available_chunks_test(std::string const &file_ending) { REQUIRE(ranks[i] == i); } + + auto E_y = it0.meshes["E"]["y"]; + auto width = E_y.getExtent()[1]; + auto first_row = + E_y.prepareLoadStore().extent({1, width}).enqueueLoad().get(); + auto middle_rows = E_y.prepareLoadStore() + .offset({1, 0}) + .extent({3, width}) + .enqueueLoad() + .get(); + auto last_row = + E_y.prepareLoadStore().offset({4, 0}).enqueueLoad().get(); + read.flush(); + + for (auto row : [&]() -> std::vector *> { + if constexpr (CanTheMemorySelectionBeReset) + { + return {&first_row, &last_row}; + } + else + { + return {&first_row}; + } + }()) + { + for (size_t i = 0; i < width; ++i) + { + REQUIRE(row->get()[i] == -1); + } + } + for (size_t i = 0; i < width * 3; ++i) + { + size_t row = i / width; + int required_value = row * 3 + (i % 3) + 1; + REQUIRE(middle_rows.get()[i] == required_value); + } } } diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 9c06ac0e3e..cbce4ff8a5 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -915,7 +915,11 @@ inline void constant_scalar(std::string const &file_ending) new unsigned int[6], [](unsigned int const *p) { delete[] p; }); unsigned int e{0}; std::generate(E.get(), E.get() + 6, [&e] { return e++; }); - E_y.storeChunk(std::move(E), {0, 0, 0}, {1, 2, 3}); + // check that const-type unique pointers work in the builder pattern + E_y.prepareLoadStore() + .extent({1, 2, 3}) + .withUniquePtr(std::move(E).static_cast_()) + .enqueueStore(); // store a number of predefined attributes in E Mesh &E_mesh = s.iterations[1].meshes["E"]; @@ -1726,13 +1730,17 @@ inline void write_test( auto opaqueTypeDataset = rc.visit(); auto variantTypeDataset = rc.loadChunkVariant(); + auto variantTypeDataset2 = rc.prepareLoadStore().enqueueLoadVariant().get(); rc.seriesFlush(); - std::visit( - [](auto &&shared_ptr) { - std::cout << "First value in loaded chunk: '" << shared_ptr.get()[0] - << '\'' << std::endl; - }, - variantTypeDataset); + for (auto ptr : {&variantTypeDataset, &variantTypeDataset2}) + { + std::visit( + [](auto &&shared_ptr) { + std::cout << "First value in loaded chunk: '" + << shared_ptr.get()[0] << '\'' << std::endl; + }, + *ptr); + } #ifndef _WIN32 if (test_rank_table) @@ -2988,7 +2996,7 @@ TEST_CASE("git_hdf5_legacy_picongpu", "[serial][hdf5]") auto radiationMask = o.iterations[200] .particles["e"]["radiationMask"][RecordComponent::SCALAR]; - switchNonVectorType( + switchDatasetType( radiationMask.getDatatype(), radiationMask); auto particlePatches = o.iterations[200].particles["e"].particlePatches;
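Reviewer note: the diff above replaces the storeChunk()/loadChunk() overload set with a single builder entry point, RecordComponent::prepareLoadStore(). Below is a minimal store sketch against the new API; the file name, mesh names and values are illustrative only, not part of this patch.

#include <openPMD/openPMD.hpp>
#include <memory>
#include <numeric>

void store_sketch()
{
    openPMD::Series series("data_%T.bp", openPMD::Access::CREATE);
    auto E_x = series.iterations[0].meshes["E"]["x"];
    E_x.resetDataset({openPMD::Datatype::DOUBLE, {4, 10}});

    auto buffer = std::shared_ptr<double>(
        new double[40], [](double const *p) { delete[] p; });
    std::iota(buffer.get(), buffer.get() + 40, 0.0);

    // Builder-style equivalent of the legacy E_x.storeChunk(buffer, {0, 0}, {4, 10}).
    // EnqueuePolicy::Immediate flushes the Series right away,
    // EnqueuePolicy::Defer waits for the next explicit flush.
    E_x.prepareLoadStore()
        .offset({0, 0})
        .extent({4, 10})
        .withSharedPtr(buffer)
        .store(openPMD::EnqueuePolicy::Immediate);
}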
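MemorySelection describes where the stored block lives inside a larger contiguous user buffer: offset is the position of the block within that buffer, extent is the full size of the buffer, mirroring adios2::Variable::SetMemorySelection. The ParallelIOTest hunk above writes the 3x3 interior of a 5x5 ghost-padded buffer; here is a serial sketch of the same idea (names are illustrative, ADIOS2 backend assumed, since the HDF5 and JSON backends reject memory selections in this patch).

#include <openPMD/openPMD.hpp>
#include <vector>

void store_interior_block()
{
    openPMD::Series series("ghosts.bp", openPMD::Access::CREATE);
    auto E_y = series.iterations[0].meshes["E"]["y"];
    E_y.resetDataset({openPMD::Datatype::INT, {3, 3}});

    // 5x5 row-major buffer: one ghost layer around the 3x3 interior.
    std::vector<int> buffer{0, 0, 0, 0, 0, //
                            0, 1, 2, 3, 0, //
                            0, 4, 5, 6, 0, //
                            0, 7, 8, 9, 0, //
                            0, 0, 0, 0, 0};

    E_y.prepareLoadStore()
        .offset({0, 0}) // selection inside the dataset
        .extent({3, 3})
        .withContiguousContainer(buffer)
        .memorySelection({{1, 1},  // interior block starts at (1, 1) ...
                          {5, 5}}) // ... of a 5x5 memory block
        .enqueueStore();           // executed by the next flush

    series.flush();
}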
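Calling enqueueStore<T>() without attaching a buffer takes the span-based path (storeChunkSpan_impl above): the backend may hand out its own buffer, returned to the caller as a DynamicMemoryView. A short sketch, again with illustrative names:

#include <openPMD/openPMD.hpp>

void store_via_span(openPMD::RecordComponent &rc)
{
    // Request a buffer for a 1x10 block at offset (1, 0) and fill it in place
    // before the owning Series is flushed.
    auto view = rc.prepareLoadStore()
                    .offset({1, 0})
                    .extent({1, 10})
                    .enqueueStore<double>(); // DynamicMemoryView<double>
    auto span = view.currentBuffer();
    for (size_t i = 0; i < span.size(); ++i)
    {
        span[i] = static_cast<double>(i);
    }
    rc.seriesFlush();
}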
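On the read side, enqueueLoad<T>() allocates the destination buffer up front and wraps the Series flush in an auxiliary::DeferredComputation, so the read is executed when .get() is called; loadVariant()/enqueueLoadVariant() do the same without requiring the caller to name T. Reader sketch, reusing the illustrative file from the store example above:

#include <openPMD/openPMD.hpp>
#include <iostream>
#include <memory>
#include <variant>

void load_sketch()
{
    openPMD::Series series("ghosts.bp", openPMD::Access::READ_ONLY);
    auto E_y = series.iterations[0].meshes["E"]["y"];

    // Typed, deferred: the Series is flushed only when .get() runs.
    auto deferred = E_y.prepareLoadStore()
                        .offset({0, 0})
                        .extent({1, 3})
                        .enqueueLoad<int>();
    std::shared_ptr<int> first_row = deferred.get();

    // Type-erased, immediate: yields a std::variant over shared_ptr<T>.
    auto any_type =
        E_y.prepareLoadStore().loadVariant(openPMD::EnqueuePolicy::Immediate);
    std::visit(
        [](auto const &ptr) {
            std::cout << "first value: " << ptr.get()[0] << '\n';
        },
        any_type);
}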
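The CanTheMemorySelectionBeReset trait in macros.hpp detects at compile time whether the linked ADIOS2 exposes the zero-argument Variable::SetMemorySelection() overload (ornladios/ADIOS2#4169). In case the defaulted-second-parameter trick is unfamiliar, here is a self-contained toy version of the same idiom; the two stand-in types are invented for illustration and are not part of the patch.

#include <type_traits>
#include <utility>

// Stand-ins for the two ADIOS2 generations (invented for illustration).
struct VariableNew { void SetMemorySelection(); void SetMemorySelection(int); };
struct VariableOld { void SetMemorySelection(int); };

// Primary template: assume the overload is missing.
template <typename Variable, typename = void>
struct CanReset : std::false_type {};

// Chosen only if calling SetMemorySelection() with no arguments is well-formed;
// its (void) return type then matches the defaulted second parameter.
template <typename Variable>
struct CanReset<Variable, decltype(std::declval<Variable>().SetMemorySelection())>
    : std::true_type {};

static_assert(CanReset<VariableNew>::value);
static_assert(!CanReset<VariableOld>::value);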