From 7b6f43ccbb2991d26b7a810935c16f4baa6b7125 Mon Sep 17 00:00:00 2001
From: Bernhard Manfred Gruber
Date: Sat, 15 May 2021 21:53:18 +0200
Subject: [PATCH] add example loading an entire RNTuple into a LLAMA view

---
 CMakeLists.txt                       |  6 ++++
 examples/hep_rntuple/CMakeLists.txt  | 11 ++++++
 examples/hep_rntuple/hep_rntuple.cpp | 53 ++++++++++++++++++++++++++++
 3 files changed, 70 insertions(+)
 create mode 100644 examples/hep_rntuple/CMakeLists.txt
 create mode 100644 examples/hep_rntuple/hep_rntuple.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index b3269dd321..ce9cff5205 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -69,6 +69,12 @@ if (LLAMA_BUILD_EXAMPLES)
         message(WARNING "Could not find alpaka. Alpaka examples are disabled.")
     endif()
 
+    # ROOT examples
+    find_package(ROOT QUIET)
+    if (ROOT_FOUND)
+        add_subdirectory("examples/hep_rntuple")
+    endif()
+
     # CUDA examples
     include(CheckLanguage)
     check_language(CUDA)
diff --git a/examples/hep_rntuple/CMakeLists.txt b/examples/hep_rntuple/CMakeLists.txt
new file mode 100644
index 0000000000..1ff55a2867
--- /dev/null
+++ b/examples/hep_rntuple/CMakeLists.txt
@@ -0,0 +1,11 @@
+cmake_minimum_required (VERSION 3.15)
+project(llama-hep_rntuple)
+
+set(CMAKE_CXX_STANDARD 17)
+
+find_package(ROOT REQUIRED)
+if (NOT TARGET llama::llama)
+    find_package(llama REQUIRED)
+endif()
+add_executable(${PROJECT_NAME} hep_rntuple.cpp)
+target_link_libraries(${PROJECT_NAME} PRIVATE ROOT::Hist ROOT::Graf ROOT::Gpad ROOT::ROOTNTuple llama::llama)
diff --git a/examples/hep_rntuple/hep_rntuple.cpp b/examples/hep_rntuple/hep_rntuple.cpp
new file mode 100644
index 0000000000..7354dfb999
--- /dev/null
+++ b/examples/hep_rntuple/hep_rntuple.cpp
@@ -0,0 +1,53 @@
+// This example uses a non-public CMS NanoAOD file called: ttjet_13tev_june2019_lzma.
+// Please contact us if you need it.
+
+#include "../common/ttjet_13tev_june2019.hpp"
+
+#include <RConfigure.h>
+#define R__HAS_STD_STRING_VIEW
+#include <ROOT/RNTuple.hxx>
+#include <ROOT/RNTupleDS.hxx>
+#include <ROOT/RNTupleModel.hxx>
+#include <ROOT/RNTupleOptions.hxx>
+#include <ROOT/RNTupleView.hxx>
+#include <chrono>
+#include <fmt/core.h>
+#include <llama/llama.hpp>
+
+int main(int argc, const char* argv[])
+{
+    if (argc != 2)
+    {
+        fmt::print("Please specify input file!\n");
+        return 1;
+    }
+
+    using namespace std::chrono;
+    using namespace ROOT::Experimental;
+
+    auto ntuple = RNTupleReader::Open(RNTupleModel::Create(), "NTuple", argv[1]);
+    const auto n = ntuple->GetNEntries();
+
+    auto start = steady_clock::now();
+    auto view = llama::allocView(llama::mapping::SoA<llama::ArrayDims<1>, Event, true>{llama::ArrayDims{n}});
+    fmt::print("Alloc LLAMA view: {}ms\n", duration_cast<milliseconds>(steady_clock::now() - start).count());
+
+    std::size_t totalSize = 0;
+    for (auto i = 0u; i < view.mapping.blobCount; i++)
+        totalSize += view.mapping.blobSize(i);
+    fmt::print("Total LLAMA view memory: {}MiB in {} blobs\n", totalSize / 1024 / 1024, view.mapping.blobCount);
+
+    start = steady_clock::now();
+    llama::forEachLeaf<Event>(
+        [&](auto coord)
+        {
+            using Name = llama::GetTag<Event, decltype(coord)>;
+            using Type = llama::GetType<Event, decltype(coord)>;
+            auto column = ntuple->GetView<Type>(llama::structName<Name>());
+            for (std::size_t i = 0; i < n; i++)
+                view(i)(coord) = column(i);
+        });
+    fmt::print("Copy RNTuple -> LLAMA view: {}ms\n", duration_cast<milliseconds>(steady_clock::now() - start).count());
+
+    start = steady_clock::now();
+}
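
For readers without access to the private header: the copy loop above only works because Event is a LLAMA record dimension whose leaf tag names spell out the RNTuple column names, since llama::structName<Name>() produces the string handed to ntuple->GetView. A minimal sketch of such a record dimension, assuming the llama::Record/llama::Field spelling of the API used elsewhere in this patch; the three columns are illustrative stand-ins, not the actual contents of ttjet_13tev_june2019.hpp:

    #include <cstdint>
    #include <llama/llama.hpp>

    // Tags double as column names: llama::structName<MET_pt>() yields "MET_pt",
    // exactly the string the copy loop passes to ntuple->GetView<float>(...).
    struct run{};
    struct luminosityBlock{};
    struct MET_pt{};

    using Event = llama::Record<
        llama::Field<run, std::uint32_t>,
        llama::Field<luminosityBlock, std::uint32_t>,
        llama::Field<MET_pt, float>>;

With this coupling, forEachLeaf needs no per-column code: each leaf's tag recovers the column name, and each leaf's type selects the typed RNTuple view to read from.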