-
Notifications
You must be signed in to change notification settings - Fork 10
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
add example loading an entire RNTuple into a LLAMA view
- Loading branch information
1 parent
e1dfd94
commit 7b6f43c
Showing
3 changed files
with
70 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
cmake_minimum_required(VERSION 3.15)
project(llama-hep_rntuple CXX)

# The example code requires C++17; fail configuration instead of silently
# falling back to an older standard.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(ROOT REQUIRED)
# When built inside the LLAMA source tree the llama::llama target already
# exists; only search for an installed LLAMA when building standalone.
if (NOT TARGET llama::llama)
    find_package(llama REQUIRED)
endif()
add_executable(${PROJECT_NAME} hep_rntuple.cpp)
target_link_libraries(${PROJECT_NAME} PRIVATE ROOT::Hist ROOT::Graf ROOT::Gpad ROOT::ROOTNTuple llama::llama)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,53 @@ | ||
// This example uses a non-public CMS NanoAOD file called: ttjet_13tev_june2019_lzma. | ||
// Please contact us if you need it.
|
||
#include "../common/ttjet_13tev_june2019.hpp" | ||
|
||
#include <RConfigure.h> | ||
#define R__HAS_STD_STRING_VIEW | ||
#include <ROOT/RNTuple.hxx> | ||
#include <ROOT/RNTupleDS.hxx> | ||
#include <ROOT/RNTupleModel.hxx> | ||
#include <ROOT/RNTupleOptions.hxx> | ||
#include <ROOT/RNTupleView.hxx> | ||
#include <chrono> | ||
#include <llama/DumpMapping.hpp> | ||
#include <llama/llama.hpp> | ||
|
||
int main(int argc, const char* argv[]) | ||
{ | ||
if (argc != 2) | ||
{ | ||
fmt::print("Please specify input file!\n"); | ||
return 1; | ||
} | ||
|
||
using namespace std::chrono; | ||
using namespace ROOT::Experimental; | ||
|
||
auto ntuple = RNTupleReader::Open(RNTupleModel::Create(), "NTuple", argv[1]); | ||
const auto n = ntuple->GetNEntries(); | ||
|
||
auto start = steady_clock::now(); | ||
auto view = llama::allocView(llama::mapping::SoA<llama::ArrayDims<1>, Event, true>{llama::ArrayDims{n}}); | ||
fmt::print("Alloc LLAMA view: {}ms\n", duration_cast<milliseconds>(steady_clock::now() - start).count()); | ||
|
||
std::size_t totalSize = 0; | ||
for (auto i = 0u; i < view.mapping.blobCount; i++) | ||
totalSize += view.mapping.blobSize(i); | ||
fmt::print("Total LLAMA view memory: {}MiB in {} blobs\n", totalSize / 1024 / 1024, view.mapping.blobCount); | ||
|
||
start = steady_clock::now(); | ||
llama::forEachLeaf<Event>( | ||
[&](auto coord) | ||
{ | ||
using Name = llama::GetTag<Event, decltype(coord)>; | ||
using Type = llama::GetType<Event, decltype(coord)>; | ||
auto column = ntuple->GetView<Type>(llama::structName<Name>()); | ||
for (std::size_t i = 0; i < n; i++) | ||
view(i)(coord) = column(i); | ||
}); | ||
fmt::print("Copy RNTuple -> LLAMA view: {}ms\n", duration_cast<milliseconds>(steady_clock::now() - start).count()); | ||
|
||
start = steady_clock::now(); | ||
} |