Skip to content

Commit

Permalink
Added benchmark plots, amended RooFitBinned benchmarks, and added bash script
Browse files Browse the repository at this point in the history
  • Loading branch information
Abhigyan Acherjee authored and Abhigyan Acherjee committed Mar 4, 2024
1 parent d479d99 commit eb1b2fc
Show file tree
Hide file tree
Showing 4 changed files with 157 additions and 3 deletions.
2 changes: 2 additions & 0 deletions root/roofit/roofit/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ if(cuda)
endif()

# Copy the plotting and driver helper scripts next to the benchmark binaries
# so they can be invoked directly from the build directory.
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/benchRooFitBackends_make_plot.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/compare_benchmarks.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/run_benchmarks.sh DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)

RB_ADD_GBENCHMARK(benchCodeSquashAD
benchCodeSquashAD.cxx
Expand Down
67 changes: 64 additions & 3 deletions root/roofit/roofit/RooFitBinnedBenchmarks.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,48 @@ namespace {
const std::vector<int> nBinsVector {5, 10, 15};
const int nBinsForChannelScan = 10;
const int nChannelsForBinScan = 1;
// Only a single-CPU configuration is benchmarked for now; restore {1, 2, 3}
// to also scan over RooFit's NumCPU option.
const std::vector<int> nCPUVector {1};

// Evaluation backend, selectable at runtime via the "-b" flag (see main()).
// Defaults to the CPU backend. The string is passed on to RooFit, so it must
// name a backend RooFit::EvalBackend accepts (e.g. "cpu", "codegen",
// "legacy") -- TODO confirm the accepted spellings against RooFit.
// Fixed: a leftover `constexpr auto evalBackend = RooFit::EvalBackend::Value::Cpu;`
// duplicated this definition (redefinition of the same name) and, being
// constexpr, could not be assigned from main(); it has been removed.
std::string evalBackend = "cpu";
// NOTE(review): a commented-out string-to-enum parser (parseEvalBackend) and
// a commented-out google-benchmark argument hook (CustomArguments) used to
// live here. They were dead code -- the backend is kept as a plain string in
// `evalBackend` and parsed by main() -- so they have been removed.

// All benchmark timings below are reported in milliseconds.
auto const timeUnit = benchmark::kMillisecond;

void setupRooMsgService() {
Expand Down Expand Up @@ -274,4 +312,27 @@ BENCHMARK(BM_RooFit_BinnedTestMinos)
//####################################################################
//############## RUN #################################################

BENCHMARK_MAIN();
////BENCHMARK_MAIN();
/// Custom benchmark entry point (replaces BENCHMARK_MAIN()).
///
/// In addition to the standard google-benchmark flags, it recognizes
/// "-b <backend>", which stores the backend name in the file-level
/// `evalBackend` string (default "cpu") used by the benchmark fixtures.
int main(int argc, char **argv)
{
   benchmark::Initialize(&argc, argv);

   for (int i = 1; i < argc; ++i) {
      if (std::string(argv[i]) == "-b") {
         if (i + 1 < argc) {
            // Consume the value argument as well, so it is not re-inspected
            // on the next loop iteration (the old code left `i` untouched).
            evalBackend = argv[++i];
         } else {
            // Fixed: the old message referred to a "--evalBackend" flag that
            // does not exist; the actual flag is "-b".
            std::cerr << "Missing value for -b argument" << std::endl;
            return 1;
         }
      }
   }
   benchmark::RunSpecifiedBenchmarks();
   benchmark::Shutdown(); // release google-benchmark's internal state
   return 0;
}
64 changes: 64 additions & 0 deletions root/roofit/roofit/compare_benchmarks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
import pandas as pd
import csv
import matplotlib.pyplot as plt
import numpy as np

def parse_and_writer(csv_file_path, name, header_lines=8):
    """Strip google-benchmark's preamble from a CSV results file.

    Reads ``csv_file_path``, skips the first ``header_lines`` rows (the
    machine/context banner that ``--benchmark_out_format=csv`` prepends
    to the table) and writes the remaining rows to ``<name>.csv`` in the
    current directory, so pandas can read them as a clean CSV table.

    ``header_lines`` defaults to 8, the banner size the original code
    assumed.
    """
    filtered_csv_file_path = name + ".csv"
    # Keep both files open together: the reader must stay open while the
    # filtered rows are being written out.
    with open(csv_file_path, "r", newline="") as csvfile, \
         open(filtered_csv_file_path, "w", newline="") as filtered_csvfile:
        reader = csv.reader(csvfile)
        writer = csv.writer(filtered_csvfile)
        for _ in range(header_lines):
            # Fixed: bare next(reader) raised StopIteration on files shorter
            # than the banner; the default makes short files yield no rows.
            next(reader, None)
        writer.writerows(reader)

# Strip the benchmark banner from each backend's raw output file,
# producing <backend>.csv next to the script.
for raw_path, backend in (
    ("./out_codegen.csv", "codegen"),
    ("./out_codegen_ngrad.csv", "codegen_ngrad"),
    ("./out_cpu.csv", "cpu"),
    ("./out_legacy.csv", "legacy"),
):
    parse_and_writer(raw_path, backend)

# Load the cleaned per-backend benchmark tables produced by parse_and_writer.
codegen_df = pd.read_csv("codegen.csv")
codegen_nograd_df = pd.read_csv("codegen_ngrad.csv")
legacy_df = pd.read_csv("legacy.csv")
cpu_df = pd.read_csv("cpu.csv")

# Plotting: one group of four bars (one per backend) for each benchmark name.
plt.figure(figsize=(10, 6))

benchmarks = codegen_df['name'].unique()
x = np.arange(len(benchmarks))

bar_width = 0.15
# (dataframe, legend label, color, offset of the bar centre within a group)
series = [
    (codegen_df, 'codegen', 'lightblue', -1.5 * bar_width),
    (codegen_nograd_df, 'codegen_nograd', 'navy', -0.5 * bar_width),
    (cpu_df, 'cpu', 'cyan', 0.5 * bar_width),
    (legacy_df, 'legacy', 'gray', 1.5 * bar_width),
]

for i, benchmark in enumerate(benchmarks):
    for df, label, color, offset in series:
        real_time = df.loc[df['name'] == benchmark, 'real_time']
        # Fixed: all bars now use align='center' with evenly spaced offsets;
        # the old mix of align='center' and align='edge' placed the four bars
        # of a group inconsistently.
        plt.bar(x[i] + offset, real_time, width=bar_width, align='center',
                color=color)

# Build the legend from proxy patches: plt.bar in the loop above would
# otherwise register one (duplicate) legend entry per benchmark.
legend_handles = [plt.Rectangle((0, 0), 1, 1, color=color)
                  for _, _, color, _ in series]
plt.legend(legend_handles, [label for _, label, _, _ in series])

plt.yscale('log')

plt.xlabel('Benchmark')
plt.ylabel('Time (milliseconds)')
plt.title('Comparison of Benchmarks for Different Evaluation Backends')
# Fixed: pass the benchmark names as tick labels; plt.xticks(x, rotation=90)
# left bare integer positions under the 'Benchmark' axis label.
plt.xticks(x, benchmarks, rotation=90)
plt.tight_layout()
plt.savefig('comparision_plot.jpg')  # NOTE(review): filename typo kept -- external tooling may expect it
plt.show()
27 changes: 27 additions & 0 deletions root/roofit/roofit/run_benchmarks.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
#!/bin/bash
#
# Driver script: runs the RooFit binned benchmarks once per evaluation
# backend, collects one CSV file per run, then generates a comparison plot.
#
# To make it executable: chmod +x run_benchmarks.sh
# Then run it with:      ./run_benchmarks.sh
# Run one benchmark command in the background and block until both the
# expected CSV file exists and the process has exited.
#   $1 - the full benchmark command line
#   $2 - path of the CSV file the command writes
run_benchmark() {
    echo "Running benchmark: $1"
    $1 &   # intentionally unquoted: $1 is a whole command line to word-split
    local pid=$!
    # Poll for the output file. Fixed: $2/$pid are now quoted (paths with
    # spaces), and the loop bails out if the benchmark dies before ever
    # creating the file -- the old loop would spin forever in that case.
    while [ ! -f "$2" ]; do
        if ! kill -0 "$pid" 2>/dev/null; then
            break
        fi
        sleep 1
    done
    wait "$pid"
    echo "CSV file generated: $2"
}

# Run the benchmark binary once per evaluation backend; each run writes its
# own CSV file (note the irregular codegen_no_grad -> out_codegen_ngrad.csv
# mapping, preserved from the original invocations).
while read -r backend outfile; do
    run_benchmark "./benchRooFitBinned -b ${backend} --benchmark_out_format=csv --benchmark_out=${outfile}" "${outfile}"
done <<'EOF'
codegen out_codegen.csv
codegen_no_grad out_codegen_ngrad.csv
legacy out_legacy.csv
cpu out_cpu.csv
EOF

# Build the comparison plot from the four CSV files.
python3 compare_benchmarks.py

0 comments on commit eb1b2fc

Please sign in to comment.