Pre-commit formatting pass on cpp files. (#1244)
* Run pre-commit to format files. We were behind a bit.

* Update pre-commit config to 16.0.1 to match cudf. Re-ran formatting.

* Reformat of code via pre-commit

Signed-off-by: db <[email protected]>

---------

Signed-off-by: db <[email protected]>
nvdbaranec authored Jul 5, 2023
1 parent c1c9d03 commit 3b3ced7
Showing 25 changed files with 2,825 additions and 2,207 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -2,7 +2,7 @@
 
 repos:
   - repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: v14.0.6
+    rev: v16.0.1
     hooks:
       - id: clang-format
         files: \.(cu|cuh|h|hpp|cpp|inl)$
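The bumped clang-format hook above is what produced every C++ change in this commit. Assuming a standard pre-commit setup, the same formatting pass can be reproduced locally with:

    pre-commit run clang-format --all-files

(or with pre-commit run --all-files to run every configured hook).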
16 changes: 9 additions & 7 deletions src/main/cpp/benchmarks/cast_string_to_float.cpp
@@ -27,16 +27,18 @@
 void string_to_float(nvbench::state& state)
 {
   cudf::size_type const n_rows{(cudf::size_type)state.get_int64("num_rows")};
-  auto const float_tbl = create_random_table({cudf::type_id::FLOAT32}, row_count{n_rows});
-  auto const float_col = float_tbl->get_column(0);
+  auto const float_tbl  = create_random_table({cudf::type_id::FLOAT32}, row_count{n_rows});
+  auto const float_col  = float_tbl->get_column(0);
   auto const string_col = cudf::strings::from_floats(float_col.view());
 
-  state.exec(nvbench::exec_tag::sync,
-             [&](nvbench::launch& launch) {
-               auto rows = spark_rapids_jni::string_to_float(cudf::data_type{cudf::type_id::FLOAT32}, string_col->view(), false, cudf::get_default_stream());
+  state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
+    auto rows = spark_rapids_jni::string_to_float(cudf::data_type{cudf::type_id::FLOAT32},
+                                                  string_col->view(),
+                                                  false,
+                                                  cudf::get_default_stream());
   });
 }
 
 NVBENCH_BENCH(string_to_float)
-    .set_name("Strings to Float Cast")
-    .add_int64_axis("num_rows", {1 * 1024 * 1024, 100 * 1024 * 1024});
+  .set_name("Strings to Float Cast")
+  .add_int64_axis("num_rows", {1 * 1024 * 1024, 100 * 1024 * 1024});
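For readers unfamiliar with the harness used in these benchmark files: each one is a function taking nvbench::state plus an NVBENCH_BENCH registration chain. A minimal sketch, assuming only the nvbench constructs that appear in this diff (the benchmark name and body are hypothetical placeholders):

#include <nvbench/nvbench.cuh>

// Hypothetical benchmark illustrating the pattern used in the files above.
static void example_benchmark(nvbench::state& state)
{
  // Axis values declared in the NVBENCH_BENCH chain below arrive through the state.
  auto const n_rows = state.get_int64("num_rows");

  // exec_tag::sync tells nvbench the lambda blocks until the timed work completes.
  state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
    // ... work to be timed goes here ...
  });
}

NVBENCH_BENCH(example_benchmark)
  .set_name("Example")
  .add_int64_axis("num_rows", {1 * 1024 * 1024, 100 * 1024 * 1024});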
3 changes: 1 addition & 2 deletions src/main/cpp/benchmarks/common/generate_input.hpp
@@ -183,8 +183,7 @@ struct distribution_params<T, std::enable_if_t<std::is_same_v<T, cudf::struct_vi
 
 // Present for compilation only. To be implemented once reader/writers support the fixed width type.
 template <typename T>
-struct distribution_params<T, std::enable_if_t<cudf::is_fixed_point<T>()>> {
-};
+struct distribution_params<T, std::enable_if_t<cudf::is_fixed_point<T>()>> {};
 
 /**
  * @brief Returns a vector of types, corresponding to the input type or a type group.
68 changes: 33 additions & 35 deletions src/main/cpp/benchmarks/row_conversion.cpp
@@ -28,15 +28,15 @@ void fixed_width(nvbench::state& state)
 {
   cudf::size_type const n_rows{(cudf::size_type)state.get_int64("num_rows")};
   auto const direction = state.get_string("direction");
-  auto const table = create_random_table(cycle_dtypes({cudf::type_id::INT8,
-                                                       cudf::type_id::INT32,
-                                                       cudf::type_id::INT16,
-                                                       cudf::type_id::INT64,
-                                                       cudf::type_id::INT32,
-                                                       cudf::type_id::BOOL8,
-                                                       cudf::type_id::UINT16,
-                                                       cudf::type_id::UINT8,
-                                                       cudf::type_id::UINT64},
+  auto const table     = create_random_table(cycle_dtypes({cudf::type_id::INT8,
+                                                           cudf::type_id::INT32,
+                                                           cudf::type_id::INT16,
+                                                           cudf::type_id::INT64,
+                                                           cudf::type_id::INT32,
+                                                           cudf::type_id::BOOL8,
+                                                           cudf::type_id::UINT16,
+                                                           cudf::type_id::UINT8,
+                                                           cudf::type_id::UINT64},
                                                           212),
                                             row_count{n_rows});
 
@@ -50,16 +50,15 @@
 
   auto rows = spark_rapids_jni::convert_to_rows_fixed_width_optimized(table->view());
 
-  state.exec(nvbench::exec_tag::sync,
-             [&](nvbench::launch& launch) {
-               if (direction == "to row") {
-                 auto _rows = spark_rapids_jni::convert_to_rows_fixed_width_optimized(table->view());
-               } else {
-                 for (auto const &r : rows) {
-                   cudf::lists_column_view const l(r->view());
-                   auto out = spark_rapids_jni::convert_from_rows_fixed_width_optimized(l, schema);
-                 }
-               }
+  state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
+    if (direction == "to row") {
+      auto _rows = spark_rapids_jni::convert_to_rows_fixed_width_optimized(table->view());
+    } else {
+      for (auto const& r : rows) {
+        cudf::lists_column_view const l(r->view());
+        auto out = spark_rapids_jni::convert_from_rows_fixed_width_optimized(l, schema);
+      }
+    }
   });
 
   state.add_buffer_size(n_rows, "trc", "Total Rows");
@@ -69,7 +68,7 @@
 static void variable_or_fixed_width(nvbench::state& state)
 {
   cudf::size_type const n_rows{(cudf::size_type)state.get_int64("num_rows")};
-  auto const direction = state.get_string("direction");
+  auto const direction       = state.get_string("direction");
   auto const include_strings = state.get_string("strings");
 
   if (n_rows > 1 * 1024 * 1024 && include_strings == "include strings") {
@@ -120,30 +119,29 @@ static void variable_or_fixed_width(nvbench::state& state)
 
   auto rows = spark_rapids_jni::convert_to_rows(table->view());
 
-  state.exec(nvbench::exec_tag::sync,
-             [&](nvbench::launch& launch) {
-               auto new_rows = spark_rapids_jni::convert_to_rows(table->view());
-               if (direction == "to row") {
-                 auto _rows = spark_rapids_jni::convert_to_rows(table->view());
-               } else {
-                 for (auto const &r : rows) {
-                   cudf::lists_column_view const l(r->view());
-                   auto out = spark_rapids_jni::convert_from_rows(l, schema);
-                 }
-               }
+  state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
+    auto new_rows = spark_rapids_jni::convert_to_rows(table->view());
+    if (direction == "to row") {
+      auto _rows = spark_rapids_jni::convert_to_rows(table->view());
+    } else {
+      for (auto const& r : rows) {
+        cudf::lists_column_view const l(r->view());
+        auto out = spark_rapids_jni::convert_from_rows(l, schema);
+      }
+    }
   });
 
   state.add_buffer_size(n_rows, "trc", "Total Rows");
   state.add_global_memory_reads<int64_t>(bytes_per_row * table->num_rows());
 }
 
 NVBENCH_BENCH(fixed_width)
-    .set_name("Fixed Width Only")
-    .add_int64_axis("num_rows", {1 * 1024 * 1024, 4 * 1024 * 1024})
-    .add_string_axis("direction", {"to row", "from row"});
+  .set_name("Fixed Width Only")
+  .add_int64_axis("num_rows", {1 * 1024 * 1024, 4 * 1024 * 1024})
+  .add_string_axis("direction", {"to row", "from row"});
 
 NVBENCH_BENCH(variable_or_fixed_width)
-    .set_name("Fixed or Variable Width")
-    .add_int64_axis("num_rows", {1 * 1024 * 1024, 4 * 1024 * 1024})
-    .add_string_axis("direction", {"to row", "from row"})
-    .add_string_axis("strings", {"include strings", "no strings"});
+  .set_name("Fixed or Variable Width")
+  .add_int64_axis("num_rows", {1 * 1024 * 1024, 4 * 1024 * 1024})
+  .add_string_axis("direction", {"to row", "from row"})
+  .add_string_axis("strings", {"include strings", "no strings"});
91 changes: 44 additions & 47 deletions src/main/cpp/src/DecimalUtilsJni.cpp
@@ -14,98 +14,95 @@
  * limitations under the License.
  */
 
-#include "decimal_utils.hpp"
 #include "cudf_jni_apis.hpp"
+#include "decimal_utils.hpp"
 
 extern "C" {
 
-JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_multiply128(JNIEnv *env, jclass,
-                                                                                       jlong j_view_a,
-                                                                                       jlong j_view_b,
-                                                                                       jint j_product_scale) {
+JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_multiply128(
+  JNIEnv* env, jclass, jlong j_view_a, jlong j_view_b, jint j_product_scale)
+{
   JNI_NULL_CHECK(env, j_view_a, "column is null", 0);
   JNI_NULL_CHECK(env, j_view_b, "column is null", 0);
   try {
     cudf::jni::auto_set_device(env);
-    auto view_a = reinterpret_cast<cudf::column_view const *>(j_view_a);
-    auto view_b = reinterpret_cast<cudf::column_view const *>(j_view_b);
-    auto scale = static_cast<int>(j_product_scale);
-    return cudf::jni::convert_table_for_return(env, cudf::jni::multiply_decimal128(*view_a, *view_b,
-                                                                                   scale));
+    auto view_a = reinterpret_cast<cudf::column_view const*>(j_view_a);
+    auto view_b = reinterpret_cast<cudf::column_view const*>(j_view_b);
+    auto scale  = static_cast<int>(j_product_scale);
+    return cudf::jni::convert_table_for_return(
+      env, cudf::jni::multiply_decimal128(*view_a, *view_b, scale));
   }
   CATCH_STD(env, 0);
 }
 
-JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_divide128(JNIEnv *env, jclass,
-                                                                                     jlong j_view_a,
-                                                                                     jlong j_view_b,
-                                                                                     jint j_quotient_scale,
-                                                                                     jboolean j_is_int_div) {
+JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_divide128(
+  JNIEnv* env, jclass, jlong j_view_a, jlong j_view_b, jint j_quotient_scale, jboolean j_is_int_div)
+{
   JNI_NULL_CHECK(env, j_view_a, "column is null", 0);
   JNI_NULL_CHECK(env, j_view_b, "column is null", 0);
   try {
     cudf::jni::auto_set_device(env);
-    auto view_a = reinterpret_cast<cudf::column_view const *>(j_view_a);
-    auto view_b = reinterpret_cast<cudf::column_view const *>(j_view_b);
-    auto scale = static_cast<int>(j_quotient_scale);
+    auto view_a          = reinterpret_cast<cudf::column_view const*>(j_view_a);
+    auto view_b          = reinterpret_cast<cudf::column_view const*>(j_view_b);
+    auto scale           = static_cast<int>(j_quotient_scale);
     auto is_int_division = static_cast<bool>(j_is_int_div);
     if (is_int_division) {
-      return cudf::jni::convert_table_for_return(env, cudf::jni::integer_divide_decimal128(*view_a, *view_b, scale));
+      return cudf::jni::convert_table_for_return(
+        env, cudf::jni::integer_divide_decimal128(*view_a, *view_b, scale));
     } else {
-      return cudf::jni::convert_table_for_return(env, cudf::jni::divide_decimal128(*view_a, *view_b, scale));
+      return cudf::jni::convert_table_for_return(
+        env, cudf::jni::divide_decimal128(*view_a, *view_b, scale));
     }
   }
   CATCH_STD(env, 0);
 }
 
-JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_remainder128(JNIEnv *env, jclass,
-                                                                                        jlong j_view_a,
-                                                                                        jlong j_view_b,
-                                                                                        jint j_remainder_scale) {
+JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_remainder128(
+  JNIEnv* env, jclass, jlong j_view_a, jlong j_view_b, jint j_remainder_scale)
+{
   JNI_NULL_CHECK(env, j_view_a, "column is null", 0);
   JNI_NULL_CHECK(env, j_view_b, "column is null", 0);
   try {
     cudf::jni::auto_set_device(env);
-    auto view_a = reinterpret_cast<cudf::column_view const *>(j_view_a);
-    auto view_b = reinterpret_cast<cudf::column_view const *>(j_view_b);
-    auto scale = static_cast<int>(j_remainder_scale);
-    return cudf::jni::convert_table_for_return(env, cudf::jni::remainder_decimal128(*view_a, *view_b, scale));
+    auto view_a = reinterpret_cast<cudf::column_view const*>(j_view_a);
+    auto view_b = reinterpret_cast<cudf::column_view const*>(j_view_b);
+    auto scale  = static_cast<int>(j_remainder_scale);
+    return cudf::jni::convert_table_for_return(
+      env, cudf::jni::remainder_decimal128(*view_a, *view_b, scale));
  }
   CATCH_STD(env, 0);
 }
 
-JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_add128(JNIEnv *env, jclass,
-                                                                                  jlong j_view_a,
-                                                                                  jlong j_view_b,
-                                                                                  jint j_target_scale) {
+JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_add128(
+  JNIEnv* env, jclass, jlong j_view_a, jlong j_view_b, jint j_target_scale)
+{
   JNI_NULL_CHECK(env, j_view_a, "column is null", 0);
   JNI_NULL_CHECK(env, j_view_b, "column is null", 0);
   try {
     cudf::jni::auto_set_device(env);
-    auto const view_a= reinterpret_cast<cudf::column_view const *>(j_view_a);
-    auto const view_b= reinterpret_cast<cudf::column_view const *>(j_view_b);
-    auto const scale = static_cast<int>(j_target_scale);
-    return cudf::jni::convert_table_for_return(env, cudf::jni::add_decimal128(*view_a, *view_b,
-                                                                              scale));
+    auto const view_a = reinterpret_cast<cudf::column_view const*>(j_view_a);
+    auto const view_b = reinterpret_cast<cudf::column_view const*>(j_view_b);
+    auto const scale  = static_cast<int>(j_target_scale);
+    return cudf::jni::convert_table_for_return(env,
+                                               cudf::jni::add_decimal128(*view_a, *view_b, scale));
   }
   CATCH_STD(env, 0);
 }
 
-JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_subtract128(JNIEnv *env, jclass,
-                                                                                       jlong j_view_a,
-                                                                                       jlong j_view_b,
-                                                                                       jint j_target_scale) {
+JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_subtract128(
+  JNIEnv* env, jclass, jlong j_view_a, jlong j_view_b, jint j_target_scale)
+{
   JNI_NULL_CHECK(env, j_view_a, "column is null", 0);
   JNI_NULL_CHECK(env, j_view_b, "column is null", 0);
   try {
     cudf::jni::auto_set_device(env);
-    auto const view_a = reinterpret_cast<cudf::column_view const *>(j_view_a);
-    auto const view_b = reinterpret_cast<cudf::column_view const *>(j_view_b);
-    auto const scale = static_cast<int>(j_target_scale);
-    return cudf::jni::convert_table_for_return(env, cudf::jni::sub_decimal128(*view_a, *view_b,
-                                                                              scale));
+    auto const view_a = reinterpret_cast<cudf::column_view const*>(j_view_a);
+    auto const view_b = reinterpret_cast<cudf::column_view const*>(j_view_b);
+    auto const scale  = static_cast<int>(j_target_scale);
+    return cudf::jni::convert_table_for_return(env,
+                                               cudf::jni::sub_decimal128(*view_a, *view_b, scale));
   }
   CATCH_STD(env, 0);
 }
 
-} // extern "C"
+}  // extern "C"
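All five entry points above share one shape, which is easier to see with the per-operation details stripped out. A minimal sketch of that pattern, assuming only the helpers visible in this diff (JNI_NULL_CHECK, cudf::jni::auto_set_device, cudf::jni::convert_table_for_return, CATCH_STD); the op128 name and op_decimal128 call are hypothetical stand-ins, not part of this commit:

// Hypothetical composite of the five functions above; op_decimal128 stands in
// for multiply/integer_divide/divide/remainder/add/sub_decimal128.
JNIEXPORT jlongArray JNICALL Java_com_nvidia_spark_rapids_jni_DecimalUtils_op128(
  JNIEnv* env, jclass, jlong j_view_a, jlong j_view_b, jint j_scale)
{
  // Fail fast on null handles before dereferencing anything.
  JNI_NULL_CHECK(env, j_view_a, "column is null", 0);
  JNI_NULL_CHECK(env, j_view_b, "column is null", 0);
  try {
    cudf::jni::auto_set_device(env);  // bind this thread to the caller's GPU
    auto const view_a = reinterpret_cast<cudf::column_view const*>(j_view_a);
    auto const view_b = reinterpret_cast<cudf::column_view const*>(j_view_b);
    auto const scale  = static_cast<int>(j_scale);
    // Wrap the resulting table as an array of jlong column handles for Java.
    return cudf::jni::convert_table_for_return(env,
                                               cudf::jni::op_decimal128(*view_a, *view_b, scale));
  }
  CATCH_STD(env, 0);  // translate C++ exceptions into Java exceptions
}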
5 changes: 3 additions & 2 deletions src/main/cpp/src/MapUtilsJni.cpp
@@ -22,12 +22,13 @@
 extern "C" {
 
 JNIEXPORT jlong JNICALL Java_com_nvidia_spark_rapids_jni_MapUtils_extractRawMapFromJsonString(
-    JNIEnv *env, jclass, jlong input_handle) {
+  JNIEnv* env, jclass, jlong input_handle)
+{
   JNI_NULL_CHECK(env, input_handle, "json_column_handle is null", 0);
 
   try {
     cudf::jni::auto_set_device(env);
-    auto const input = reinterpret_cast<cudf::column_view const *>(input_handle);
+    auto const input = reinterpret_cast<cudf::column_view const*>(input_handle);
     return cudf::jni::ptr_as_jlong(spark_rapids_jni::from_json(*input).release());
   }
   CATCH_STD(env, 0);