Skip to content

Commit

Permalink
Fix cudf::cast overflow for decimal64 to int32_t or smaller in …
Browse files Browse the repository at this point in the history
…certain cases (#7733)

@galipremsagar found an issue with `cudf::cast` for `decimal64`: when a value is not representable in `int32_t`, the cast operation would narrow to the target type too early and therefore overflow. This PR fixes that issue.

Resolves #7689

Authors:
  - Conor Hoekstra (@codereport)

Approvers:
  - Mike Wilson (@hyperbolic2346)
  - Ram (Ramakrishna Prabhu) (@rgsl888prabhu)

URL: #7733
  • Loading branch information
codereport authored Mar 29, 2021
1 parent ccc28d5 commit fe7ec85
Show file tree
Hide file tree
Showing 2 changed files with 69 additions and 9 deletions.
51 changes: 42 additions & 9 deletions cpp/include/cudf/fixed_point/fixed_point.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -218,23 +218,41 @@ class fixed_point {
using rep = Rep;

/**
* @brief Constructor that will perform shifting to store value appropriately
* @brief Constructor that will perform shifting to store value appropriately (from floating point
* types)
*
* @tparam T The type that you are constructing from (integral or floating)
* @tparam T The floating point type that you are constructing from
* @param value The value that will be constructed from
* @param scale The exponent that is applied to Rad to perform shifting
*/
template <typename T,
typename cuda::std::enable_if_t<is_supported_construction_value_type<T>() &&
typename cuda::std::enable_if_t<cuda::std::is_floating_point<T>() &&
is_supported_representation_type<Rep>()>* = nullptr>
CUDA_HOST_DEVICE_CALLABLE explicit fixed_point(T const& value, scale_type const& scale)
: _value{static_cast<Rep>(detail::shift<Rep, Rad>(value, scale))}, _scale{scale}
{
}

/**
 * @brief Constructor that will perform shifting to store value appropriately (from integral
 * types)
 *
 * @tparam T The integral type that you are constructing from
 * @param value The value that will be constructed from
 * @param scale The exponent that is applied to Rad to perform shifting
 */
template <typename T,
          typename cuda::std::enable_if_t<cuda::std::is_integral<T>() &&
                                          is_supported_representation_type<Rep>()>* = nullptr>
CUDA_HOST_DEVICE_CALLABLE explicit fixed_point(T const& value, scale_type const& scale)
  // `value` is cast to `Rep` *before* shifting so the shift happens in the
  // (possibly wider) `Rep` type — shifting in `T` first could overflow
  : _value{detail::shift<Rep, Rad>(static_cast<Rep>(value), scale)}, _scale{scale}
{
}

/**
* @brief Constructor that will not perform shifting (assumes value already shifted)
*
* @param s scaled_integer that contains scale and already shifted value
*/
Expand All @@ -260,18 +278,33 @@ class fixed_point {
/// @brief Default constructor — represents the value 0 with scale 0
fixed_point() : _value{0}, _scale{scale_type{0}} {}

/**
* @brief Explicit conversion operator
* @brief Explicit conversion operator for casting to floating point types
*
* @tparam U The type that is being explicitly converted to (integral or floating)
* @tparam U The floating point type that is being explicitly converted to
* @return The `fixed_point` number in base 10 (aka human readable format)
*/
template <typename U,
typename cuda::std::enable_if_t<is_supported_construction_value_type<U>()>* = nullptr>
CUDA_HOST_DEVICE_CALLABLE explicit constexpr operator U() const
typename cuda::std::enable_if_t<cuda::std::is_floating_point<U>::value>* = nullptr>
explicit constexpr operator U() const
{
return detail::shift<Rep, Rad>(static_cast<U>(_value), detail::negate(_scale));
}

/**
 * @brief Explicit conversion operator for casting to integral types
 *
 * @tparam U The integral type that is being explicitly converted to
 * @return The `fixed_point` number in base 10 (aka human readable format)
 */
template <typename U,
          typename cuda::std::enable_if_t<cuda::std::is_integral<U>::value>* = nullptr>
explicit constexpr operator U() const
{
  // Perform the un-scaling shift entirely in `Rep` and only then narrow to `U`.
  // Narrowing first can overflow (e.g. U = int32_t, Rep = int64_t, _value > 2 billion).
  auto const unscaled = detail::shift<Rep, Rad>(_value, detail::negate(_scale));
  return static_cast<U>(unscaled);
}

CUDA_HOST_DEVICE_CALLABLE operator scaled_integer<Rep>() const
{
return scaled_integer<Rep>{_value, _scale};
Expand Down
27 changes: 27 additions & 0 deletions cpp/tests/unary/cast_tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -537,6 +537,9 @@ inline auto make_fixed_point_data_type(int32_t scale)
return cudf::data_type{cudf::type_to_id<T>(), scale};
}

// Non-typed fixture for cast tests that pin a specific representation type
// (used by the TEST_F cases below instead of the typed-test suite)
struct FixedPointTestSingleType : public cudf::test::BaseFixture {
};

// Typed fixture: T is the fixed-point type each TYPED_TEST is instantiated with
template <typename T>
struct FixedPointTests : public cudf::test::BaseFixture {
};
Expand Down Expand Up @@ -592,6 +595,18 @@ TYPED_TEST(FixedPointTests, CastToInt32)
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view());
}

// Regression test (#7689): 7246212000 is not representable in int32_t, so the
// cast must perform the scale shift in the 64-bit rep *before* narrowing.
// 7246212000 * 10^-5 = 72462.12, which truncates to 72462.
TEST_F(FixedPointTestSingleType, CastDecimal64ToInt32)
{
using fp_wrapper = cudf::test::fixed_point_column_wrapper<int64_t>;
using fw_wrapper = cudf::test::fixed_width_column_wrapper<int32_t>;

auto const input = fp_wrapper{{7246212000}, numeric::scale_type{-5}};
auto const expected = fw_wrapper{72462};
auto const result = cudf::cast(input, make_data_type<int32_t>());

CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view());
}

TYPED_TEST(FixedPointTests, CastToIntLarge)
{
using namespace numeric;
Expand Down Expand Up @@ -659,6 +674,18 @@ TYPED_TEST(FixedPointTests, CastFromInt)
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view());
}

// Converse direction: -48938 scaled by 10^5 is -4893800000, which only fits in
// the 64-bit rep — the int32_t input must be widened to Rep before shifting.
TEST_F(FixedPointTestSingleType, CastInt32ToDecimal64)
{
using fp_wrapper = cudf::test::fixed_point_column_wrapper<int64_t>;
using fw_wrapper = cudf::test::fixed_width_column_wrapper<int32_t>;

auto const input = fw_wrapper{-48938};
auto const expected = fp_wrapper{{-4893800000LL}, numeric::scale_type{-5}};
auto const result = cudf::cast(input, make_fixed_point_data_type<numeric::decimal64>(-5));

CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view());
}

TYPED_TEST(FixedPointTests, CastFromIntLarge)
{
using namespace numeric;
Expand Down

0 comments on commit fe7ec85

Please sign in to comment.