Skip to content

Commit

Permalink
Added isnan and isinf operators and tests to OperatorTests/OperatorFuncs
Browse files Browse the repository at this point in the history
add docs input for operators
  • Loading branch information
nvjonwong committed Jan 10, 2024
1 parent 389ee69 commit 2608f09
Show file tree
Hide file tree
Showing 5 changed files with 132 additions and 0 deletions.
17 changes: 17 additions & 0 deletions docs_input/api/logic/truth/isinf.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
.. _inf_func:

isinf
=====

Returns a boolean true for each element of the input operator whose value is infinite

.. doxygenfunction:: isinf(const InType &)

Examples
~~~~~~~~

.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
:language: cpp
:start-after: example-begin inf-test-1
:end-before: example-end inf-test-1
:dedent:
17 changes: 17 additions & 0 deletions docs_input/api/logic/truth/isnan.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
.. _isnan_func:

isnan
=====

Returns a boolean true for each element of the input operator whose value is NaN

.. doxygenfunction:: isnan(const InType &)

Examples
~~~~~~~~

.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
:language: cpp
:start-after: example-begin nan-test-1
:end-before: example-end nan-test-1
:dedent:
54 changes: 54 additions & 0 deletions include/matx/operators/scalar_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -662,6 +662,60 @@ template <typename T1> struct NotF {
};
template <typename T1> using NotOp = UnOp<T1, NotF<T1>>;

/**
 * @brief Element-wise NaN test.
 *
 * Promotes T via value_promote_t to decide whether a NaN test is meaningful;
 * non-floating-point inputs can never be NaN and short-circuit to false at
 * compile time. For complex inputs, the value is NaN if either the real or
 * imaginary component is NaN.
 *
 * @param v1 Input value
 * @return true if v1 (or either complex component) is NaN, false otherwise
 */
template <typename T>
static __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ auto _internal_isnan(T v1)
{
  using conversionType = typename matx::detail::value_promote_t<T>;
  if constexpr (!std::is_floating_point_v<conversionType>) {
    return false;
  }
  else {
    // The "else" keeps this branch from being instantiated for
    // non-floating-point T; the original fell through the constexpr-if,
    // compiling this code for every T and leaving an unreachable
    // trailing "return false;".
    using castType = matx::detail::matx_convert_complex_type<T>;
    if constexpr (is_complex_v<T>) {
      // NaN if either component is NaN
      return cuda::std::isnan(static_cast<typename castType::value_type>(v1.real())) ||
             cuda::std::isnan(static_cast<typename castType::value_type>(v1.imag()));
    }
    else {
      return cuda::std::isnan(static_cast<castType>(v1));
    }
  }
}
/**
 * @brief Functor exposing the element-wise NaN test to the unary operator
 * machinery via UnOp.
 */
template <typename T>
struct IsNan {
  // Name used when printing/stringifying the operator expression
  static __MATX_INLINE__ std::string str() { return "isnan"; }

  // Apply the NaN test to a single element
  static __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ auto op(T in)
  {
    return _internal_isnan(in);
  }
};
template <typename T> using IsNanOp = UnOp<T, IsNan<T>>;

/**
 * @brief Element-wise infinity test.
 *
 * Promotes T via value_promote_t to decide whether an infinity test is
 * meaningful; non-floating-point inputs can never be infinite and
 * short-circuit to false at compile time. For complex inputs, the value is
 * infinite if either the real or imaginary component is infinite.
 *
 * @param v1 Input value
 * @return true if v1 (or either complex component) is infinite, false otherwise
 */
template <typename T>
static __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ auto _internal_isinf(T v1)
{
  using conversionType = typename matx::detail::value_promote_t<T>;
  if constexpr (!std::is_floating_point_v<conversionType>) {
    return false;
  }
  else {
    // The "else" keeps this branch from being instantiated for
    // non-floating-point T; the original fell through the constexpr-if,
    // compiling this code for every T and leaving an unreachable
    // trailing "return false;".
    using castType = matx::detail::matx_convert_complex_type<T>;
    if constexpr (is_complex_v<T>) {
      // Infinite if either component is infinite
      return cuda::std::isinf(static_cast<typename castType::value_type>(v1.real())) ||
             cuda::std::isinf(static_cast<typename castType::value_type>(v1.imag()));
    }
    else {
      return cuda::std::isinf(static_cast<castType>(v1));
    }
  }
}
/**
 * @brief Functor exposing the element-wise infinity test to the unary
 * operator machinery via UnOp.
 */
template <typename T>
struct IsInf {
  // Name used when printing/stringifying the operator expression
  static __MATX_INLINE__ std::string str() { return "isinf"; }

  // Apply the infinity test to a single element
  static __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ auto op(T in)
  {
    return _internal_isinf(in);
  }
};
template <typename T> using IsInfOp = UnOp<T, IsInf<T>>;

template <typename T1, typename T2> struct AndF {
static std::string str(const std::string &str1, const std::string &str2) { return "(" + str1 + "&" + str2 + ")"; }

Expand Down
3 changes: 3 additions & 0 deletions include/matx/operators/unary_operators.h
Original file line number Diff line number Diff line change
Expand Up @@ -403,4 +403,7 @@ namespace matx
DEFINE_UNARY_OP(operator-, detail::SubNegOp );
#endif

DEFINE_UNARY_OP(isnan, detail::IsNanOp);
DEFINE_UNARY_OP(isinf, detail::IsInfOp);

} // end namespace matx
41 changes: 41 additions & 0 deletions test/00_operators/OperatorTests.cu
Original file line number Diff line number Diff line change
Expand Up @@ -1846,6 +1846,47 @@ TYPED_TEST(OperatorTestsNumericAllExecs, OperatorFuncs)
res = c * c * (c + c) / c + three;
EXPECT_TRUE(MatXUtils::MatXTypeCompare(tov0(), res, 0.07));

auto nan = make_tensor<TestType>({});
using conversionType = typename matx::detail::value_promote_t<TestType>;
if constexpr(matx::is_complex_v<TestType>) {
nan() = TestType(std::numeric_limits<conversionType>::quiet_NaN());
} else {
nan() = std::numeric_limits<conversionType>::quiet_NaN();
}
auto tob = make_tensor<bool>({});
// example-begin nan-test-1
(tob = matx::isnan(nan)).run();
// example-end nan-test-1
cudaDeviceSynchronize();
EXPECT_TRUE(MatXUtils::MatXTypeCompare(tob(), std::is_floating_point_v<conversionType> ? true : false));

auto notnanorinf = make_tensor<TestType>({});
if constexpr(matx::is_complex_v<TestType>) {
notnanorinf() = TestType(0);
} else {
notnanorinf() = 0;
}
(tob = matx::isnan(notnanorinf)).run();
cudaDeviceSynchronize();
EXPECT_TRUE(MatXUtils::MatXTypeCompare(tob(), false));

auto inf = make_tensor<TestType>({});
using conversionType = typename matx::detail::value_promote_t<TestType>;
if constexpr(matx::is_complex_v<TestType>) {
inf() = TestType(std::numeric_limits<conversionType>::infinity());
} else {
inf() = std::numeric_limits<conversionType>::infinity();
}
// example-begin inf-test-1
(tob = matx::isinf(inf)).run();
// example-end inf-test-1
cudaDeviceSynchronize();
EXPECT_TRUE(MatXUtils::MatXTypeCompare(tob(), std::is_floating_point_v<conversionType> ? true : false));

(tob = matx::isinf(notnanorinf)).run();
cudaDeviceSynchronize();
EXPECT_TRUE(MatXUtils::MatXTypeCompare(tob(), false));

MATX_EXIT_HANDLER();
}

Expand Down

0 comments on commit 2608f09

Please sign in to comment.