Skip to content

Commit

Permalink
Added frexp() operator
Browse files Browse the repository at this point in the history
  • Loading branch information
cliffburdick committed Apr 12, 2024
1 parent f402035 commit 4039738
Show file tree
Hide file tree
Showing 6 changed files with 313 additions and 17 deletions.
18 changes: 18 additions & 0 deletions docs_input/api/math/misc/frexp.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
.. _frexp_func:

frexp
=====

Return the normalized fraction and exponent parts of a floating point number

.. doxygenfunction:: frexp(const OpA &a)

Examples
~~~~~~~~

.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
:language: cpp
:start-after: example-begin frexp-test-1
:end-before: example-end frexp-test-1
:dedent:

18 changes: 18 additions & 0 deletions docs_input/api/math/misc/frexpc.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
.. _frexpc_func:

frexpc
======

Return the normalized fraction and exponent parts of a complex floating point number

.. doxygenfunction:: frexpc(const OpA &a)

Examples
~~~~~~~~

.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
:language: cpp
:start-after: example-begin frexpc-test-1
:end-before: example-end frexpc-test-1
:dedent:

35 changes: 18 additions & 17 deletions include/matx/generators/random.h
Original file line number Diff line number Diff line change
Expand Up @@ -379,15 +379,16 @@ template <typename T, int RANK> class randomTensorView_t {
template <typename T, typename ShapeType>
class RandomOp : public BaseOp<RandomOp<T, ShapeType>> {
private:
using inner_t = typename inner_op_type_t<T>::type;
static constexpr int RANK = std::tuple_size<ShapeType>{};
Distribution_t dist_;
std::array<index_t, RANK> shape_;
std::array<index_t, RANK> strides_;
index_t total_size_;
curandStatePhilox4_32_10_t *states_;
uint64_t seed_;
T alpha_;
T beta_;
inner_t alpha_;
inner_t beta_;
bool init_ = false;
bool device_;

Expand All @@ -405,7 +406,7 @@ template <typename T, int RANK> class randomTensorView_t {
// Shapeless constructor to be allocated at run invocation
RandomOp() = delete;

inline RandomOp(ShapeType &&s, Distribution_t dist, uint64_t seed, T alpha, T beta) :
inline RandomOp(ShapeType &&s, Distribution_t dist, uint64_t seed, inner_t alpha, inner_t beta) :
dist_(dist), seed_(seed), alpha_(alpha), beta_(beta)
{
total_size_ = std::accumulate(s.begin(), s.end(), 1, std::multiplies<index_t>());
Expand Down Expand Up @@ -514,15 +515,15 @@ template <typename T, int RANK> class randomTensorView_t {
else if constexpr (std::is_same_v<T, double>) {
curandGenerateUniformDouble(gen_, &val, 1);
}
if constexpr (std::is_same_v<T, cuda::std::complex<float>>) {
else if constexpr (std::is_same_v<T, cuda::std::complex<float>>) {
float *tmp = reinterpret_cast<float *>(&val);
curandGenerateUniform(gen_, &val[0], 1);
curandGenerateUniform(gen_, &val[1], 1);
curandGenerateUniform(gen_, &tmp[0], 1);
curandGenerateUniform(gen_, &tmp[1], 1);
}
if constexpr (std::is_same_v<T, cuda::std::complex<double>>) {
else if constexpr (std::is_same_v<T, cuda::std::complex<double>>) {
double *tmp = reinterpret_cast<double *>(&val);
curandGenerateUniformDouble(gen_, &val[0], 1);
curandGenerateUniformDouble(gen_, &val[1], 1);
curandGenerateUniformDouble(gen_, &tmp[0], 1);
curandGenerateUniformDouble(gen_, &tmp[1], 1);
}

val = alpha_ * val + beta_;
Expand All @@ -534,15 +535,15 @@ template <typename T, int RANK> class randomTensorView_t {
else if constexpr (std::is_same_v<T, double>) {
curandGenerateNormalDouble(gen_, &val, 1, beta_, alpha_);
}
if constexpr (std::is_same_v<T, cuda::std::complex<float>>) {
else if constexpr (std::is_same_v<T, cuda::std::complex<float>>) {
float *tmp = reinterpret_cast<float *>(&val);
curandGenerateNormal(gen_, &val[0], 1, beta_, alpha_);
curandGenerateNormal(gen_, &val[1], 1, beta_, alpha_);
curandGenerateNormal(gen_, &tmp[0], 1, beta_, alpha_);
curandGenerateNormal(gen_, &tmp[1], 1, beta_, alpha_);
}
if constexpr (std::is_same_v<T, cuda::std::complex<double>>) {
else if constexpr (std::is_same_v<T, cuda::std::complex<double>>) {
double *tmp = reinterpret_cast<double *>(&val);
curandGenerateNormalDouble(gen_, &val[0], 1, beta_, alpha_);
curandGenerateNormalDouble(gen_, &val[1], 1, beta_, alpha_);
curandGenerateNormalDouble(gen_, &tmp[0], 1, beta_, alpha_);
curandGenerateNormalDouble(gen_, &tmp[1], 1, beta_, alpha_);
}
}
#endif
Expand Down Expand Up @@ -574,7 +575,7 @@ template <typename T, int RANK> class randomTensorView_t {
*/
template <typename T, typename ShapeType,
std::enable_if_t<!std::is_array_v<remove_cvref_t<ShapeType>>, bool> = true>
inline auto random(ShapeType &&s, Distribution_t dist, uint64_t seed = 0, T alpha = 1, T beta = 0)
inline auto random(ShapeType &&s, Distribution_t dist, uint64_t seed = 0, typename inner_op_type_t<T>::type alpha = 1, typename inner_op_type_t<T>::type beta = 0)
{
using shape_strip_t = remove_cvref_t<ShapeType>;
return detail::RandomOp<T, shape_strip_t>(std::forward<shape_strip_t>(s), dist, seed, alpha, beta);
Expand All @@ -593,7 +594,7 @@ template <typename T, int RANK> class randomTensorView_t {
* @return Random number operator
*/
template <typename T, int RANK>
inline auto random(const index_t (&s)[RANK], Distribution_t dist, uint64_t seed = 0, T alpha = 1, T beta = 0)
inline auto random(const index_t (&s)[RANK], Distribution_t dist, uint64_t seed = 0, typename inner_op_type_t<T>::type alpha = 1, typename inner_op_type_t<T>::type beta = 0)
{
auto sarray = detail::to_array(s);
return random<T, decltype(sarray)>(std::move(sarray), dist, seed, alpha, beta);
Expand Down
164 changes: 164 additions & 0 deletions include/matx/operators/frexp.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,164 @@
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////

#pragma once


#include "matx/core/type_utils.h"
#include "matx/operators/base_operator.h"
#include "matx/transforms/solver.h"

namespace matx {

namespace detail {
/**
 * Element-wise operator returning one component of frexp() applied to the
 * input operator `a_`.
 *
 * WHICH selects the component produced at each index:
 *   0 - fraction of the real part (or of the value itself for real inputs)
 *   1 - exponent of the real part (or of the value itself for real inputs)
 *   2 - fraction of the imaginary part (complex inputs only)
 *   3 - exponent of the imaginary part (complex inputs only)
 */
template<typename OpA, int WHICH>
class FrexpOp : public BaseOp<FrexpOp<OpA, WHICH>>
{
  private:
    OpA a_;

  public:
    using matxop = bool;
    using scalar_type = typename OpA::scalar_type;

    __MATX_INLINE__ std::string str() const { return "frexp()"; }

    __MATX_INLINE__ FrexpOp(OpA a) : a_(a) {
      // Reject invalid selectors up front; without these asserts an invalid
      // WHICH makes operator() fall through every `if constexpr` branch and
      // return nothing from a non-void function.
      static_assert(WHICH >= 0 && WHICH <= 3,
          "frexp() selector must be 0 (fraction), 1 (exponent), 2 (imag fraction), or 3 (imag exponent)");
      static_assert(is_cuda_complex_v<scalar_type> || WHICH < 2,
          "imaginary-part selectors (WHICH >= 2) require a complex input");
      static_assert(std::is_floating_point_v<scalar_type> ||
              is_cuda_complex_v<scalar_type>, "frexp() must take a floating point input");
    }

    /**
     * Evaluate the selected frexp component at the given indices.
     *
     * Returns the fractional part (same floating point type as the input's
     * inner type) for WHICH 0/2, or the integer exponent for WHICH 1/3.
     */
    template <typename... Is>
    __MATX_INLINE__ __MATX_DEVICE__ __MATX_HOST__ auto operator()(Is... indices) const
    {
      [[maybe_unused]] int rexp;
      if constexpr (is_cuda_complex_v<scalar_type>) {
        if constexpr (std::is_same_v<float, typename scalar_type::value_type>) {
          // Single-precision complex: use frexpf on the requested component
          if constexpr (WHICH == 0) { // real fractional
            const auto frac = cuda::std::frexpf(a_(indices...).real(), &rexp);
            return frac;
          } else if constexpr (WHICH == 1) { // real exponent
            [[maybe_unused]] const auto frac = cuda::std::frexpf(a_(indices...).real(), &rexp);
            return rexp;
          } else if constexpr (WHICH == 2) { // imag fractional
            const auto frac = cuda::std::frexpf(a_(indices...).imag(), &rexp);
            return frac;
          } else if constexpr (WHICH == 3) { // imag exponent
            [[maybe_unused]] const auto frac = cuda::std::frexpf(a_(indices...).imag(), &rexp);
            return rexp;
          }
        }
        else {
          // Double-precision complex
          if constexpr (WHICH == 0) { // real fractional
            const auto frac = cuda::std::frexp(a_(indices...).real(), &rexp);
            return frac;
          } else if constexpr (WHICH == 1) { // real exponent
            [[maybe_unused]] const auto frac = cuda::std::frexp(a_(indices...).real(), &rexp);
            return rexp;
          } else if constexpr (WHICH == 2) { // imag fractional
            const auto frac = cuda::std::frexp(a_(indices...).imag(), &rexp);
            return frac;
          } else if constexpr (WHICH == 3) { // imag exponent
            [[maybe_unused]] const auto frac = cuda::std::frexp(a_(indices...).imag(), &rexp);
            return rexp;
          }
        }
      }
      else {
        // Real-valued inputs: only the fraction/exponent selectors are valid
        // (enforced by the constructor static_asserts)
        if constexpr (std::is_same_v<float, scalar_type>) {
          [[maybe_unused]] const float frac = cuda::std::frexpf(a_(indices...), &rexp);
          if constexpr (WHICH == 0) { // fractional
            return frac;
          } else if constexpr (WHICH == 1) { // exponent
            return rexp;
          }
        }
        else {
          [[maybe_unused]] const double frac = cuda::std::frexp(a_(indices...), &rexp);
          if constexpr (WHICH == 0) { // fractional
            return frac;
          } else if constexpr (WHICH == 1) { // exponent
            return rexp;
          }
        }
      }
    }

    // Rank/Size are forwarded unchanged from the input operator
    static __MATX_INLINE__ constexpr __MATX_HOST__ __MATX_DEVICE__ int32_t Rank()
    {
      return OpA::Rank();
    }

    template <typename ShapeType, typename Executor>
    __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
    {
      if constexpr (is_matx_op<OpA>()) {
        a_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
      }
    }

    template <typename ShapeType, typename Executor>
    __MATX_INLINE__ void PostRun(ShapeType &&shape, Executor &&ex) const noexcept
    {
      if constexpr (is_matx_op<OpA>()) {
        a_.PostRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
      }
    }

    constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const
    {
      return a_.Size(dim);
    }

};
}

/**
 * @brief Return the normalized fraction and exponent parts of a floating
 * point operator, element-wise (see frexp()).
 *
 * @tparam OpA Type of the input operator (floating point scalar type)
 * @param a Input operator
 * @return std::tuple of two lazy operators: (fraction, exponent)
 */
template<typename OpA>
__MATX_INLINE__ auto frexp(const OpA &a) {
  using frac_op = detail::FrexpOp<OpA, 0>;
  using exp_op  = detail::FrexpOp<OpA, 1>;
  return std::make_tuple(frac_op{a}, exp_op{a});
}

/**
 * @brief Return the normalized fraction and exponent parts of a complex
 * floating point operator, element-wise (see frexpc()).
 *
 * @tparam OpA Type of the input operator (complex floating point scalar type)
 * @param a Input operator
 * @return std::tuple of four lazy operators:
 *         (real fraction, real exponent, imag fraction, imag exponent)
 */
template<typename OpA>
__MATX_INLINE__ auto frexpc(const OpA &a) {
  using rfrac_op = detail::FrexpOp<OpA, 0>;
  using rexp_op  = detail::FrexpOp<OpA, 1>;
  using ifrac_op = detail::FrexpOp<OpA, 2>;
  using iexp_op  = detail::FrexpOp<OpA, 3>;
  return std::make_tuple(rfrac_op{a}, rexp_op{a}, ifrac_op{a}, iexp_op{a});
}

};

1 change: 1 addition & 0 deletions include/matx/operators/operators.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@
#include "matx/operators/fftshift.h"
#include "matx/operators/filter.h"
#include "matx/operators/flatten.h"
#include "matx/operators/frexp.h"
#include "matx/operators/hermitian.h"
#include "matx/operators/hist.h"
#include "matx/operators/if.h"
Expand Down
Loading

0 comments on commit 4039738

Please sign in to comment.