diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b60ace57bc4..964fc8ad9a4 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -126,6 +126,20 @@ pmacc-compile-reduced-matrix:
job: pmacc-generate-reduced-matrix
strategy: depend
+picongpu-unittest-generate-reduced-matrix:
+ variables:
+ PIC_INPUTS: "unit"
+ TEST_TUPLE_NUM_ELEM: 1
+ extends: ".base_generate-reduced-matrix"
+
+picongpu-unittest-compile-reduced-matrix:
+ stage: test
+ trigger:
+ include:
+ - artifact: compile.yml
+ job: picongpu-unittest-generate-reduced-matrix
+ strategy: depend
+
pypicongpu-generate-full-matrix:
stage: generate
image: ubuntu:22.04
diff --git a/share/ci/backendFlags.sh b/share/ci/backendFlags.sh
new file mode 100755
index 00000000000..97c6f974353
--- /dev/null
+++ b/share/ci/backendFlags.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+###################################################
+# translate PIConGPU backend names into CMake Flags
+###################################################
+
+get_backend_flags()
+{
+ backend_cfg=(${1//:/ })
+ num_options="${#backend_cfg[@]}"
+ if [ $num_options -gt 2 ] ; then
+ echo "-b|--backend must contain 'backend:arch' or 'backend'" >&2
+ exit 1
+ fi
+ if [ "${backend_cfg[0]}" == "cuda" ] ; then
+ result+=" -Dalpaka_ACC_GPU_CUDA_ENABLE=ON -Dalpaka_ACC_GPU_CUDA_ONLY_MODE=ON"
+ if [ $num_options -eq 2 ] ; then
+ result+=" -DCMAKE_CUDA_ARCHITECTURES=\"${backend_cfg[1]}\""
+ else
+ result+=" -DCMAKE_CUDA_ARCHITECTURES=52"
+ fi
+ elif [ "${backend_cfg[0]}" == "omp2b" ] ; then
+ result+=" -Dalpaka_ACC_CPU_B_OMP2_T_SEQ_ENABLE=ON"
+ if [ $num_options -eq 2 ] ; then
+ result+=" -DPMACC_CPU_ARCH=\"${backend_cfg[1]}\""
+ fi
+ elif [ "${backend_cfg[0]}" == "serial" ] ; then
+ result+=" -Dalpaka_ACC_CPU_B_SEQ_T_SEQ_ENABLE=ON"
+ if [ $num_options -eq 2 ] ; then
+ result+=" -DPMACC_CPU_ARCH=\"${backend_cfg[1]}\""
+ fi
+ elif [ "${backend_cfg[0]}" == "tbb" ] ; then
+ result+=" -Dalpaka_ACC_CPU_B_TBB_T_SEQ_ENABLE=ON"
+ if [ $num_options -eq 2 ] ; then
+ result+=" -DPMACC_CPU_ARCH=\"${backend_cfg[1]}\""
+ fi
+ elif [ "${backend_cfg[0]}" == "threads" ] ; then
+ result+=" -Dalpaka_ACC_CPU_B_SEQ_T_THREADS_ENABLE=ON"
+ if [ $num_options -eq 2 ] ; then
+ result+=" -DPMACC_CPU_ARCH=\"${backend_cfg[1]}\""
+ fi
+ elif [ "${backend_cfg[0]}" == "hip" ] ; then
+ result+=" -Dalpaka_ACC_GPU_HIP_ENABLE=ON -Dalpaka_ACC_GPU_HIP_ONLY_MODE=ON"
+ if [ $num_options -eq 2 ] ; then
+ result+=" -DGPU_TARGETS=\"${backend_cfg[1]}\""
+ else
+ # If no architecture is given build for Radeon VII or MI50/60.
+ result+=" -DGPU_TARGETS=gfx906"
+ fi
+ else
+ echo "unsupported backend given '$1'" >&2
+ exit 1
+ fi
+
+ echo "$result"
+ exit 0
+}
diff --git a/share/ci/generate_reduced_matrix.sh b/share/ci/generate_reduced_matrix.sh
index 4b29e545873..f45324ec792 100755
--- a/share/ci/generate_reduced_matrix.sh
+++ b/share/ci/generate_reduced_matrix.sh
@@ -33,6 +33,9 @@ folders=()
if [ "$PIC_INPUTS" == "pmacc" ] ; then
# create test cases for PMacc
echo "pmacc" | tr " " "\n" | n_wise_generator.py $@ --limit_boost_version
+elif [ "$PIC_INPUTS" == "unit" ] ; then
+ # create test cases for the PIConGPU unit tests
+ echo "unit" | tr " " "\n" | n_wise_generator.py $@ --limit_boost_version
else
# create test cases for PIConGPU
for CASE in ${PIC_INPUTS}; do
diff --git a/share/ci/n_wise_generator.py b/share/ci/n_wise_generator.py
index 37c8511b7d4..d1075d2ae0a 100755
--- a/share/ci/n_wise_generator.py
+++ b/share/ci/n_wise_generator.py
@@ -296,7 +296,8 @@ def is_valid_combination(row):
v_cuda_hip_str = "" if v_cuda_hip == 0 else str(v_cuda_hip)
os_name = pairs[2][0]
os_version = get_version(pairs[2])
- image_prefix = "_run" if folder == "pmacc" else "_compile"
+ image_prefix = "_run" if folder == "pmacc" or folder == "unit"\
+ else "_compile"
job_name = compiler + "_" + backend + v_cuda_hip_str + \
"_boost" + boost_version + "_" + folder.replace("/", ".")
print(job_name + ":")
diff --git a/share/ci/run_picongpu_unit_tests.sh b/share/ci/run_picongpu_unit_tests.sh
new file mode 100755
index 00000000000..513c6e8c652
--- /dev/null
+++ b/share/ci/run_picongpu_unit_tests.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+set -e
+set -o pipefail
+
+export code_DIR=$CI_PROJECT_DIR
+source $code_DIR/share/ci/backendFlags.sh
+
+# the default build type is Release
+ # if necessary, you can rerun the pipeline with another build type -> https://docs.gitlab.com/ee/ci/pipelines.html#manually-executing-pipelines
+# to change the build type, you must set the environment variable PIC_BUILD_TYPE
+if [[ ! -v PIC_BUILD_TYPE ]] ; then
+ PIC_BUILD_TYPE=Release ;
+fi
+
+if [[ "$CI_RUNNER_TAGS" =~ .*cpuonly.* ]] ; then
+ # In cases where the compile-only job is executed on a GPU runner but with different kinds of accelerators
+ # we need to reset the variables to avoid compiling for the wrong architecture and accelerator.
+ unset CI_GPUS
+ unset CI_GPU_ARCH
+fi
+
+if [ -n "$CI_GPUS" ] ; then
+ # select randomly a device if multiple exists
+ # CI_GPUS is provided by the gitlab CI runner
+ SELECTED_DEVICE_ID=$((RANDOM%CI_GPUS))
+ export HIP_VISIBLE_DEVICES=$SELECTED_DEVICE_ID
+ export CUDA_VISIBLE_DEVICES=$SELECTED_DEVICE_ID
+ echo "selected device '$SELECTED_DEVICE_ID' of '$CI_GPUS'"
+else
+ echo "No GPU device selected because environment variable CI_GPUS is not set."
+fi
+
+if [[ "$PIC_BACKEND" =~ hip.* ]] ; then
+ if [ -z "$CI_GPU_ARCH" ] ; then
+ # In case the CI runner is not providing a GPU architecture e.g. a CPU runner set the architecture
+ # to Radeon VII or MI50/60.
+ export GPU_TARGETS="gfx906"
+ fi
+ export PIC_CMAKE_ARGS="$PIC_CMAKE_ARGS -DGPU_TARGETS=$GPU_TARGETS"
+fi
+
+###################################################
+# cmake config builder
+###################################################
+
+PIC_CONST_ARGS=""
+PIC_CONST_ARGS="${PIC_CONST_ARGS} -DCMAKE_BUILD_TYPE=${PIC_BUILD_TYPE}"
+CMAKE_ARGS="${PIC_CONST_ARGS} ${PIC_CMAKE_ARGS} -DCMAKE_CXX_COMPILER=${CXX_VERSION} -DBOOST_ROOT=/opt/boost/${BOOST_VERSION}"
+
+# check and activate if clang should be used as CUDA device compiler
+if [ -n "$CI_CLANG_AS_CUDA_COMPILER" ] ; then
+ export PATH="$(agc-manager -b cuda)/bin:$PATH"
+ CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_CUDA_COMPILER=${CXX_VERSION}"
+fi
+
+alpaka_backend=$(get_backend_flags ${PIC_BACKEND})
+CMAKE_ARGS="$CMAKE_ARGS $alpaka_backend"
+
+###################################################
+# build and run unit tests
+###################################################
+
+# adjust number of parallel builds to avoid out of memory errors
+# PIC_BUILD_REQUIRED_MEM_BYTES is a configured variable in the CI web interface
+PIC_PARALLEL_BUILDS=$(($CI_RAM_BYTES_TOTAL/$PIC_BUILD_REQUIRED_MEM_BYTES))
+
+# limit to number of available cores
+if [ $PIC_PARALLEL_BUILDS -gt $CI_CPUS ] ; then
+ PIC_PARALLEL_BUILDS=$CI_CPUS
+fi
+
+# CI_MAX_PARALLELISM is a configured variable in the CI web interface
+if [ $PIC_PARALLEL_BUILDS -gt $CI_MAX_PARALLELISM ] ; then
+ PIC_PARALLEL_BUILDS=$CI_MAX_PARALLELISM
+fi
+echo -e "\033[0;32m///////////////////////////////////////////////////"
+echo "PIC_BUILD_REQUIRED_MEM_BYTES-> ${PIC_BUILD_REQUIRED_MEM_BYTES}"
+echo "CI_RAM_BYTES_TOTAL -> ${CI_RAM_BYTES_TOTAL}"
+echo "CI_CPUS -> ${CI_CPUS}"
+echo "CI_MAX_PARALLELISM -> ${CI_MAX_PARALLELISM}"
+echo "number of processor threads -> $(nproc)"
+echo "number of parallel builds -> $PIC_PARALLEL_BUILDS"
+echo "cmake version -> $(cmake --version | head -n 1)"
+echo "build directory -> $(pwd)"
+echo "CMAKE_ARGS -> ${CMAKE_ARGS}"
+echo "accelerator -> ${PIC_BACKEND}"
+echo "input set -> ${PIC_TEST_CASE_FOLDER}"
+echo -e "/////////////////////////////////////////////////// \033[0m \n\n"
+
+
+## run unit tests
+export unitTest_folder=$HOME/buildPICUnitTest
+mkdir -p $unitTest_folder
+cd $unitTest_folder
+cmake $CMAKE_ARGS $code_DIR/share/picongpu/unit
+make -j $PIC_PARALLEL_BUILDS
+# execute on one device
+ctest -V
diff --git a/share/ci/run_pmacc_tests.sh b/share/ci/run_pmacc_tests.sh
index 86488453cc0..b8eefa9393f 100755
--- a/share/ci/run_pmacc_tests.sh
+++ b/share/ci/run_pmacc_tests.sh
@@ -3,6 +3,8 @@
set -e
set -o pipefail
+source $CI_PROJECT_DIR/share/ci/backendFlags.sh
+
# the default build type is Release
# if neccesary, you can rerun the pipeline with another build type-> https://docs.gitlab.com/ee/ci/pipelines.html#manually-executing-pipelines
# to change the build type, you must set the environment variable PMACC_BUILD_TYPE
@@ -28,62 +30,6 @@ if [ -n "$CI_CLANG_AS_CUDA_COMPILER" ] ; then
CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_CUDA_COMPILER=${CXX_VERSION}"
fi
-###################################################
-# translate PIConGPU backend names into CMake Flags
-###################################################
-
-get_backend_flags()
-{
- backend_cfg=(${1//:/ })
- num_options="${#backend_cfg[@]}"
- if [ $num_options -gt 2 ] ; then
- echo "-b|--backend must be contain 'backend:arch' or 'backend'" >&2
- exit 1
- fi
- if [ "${backend_cfg[0]}" == "cuda" ] ; then
- result+=" -Dalpaka_ACC_GPU_CUDA_ENABLE=ON -Dalpaka_ACC_GPU_CUDA_ONLY_MODE=ON"
- if [ $num_options -eq 2 ] ; then
- result+=" -DCMAKE_CUDA_ARCHITECTURES=\"${backend_cfg[1]}\""
- else
- result+=" -DCMAKE_CUDA_ARCHITECTURES=52"
- fi
- elif [ "${backend_cfg[0]}" == "omp2b" ] ; then
- result+=" -Dalpaka_ACC_CPU_B_OMP2_T_SEQ_ENABLE=ON"
- if [ $num_options -eq 2 ] ; then
- result+=" -DPMACC_CPU_ARCH=\"${backend_cfg[1]}\""
- fi
- elif [ "${backend_cfg[0]}" == "serial" ] ; then
- result+=" -Dalpaka_ACC_CPU_B_SEQ_T_SEQ_ENABLE=ON"
- if [ $num_options -eq 2 ] ; then
- result+=" -DPMACC_CPU_ARCH=\"${backend_cfg[1]}\""
- fi
- elif [ "${backend_cfg[0]}" == "tbb" ] ; then
- result+=" -Dalpaka_ACC_CPU_B_TBB_T_SEQ_ENABLE=ON"
- if [ $num_options -eq 2 ] ; then
- result+=" -DPMACC_CPU_ARCH=\"${backend_cfg[1]}\""
- fi
- elif [ "${backend_cfg[0]}" == "threads" ] ; then
- result+=" -Dalpaka_ACC_CPU_B_SEQ_T_THREADS_ENABLE=ON"
- if [ $num_options -eq 2 ] ; then
- result+=" -DPMACC_CPU_ARCH=\"${backend_cfg[1]}\""
- fi
- elif [ "${backend_cfg[0]}" == "hip" ] ; then
- result+=" -Dalpaka_ACC_GPU_HIP_ENABLE=ON -Dalpaka_ACC_GPU_HIP_ONLY_MODE=ON"
- if [ $num_options -eq 2 ] ; then
- result+=" -DGPU_TARGETS=\"${backend_cfg[1]}\""
- else
- # If no architecture is given build for Radeon VII or MI50/60.
- result+=" -DGPU_TARGETS=gfx906"
- fi
- else
- echo "unsupported backend given '$1'" >&2
- exit 1
- fi
-
- echo "$result"
- exit 0
-}
-
###################################################
# build an run tests
###################################################
diff --git a/share/ci/run_tests.sh b/share/ci/run_tests.sh
index 1d9c6f515ea..74691378f39 100755
--- a/share/ci/run_tests.sh
+++ b/share/ci/run_tests.sh
@@ -5,6 +5,8 @@ set -o pipefail
if [ "$1" == "pmacc" ] ; then
$CI_PROJECT_DIR/share/ci/run_pmacc_tests.sh
+elif [ "$1" == "unit" ] ; then
+ $CI_PROJECT_DIR/share/ci/run_picongpu_unit_tests.sh
else
$CI_PROJECT_DIR/share/ci/run_picongpu_tests.sh
fi
diff --git a/share/picongpu/unit/CMakeLists.txt b/share/picongpu/unit/CMakeLists.txt
new file mode 100644
index 00000000000..45893aea231
--- /dev/null
+++ b/share/picongpu/unit/CMakeLists.txt
@@ -0,0 +1,93 @@
+# Copyright 2023 Rene Widera
+#
+# This file is part of PIConGPU.
+#
+# PIConGPU is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# PIConGPU is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with PIConGPU.
+ # If not, see <https://www.gnu.org/licenses/>.
+#
+
+cmake_minimum_required(VERSION 3.22.0)
+project("UnitTest")
+
+###############################################################################
+# Language Flags
+###############################################################################
+
+# enforce C++17
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+set(CMAKE_CXX_STANDARD 17)
+
+
+################################################################################
+# CMake policies
+#
+# Search in _ROOT:
+# https://cmake.org/cmake/help/v3.12/policy/CMP0074.html
+################################################################################
+if(POLICY CMP0074)
+ cmake_policy(SET CMP0074 NEW)
+endif()
+
+################################################################################
+# PMacc
+################################################################################
+find_package(PMacc REQUIRED CONFIG PATHS "${CMAKE_CURRENT_SOURCE_DIR}/../../../include/pmacc")
+
+###############################################################################
+# Catch2
+###############################################################################
+
+add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../../../thirdParty/catch2 ${CMAKE_BINARY_DIR}/catch2)
+
+################################################################################
+# MPI
+################################################################################
+
+# MPI is provided by pmacc but to execute the binaries via root additional flags must be given to the execution command
+option(USE_MPI_AS_ROOT_USER "add --allow-run-as-root mpiexec used by ctest" OFF)
+
+if(USE_MPI_AS_ROOT_USER)
+ set(MPI_RUNTIME_FLAGS "--allow-run-as-root")
+endif()
+
+# PIConGPU
+
+include_directories(BEFORE "${CMAKE_CURRENT_SOURCE_DIR}/../../../include")
+
+###############################################################################
+# Targets
+###############################################################################
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/test)
+
+# CTest
+enable_testing()
+
+# Test cases
+# Each *UT.cpp file is an independent executable with one or more test cases
+file(GLOB_RECURSE TESTS *.cpp)
+foreach(dim 2 3)
+ foreach(testCaseFilepath ${TESTS})
+ get_filename_component(testCaseFilename ${testCaseFilepath} NAME)
+ string(REPLACE "UT.cpp" "" testCase ${testCaseFilename})
+ set(testExe "${PROJECT_NAME}-${testCase}-${dim}D")
+ cupla_add_executable(${testExe} ${testCaseFilepath})
+ target_compile_definitions(${testExe} PRIVATE TEST_DIM=${dim})
+ target_link_libraries(${testExe} PUBLIC Catch2 Catch2WithMain)
+ target_link_libraries(${testExe} PRIVATE pmacc::pmacc)
+ add_test(NAME "${testCase}-${dim}D" COMMAND mpiexec ${MPI_RUNTIME_FLAGS} -n 1 ./${testExe})
+ endforeach()
+ string(REPLACE "-DTEST_DIM=${dim}" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+endforeach()
\ No newline at end of file
diff --git a/share/picongpu/unit/README.rst b/share/picongpu/unit/README.rst
new file mode 100755
index 00000000000..3dd3b7cc3be
--- /dev/null
+++ b/share/picongpu/unit/README.rst
@@ -0,0 +1,13 @@
+PIConGPU unit test
+==================
+
+Test components in an environment that is as isolated as possible.
+
+Example how to compile and execute the tests:
+
+.. code-block:: bash
+
+   # compile for NVIDIA GPUs
+   cmake /share/picongpu/unit/ -Dalpaka_ACC_GPU_CUDA_ENABLE=ON
+   make -j
+   ctest
\ No newline at end of file
diff --git a/share/picongpu/unit/shape.cpp b/share/picongpu/unit/shape.cpp
new file mode 100644
index 00000000000..80cbacce222
--- /dev/null
+++ b/share/picongpu/unit/shape.cpp
@@ -0,0 +1,202 @@
+/* Copyright 2023 Rene Widera
+ *
+ * This file is part of PIConGPU.
+ *
+ * PIConGPU is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * PIConGPU is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with PIConGPU.
+ * If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include
+
+#include
+
+// STL
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include <cstdint> /* uint8_t */
+#include <iostream> /* cout, endl */
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+//! Helper to setup the PMacc environment
+using TestFixture = pmacc::test::PMaccFixture;
+static TestFixture fixture;
+
+using namespace picongpu;
+using namespace pmacc;
+
+constexpr uint32_t numValues = 1024;
+constexpr uint32_t elemPerBlock = 256;
+
+/** check if floating point result is equal
+ *
+ * Allows an error of one epsilon.
+ * @return true if equal, else false
+ */
+template
+static bool isApproxEqual(T const& a, T const& b)
+{
+ return a == Catch::Approx(b).margin(std::numeric_limits::epsilon());
+}
+
+/** Do not shift the in cell position. */
+struct NoPositionShift
+{
+ template
+ HDINLINE float_X shift(float_X pos)
+ {
+ return pos;
+ }
+};
+
+/** Shift the in cell position.
+ *
+ * Shifting the in cell position before querying the on support shape is required to fulfill the pre conditions of the
+ * shape function.
+ */
+struct PositionShift
+{
+ template
+ HDINLINE float_X shift(float_X pos)
+ {
+ const float_X v_pos = pos - 0.5_X;
+ int intShift;
+ if constexpr(isEven)
+ {
+ // pos range [-1.0;0.5)
+ intShift = v_pos >= float_X{-0.5} ? 0 : -1;
+ }
+ else
+ {
+ // pos range [-1.0;0.5)
+ intShift = v_pos >= float_X{0.0} ? 1 : 0;
+ }
+ return v_pos - float_X(intShift) + float_X{0.5};
+ }
+};
+
+
+/** Test a shape
+ *
+ * Evaluate the assignment shape at all grid points based on a random particle position.
+ * The sum of shape values must be 1.0.
+ *
+ * @tparam T_Shape assignment shape type, supports shape::ChargeAssignment and shape::ChargeAssignmentOnSupport
+ */
+template
+struct TestShape
+{
+ /** Validates the shape
+ *
+ * @param inputBuffer Buffer with positions, each value must be in range [0.0;1.0).
+ * @param posShiftFunctor Functor which shifts the position into a valid range to be passed to the shape.
+ */
+ template
+ void operator()(T_InputBuffer& inputBuffer, T_ShiftFunctor posShiftFunctor)
+ {
+ std::cout << "Test Shape" << typeid(T_Shape).name() << std::endl;
+ ::pmacc::DeviceBuffer deviceInput(numValues);
+ ::pmacc::HostBuffer resultHost(numValues);
+ ::pmacc::DeviceBuffer resultDevice(numValues);
+
+ deviceInput.copyFrom(inputBuffer);
+ resultDevice.setValue(0.0_X);
+
+ auto shapeTestKernel
+ = [this] DEVICEONLY(auto const& worker, auto positionShift, auto const& positions, auto result)
+ {
+ auto blockIdx = cupla::blockIdx(worker.getAcc()).x;
+
+ auto forEach = lockstep::makeForEach(worker);
+
+ forEach(
+ [&](uint32_t const idx) -> int32_t
+ {
+ auto valueIdx = blockIdx * elemPerBlock + idx;
+
+ if(valueIdx < numValues)
+ {
+ using Shape = T_Shape;
+ auto shape = Shape{};
+
+ for(int g = Shape::begin; g <= Shape::end; ++g)
+ {
+ auto p = positionShift.template shift(positions[valueIdx]);
+ result[valueIdx] += shape(g - p);
+ }
+ }
+ });
+ };
+
+ auto workerCfg = lockstep::makeWorkerCfg();
+ PMACC_LOCKSTEP_KERNEL(shapeTestKernel, workerCfg)
+ ((numValues + elemPerBlock - 1)
+ / elemPerBlock)(posShiftFunctor, deviceInput.getDataBox(), resultDevice.getDataBox());
+
+ resultHost.copyFrom(resultDevice);
+
+ auto res = resultHost.getDataBox();
+ for(uint32_t i = 0u; i < numValues; ++i)
+ {
+ REQUIRE(isApproxEqual(res[i], 1.0_X));
+ }
+ }
+};
+
+TEST_CASE("unit::shape", "[shape test]")
+{
+ ::pmacc::HostBuffer inputBuffer(numValues);
+
+ std::mt19937 mt(42.0);
+ std::uniform_real_distribution<> dist(0.0, 1.0);
+
+ auto input = inputBuffer.getDataBox();
+ for(uint32_t i = 0u; i < numValues; ++i)
+ input[i] = dist(mt);
+
+ // check on support assignment shape
+ using OnSupportShapes = pmacc::MakeSeq_t<
+ particles::shapes::NGP::ChargeAssignmentOnSupport,
+ particles::shapes::CIC::ChargeAssignmentOnSupport,
+ particles::shapes::TSC::ChargeAssignmentOnSupport,
+ particles::shapes::PQS::ChargeAssignmentOnSupport,
+ particles::shapes::PCS::ChargeAssignmentOnSupport>;
+
+ meta::ForEach>{}(inputBuffer, PositionShift{});
+
+ // check assignment shape outside of the support
+ using NotOnSupportShapes = pmacc::MakeSeq_t<
+ particles::shapes::NGP::ChargeAssignment,
+ particles::shapes::CIC::ChargeAssignment,
+ particles::shapes::TSC::ChargeAssignment,
+ particles::shapes::PQS::ChargeAssignment,
+ particles::shapes::PCS::ChargeAssignment>;
+
+ meta::ForEach>{}(inputBuffer, NoPositionShift{});
+}