diff --git a/test/stdgpu/deque.inc b/test/stdgpu/deque.inc
index 8755db40e..3ad7dc326 100644
--- a/test/stdgpu/deque.inc
+++ b/test/stdgpu/deque.inc
@@ -1711,3 +1711,525 @@ TEST_F(stdgpu_deque, shrink_to_fit)
 }
 
 
+namespace
+{
+    template <typename T>
+    struct non_const_front_functor
+    {
+        stdgpu::deque<T> pool;
+        T* result;
+
+        non_const_front_functor(const stdgpu::deque<T>& pool,
+                                T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(STDGPU_MAYBE_UNUSED const stdgpu::index_t i)
+        {
+            *result = pool.front();
+        }
+    };
+
+
+    template <typename T>
+    struct const_front_functor
+    {
+        const stdgpu::deque<T> pool;
+        T* result;
+
+        const_front_functor(const stdgpu::deque<T>& pool,
+                            T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(STDGPU_MAYBE_UNUSED const stdgpu::index_t i)
+        {
+            *result = pool.front();
+        }
+    };
+
+
+    template <typename T>
+    T
+    non_const_front(const stdgpu::deque<T>& pool)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(0), thrust::counting_iterator<stdgpu::index_t>(1),
+                         non_const_front_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+
+    template <typename T>
+    T
+    const_front(const stdgpu::deque<T>& pool)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(0), thrust::counting_iterator<stdgpu::index_t>(1),
+                         const_front_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+    template <typename T>
+    T
+    front(const stdgpu::deque<T>& pool)
+    {
+        T non_const_front_value = non_const_front(pool);
+        T const_front_value = const_front(pool);
+
+        EXPECT_EQ(non_const_front_value, const_front_value);
+
+        return non_const_front_value;
+    }
+
+
+    template <typename T>
+    struct non_const_back_functor
+    {
+        stdgpu::deque<T> pool;
+        T* result;
+
+        non_const_back_functor(const stdgpu::deque<T>& pool,
+                               T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(STDGPU_MAYBE_UNUSED const stdgpu::index_t i)
+        {
+            *result = pool.back();
+        }
+    };
+
+
+    template <typename T>
+    struct const_back_functor
+    {
+        const stdgpu::deque<T> pool;
+        T* result;
+
+        const_back_functor(const stdgpu::deque<T>& pool,
+                           T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(STDGPU_MAYBE_UNUSED const stdgpu::index_t i)
+        {
+            *result = pool.back();
+        }
+    };
+
+
+    template <typename T>
+    T
+    non_const_back(const stdgpu::deque<T>& pool)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(0), thrust::counting_iterator<stdgpu::index_t>(1),
+                         non_const_back_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+
+    template <typename T>
+    T
+    const_back(const stdgpu::deque<T>& pool)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(0), thrust::counting_iterator<stdgpu::index_t>(1),
+                         const_back_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+    template <typename T>
+    T
+    back(const stdgpu::deque<T>& pool)
+    {
+        T non_const_back_value = non_const_back(pool);
+        T const_back_value = const_back(pool);
+
+        EXPECT_EQ(non_const_back_value, const_back_value);
+
+        return non_const_back_value;
+    }
+
+
+    template <typename T>
+    struct non_const_operator_access_functor
+    {
+        stdgpu::deque<T> pool;
+        T* result;
+
+        non_const_operator_access_functor(const stdgpu::deque<T>& pool,
+                                          T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(const stdgpu::index_t i)
+        {
+            *result = pool[i];
+        }
+    };
+
+
+    template <typename T>
+    struct const_operator_access_functor
+    {
+        const stdgpu::deque<T> pool;
+        T* result;
+
+        const_operator_access_functor(const stdgpu::deque<T>& pool,
+                                      T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(const stdgpu::index_t i)
+        {
+            *result = pool[i];
+        }
+    };
+
+
+    template <typename T>
+    T
+    non_const_operator_access(const stdgpu::deque<T>& pool,
+                              const stdgpu::index_t i)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(i), thrust::counting_iterator<stdgpu::index_t>(i + 1),
+                         non_const_operator_access_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+
+    template <typename T>
+    T
+    const_operator_access(const stdgpu::deque<T>& pool,
+                          const stdgpu::index_t i)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(i), thrust::counting_iterator<stdgpu::index_t>(i + 1),
+                         const_operator_access_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+    template <typename T>
+    T
+    operator_access(const stdgpu::deque<T>& pool,
+                    const stdgpu::index_t i)
+    {
+        T non_const_operator_access_value = non_const_operator_access(pool, i);
+        T const_operator_access_value = const_operator_access(pool, i);
+
+        EXPECT_EQ(non_const_operator_access_value, const_operator_access_value);
+
+        return non_const_operator_access_value;
+    }
+
+
+    template <typename T>
+    struct non_const_at_functor
+    {
+        stdgpu::deque<T> pool;
+        T* result;
+
+        non_const_at_functor(const stdgpu::deque<T>& pool,
+                             T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(const stdgpu::index_t i)
+        {
+            *result = pool.at(i);
+        }
+    };
+
+
+    template <typename T>
+    struct const_at_functor
+    {
+        const stdgpu::deque<T> pool;
+        T* result;
+
+        const_at_functor(const stdgpu::deque<T>& pool,
+                         T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(const stdgpu::index_t i)
+        {
+            *result = pool.at(i);
+        }
+    };
+
+
+    template <typename T>
+    T
+    non_const_at(const stdgpu::deque<T>& pool,
+                 const stdgpu::index_t i)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(i), thrust::counting_iterator<stdgpu::index_t>(i + 1),
+                         non_const_at_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+
+    template <typename T>
+    T
+    const_at(const stdgpu::deque<T>& pool,
+             const stdgpu::index_t i)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(i), thrust::counting_iterator<stdgpu::index_t>(i + 1),
+                         const_at_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+    template <typename T>
+    T
+    at(const stdgpu::deque<T>& pool,
+       const stdgpu::index_t i)
+    {
+        T non_const_at_value = non_const_at(pool, i);
+        T const_at_value = const_at(pool, i);
+
+        EXPECT_EQ(non_const_at_value, const_at_value);
+
+        return non_const_at_value;
+    }
+}
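All helpers in this namespace share one single-element readback pattern: a one-iteration thrust::for_each runs the accessor on the device, the value lands in a one-element device array, and copyDevice2HostArray brings it back for the host-side EXPECT_EQ. Distilled into a generic sketch (device_readback and make_functor are illustrative names, not part of this patch; the stdgpu memory API calls are the ones used above):

// Sketch of the shared readback pattern; make_functor(result) builds the
// device functor that writes exactly one T to the given device pointer.
template <typename T, typename MakeFunctor>
T
device_readback(MakeFunctor make_functor)
{
    T* result = createDeviceArray<T>(1);   // one-element device output slot

    // Execute the accessor functor exactly once on the device.
    thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(0),
                     thrust::counting_iterator<stdgpu::index_t>(1),
                     make_functor(result));

    T host_result;
    copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);

    destroyDeviceArray<T>(result);

    return host_result;
}

Note also that every functor stores the container by value: stdgpu containers are lightweight handles to device memory, so copying the handle into a device functor aliases the same storage rather than deep-copying elements.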
+TEST_F(stdgpu_deque, front)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::deque<int> pool = stdgpu::deque<int>::createDeviceObject(N);
+
+    fill_deque(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    EXPECT_EQ(front(pool), 1);
+
+    stdgpu::deque<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_deque, back)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::deque<int> pool = stdgpu::deque<int>::createDeviceObject(N);
+
+    fill_deque(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    EXPECT_EQ(back(pool), N);
+
+    stdgpu::deque<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_deque, operator_access)
+{
+    const stdgpu::index_t N = 100;
+
+    stdgpu::deque<int> pool = stdgpu::deque<int>::createDeviceObject(N);
+
+    fill_deque(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    for (stdgpu::index_t i = 0; i < N; ++i)
+    {
+        EXPECT_EQ(operator_access(pool, i), i + 1);
+    }
+
+    stdgpu::deque<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_deque, at)
+{
+    const stdgpu::index_t N = 100;
+
+    stdgpu::deque<int> pool = stdgpu::deque<int>::createDeviceObject(N);
+
+    fill_deque(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    for (stdgpu::index_t i = 0; i < N; ++i)
+    {
+        EXPECT_EQ(at(pool, i), i + 1);
+    }
+
+    stdgpu::deque<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_deque, data)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::deque<int> pool = stdgpu::deque<int>::createDeviceObject(N);
+
+    fill_deque(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    int* non_const_data = pool.data();
+    const int* const_data = static_cast<const stdgpu::deque<int>&>(pool).data();
+
+    EXPECT_EQ(non_const_data, const_data);
+
+    stdgpu::deque<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_deque, non_const_device_range)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::deque<int> pool = stdgpu::deque<int>::createDeviceObject(N);
+
+    fill_deque(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    auto range = pool.device_range();
+    int sum = thrust::reduce(range.begin(), range.end(),
+                             0,
+                             thrust::plus<int>());
+
+    EXPECT_EQ(sum, N * (N + 1) / 2);
+
+    stdgpu::deque<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_deque, const_device_range)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::deque<int> pool = stdgpu::deque<int>::createDeviceObject(N);
+
+    fill_deque(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    auto range = static_cast<const stdgpu::deque<int>&>(pool).device_range();
+    int sum = thrust::reduce(range.begin(), range.end(),
+                             0,
+                             thrust::plus<int>());
+
+    EXPECT_EQ(sum, N * (N + 1) / 2);
+
+    stdgpu::deque<int>::destroyDeviceObject(pool);
+}
+
diff --git a/test/stdgpu/memory.inc b/test/stdgpu/memory.inc
index 4f63fc02e..0cfccca22 100644
--- a/test/stdgpu/memory.inc
+++ b/test/stdgpu/memory.inc
@@ -1230,6 +1230,279 @@ TEST_F(STDGPU_MEMORY_TEST_CLASS, destroyManangedArray_double_free_shifted)
 }
 
 
+TEST_F(STDGPU_MEMORY_TEST_CLASS, safe_device_allocator)
+{
+    stdgpu::safe_device_allocator<int> a;
+    stdgpu::index64_t size = 42;
+
+    int* array = a.allocate(size);
+
+    #if STDGPU_BACKEND != STDGPU_BACKEND_CUDA || STDGPU_DEVICE_COMPILER == STDGPU_DEVICE_COMPILER_NVCC
+    int default_value = 10;
+    thrust::fill(stdgpu::device_begin(array), stdgpu::device_end(array),
+                 default_value);
+
+    EXPECT_TRUE( thrust::all_of(stdgpu::device_cbegin(array), stdgpu::device_cend(array),
+                                equal_to_number<int>(default_value)) );
+    #endif
+
+    a.deallocate(array, size);
+}
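For context on the allocator_traits tests that follow: stdgpu::allocator_traits presumably mirrors std::allocator_traits and forwards to the allocator's own members where they exist. A minimal sketch of a custom allocator that would satisfy the same interface (counting_allocator and its bookkeeping are illustrative, not part of this patch; the index64_t size type follows the tests above, and is assumed to come from the stdgpu headers already included here):

#include <cstddef>
#include <new>

// Hypothetical host-only allocator following the std::allocator requirements,
// so that stdgpu::allocator_traits<counting_allocator<T>> can forward to it.
template <typename T>
struct counting_allocator
{
    using value_type = T;

    stdgpu::index64_t alive = 0;   // net number of currently allocated elements

    T*
    allocate(stdgpu::index64_t n)
    {
        alive += n;
        return static_cast<T*>(::operator new(static_cast<std::size_t>(n) * sizeof(T)));
    }

    void
    deallocate(T* p, stdgpu::index64_t n)
    {
        alive -= n;
        ::operator delete(p);
    }
};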
+TEST_F(STDGPU_MEMORY_TEST_CLASS, safe_host_allocator)
+{
+    stdgpu::safe_host_allocator<int> a;
+    stdgpu::index64_t size = 42;
+
+    int* array = a.allocate(size);
+
+    int default_value = 10;
+    thrust::fill(stdgpu::host_begin(array), stdgpu::host_end(array),
+                 default_value);
+
+    EXPECT_TRUE( thrust::all_of(stdgpu::host_cbegin(array), stdgpu::host_cend(array),
+                                equal_to_number<int>(default_value)) );
+
+
+    a.deallocate(array, size);
+}
+
+
+TEST_F(STDGPU_MEMORY_TEST_CLASS, safe_managed_allocator)
+{
+    stdgpu::safe_managed_allocator<int> a;
+    stdgpu::index64_t size = 42;
+
+    int* array = a.allocate(size);
+
+    int default_value = 10;
+    thrust::fill(stdgpu::host_begin(array), stdgpu::host_end(array),
+                 default_value);
+
+    EXPECT_TRUE( thrust::all_of(stdgpu::host_cbegin(array), stdgpu::host_cend(array),
+                                equal_to_number<int>(default_value)) );
+
+
+    a.deallocate(array, size);
+}
+
+
+TEST_F(STDGPU_MEMORY_TEST_CLASS, allocator_traits_allocate_deallocate)
+{
+    using Allocator = stdgpu::safe_host_allocator<int>;
+
+    Allocator a;
+    stdgpu::index64_t size = 42;
+
+    int* array = stdgpu::allocator_traits<Allocator>::allocate(a, size);
+
+    int default_value = 10;
+    thrust::fill(stdgpu::host_begin(array), stdgpu::host_end(array),
+                 default_value);
+
+    EXPECT_TRUE( thrust::all_of(stdgpu::host_cbegin(array), stdgpu::host_cend(array),
+                                equal_to_number<int>(default_value)) );
+
+
+    stdgpu::allocator_traits<Allocator>::deallocate(a, array, size);
+}
+
+
+TEST_F(STDGPU_MEMORY_TEST_CLASS, allocator_traits_allocate_hint_deallocate)
+{
+    using Allocator = stdgpu::safe_host_allocator<int>;
+
+    Allocator a;
+    stdgpu::index64_t size = 42;
+
+    int* array_hint = stdgpu::allocator_traits<Allocator>::allocate(a, size);
+    int* array = stdgpu::allocator_traits<Allocator>::allocate(a, size, array_hint);
+
+    int default_value = 10;
+    thrust::fill(stdgpu::host_begin(array), stdgpu::host_end(array),
+                 default_value);
+
+    EXPECT_TRUE( thrust::all_of(stdgpu::host_cbegin(array), stdgpu::host_cend(array),
+                                equal_to_number<int>(default_value)) );
+
+
+    stdgpu::allocator_traits<Allocator>::deallocate(a, array, size);
+    stdgpu::allocator_traits<Allocator>::deallocate(a, array_hint, size);
+}
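The Counter type defined in the following anonymous namespace counts constructor and destructor calls on the host only; the STDGPU_CODE guard keeps device-side instantiations from touching the static counters. As a minimal host-side illustration of the lifetime bookkeeping these tests automate (a sketch, assuming host compilation so the guarded branches are active; counter_lifetime_sketch is an illustrative name):

#include <new>

void
counter_lifetime_sketch()
{
    // Raw storage on the stack: no allocator involved, only object lifetime.
    alignas(Counter) unsigned char storage[sizeof(Counter)];

    Counter* c = new (storage) Counter();  // Counter::constructor_calls incremented
    stdgpu::destroy_at(c);                 // Counter::destructor_calls incremented
}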
+namespace
+{
+    struct Counter
+    {
+        // Some member to let the class have a suitable size
+        int x = 0;
+
+        static int constructor_calls;
+        static int destructor_calls;
+
+        STDGPU_HOST_DEVICE
+        Counter()
+        {
+            #if STDGPU_CODE == STDGPU_CODE_HOST
+                Counter::constructor_calls++;
+            #endif
+        }
+
+        STDGPU_HOST_DEVICE
+        ~Counter()
+        {
+            #if STDGPU_CODE == STDGPU_CODE_HOST
+                Counter::destructor_calls++;
+            #endif
+        }
+    };
+
+    int Counter::constructor_calls = 0;
+    int Counter::destructor_calls = 0;
+
+
+    template <typename Allocator>
+    struct traits_construct
+    {
+        Allocator a;
+
+        void
+        operator()(typename Allocator::value_type& value)
+        {
+            stdgpu::allocator_traits<Allocator>::construct(a, &value);
+        }
+    };
+
+
+    template <typename Allocator>
+    struct traits_destroy
+    {
+        Allocator a;
+
+        void
+        operator()(typename Allocator::value_type& value)
+        {
+            stdgpu::allocator_traits<Allocator>::destroy(a, &value);
+        }
+    };
+}
+
+
+TEST_F(STDGPU_MEMORY_TEST_CLASS, allocator_traits_construct_destroy)
+{
+    using Allocator = stdgpu::safe_host_allocator<Counter>;
+    Allocator a;
+
+    stdgpu::index64_t size = 42;
+
+    typename Allocator::value_type* array = stdgpu::allocator_traits<Allocator>::allocate(a, size);
+
+    Counter::constructor_calls = 0;
+    Counter::destructor_calls = 0;
+    ASSERT_EQ(Counter::constructor_calls, 0);
+    ASSERT_EQ(Counter::destructor_calls, 0);
+
+    thrust::for_each(stdgpu::host_begin(array), stdgpu::host_end(array),
+                     traits_construct<Allocator>());
+
+    EXPECT_EQ(Counter::constructor_calls, size);
+    EXPECT_EQ(Counter::destructor_calls, 0);
+
+    thrust::for_each(stdgpu::host_begin(array), stdgpu::host_end(array),
+                     traits_destroy<Allocator>());
+
+    EXPECT_EQ(Counter::constructor_calls, size);
+    EXPECT_EQ(Counter::destructor_calls, size);
+
+    stdgpu::allocator_traits<Allocator>::deallocate(a, array, size);
+}
+
+
+TEST_F(STDGPU_MEMORY_TEST_CLASS, allocator_traits_max_size_and_select)
+{
+    using Allocator = stdgpu::safe_host_allocator<int>;
+    Allocator a;
+
+    Allocator b = stdgpu::allocator_traits<Allocator>::select_on_container_copy_construction(a);
+
+    EXPECT_EQ(stdgpu::allocator_traits<Allocator>::max_size(a), stdgpu::allocator_traits<Allocator>::max_size(b));
+}
+
+
+TEST_F(STDGPU_MEMORY_TEST_CLASS, destroy)
+{
+    using Allocator = stdgpu::safe_host_allocator<Counter>;
+    Allocator a;
+
+    stdgpu::index64_t size = 42;
+
+    typename Allocator::value_type* array = stdgpu::allocator_traits<Allocator>::allocate(a, size);
+
+    Counter::constructor_calls = 0;
+    Counter::destructor_calls = 0;
+    ASSERT_EQ(Counter::constructor_calls, 0);
+    ASSERT_EQ(Counter::destructor_calls, 0);
+
+    stdgpu::destroy(stdgpu::host_begin(array), stdgpu::host_end(array));
+
+    EXPECT_EQ(Counter::constructor_calls, 0);
+    EXPECT_EQ(Counter::destructor_calls, size);
+
+    stdgpu::allocator_traits<Allocator>::deallocate(a, array, size);
+}
+
+
+TEST_F(STDGPU_MEMORY_TEST_CLASS, destroy_n)
+{
+    using Allocator = stdgpu::safe_host_allocator<Counter>;
+    Allocator a;
+
+    stdgpu::index64_t size = 42;
+
+    typename Allocator::value_type* array = stdgpu::allocator_traits<Allocator>::allocate(a, size);
+
+    Counter::constructor_calls = 0;
+    Counter::destructor_calls = 0;
+    ASSERT_EQ(Counter::constructor_calls, 0);
+    ASSERT_EQ(Counter::destructor_calls, 0);
+
+    stdgpu::destroy_n(stdgpu::host_begin(array), size);
+
+    EXPECT_EQ(Counter::constructor_calls, 0);
+    EXPECT_EQ(Counter::destructor_calls, size);
+
+    stdgpu::allocator_traits<Allocator>::deallocate(a, array, size);
+}
+
+
+TEST_F(STDGPU_MEMORY_TEST_CLASS, destroy_at)
+{
+    using Allocator = stdgpu::safe_host_allocator<Counter>;
+    Allocator a;
+
+    stdgpu::index64_t size = 42;
+
+    typename Allocator::value_type* array = stdgpu::allocator_traits<Allocator>::allocate(a, size);
+
+    Counter::constructor_calls = 0;
+    Counter::destructor_calls = 0;
+    ASSERT_EQ(Counter::constructor_calls, 0);
+    ASSERT_EQ(Counter::destructor_calls, 0);
+
+    for (stdgpu::index_t i = 0; i < size; ++i)
+    {
+        stdgpu::destroy_at(array + i);
+    }
+
+    EXPECT_EQ(Counter::constructor_calls, 0);
+    EXPECT_EQ(Counter::destructor_calls, size);
+
+    stdgpu::allocator_traits<Allocator>::deallocate(a, array, size);
+}
+
+
 TEST_F(STDGPU_MEMORY_TEST_CLASS, safe_pinned_host_allocator)
 {
     stdgpu::safe_pinned_host_allocator<int> a;
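The destroy, destroy_n, and destroy_at tests in this hunk only end object lifetimes; none of these functions release memory, which is why each test still calls allocator_traits<Allocator>::deallocate afterwards. Assuming stdgpu mirrors the std:: counterparts, stdgpu::destroy is essentially the following (destroy_sketch is illustrative, not the library's implementation):

// Destroy every element in [first, last) without deallocating storage.
template <typename Iterator>
void
destroy_sketch(Iterator first, Iterator last)
{
    for (; first != last; ++first)
    {
        stdgpu::destroy_at(&*first);
    }
}

stdgpu::destroy_n behaves the same way for exactly n elements starting at first.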
diff --git a/test/stdgpu/vector.inc b/test/stdgpu/vector.inc
index 73605735f..7ec25c076 100644
--- a/test/stdgpu/vector.inc
+++ b/test/stdgpu/vector.inc
@@ -17,6 +17,7 @@
 
 #include <thrust/for_each.h>
 #include <thrust/iterator/counting_iterator.h>
+#include <thrust/reduce.h>
 #include <thrust/sequence.h>
 #include <thrust/sort.h>
 
@@ -909,3 +910,571 @@ TEST_F(stdgpu_vector, shrink_to_fit)
 }
 
 
+namespace
+{
+    template <typename T>
+    struct non_const_front_functor
+    {
+        stdgpu::vector<T> pool;
+        T* result;
+
+        non_const_front_functor(const stdgpu::vector<T>& pool,
+                                T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(STDGPU_MAYBE_UNUSED const stdgpu::index_t i)
+        {
+            *result = pool.front();
+        }
+    };
+
+
+    template <typename T>
+    struct const_front_functor
+    {
+        const stdgpu::vector<T> pool;
+        T* result;
+
+        const_front_functor(const stdgpu::vector<T>& pool,
+                            T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(STDGPU_MAYBE_UNUSED const stdgpu::index_t i)
+        {
+            *result = pool.front();
+        }
+    };
+
+
+    template <typename T>
+    T
+    non_const_front(const stdgpu::vector<T>& pool)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(0), thrust::counting_iterator<stdgpu::index_t>(1),
+                         non_const_front_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+
+    template <typename T>
+    T
+    const_front(const stdgpu::vector<T>& pool)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(0), thrust::counting_iterator<stdgpu::index_t>(1),
+                         const_front_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+    template <typename T>
+    T
+    front(const stdgpu::vector<T>& pool)
+    {
+        T non_const_front_value = non_const_front(pool);
+        T const_front_value = const_front(pool);
+
+        EXPECT_EQ(non_const_front_value, const_front_value);
+
+        return non_const_front_value;
+    }
+
+
+    template <typename T>
+    struct non_const_back_functor
+    {
+        stdgpu::vector<T> pool;
+        T* result;
+
+        non_const_back_functor(const stdgpu::vector<T>& pool,
+                               T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(STDGPU_MAYBE_UNUSED const stdgpu::index_t i)
+        {
+            *result = pool.back();
+        }
+    };
+
+
+    template <typename T>
+    struct const_back_functor
+    {
+        const stdgpu::vector<T> pool;
+        T* result;
+
+        const_back_functor(const stdgpu::vector<T>& pool,
+                           T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(STDGPU_MAYBE_UNUSED const stdgpu::index_t i)
+        {
+            *result = pool.back();
+        }
+    };
+
+
+    template <typename T>
+    T
+    non_const_back(const stdgpu::vector<T>& pool)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(0), thrust::counting_iterator<stdgpu::index_t>(1),
+                         non_const_back_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+
+    template <typename T>
+    T
+    const_back(const stdgpu::vector<T>& pool)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(0), thrust::counting_iterator<stdgpu::index_t>(1),
+                         const_back_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+    template <typename T>
+    T
+    back(const stdgpu::vector<T>& pool)
+    {
+        T non_const_back_value = non_const_back(pool);
+        T const_back_value = const_back(pool);
+
+        EXPECT_EQ(non_const_back_value, const_back_value);
+
+        return non_const_back_value;
+    }
+
+
+    template <typename T>
+    struct non_const_operator_access_functor
+    {
+        stdgpu::vector<T> pool;
+        T* result;
+
+        non_const_operator_access_functor(const stdgpu::vector<T>& pool,
+                                          T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(const stdgpu::index_t i)
+        {
+            *result = pool[i];
+        }
+    };
+
+
+    template <typename T>
+    struct const_operator_access_functor
+    {
+        const stdgpu::vector<T> pool;
+        T* result;
+
+        const_operator_access_functor(const stdgpu::vector<T>& pool,
+                                      T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(const stdgpu::index_t i)
+        {
+            *result = pool[i];
+        }
+    };
+
+
+    template <typename T>
+    T
+    non_const_operator_access(const stdgpu::vector<T>& pool,
+                              const stdgpu::index_t i)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(i), thrust::counting_iterator<stdgpu::index_t>(i + 1),
+                         non_const_operator_access_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+
+    template <typename T>
+    T
+    const_operator_access(const stdgpu::vector<T>& pool,
+                          const stdgpu::index_t i)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(i), thrust::counting_iterator<stdgpu::index_t>(i + 1),
+                         const_operator_access_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+    template <typename T>
+    T
+    operator_access(const stdgpu::vector<T>& pool,
+                    const stdgpu::index_t i)
+    {
+        T non_const_operator_access_value = non_const_operator_access(pool, i);
+        T const_operator_access_value = const_operator_access(pool, i);
+
+        EXPECT_EQ(non_const_operator_access_value, const_operator_access_value);
+
+        return non_const_operator_access_value;
+    }
+
+
+    template <typename T>
+    struct non_const_at_functor
+    {
+        stdgpu::vector<T> pool;
+        T* result;
+
+        non_const_at_functor(const stdgpu::vector<T>& pool,
+                             T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(const stdgpu::index_t i)
+        {
+            *result = pool.at(i);
+        }
+    };
+
+
+    template <typename T>
+    struct const_at_functor
+    {
+        const stdgpu::vector<T> pool;
+        T* result;
+
+        const_at_functor(const stdgpu::vector<T>& pool,
+                         T* result)
+            : pool(pool),
+              result(result)
+        {
+
+        }
+
+        STDGPU_DEVICE_ONLY void
+        operator()(const stdgpu::index_t i)
+        {
+            *result = pool.at(i);
+        }
+    };
+
+
+    template <typename T>
+    T
+    non_const_at(const stdgpu::vector<T>& pool,
+                 const stdgpu::index_t i)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(i), thrust::counting_iterator<stdgpu::index_t>(i + 1),
+                         non_const_at_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+
+    template <typename T>
+    T
+    const_at(const stdgpu::vector<T>& pool,
+             const stdgpu::index_t i)
+    {
+        T* result = createDeviceArray<T>(1);
+
+        thrust::for_each(thrust::counting_iterator<stdgpu::index_t>(i), thrust::counting_iterator<stdgpu::index_t>(i + 1),
+                         const_at_functor<T>(pool, result));
+
+        T host_result;
+        copyDevice2HostArray<T>(result, 1, &host_result, MemoryCopy::NO_CHECK);
+
+        destroyDeviceArray<T>(result);
+
+        return host_result;
+    }
+
+    template <typename T>
+    T
+    at(const stdgpu::vector<T>& pool,
+       const stdgpu::index_t i)
+    {
+        T non_const_at_value = non_const_at(pool, i);
+        T const_at_value = const_at(pool, i);
+
+        EXPECT_EQ(non_const_at_value, const_at_value);
+
+        return non_const_at_value;
+    }
+}
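As in deque.inc, every functor above takes the container by value and pairs the non-const and const overload of each accessor, asserting that both return the same value. The shape of the pattern, reduced to its essentials (read_first is an illustrative name; the by-value handle behavior is an assumption about stdgpu's container design):

// Minimal form of the capture pattern used above.
struct read_first
{
    stdgpu::vector<int> v;  // by-value handle, aliases the same device storage
    int* out;               // device-side output slot

    STDGPU_DEVICE_ONLY void
    operator()(STDGPU_MAYBE_UNUSED const stdgpu::index_t i)
    {
        *out = v[0];
    }
};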
+TEST_F(stdgpu_vector, front)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::vector<int> pool = stdgpu::vector<int>::createDeviceObject(N);
+
+    fill_vector(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    EXPECT_EQ(front(pool), 1);
+
+    stdgpu::vector<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_vector, back)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::vector<int> pool = stdgpu::vector<int>::createDeviceObject(N);
+
+    fill_vector(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    EXPECT_EQ(back(pool), N);
+
+    stdgpu::vector<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_vector, operator_access)
+{
+    const stdgpu::index_t N = 100;
+
+    stdgpu::vector<int> pool = stdgpu::vector<int>::createDeviceObject(N);
+
+    fill_vector(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    for (stdgpu::index_t i = 0; i < N; ++i)
+    {
+        EXPECT_EQ(operator_access(pool, i), i + 1);
+    }
+
+    stdgpu::vector<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_vector, at)
+{
+    const stdgpu::index_t N = 100;
+
+    stdgpu::vector<int> pool = stdgpu::vector<int>::createDeviceObject(N);
+
+    fill_vector(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    for (stdgpu::index_t i = 0; i < N; ++i)
+    {
+        EXPECT_EQ(at(pool, i), i + 1);
+    }
+
+    stdgpu::vector<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_vector, data)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::vector<int> pool = stdgpu::vector<int>::createDeviceObject(N);
+
+    fill_vector(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    int* non_const_data = pool.data();
+    const int* const_data = static_cast<const stdgpu::vector<int>&>(pool).data();
+
+    EXPECT_EQ(non_const_data, const_data);
+
+    stdgpu::vector<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_vector, device_begin)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::vector<int> pool = stdgpu::vector<int>::createDeviceObject(N);
+
+    fill_vector(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    stdgpu::device_ptr<int> non_const_begin = pool.device_begin();
+    stdgpu::device_ptr<const int> const_begin = static_cast<const stdgpu::vector<int>&>(pool).device_begin();
+    stdgpu::device_ptr<const int> cbegin = static_cast<const stdgpu::vector<int>&>(pool).device_cbegin();
+
+    EXPECT_EQ(non_const_begin, const_begin);
+    EXPECT_EQ(const_begin, cbegin);
+
+    stdgpu::vector<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_vector, device_end)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::vector<int> pool = stdgpu::vector<int>::createDeviceObject(N);
+
+    fill_vector(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    stdgpu::device_ptr<int> non_const_end = pool.device_end();
+    stdgpu::device_ptr<const int> const_end = static_cast<const stdgpu::vector<int>&>(pool).device_end();
+    stdgpu::device_ptr<const int> cend = static_cast<const stdgpu::vector<int>&>(pool).device_cend();
+
+    EXPECT_EQ(non_const_end, const_end);
+    EXPECT_EQ(const_end, cend);
+
+    stdgpu::vector<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_vector, non_const_device_range)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::vector<int> pool = stdgpu::vector<int>::createDeviceObject(N);
+
+    fill_vector(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    auto range = pool.device_range();
+    int sum = thrust::reduce(range.begin(), range.end(),
+                             0,
+                             thrust::plus<int>());
+
+    EXPECT_EQ(sum, N * (N + 1) / 2);
+
+    stdgpu::vector<int>::destroyDeviceObject(pool);
+}
+
+
+TEST_F(stdgpu_vector, const_device_range)
+{
+    const stdgpu::index_t N = 10000;
+
+    stdgpu::vector<int> pool = stdgpu::vector<int>::createDeviceObject(N);
+
+    fill_vector(pool);
+
+    ASSERT_EQ(pool.size(), N);
+    ASSERT_EQ(pool.capacity(), N);
+    ASSERT_TRUE(pool.valid());
+
+    auto range = static_cast<const stdgpu::vector<int>&>(pool).device_range();
+    int sum = thrust::reduce(range.begin(), range.end(),
+                             0,
+                             thrust::plus<int>());
+
+    EXPECT_EQ(sum, N * (N + 1) / 2);
+
+    stdgpu::vector<int>::destroyDeviceObject(pool);
+}
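The device_range tests in both files rely on fill_vector and fill_deque writing the values 1 through N, so the reduction must equal the Gauss sum N * (N + 1) / 2. A quick compile-time sanity check that the expected value for N = 10000 is 50005000 and still fits into int:

// Gauss sum for N = 10000; the product 10000 * 10001 stays well below INT_MAX.
static_assert(10000 * (10000 + 1) / 2 == 50005000,
              "Gauss sum for N = 10000");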