[WIP] Added pinned memory resource #141

Closed
80 changes: 80 additions & 0 deletions include/rmm/mr/pinned_memory_resource.hpp
@@ -0,0 +1,80 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include "device_memory_resource.hpp"

#include <cuda_runtime_api.h>
#include <iostream>
#include <new>

namespace rmm {
namespace mr {
/**---------------------------------------------------------------------------*
* @brief `device_memory_resource` derived class that uses
* cudaHostAlloc/cudaFreeHost for allocation/deallocation of pinned
* (page-locked) host memory.
*---------------------------------------------------------------------------**/
class pinned_memory_resource final : public device_memory_resource {
public:
bool supports_streams() const noexcept override { return false; }

private:
/**---------------------------------------------------------------------------*
* @brief Allocates memory of size at least \p bytes using cudaHostAlloc.
*
* The returned pointer has at least 256B alignment.
*
* @note Stream argument is ignored
*
* @throws `std::bad_alloc` if the requested allocation could not be fulfilled
*
* @param bytes The size, in bytes, of the allocation
* @return void* Pointer to the newly allocated memory
*---------------------------------------------------------------------------**/
void* do_allocate(std::size_t bytes, cudaStream_t) override {
void* p{nullptr};
cudaError_t const status = cudaHostAlloc(&p, bytes, cudaHostAllocDefault);
if (cudaSuccess != status) {
#ifndef NDEBUG
std::cerr << "cudaHostAlloc failed: " << cudaGetErrorName(status) << " "
<< cudaGetErrorString(status) << "\n";
#endif
throw std::bad_alloc{};
}
return p;
}

/**---------------------------------------------------------------------------*
* @brief Deallocate memory pointed to by \p p.
*
* @note Stream argument is ignored.
*
* @throws Nothing.
*
* @param p Pointer to be deallocated
*---------------------------------------------------------------------------**/
void do_deallocate(void* p, std::size_t, cudaStream_t) override {
cudaError_t const status = cudaFreeHost(p);
if (cudaSuccess != status) {
#ifndef NDEBUG
std::cerr << "cudaFreeHost failed: " << cudaGetErrorName(status) << " "
<< cudaGetErrorString(status) << "\n";
#endif
}
}
};

} // namespace mr
} // namespace rmm
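
A quick usage sketch for reviewers (not part of this PR): it assumes the usual `allocate`/`deallocate` non-virtual interface on `device_memory_resource`, which forwards to the `do_allocate`/`do_deallocate` overrides above; the stream argument is ignored by this resource.

#include <rmm/mr/pinned_memory_resource.hpp>

int main() {
  rmm::mr::pinned_memory_resource mr;
  // Allocate 1 KiB of pinned (page-locked) host memory; the stream is ignored.
  void* p = mr.allocate(1024, 0);
  // ... use the buffer, e.g. as a staging area for cudaMemcpyAsync ...
  mr.deallocate(p, 1024, 0);
  return 0;
}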
3 changes: 2 additions & 1 deletion tests/mr/mr_tests.cpp
@@ -85,7 +85,8 @@ struct MRTest : public ::testing::Test {

using resources = ::testing::Types<rmm::mr::cuda_memory_resource,
rmm::mr::managed_memory_resource,
rmm::mr::cnmem_memory_resource>;
rmm::mr::cnmem_memory_resource,
rmm::mr::pinned_memory_resource>;

TYPED_TEST_CASE(MRTest, resources);

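
With `rmm::mr::pinned_memory_resource` added to the `resources` list, every TYPED_TEST registered against MRTest now also runs on pinned host memory. As a rough illustration only (the MRTest fixture body is collapsed in this diff, so the `mr` member name below is an assumption, not the file's actual API), such a test looks like:

TYPED_TEST(MRTest, AllocateAndDeallocate) {
  // TypeParam is the resource under test, e.g. rmm::mr::pinned_memory_resource.
  void* p = this->mr->allocate(256, 0);  // `mr` member is hypothetical here
  EXPECT_NE(nullptr, p);
  this->mr->deallocate(p, 256, 0);
}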