Replace BOOST_CURRENT_FUNCTION with C++11 __func__ #762

Merged
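Background: BOOST_CURRENT_FUNCTION (from <boost/current_function.hpp>) typically expands to the compiler's pretty-printed signature (__PRETTY_FUNCTION__ on GCC/Clang, __FUNCSIG__ on MSVC), while the C++11 predefined identifier __func__ typically yields just the unqualified function name. The debug output therefore becomes shorter and loses template arguments and parameter types, but the Boost header dependency goes away. A minimal sketch of the difference, assuming GCC or Clang; the names below are illustrative and not taken from alpaka:

    #include <cstdio>

    namespace demo
    {
        template<typename T>
        void enqueueKernel(T /*arg*/)
        {
            // BOOST_CURRENT_FUNCTION would typically print something like
            //   "void demo::enqueueKernel(T) [with T = int]"
            // whereas __func__ prints only the bare function name:
            std::printf("%s\n", __func__);    // prints "enqueueKernel"
        }
    }

    int main()
    {
        demo::enqueueKernel(42);
    }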
6 changes: 2 additions & 4 deletions include/alpaka/core/Debug.hpp
@@ -12,8 +12,6 @@

#include <alpaka/core/BoostPredef.hpp>

- #include <boost/current_function.hpp>

#include <string>
#include <iostream>

@@ -76,7 +74,7 @@ namespace alpaka
// Define ALPAKA_DEBUG_MINIMAL_LOG_SCOPE.
#if ALPAKA_DEBUG >= ALPAKA_DEBUG_MINIMAL
#define ALPAKA_DEBUG_MINIMAL_LOG_SCOPE\
- ::alpaka::core::detail::ScopeLogStdOut const scopeLogStdOut(BOOST_CURRENT_FUNCTION)
+ ::alpaka::core::detail::ScopeLogStdOut const scopeLogStdOut(__func__)
#else
#define ALPAKA_DEBUG_MINIMAL_LOG_SCOPE
#endif
@@ -85,7 +83,7 @@ namespace alpaka
// Define ALPAKA_DEBUG_FULL_LOG_SCOPE.
#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
#define ALPAKA_DEBUG_FULL_LOG_SCOPE\
- ::alpaka::core::detail::ScopeLogStdOut const scopeLogStdOut(BOOST_CURRENT_FUNCTION)
+ ::alpaka::core::detail::ScopeLogStdOut const scopeLogStdOut(__func__)
#else
#define ALPAKA_DEBUG_FULL_LOG_SCOPE
#endif
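A short usage sketch of the macros above, assuming ALPAKA_DEBUG is defined to at least ALPAKA_DEBUG_MINIMAL at compile time and that ScopeLogStdOut prints the string it is given on scope entry and exit; the function name below is illustrative only:

    #include <alpaka/core/Debug.hpp>

    void copyBufferToDevice()
    {
        // With this change the scope logger receives "copyBufferToDevice" from __func__
        // instead of the full compiler-specific signature from BOOST_CURRENT_FUNCTION.
        ALPAKA_DEBUG_MINIMAL_LOG_SCOPE;
        // ... actual work ...
    }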
4 changes: 2 additions & 2 deletions include/alpaka/kernel/TaskKernelCpuFibers.hpp
@@ -135,15 +135,15 @@ namespace alpaka
m_args));

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " blockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif
acc::AccCpuFibers<TDim, TIdx> acc(
*static_cast<workdiv::WorkDivMembers<TDim, TIdx> const *>(this),
blockSharedMemDynSizeBytes);

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " Fiber stack idx: " << boost::fibers::fixedsize_stack::traits_type::default_size() << " B" << std::endl;
#endif

8 changes: 4 additions & 4 deletions include/alpaka/kernel/TaskKernelCpuOmp2Blocks.hpp
@@ -113,7 +113,7 @@ namespace alpaka
m_args));

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " blockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif
// Bind all arguments except the accelerator.
@@ -140,7 +140,7 @@ namespace alpaka
if(::omp_in_parallel() != 0)
{
#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION << " already within a parallel region." << std::endl;
+ std::cout << __func__ << " already within a parallel region." << std::endl;
#endif
parallelFn(
boundKernelFnObj,
@@ -151,7 +151,7 @@ namespace alpaka
else
{
#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION << " opening new parallel region." << std::endl;
+ std::cout << __func__ << " opening new parallel region." << std::endl;
#endif
#pragma omp parallel
parallelFn(
@@ -182,7 +182,7 @@ namespace alpaka
}

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_MINIMAL
- std::cout << BOOST_CURRENT_FUNCTION << " omp_get_num_threads: " << ::omp_get_num_threads() << std::endl;
+ std::cout << __func__ << " omp_get_num_threads: " << ::omp_get_num_threads() << std::endl;
#endif
}

2 changes: 1 addition & 1 deletion include/alpaka/kernel/TaskKernelCpuOmp2Threads.hpp
@@ -112,7 +112,7 @@ namespace alpaka
m_args));

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " blockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif
// Bind all arguments except the accelerator.
6 changes: 3 additions & 3 deletions include/alpaka/kernel/TaskKernelCpuOmp4.hpp
@@ -112,7 +112,7 @@ namespace alpaka
m_args));

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " blockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif
// Bind all arguments except the accelerator.
@@ -158,7 +158,7 @@ namespace alpaka
if((::omp_get_team_num() == 0))
{
int const iNumTeams(::omp_get_num_teams());
- printf("%s omp_get_num_teams: %d\n", BOOST_CURRENT_FUNCTION, iNumTeams);
+ printf("%s omp_get_num_teams: %d\n", __func__, iNumTeams);
}
#endif
acc::AccCpuOmp4<TDim, TIdx> acc(
Expand Down Expand Up @@ -190,7 +190,7 @@ namespace alpaka
if((::omp_get_thread_num() == 0) && (b == 0))
{
int const numThreads(::omp_get_num_threads());
- printf("%s omp_get_num_threads: %d\n", BOOST_CURRENT_FUNCTION, numThreads);
+ printf("%s omp_get_num_threads: %d\n", __func__, numThreads);
if(numThreads != static_cast<int>(blockThreadCount))
{
throw std::runtime_error("ERROR: The OpenMP runtime did not use the number of threads that had been required!");
2 changes: 1 addition & 1 deletion include/alpaka/kernel/TaskKernelCpuSerial.hpp
@@ -105,7 +105,7 @@ namespace alpaka
m_args));

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " blockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif
// Bind all arguments except the accelerator.
2 changes: 1 addition & 1 deletion include/alpaka/kernel/TaskKernelCpuTbbBlocks.hpp
@@ -111,7 +111,7 @@ namespace alpaka
m_args));

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " blockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif
// Bind all arguments except the accelerator.
2 changes: 1 addition & 1 deletion include/alpaka/kernel/TaskKernelCpuThreads.hpp
@@ -134,7 +134,7 @@ namespace alpaka
m_args));

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " blockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif
acc::AccCpuThreads<TDim, TIdx> acc(
22 changes: 11 additions & 11 deletions include/alpaka/kernel/TaskKernelGpuCudaRt.hpp
@@ -302,10 +302,10 @@ namespace alpaka
#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
//std::size_t printfFifoSize;
//cudaDeviceGetLimit(&printfFifoSize, cudaLimitPrintfFifoSize);
- //std::cout << BOOST_CURRENT_FUNCTION << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
+ //std::cout << __func__ << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
//cudaDeviceSetLimit(cudaLimitPrintfFifoSize, printfFifoSize*10);
//cudaDeviceGetLimit(&printfFifoSize, cudaLimitPrintfFifoSize);
- //std::cout << BOOST_CURRENT_FUNCTION << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
+ //std::cout << __func__ << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
#endif
auto const gridBlockExtent(
workdiv::getWorkDiv<Grid, Blocks>(task));
@@ -319,7 +319,7 @@
kernel::cuda::detail::checkVecOnly3Dim(threadElemExtent);

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " gridDim: " << gridDim.z << " " << gridDim.y << " " << gridDim.x
<< " blockDim: " << blockDim.z << " " << blockDim.y << " " << blockDim.x
<< std::endl;
@@ -350,15 +350,15 @@

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
// Log the block shared memory idx.
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " BlockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
// Log the function attributes.
cudaFuncAttributes funcAttrs;
cudaFuncGetAttributes(&funcAttrs, kernel::cuda::detail::cudaKernel<TDim, TIdx, TKernelFnObj, TArgs...>);
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " binaryVersion: " << funcAttrs.binaryVersion
<< " constSizeBytes: " << funcAttrs.constSizeBytes << " B"
<< " localSizeBytes: " << funcAttrs.localSizeBytes << " B"
@@ -424,10 +424,10 @@
#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
//std::size_t printfFifoSize;
//cudaDeviceGetLimit(&printfFifoSize, cudaLimitPrintfFifoSize);
- //std::cout << BOOST_CURRENT_FUNCTION << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
+ //std::cout << __func__ << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
//cudaDeviceSetLimit(cudaLimitPrintfFifoSize, printfFifoSize*10);
//cudaDeviceGetLimit(&printfFifoSize, cudaLimitPrintfFifoSize);
- //std::cout << BOOST_CURRENT_FUNCTION << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
+ //std::cout << __func__ << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
#endif
auto const gridBlockExtent(
workdiv::getWorkDiv<Grid, Blocks>(task));
@@ -441,8 +441,8 @@
kernel::cuda::detail::checkVecOnly3Dim(threadElemExtent);

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION << "gridDim: " << gridDim.z << " " << gridDim.y << " " << gridDim.x << std::endl;
- std::cout << BOOST_CURRENT_FUNCTION << "blockDim: " << blockDim.z << " " << blockDim.y << " " << blockDim.x << std::endl;
+ std::cout << __func__ << "gridDim: " << gridDim.z << " " << gridDim.y << " " << gridDim.x << std::endl;
+ std::cout << __func__ << "blockDim: " << blockDim.z << " " << blockDim.y << " " << blockDim.x << std::endl;
#endif

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_MINIMAL
Expand Down Expand Up @@ -470,15 +470,15 @@ namespace alpaka

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
// Log the block shared memory idx.
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " BlockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
// Log the function attributes.
cudaFuncAttributes funcAttrs;
cudaFuncGetAttributes(&funcAttrs, kernel::cuda::detail::cudaKernel<TDim, TIdx, TKernelFnObj, TArgs...>);
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " binaryVersion: " << funcAttrs.binaryVersion
<< " constSizeBytes: " << funcAttrs.constSizeBytes << " B"
<< " localSizeBytes: " << funcAttrs.localSizeBytes << " B"
22 changes: 11 additions & 11 deletions include/alpaka/kernel/TaskKernelGpuHipRt.hpp
@@ -306,10 +306,10 @@ namespace alpaka
#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
//std::size_t printfFifoSize;
//hipDeviceGetLimit(&printfFifoSize, hipLimitPrintfFifoSize);
- //std::cout << BOOST_CURRENT_FUNCTION << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
+ //std::cout << __func__ << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
//hipDeviceSetLimit(hipLimitPrintfFifoSize, printfFifoSize*10);
//hipDeviceGetLimit(&printfFifoSize, hipLimitPrintfFifoSize);
- //std::cout << BOOST_CURRENT_FUNCTION << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
+ //std::cout << __func__ << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
#endif
auto const gridBlockExtent(
workdiv::getWorkDiv<Grid, Blocks>(task));
@@ -323,7 +323,7 @@
kernel::hip::detail::checkVecOnly3Dim(threadElemExtent);

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " gridDim: " << gridDim.z << " " << gridDim.y << " " << gridDim.x
<< " blockDim: " << blockDim.z << " " << blockDim.y << " " << blockDim.x
<< std::endl;
@@ -360,15 +360,15 @@

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
// Log the block shared memory size.
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " BlockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
// Log the function attributes.
/*hipFuncAttributes funcAttrs;
hipFuncGetAttributes(&funcAttrs, kernel::hip::detail::hipKernel<TDim, TIdx, TKernelFnObj, TArgs...>);
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " binaryVersion: " << funcAttrs.binaryVersion
<< " constSizeBytes: " << funcAttrs.constSizeBytes << " B"
<< " localSizeBytes: " << funcAttrs.localSizeBytes << " B"
@@ -438,10 +438,10 @@
#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
//std::size_t printfFifoSize;
//hipDeviceGetLimit(&printfFifoSize, hipLimitPrintfFifoSize);
- //std::cout << BOOST_CURRENT_FUNCTION << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
+ //std::cout << __func__ << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
//hipDeviceSetLimit(hipLimitPrintfFifoSize, printfFifoSize*10);
//hipDeviceGetLimit(&printfFifoSize, hipLimitPrintfFifoSize);
- //std::cout << BOOST_CURRENT_FUNCTION << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
+ //std::cout << __func__ << "INFO: printfFifoSize: " << printfFifoSize << std::endl;
#endif
auto const gridBlockExtent(
workdiv::getWorkDiv<Grid, Blocks>(task));
@@ -455,8 +455,8 @@
kernel::hip::detail::checkVecOnly3Dim(threadElemExtent);

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION << "gridDim: " << gridDim.z << " " << gridDim.y << " " << gridDim.x << std::endl;
- std::cout << BOOST_CURRENT_FUNCTION << "blockDim: " << blockDim.z << " " << blockDim.y << " " << blockDim.x << std::endl;
+ std::cout << __func__ << "gridDim: " << gridDim.z << " " << gridDim.y << " " << gridDim.x << std::endl;
+ std::cout << __func__ << "blockDim: " << blockDim.z << " " << blockDim.y << " " << blockDim.x << std::endl;
#endif

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_MINIMAL
Expand Down Expand Up @@ -484,7 +484,7 @@ namespace alpaka

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
// Log the block shared memory size.
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " BlockSharedMemDynSizeBytes: " << blockSharedMemDynSizeBytes << " B" << std::endl;
#endif

@@ -493,7 +493,7 @@
// Log the function attributes.
/*hipFuncAttributes funcAttrs;
hipFuncGetAttributes(&funcAttrs, kernel::hip::detail::hipKernel<TDim, TIdx, TKernelFnObj, TArgs...>);
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " binaryVersion: " << funcAttrs.binaryVersion
<< " constSizeBytes: " << funcAttrs.constSizeBytes << " B"
<< " localSizeBytes: " << funcAttrs.localSizeBytes << " B"
2 changes: 1 addition & 1 deletion include/alpaka/kernel/Traits.hpp
@@ -205,7 +205,7 @@ namespace alpaka
"The idx type of TAcc and the idx type of TWorkDiv have to be identical!");

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " gridBlockExtent: " << workdiv::getWorkDiv<Grid, Blocks>(workDiv)
<< ", blockThreadExtent: " << workdiv::getWorkDiv<Block, Threads>(workDiv)
<< std::endl;
2 changes: 1 addition & 1 deletion include/alpaka/mem/buf/BufCpu.hpp
@@ -81,7 +81,7 @@ namespace alpaka
"The idx type of TExtent and the TIdx template parameter have to be identical!");

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " e: " << m_extentElements
<< " ptr: " << static_cast<void *>(m_pMem)
<< " pitch: " << m_pitchBytes
6 changes: 3 additions & 3 deletions include/alpaka/mem/buf/BufCudaRt.hpp
@@ -349,7 +349,7 @@ namespace alpaka
static_cast<std::size_t>(widthBytes)));

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " ew: " << width
<< " ewb: " << widthBytes
<< " ptr: " << memPtr
@@ -404,7 +404,7 @@ namespace alpaka
ALPAKA_ASSERT(pitchBytes >= static_cast<std::size_t>(widthBytes) || (width * height) == 0);

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " ew: " << width
<< " eh: " << height
<< " ewb: " << widthBytes
@@ -460,7 +460,7 @@ namespace alpaka


#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " ew: " << extent::getWidth(extent)
<< " eh: " << cudaExtentVal.height
<< " ed: " << cudaExtentVal.depth
6 changes: 3 additions & 3 deletions include/alpaka/mem/buf/BufHipRt.hpp
@@ -349,7 +349,7 @@ namespace alpaka
static_cast<std::size_t>(widthBytes)));

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " ew: " << width
<< " ewb: " << widthBytes
<< " ptr: " << memPtr
@@ -411,7 +411,7 @@ namespace alpaka
}

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " ew: " << width
<< " eh: " << height
<< " ewb: " << widthBytes
@@ -473,7 +473,7 @@ namespace alpaka
}

#if ALPAKA_DEBUG >= ALPAKA_DEBUG_FULL
- std::cout << BOOST_CURRENT_FUNCTION
+ std::cout << __func__
<< " ew: " << extent::getWidth(extent)
<< " eh: " << hipExtentVal.height
<< " ed: " << hipExtentVal.depth