From 980ede8438ac4cf29303913c823538f42d19b226 Mon Sep 17 00:00:00 2001
From: Zachary Jones
Date: Fri, 7 Apr 2023 08:39:32 -0700
Subject: [PATCH] Rename (un)likely to avoid conflict with boost (#127)

Summary:
Pull Request resolved: https://github.com/facebookincubator/dynolog/pull/127

Boost has definitions for likely and unlikely that conflict and can cause
compiler errors. Renaming to __hbt_likely and __hbt_unlikely to resolve
the conflict.

Reviewed By: jj10306

Differential Revision: D44730441

fbshipit-source-id: 338754ba0e5a2add03809639d5d433faa0d0b8ed
---
 hbt/src/common/Defs.h                        | 42 ++++++++--------
 hbt/src/common/System.cpp                    |  2 +-
 hbt/src/common/System.h                      |  4 +-
 hbt/src/perf_event/CpuEventsGroup.h          | 24 ++++-----
 .../perf_event/PerCpuCountSampleGenerator.h  |  8 +--
 .../perf_event/PerCpuSampleGeneratorBase.h   |  2 +-
 .../perf_event/PerCpuThreadSwitchGenerator.h | 24 ++++-----
 hbt/src/ringbuffer/Consumer.h                | 50 +++++++++----------
 hbt/src/ringbuffer/PerCpuRingBuffer.h        |  6 +--
 hbt/src/ringbuffer/Producer.h                | 24 ++++-----
 hbt/src/ringbuffer/Shm.h                     |  4 +-
 hbt/src/tagstack/IntervalSlicer.h            |  4 +-
 hbt/src/tagstack/PerfEventStream.h           |  2 +-
 hbt/src/tagstack/Slicer.h                    | 26 +++++-----
 hbt/src/tagstack/Stream.h                    |  2 +-
 15 files changed, 112 insertions(+), 112 deletions(-)

diff --git a/hbt/src/common/Defs.h b/hbt/src/common/Defs.h
index 8f01491f..3a31f453 100644
--- a/hbt/src/common/Defs.h
+++ b/hbt/src/common/Defs.h
@@ -33,8 +33,8 @@ inline pid_t gettid() noexcept {
 #endif
 
 // Branch hint macros. C++20 will include them as part of language.
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
+#define __hbt_likely(x) __builtin_expect(!!(x), 1)
+#define __hbt_unlikely(x) __builtin_expect(!!(x), 0)
 
 template
 TStream& LogCtxt(TStream& oss) {
@@ -132,41 +132,41 @@ class EnvironmentError : public std::exception {
 
 #define HBT_THROW_EINVAL() HBT_THROW(std::invalid_argument)
 #define HBT_THROW_EINVAL_IF(cond) \
-  if (unlikely(cond)) \
+  if (__hbt_unlikely(cond)) \
   HBT_THROW_EINVAL() << "\t" << HBT_STRINGIFY(cond) << ". "
 
 #define HBT_THROW_ENVIRONMENT(err) HBT_THROW(EnvironmentError, err)
 #define HBT_THROW_ENVIRONMENT_IF(cond, err) \
-  if (unlikely(cond)) \
+  if (__hbt_unlikely(cond)) \
   HBT_THROW_ENVIRONMENT(err) << "\t" << HBT_STRINGIFY(cond) << ". "
 
 #define HBT_THROW_SYSTEM(err) \
   HBT_THROW(std::system_error, err, std::system_category())
 #define HBT_THROW_SYSTEM_IF(cond, err) \
-  if (unlikely(cond)) \
+  if (__hbt_unlikely(cond)) \
   HBT_THROW_SYSTEM(err) << "\t" << HBT_STRINGIFY(cond) << ". "
 
 #define HBT_THROW_SYSTEM_CODE(err) HBT_THROW(std::system_error, err)
 #define HBT_THROW_SYSTEM_CODE_IF(cond, err) \
-  if (unlikely(cond)) \
+  if (__hbt_unlikely(cond)) \
   HBT_THROW_SYSTEM_CODE(err) << "\t" << HBT_STRINGIFY(cond) << ". "
 
 #define HBT_THROW_ASSERT() HBT_THROW(std::runtime_error)
 #define HBT_THROW_ASSERT_IF(cond) \
-  if (unlikely(cond)) \
+  if (__hbt_unlikely(cond)) \
   HBT_THROW_ASSERT() << "\t" << HBT_STRINGIFY(cond) << ". "
 
 // Conditional throwing exception
-#define HBT_THROW_IF_NULLPTR(ptr) \
-  if (unlikely(ptr == nullptr)) \
+#define HBT_THROW_IF_NULLPTR(ptr) \
+  if (__hbt_unlikely(ptr == nullptr)) \
   HBT_THROW_EINVAL() << HBT_STRINGIFY(ptr) << " has nullptr value. "
 
 // Safe-cast to std::error_code
 inline std::error_code toErrorCode(ssize_t e) {
-  if (unlikely(e <= 0)) {
+  if (__hbt_unlikely(e <= 0)) {
     HBT_THROW_EINVAL() << "\n\tError " << e << " is not a positive number. "
                        << " Is this value really an error?";
-  } else if (unlikely(e > std::numeric_limits::max())) {
+  } else if (__hbt_unlikely(e > std::numeric_limits::max())) {
     HBT_THROW_EINVAL() << "\n\tError " << e << " is out of range. "
                        << " Is this really an error?";
   }
@@ -206,17 +206,17 @@ class LogEntry final {
       << "\n  " << HBT_LOG_PREFFIX << "]\n  => \033[0m"
 
 #define HBT_LOG_INFO_IF(cond) \
-  if (unlikely(cond)) \
+  if (__hbt_unlikely(cond)) \
   HBT_LOG_INFO()
 #define HBT_LOG_WARNING_IF(cond) \
-  if (unlikely(cond)) \
+  if (__hbt_unlikely(cond)) \
   HBT_LOG_WARNING()
 #define HBT_LOG_ERROR_IF(cond) \
-  if (unlikely(cond)) \
+  if (__hbt_unlikely(cond)) \
   HBT_LOG_ERROR()
 
-#define HBT_DCHECK_NOT_NULLPTR(t) \
-  if (unlikely((t) == nullptr)) \
+#define HBT_DCHECK_NOT_NULLPTR(t) \
+  if (__hbt_unlikely((t) == nullptr)) \
   HBT_THROW_ASSERT() << "\n\tExpected argument to be not null."
 
 #define __HBT_EXPAND_OPD(opd) HBT_STRINGIFY(opd) << " (" << (opd) << ")"
@@ -227,12 +227,12 @@ class LogEntry final {
 // must handle all errors explicitly.
 //
 
-#define __HBT_DCHECK(a) \
-  if (unlikely(!((a)))) \
+#define __HBT_DCHECK(a) \
+  if (__hbt_unlikely(!((a)))) \
   HBT_THROW_ASSERT() << "\n\tExpected true for " << __HBT_EXPAND_OPD(a) << ". "
 
 #define __HBT_DCHECK_CMP(a, b, op) \
-  if (unlikely(!((a)op(b)))) \
+  if (__hbt_unlikely(!((a)op(b)))) \
   HBT_THROW_ASSERT() << "\n\tExpected " << __HBT_EXPAND_OPD(a) << " " \
                      << HBT_STRINGIFY(op) << " " << __HBT_EXPAND_OPD(b) \
                      << ". "
@@ -269,12 +269,12 @@ class LogEntry final {
 // Argument checks
 //
 #define HBT_ARG_CHECK(a) \
-  if (unlikely(!((a)))) \
+  if (__hbt_unlikely(!((a)))) \
   HBT_THROW_EINVAL() << "\n\tExpected argument to be true: " \
                      << __HBT_EXPAND_OPD(a) << ". "
 
 #define _HBT_ARG_CMP(a, b, op) \
-  if (unlikely(!((a)op(b)))) \
+  if (__hbt_unlikely(!((a)op(b)))) \
   HBT_THROW_EINVAL() << "\n\tExpected argument " << __HBT_EXPAND_OPD(a) \
                      << " " HBT_STRINGIFY(op) << " " << __HBT_EXPAND_OPD(b) \
                      << ". "
diff --git a/hbt/src/common/System.cpp b/hbt/src/common/System.cpp
index 6c0e886f..bd2c230c 100644
--- a/hbt/src/common/System.cpp
+++ b/hbt/src/common/System.cpp
@@ -395,7 +395,7 @@ CpuId cpu_first_set(const cpu_set_t& cpu_set) noexcept {
 
 CpuId getCpu() {
   int ret = sched_getcpu();
-  if (unlikely(0 > ret)) {
+  if (__hbt_unlikely(0 > ret)) {
     HBT_THROW_SYSTEM(errno) << "Error reading ID of current CPU.";
   }
   if (ret > kMaxCpus - 1) {
diff --git a/hbt/src/common/System.h b/hbt/src/common/System.h
index 2a8b5eeb..4f4e5dc7 100644
--- a/hbt/src/common/System.h
+++ b/hbt/src/common/System.h
@@ -235,9 +235,9 @@ struct CpuSet {
     cpu_set_t cpus;
     CPU_ZERO(&cpus);
     for (auto cpu : s) {
-      if (unlikely(cpu < 0)) {
+      if (__hbt_unlikely(cpu < 0)) {
         HBT_THROW_EINVAL() << "Invalid CPU ID: " << cpu;
-      } else if (unlikely(cpu >= kMaxCpus)) {
+      } else if (__hbt_unlikely(cpu >= kMaxCpus)) {
         HBT_THROW_EINVAL() << "Maximum CPU ID is " << kMaxCpus - 1
                            << " Got CPU ID: " << cpu
                            << ". Do you want to increase kMaxCpus?";
diff --git a/hbt/src/perf_event/CpuEventsGroup.h b/hbt/src/perf_event/CpuEventsGroup.h
index 0a0ea55d..8d7c2a3c 100644
--- a/hbt/src/perf_event/CpuEventsGroup.h
+++ b/hbt/src/perf_event/CpuEventsGroup.h
@@ -1356,7 +1356,7 @@ ssize_t CpuEventsGroup::consume(unsigned max_num_records) {
       reinterpret_cast(record_begin);
   auto record_end =
       data_start + ((data_tail + event_header->size) & data_offset_mask);
-  if (unlikely(record_end < record_begin)) {
+  if (__hbt_unlikely(record_end < record_begin)) {
    // perf event is wrapped around the ring buffer, make a contiguous copy.
     void* buffer = enlargeAuxBuffer(event_header->size);
     const uint8_t* sentinel = data_start + data_size;
@@ -1376,7 +1376,7 @@ ssize_t CpuEventsGroup::consume(unsigned max_num_records) {
       auto r = reinterpret_cast(event_header);
       HBT_DCHECK_EQ(r->header.size, recordSize(*r));
       err = this->asImpl_().handleRecordLost(*r);
-      if (unlikely(0 > err)) {
+      if (__hbt_unlikely(0 > err)) {
         goto exit;
       }
       num_record_lost_ += r->num_lost;
@@ -1401,7 +1401,7 @@ ssize_t CpuEventsGroup::consume(unsigned max_num_records) {
       auto r = reinterpret_cast(event_header);
       HBT_DCHECK_EQ(r->header.size, recordSize(*r));
       err = this->asImpl_().handleRecordExit(*r);
-      if (unlikely(0 > err)) {
+      if (__hbt_unlikely(0 > err)) {
         goto exit;
       }
       ++num_record_exit_;
@@ -1424,7 +1424,7 @@ ssize_t CpuEventsGroup::consume(unsigned max_num_records) {
       auto r = reinterpret_cast(event_header);
       HBT_DCHECK_EQ(r->header.size, recordSize(*r));
       err = this->asImpl_().handleRecordFork(*r);
-      if (unlikely(0 > err)) {
+      if (__hbt_unlikely(0 > err)) {
         goto exit;
       }
       ++num_record_fork_;
@@ -1433,14 +1433,14 @@ ssize_t CpuEventsGroup::consume(unsigned max_num_records) {
     case PERF_RECORD_SAMPLE: {
       if constexpr (!std::is_void::value) {
         auto r = reinterpret_cast(event_header);
-        if (unlikely(r->header.size != recordSize(*r))) {
+        if (__hbt_unlikely(r->header.size != recordSize(*r))) {
           HBT_LOG_ERROR() << "Invalid record size of: " << r->header.size
                           << " expected " << recordSize(*r);
           err = -EPERM;
           goto exit;
         }
         err = this->asImpl_().handleRecordSample(*r);
-        if (unlikely(0 > err)) {
+        if (__hbt_unlikely(0 > err)) {
           goto exit;
         }
         ++num_record_sample_;
@@ -1449,14 +1449,14 @@ ssize_t CpuEventsGroup::consume(unsigned max_num_records) {
     case PERF_RECORD_READ: {
       if constexpr (!std::is_void::value) {
         auto r = reinterpret_cast(event_header);
-        if (unlikely(r->header.size != recordSize(*r))) {
+        if (__hbt_unlikely(r->header.size != recordSize(*r))) {
          HBT_LOG_ERROR() << "Invalid record read size of: " << r->header.size
                           << " expected " << recordSize(*r);
           err = -EPERM;
           goto exit;
         }
         err = this->asImpl_().handleRecordRead(*r);
-        if (unlikely(0 > err)) {
+        if (__hbt_unlikely(0 > err)) {
           goto exit;
         }
         ++num_record_read_;
@@ -1467,7 +1467,7 @@ ssize_t CpuEventsGroup::consume(unsigned max_num_records) {
       auto r = reinterpret_cast(event_header);
       HBT_DCHECK_EQ(r->header.size, recordSize(*r));
       err = this->asImpl_().handleRecordAux(*r);
-      if (unlikely(0 > err)) {
+      if (__hbt_unlikely(0 > err)) {
         goto exit;
       }
       ++num_record_aux_;
@@ -1478,7 +1478,7 @@ ssize_t CpuEventsGroup::consume(unsigned max_num_records) {
       auto r = reinterpret_cast(event_header);
       HBT_DCHECK_EQ(r->header.size, recordSize(*r));
       err = this->asImpl_().handleRecordItraceStart(*r);
-      if (unlikely(0 > err)) {
+      if (__hbt_unlikely(0 > err)) {
         goto exit;
       }
       ++num_record_itrace_start_;
@@ -1489,7 +1489,7 @@ ssize_t CpuEventsGroup::consume(unsigned max_num_records) {
       auto r = reinterpret_cast(event_header);
       HBT_DCHECK_EQ(r->header.size, recordSize(*r));
       err = this->asImpl_().handleRecordSwitchCpuWide(*r);
-      if (unlikely(0 > err)) {
+      if (__hbt_unlikely(0 > err)) {
         goto exit;
       }
       ++num_record_switch_cpu_wide_;
@@ -1557,7 +1557,7 @@ void CpuEventsGroup::onCpuDataBufferRead(
       std::min(rb_num_bytes, static_cast(sentinel - begin_ptr));
   CpuId cpuId = this->getCpu();
 
-  if (unlikely(end_ptr < begin_ptr)) {
+  if (__hbt_unlikely(end_ptr < begin_ptr)) {
     // Ring buffer wrapped, so there are two slices
    callback(cpuId, RbDataSlices(begin_ptr, len, data_start, data_size - len));
   } else {
diff --git a/hbt/src/perf_event/PerCpuCountSampleGenerator.h b/hbt/src/perf_event/PerCpuCountSampleGenerator.h
index 597309d9..36936626 100644
--- a/hbt/src/perf_event/PerCpuCountSampleGenerator.h
+++ b/hbt/src/perf_event/PerCpuCountSampleGenerator.h
@@ -132,7 +132,7 @@ class CpuCountSampleGenerator final
 
   ssize_t handleRecordSample(const mode::Sampling::Sample& r) noexcept {
     HBT_DCHECK_EQ(r.nr, kNumEvents);
-    if (unlikely(count_.tstamp == kInvalidTimeStamp)) {
+    if (__hbt_unlikely(count_.tstamp == kInvalidTimeStamp)) {
       // Discard this count because we don't know the start of
       // of the counting interval due to the lost package.
 
@@ -153,7 +153,7 @@ class CpuCountSampleGenerator final
       return 0;
     }
 
-    if (unlikely(count_.tstamp > r.tstamp)) {
+    if (__hbt_unlikely(count_.tstamp > r.tstamp)) {
       HBT_LOG_ERROR() << fmt::format(
           "New record's timestamp ({}) precedes last timestamp ({})",
           r.tstamp,
@@ -180,7 +180,7 @@ class CpuCountSampleGenerator final
 
     // Do not write the "values" part of Count.
     auto ret = output_producer_.write(&count_, kWriteByteSize);
-    if (unlikely(kNumBytesDropIfFull > 0 && ret == -ENOSPC)) {
+    if (__hbt_unlikely(kNumBytesDropIfFull > 0 && ret == -ENOSPC)) {
       auto err = output_producer_.dropN(kNumBytesDropIfFull);
       HBT_THROW_ASSERT_IF(0 > err);
       // Retry now that space has been cleared. There is
@@ -188,7 +188,7 @@ class CpuCountSampleGenerator final
       // cannot fail due too lack of space again.
       ret = output_producer_.write(&count_, kWriteByteSize);
     }
-    if (unlikely(0 > ret)) {
+    if (__hbt_unlikely(0 > ret)) {
       count_.tstamp = kInvalidTimeStamp;
       return ret;
     }
diff --git a/hbt/src/perf_event/PerCpuSampleGeneratorBase.h b/hbt/src/perf_event/PerCpuSampleGeneratorBase.h
index b49ffd55..0f0dc8d1 100644
--- a/hbt/src/perf_event/PerCpuSampleGeneratorBase.h
+++ b/hbt/src/perf_event/PerCpuSampleGeneratorBase.h
@@ -88,7 +88,7 @@ class PerCpuSampleGeneratorBase : public PerCpuBase {
 
       // At this point it either finished because it hit stop_ts or
       // an unhandled error.
-      if (likely(ret >= 0)) {
+      if (__hbt_likely(ret >= 0)) {
         ++num_done;
       }
     }
diff --git a/hbt/src/perf_event/PerCpuThreadSwitchGenerator.h b/hbt/src/perf_event/PerCpuThreadSwitchGenerator.h
index 727c1cc4..28173bcb 100644
--- a/hbt/src/perf_event/PerCpuThreadSwitchGenerator.h
+++ b/hbt/src/perf_event/PerCpuThreadSwitchGenerator.h
@@ -227,7 +227,7 @@ class CpuThreadSwitchGenerator final
 
   auto handleRecordLost(const mode::ContextSwitch::Lost& r) noexcept {
     auto ret = pendingRecordLost_(r.sample_id.tstamp);
-    if (unlikely(0 > ret)) {
+    if (__hbt_unlikely(0 > ret)) {
       return ret;
     }
 
@@ -242,7 +242,7 @@ class CpuThreadSwitchGenerator final
     auto ev_start =
        tagstack::Event::makeWriteErrorsStart(last_tstamp_ + 1, comp_unit_id_);
     ret = output_producer_.write(ev_start);
-    if (likely(ret >= 0)) {
+    if (__hbt_likely(ret >= 0)) {
       last_tstamp_ = r.sample_id.tstamp;
       in_write_errors_ = true;
      // We've lost track of which threads are active. TID could've wrapped
@@ -266,7 +266,7 @@ class CpuThreadSwitchGenerator final
 
     TimeStamp new_tstamp = sample_id_ptr->tstamp;
     auto ret = pendingRecordLost_(new_tstamp);
-    if (unlikely(0 > ret)) {
+    if (__hbt_unlikely(0 > ret)) {
       return ret;
     }
 
@@ -304,7 +304,7 @@ class CpuThreadSwitchGenerator final
     HBT_DCHECK_GE(r.tid, 0);
 
     auto ret = pendingRecordLost_(new_tstamp);
-    if (unlikely(0 > ret)) {
+    if (__hbt_unlikely(0 > ret)) {
       return ret;
     }
     auto [vid, is_new_vid] =
@@ -312,7 +312,7 @@ class CpuThreadSwitchGenerator final
     auto ev = tagstack::Event::makeThreadDestruction(
         new_tstamp, tid_level_, vid, comp_unit_id_);
     ret = output_producer_.write(ev);
-    if (likely(ret >= 0)) {
+    if (__hbt_likely(ret >= 0)) {
       if (is_new_vid) {
         auto& tinfo = thread_stats_->createThreadInfo(
             static_cast(r.ppid),
@@ -339,7 +339,7 @@ class CpuThreadSwitchGenerator final
     HBT_DCHECK_GE(r.tid, 0);
 
     auto ret = pendingRecordLost_(new_tstamp);
-    if (unlikely(0 > ret)) {
+    if (__hbt_unlikely(0 > ret)) {
       return ret;
     }
 
@@ -348,7 +348,7 @@ class CpuThreadSwitchGenerator final
     auto ev = tagstack::Event::makeThreadCreation(
         new_tstamp, tid_level_, vid, comp_unit_id_);
     ret = output_producer_.write(ev);
-    if (likely(ret >= 0)) {
+    if (__hbt_likely(ret >= 0)) {
       if (!is_new_vid) {
         // Has already been seen, probably coming from another CPU.
         auto& tinfo = thread_stats_->info.at(vid);
@@ -375,7 +375,7 @@ class CpuThreadSwitchGenerator final
 
     TimeStamp new_tstamp = r.sample_id.tstamp;
 
     auto ret = pendingRecordLost_(new_tstamp);
-    if (unlikely(0 > ret)) {
+    if (__hbt_unlikely(0 > ret)) {
       return ret;
     }
@@ -395,7 +395,7 @@ class CpuThreadSwitchGenerator final
        thread_stats_->selectNextVid(static_cast(r.sample_id.tid));
     ret = output_producer_.write(tagstack::Event::makeSwitchIn(
         new_tstamp, tid_level_, vid, comp_unit_id_));
-    if (unlikely(is_new_vid && ret >= 0)) {
+    if (__hbt_unlikely(is_new_vid && ret >= 0)) {
       // Add ThreadInfo if this is a new vid.
       pid_t pid = static_cast(r.sample_id.pid);
       pid_t tid = static_cast(r.sample_id.tid);
@@ -405,7 +405,7 @@ class CpuThreadSwitchGenerator final
       }
     }
 
-    if (likely(ret >= 0)) {
+    if (__hbt_likely(ret >= 0)) {
       last_tstamp_ = new_tstamp;
     }
     return ret;
@@ -429,12 +429,12 @@ class CpuThreadSwitchGenerator final
 
  /// Try to write any pending record lost and clear error if succesful.
   inline ssize_t pendingRecordLost_(TimeStamp tstamp) noexcept {
-    if (likely(!in_write_errors_)) {
+    if (__hbt_likely(!in_write_errors_)) {
       return 0;
     }
    auto ev_end = tagstack::Event::makeWriteErrorsEnd(tstamp, comp_unit_id_);
     auto ret = output_producer_.write(ev_end);
-    if (likely(ret >= 0)) {
+    if (__hbt_likely(ret >= 0)) {
       last_tstamp_ = tstamp;
       in_write_errors_ = false;
     }
diff --git a/hbt/src/ringbuffer/Consumer.h b/hbt/src/ringbuffer/Consumer.h
index 5c9433a4..8b4740e0 100644
--- a/hbt/src/ringbuffer/Consumer.h
+++ b/hbt/src/ringbuffer/Consumer.h
@@ -55,7 +55,7 @@ class Consumer : public RingBufferWrapper {
   /// Returns -EBUSY if there is contention and -EAGAIN if
   /// a temporal error occured.
   [[nodiscard]] ssize_t startTx() DEXCEPT {
-    if (unlikely(inTx())) {
+    if (__hbt_unlikely(inTx())) {
       return -EBUSY;
     }
     bool f = false;
@@ -70,10 +70,10 @@ class Consumer : public RingBufferWrapper {
 
   /// Returns {size, ptr) if succeded.
   std::pair readInTx(const size_t size) DEXCEPT {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return {-EINVAL, nullptr};
     }
-    if (unlikely(size > this->header_.kDataPoolByteSize)) {
+    if (__hbt_unlikely(size > this->header_.kDataPoolByteSize)) {
       return {-EINVAL, nullptr};
     }
     auto ptr = this->read_(size);
@@ -102,7 +102,7 @@ class Consumer : public RingBufferWrapper {
   template
   std::pair readInTxWithSize() DEXCEPT {
     static_assert(std::is_integral::value);
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return {-EINVAL, nullptr};
     }
 
@@ -125,18 +125,18 @@ class Consumer : public RingBufferWrapper {
 
   // Copy next bytes to buffer.
  [[nodiscard]] ssize_t copyInTx(const size_t size, uint8_t* buffer) DEXCEPT {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     if (size == 0) {
       return 0;
     }
-    if (unlikely(size > this->header_.kDataPoolByteSize)) {
+    if (__hbt_unlikely(size > this->header_.kDataPoolByteSize)) {
       return -EINVAL;
     }
     auto [ptr, resized] = readFromRingBuffer_(
         size, static_cast(buffer), size);
-    if (unlikely(resized)) {
+    if (__hbt_unlikely(resized)) {
       HBT_LOG_ERROR() << "Unexpeted resizing of buffers";
       return -EPERM;
     }
@@ -153,7 +153,7 @@ class Consumer : public RingBufferWrapper {
   }
 
   [[nodiscard]] ssize_t commitTx() DEXCEPT {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     this->header_.incTail(this->tx_size_);
@@ -172,7 +172,7 @@ class Consumer : public RingBufferWrapper {
   /// It does not update .
   template
   ssize_t findInBufferInTx() const DEXCEPT {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     uint64_t head = this->header_.readHead();
@@ -206,14 +206,14 @@ class Consumer : public RingBufferWrapper {
    const auto loop1_end = wrap ? this->header_.kDataPoolByteSize : end;
 
     for (auto i = start; i < loop1_end; ++i) {
-      if (unlikely(this->data_[i] == kElem)) {
+      if (__hbt_unlikely(this->data_[i] == kElem)) {
         return static_cast(i - start) + 1;
       }
     }
-    if (unlikely(wrap)) {
+    if (__hbt_unlikely(wrap)) {
       // Search in wrapped part of buffer.
       for (decltype(end) i = 0; i < end; ++i) {
-        if (unlikely(this->data_[i] == kElem)) {
+        if (__hbt_unlikely(this->data_[i] == kElem)) {
           return static_cast(
               this->header_.kDataPoolByteSize - start + i + 1);
         }
       }
@@ -227,7 +227,7 @@ class Consumer : public RingBufferWrapper {
  /// Both modes takes into account bytes already read in transaction.
   template
   [[nodiscard]] std::pair readInTxChunk() {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return {-EINVAL, nullptr};
     }
     auto ret = findInBufferInTx();
@@ -243,7 +243,7 @@ class Consumer : public RingBufferWrapper {
   }
 
   [[nodiscard]] ssize_t cancelTx() DEXCEPT {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     auto old_tx_size = static_cast(this->tx_size_);
@@ -257,7 +257,7 @@ class Consumer : public RingBufferWrapper {
   /// Drop the contents of buffer and commits transaction.
   /// Note that this is a writing operation.
   [[nodiscard]] ssize_t dropInTx() DEXCEPT {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     auto old_tx_size = static_cast(this->tx_size_);
@@ -271,7 +271,7 @@ class Consumer : public RingBufferWrapper {
   /// Clamps in [tail, head).
   /// Does not terminate transaction in error.
   [[nodiscard]] ssize_t seekInTx(size_t pos) DEXCEPT {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     auto h = this->header_.readHead();
@@ -289,7 +289,7 @@ class Consumer : public RingBufferWrapper {
   /// Clamps in [tail, head).
   /// Does not terminate transaction in error.
   [[nodiscard]] ssize_t relSeekInTx(ssize_t delta) DEXCEPT {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     auto h = static_cast(this->header_.readHead());
@@ -356,7 +356,7 @@ class Consumer : public RingBufferWrapper {
 
   /// Start transaction and get ptr to first element.
   std::pair startReadTx(size_t size) DEXCEPT {
-    if (unlikely(size > this->header_.kDataPoolByteSize)) {
+    if (__hbt_unlikely(size > this->header_.kDataPoolByteSize)) {
       return {-EINVAL, nullptr};
     }
 
@@ -491,26 +491,26 @@ class Consumer : public RingBufferWrapper {
     HBT_DCHECK_LE(size, this->header_.kDataPoolByteSize);
 
     if constexpr (kAlwaysCopy) {
-      if (unlikely(copy_buffer == nullptr)) {
+      if (__hbt_unlikely(copy_buffer == nullptr)) {
        HBT_LOG_ERROR() << "kAlwaysCopy requires non-null copy_buffer";
         return {nullptr, nullptr};
       }
     }
 
     if constexpr (!kCanResizeCopyBuffer) {
-      if (unlikely(buffer_size < size)) {
+      if (__hbt_unlikely(buffer_size < size)) {
         HBT_LOG_ERROR() << "Copy buffer is too small and "
                         << "kCanResizeCopyBuffer is set to false";
         return {nullptr, nullptr};
       }
     }
 
-    if (unlikely(0 >= size)) {
+    if (__hbt_unlikely(0 >= size)) {
       HBT_LOG_ERROR() << "Cannot copy value of zero size";
       return {nullptr, nullptr};
     }
 
-    if (unlikely(size > this->header_.kDataPoolByteSize)) {
+    if (__hbt_unlikely(size > this->header_.kDataPoolByteSize)) {
      HBT_LOG_ERROR() << "reads larger than buffer are not supported";
       return {nullptr, nullptr};
     }
@@ -539,20 +539,20 @@ class Consumer : public RingBufferWrapper {
     }
 
     bool needs_resize = buffer_size < size;
-    if (unlikely(needs_resize)) {
+    if (__hbt_unlikely(needs_resize)) {
       if constexpr (!kCanResizeCopyBuffer) {
         HBT_LOG_ERROR() << "resizing is needed but kCanResizeCopyBuffer"
                         << " is set to false. Make buffer larger?";
         return {nullptr, nullptr};
       }
      copy_buffer = static_cast(realloc(copy_buffer, size));
-      if (unlikely(copy_buffer == nullptr)) {
+      if (__hbt_unlikely(copy_buffer == nullptr)) {
         HBT_LOG_ERROR() << "Bad alloc";
         return {nullptr, nullptr};
       }
     }
 
-    if (likely(!wrap)) {
+    if (__hbt_likely(!wrap)) {
       std::memcpy(copy_buffer, &this->data_[start], size);
     } else {
       size_t size0 = this->header_.kDataPoolByteSize - start;
diff --git a/hbt/src/ringbuffer/PerCpuRingBuffer.h b/hbt/src/ringbuffer/PerCpuRingBuffer.h
index 638ce2db..2862a70e 100644
--- a/hbt/src/ringbuffer/PerCpuRingBuffer.h
+++ b/hbt/src/ringbuffer/PerCpuRingBuffer.h
@@ -84,7 +84,7 @@ class PerCpuRingBufferHeader {
   }
 
   THeader* getHeader(CpuId cpu_id) {
-    if (unlikely(!cpu_set.hasCpu(cpu_id))) {
+    if (__hbt_unlikely(!cpu_set.hasCpu(cpu_id))) {
       HBT_THROW_EINVAL() << "No CPU with ID: " << cpu_id;
     }
     auto header_offset = perCpuOffset(cpu_id);
@@ -93,7 +93,7 @@ class PerCpuRingBufferHeader {
   }
 
   MemOffset getDataOffset(CpuId cpu_id) const {
-    if (unlikely(!cpu_set.hasCpu(cpu_id))) {
+    if (__hbt_unlikely(!cpu_set.hasCpu(cpu_id))) {
       HBT_THROW_EINVAL() << "No CPU with ID: " << cpu_id;
     }
     return rb_data_offsets_[cpu_id];
   }
@@ -226,7 +226,7 @@ class PerCpuRingBufferHelper {
 
   TProdCons& atCpu(CpuId cpu) {
     auto& ptr = per_cpu_prodcons_[cpu];
-    if (unlikely(ptr == nullptr)) {
+    if (__hbt_unlikely(ptr == nullptr)) {
       std::ostringstream oss;
       oss << "No RingBuffer at CPU: " << cpu;
       throw std::out_of_range(oss.str());
diff --git a/hbt/src/ringbuffer/Producer.h b/hbt/src/ringbuffer/Producer.h
index 3dfa1c97..e96f3a5b 100644
--- a/hbt/src/ringbuffer/Producer.h
+++ b/hbt/src/ringbuffer/Producer.h
@@ -40,7 +40,7 @@ class Producer : public RingBufferWrapper {
   /// Return -EBUSY if there is contention and -EAGAIN if
   /// a temporal error occured.
   [[nodiscard]] ssize_t startTx() {
-    if (unlikely(inTx())) {
+    if (__hbt_unlikely(inTx())) {
       return -EBUSY;
     }
     bool f = false;
@@ -54,7 +54,7 @@ class Producer : public RingBufferWrapper {
   }
 
  [[nodiscard]] ssize_t writeInTx(size_t size, const void* data) noexcept {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     return this->copyToRingBuffer_(
@@ -72,7 +72,7 @@ class Producer : public RingBufferWrapper {
       const void* data) noexcept {
     static_assert(std::is_integral::value);
 
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
 
@@ -93,7 +93,7 @@ class Producer : public RingBufferWrapper {
   }
 
   [[nodiscard]] ssize_t commitTx() noexcept {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     this->header_.incHead(this->tx_size_);
@@ -105,7 +105,7 @@ class Producer : public RingBufferWrapper {
   }
 
   [[nodiscard]] ssize_t cancelTx() noexcept {
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
     auto old_tx_size = static_cast(this->tx_size_);
@@ -120,10 +120,10 @@ class Producer : public RingBufferWrapper {
   template
  [[nodiscard]] ssize_t writeInTxSizedChunk(const std::string& str) noexcept {
     static_assert(std::is_integral_v);
-    if (unlikely(!inTx())) {
+    if (__hbt_unlikely(!inTx())) {
       return -EINVAL;
     }
-    if (unlikely(str.size() > std::numeric_limits::max())) {
+    if (__hbt_unlikely(str.size() > std::numeric_limits::max())) {
       return -EINVAL;
     }
     auto byte_size = static_cast(str.size());
@@ -205,13 +205,13 @@ class Producer : public RingBufferWrapper {
       const size_t rb_offset) noexcept {
     HBT_THROW_IF_NULLPTR(d);
 
-    if (unlikely(size == 0)) {
+    if (__hbt_unlikely(size == 0)) {
       HBT_LOG_WARNING() << "Copies of size zero are not supported. "
                         << "Is size set correctly?";
       return -EINVAL;
     }
 
-    if (unlikely(size > this->header_.kDataPoolByteSize)) {
+    if (__hbt_unlikely(size > this->header_.kDataPoolByteSize)) {
      HBT_LOG_WARNING() << "Asked to write " << size << " bytes in a buffer"
                         << " of size " << this->header_.kDataPoolByteSize;
       return -EINVAL;
@@ -223,7 +223,7 @@ class Producer : public RingBufferWrapper {
     HBT_DCHECK_LE(tail, head);
 
     uint64_t used = head - tail;
-    if (unlikely(used > this->header_.kDataPoolByteSize)) {
+    if (__hbt_unlikely(used > this->header_.kDataPoolByteSize)) {
       HBT_LOG_ERROR()
          << "number of used bytes found to be larger than ring buffer size";
       return -EPERM;
@@ -231,14 +231,14 @@ class Producer : public RingBufferWrapper {
 
     // Check that there is enough space.
     uint64_t space = this->header_.kDataPoolByteSize - used;
-    if (unlikely(rb_offset + size > space)) {
+    if (__hbt_unlikely(rb_offset + size > space)) {
       return -ENOSPC;
     }
 
    uint64_t start = (rb_offset + head) & this->header_.kDataModMask;
     uint64_t end = (start + size) & this->header_.kDataModMask;
 
-    if (likely(start < end)) {
+    if (__hbt_likely(start < end)) {
       // d's content won't wrap around end of buffer.
       std::memcpy(&this->data_[start], d, size);
     } else {
diff --git a/hbt/src/ringbuffer/Shm.h b/hbt/src/ringbuffer/Shm.h
index 8e878664..b018ef1d 100644
--- a/hbt/src/ringbuffer/Shm.h
+++ b/hbt/src/ringbuffer/Shm.h
@@ -117,14 +117,14 @@ std::shared_ptr load(
       header_name, perm_write, hbt::shm::PageType::Default);
 
   constexpr auto kHeaderSize = sizeof(typename TRingBuffer::THeader);
-  if (unlikely(kHeaderSize != header_segment->getSize())) {
+  if (__hbt_unlikely(kHeaderSize != header_segment->getSize())) {
     HBT_THROW_SYSTEM(EPERM) << "Header segment of unexpected size";
   }
 
   auto [data, data_segment] =
      hbt::shm::Segment::load(data_name, perm_write, data_page_type);
 
-  if (unlikely(header->kDataPoolByteSize != data_segment->getSize())) {
+  if (__hbt_unlikely(header->kDataPoolByteSize != data_segment->getSize())) {
     HBT_THROW_SYSTEM(EPERM) << "Data segment of unexpected size";
   }
diff --git a/hbt/src/tagstack/IntervalSlicer.h b/hbt/src/tagstack/IntervalSlicer.h
index cda828dc..0cb0d99f 100644
--- a/hbt/src/tagstack/IntervalSlicer.h
+++ b/hbt/src/tagstack/IntervalSlicer.h
@@ -120,10 +120,10 @@ class IntervalSlicer {
     // IntervalSlicer is only writer
     for (int i = 0; i < kMaxNumRetries; ++i) {
       ret = slices_prod_->write(slice);
-      if (likely(0 < ret)) {
+      if (__hbt_likely(0 < ret)) {
         break;
       }
-      if (unlikely(kBytesDropIfFull > 0 && ret == -ENOSPC)) {
+      if (__hbt_unlikely(kBytesDropIfFull > 0 && ret == -ENOSPC)) {
         auto err = slices_prod_->dropN(kBytesDropIfFull);
         HBT_DCHECK_GE(err, 0);
       }
diff --git a/hbt/src/tagstack/PerfEventStream.h b/hbt/src/tagstack/PerfEventStream.h
index e30116af..55767c02 100644
--- a/hbt/src/tagstack/PerfEventStream.h
+++ b/hbt/src/tagstack/PerfEventStream.h
@@ -31,7 +31,7 @@ class PerfEventStream {
 
   // XXX: Articulate the stream API.
   inline const Event* prepareNext(TimeStamp stop_ts) noexcept {
-    if (unlikely(rb_->getHeader().usedSizeWeak() == 0)) {
+    if (__hbt_unlikely(rb_->getHeader().usedSizeWeak() == 0)) {
       ssize_t ret = events_gen_->consume(kBatchSize_);
       HBT_LOG_ERROR_IF(0 > ret && ret != -ENODATA && ret != -ENOSPC)
          << "Unexpected error consuming perf_event ringbuffer data. "
diff --git a/hbt/src/tagstack/Slicer.h b/hbt/src/tagstack/Slicer.h
index 8cef991e..819a4026 100644
--- a/hbt/src/tagstack/Slicer.h
+++ b/hbt/src/tagstack/Slicer.h
@@ -346,7 +346,7 @@ class Slicer {
     //           << " stop_ts: " << stop_ts
     while (num_processed < max_num_events) {
       const Event* ev = events_->prepareNext(stop_ts);
-      if (unlikely(ev == nullptr)) {
+      if (__hbt_unlikely(ev == nullptr)) {
         return num_processed;
       }
       HBT_DCHECK_NE(ev->tstamp, kInvalidTimeStamp);
@@ -361,7 +361,7 @@ class Slicer {
       // DLOG(INFO) << "Slicer sees next event: "
       //           << *ev << " in stream idx: " << group_idx;
 
-      if (unlikely(ev->tstamp < last_ts_)) {
+      if (__hbt_unlikely(ev->tstamp < last_ts_)) {
        // XXX: This case will happen (very rarely) in production.
         // Double check that handling it this way is robust enough.
         HBT_LOG_ERROR()
@@ -376,12 +376,12 @@ class Slicer {
       }
 
       // Has it reached the stop timestamp?
-      if (unlikely(ev->tstamp > stop_ts)) {
+      if (__hbt_unlikely(ev->tstamp > stop_ts)) {
         return num_processed;
       }
 
       // WriteErrors?
-      if (unlikely(handleWriteErrors_(ev))) {
+      if (__hbt_unlikely(handleWriteErrors_(ev))) {
         ++num_processed;
         continue;
       }
@@ -421,7 +421,7 @@ class Slicer {
       Stack last_stack = get_last_stack();
 
       if (ev->isOut()) {
-        if (unlikely(
+        if (__hbt_unlikely(
               handleEventOutPreConditionError_(ev, group_idx, last_stack))) {
           ++num_processed;
           continue;
@@ -436,7 +436,7 @@ class Slicer {
       } else {
 
         // Event In.
-        if (unlikely(handleEventInPreConditionError_(ev, last_stack))) {
+        if (__hbt_unlikely(handleEventInPreConditionError_(ev, last_stack))) {
           ++num_processed;
           continue;
         }
@@ -674,7 +674,7 @@ class Slicer {
   /// Return true if an error was handled
   inline bool handleWriteErrors_(const Event* ev) {
     // WriteErrors?
-    if (unlikely(ev->type == Event::Type::WriteErrorsEnd)) {
+    if (__hbt_unlikely(ev->type == Event::Type::WriteErrorsEnd)) {
      HBT_LOG_WARNING() << "Slicer has found WriteErrorsEnd " << *ev;
       last_ts_ = ev->tstamp;
       ++stats_.num_processed;
@@ -684,7 +684,7 @@ class Slicer {
       return true;
     }
 
-    if (unlikely(ev->type == Event::Type::WriteErrorsStart)) {
+    if (__hbt_unlikely(ev->type == Event::Type::WriteErrorsStart)) {
      HBT_LOG_WARNING() << "Slicer has found WriteErrorsStart " << *ev;
       last_ts_ = ev->tstamp;
       ++stats_.num_processed;
@@ -695,7 +695,7 @@ class Slicer {
       return true;
     }
 
-    if (unlikely(num_active_write_errors_)) {
+    if (__hbt_unlikely(num_active_write_errors_)) {
      // HBT_LOG_WARNING() << "Slicer received event: " << *ev << " while in
       // write error";
       // Cannot process events while in error because we don't know
@@ -715,7 +715,7 @@ class Slicer {
         ev->type == Event::Type::ThreadCreation ||
         ev->type == Event::Type::ThreadDestruction);
 
-    if (unlikely(ev->level != 0)) {
+    if (__hbt_unlikely(ev->level != 0)) {
       HBT_LOG_ERROR() << "Thread event level was " << ev->level
                       << "but was expected to be zero";
       last_ts_ = ev->tstamp;
@@ -773,7 +773,7 @@ class Slicer {
      HBT_DCHECK_GT(last_state->stats.stack.num_set_tags, 0);
       dormant_states_[tags[0]].push_front(*last_state);
       HBT_DCHECK(last_state->is_linked());
-      if (unlikely(!last_state->is_linked())) {
+      if (__hbt_unlikely(!last_state->is_linked())) {
         HBT_LOG_ERROR()
             << "Inconsistent state: "
            << "Last state was expected to be in list of active states";
@@ -822,7 +822,7 @@ class Slicer {
     // match, unless we are starting.
    if (last_stack.known_num_levels && last_stack.num_set_tags > 0) {
       auto l = static_cast(ev->level);
-      if (unlikely(last_stack.num_set_tags <= l)) {
+      if (__hbt_unlikely(last_stack.num_set_tags <= l)) {
         std::ostringstream oss;
        oss << "Event that pop a Tag from TagStack at level: " << l
            << " was received but the last stack for group_idx " << group_idx
@@ -851,7 +851,7 @@ class Slicer {
    // present) from the dormant_states_, unless it's a Phase start or the
     // TagStack is aliased.
     HBT_DCHECK(ev->isIn());
-    if (unlikely(
+    if (__hbt_unlikely(
             last_stack.num_set_tags > ev->level &&
             last_stack.tags[ev->level] != kNA)) {
       Tag old_tag = last_stack.tags[ev->level];
diff --git a/hbt/src/tagstack/Stream.h b/hbt/src/tagstack/Stream.h
index 3ce95f71..4ff17ef4 100644
--- a/hbt/src/tagstack/Stream.h
+++ b/hbt/src/tagstack/Stream.h
@@ -71,7 +71,7 @@ class RingBufferStream : protected ringbuffer::Consumer {
     auto [ret, event] = this->template startReadTx();
     if (ret == -ENODATA) {
       return nullptr;
-    } else if (unlikely(ret == -EAGAIN || ret == -EBUSY)) {
+    } else if (__hbt_unlikely(ret == -EAGAIN || ret == -EBUSY)) {
       if (ret == -EBUSY) {
        // If busy, then another thread is in the middle of a transaction.
         // Yield and retry.
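
For reference, a minimal standalone sketch of the collision this rename avoids
and of the renamed macros in use. It is not part of the patch or of the dynolog
sources: the `thirdparty` namespace and `safeDivide` are made-up names, and a
GCC or Clang toolchain is assumed for `__builtin_expect`.

  #include <cstdio>

  // Branch-hint macros as renamed by this patch.
  #define __hbt_likely(x) __builtin_expect(!!(x), 1)
  #define __hbt_unlikely(x) __builtin_expect(!!(x), 0)

  // Hypothetical stand-in for a Boost-style header that uses the plain
  // identifiers. Under the old `#define likely(x) ...`, the declaration
  // below would be macro-expanded into `__builtin_expect(!!(bool v), 1)`
  // and fail to compile; with the prefixed names it is left alone.
  namespace thirdparty {
  inline bool likely(bool v) {
    return v;
  }
  } // namespace thirdparty

  int safeDivide(int num, int den) {
    if (__hbt_unlikely(den == 0)) { // hint: this branch is rarely taken
      std::fprintf(stderr, "division by zero\n");
      return 0;
    }
    return num / den;
  }

  int main() {
    std::printf("%d\n", safeDivide(10, 2));        // prints 5
    std::printf("%d\n", thirdparty::likely(true)); // prints 1, no macro clash
    return 0;
  }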
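
The comment in Defs.h notes that C++20 adopts these hints into the language.
On a C++20 toolchain the same intent can be expressed with the standard
[[likely]]/[[unlikely]] attributes, which are immune to this kind of macro
collision since no preprocessor names are involved. A sketch, again with a
hypothetical function name rather than anything from the patch:

  // C++20: branch hints as standard attributes instead of macros.
  int clampNonNegative(int v) {
    if (v < 0) [[unlikely]] {
      return 0;
    }
    return v;
  }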