From 3d6b1f023e6dbd95351e8739d427ca200b0b179b Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Tue, 31 Aug 2021 05:42:53 +0800 Subject: [PATCH] [buffer orch] Bugfix: Don't query counter SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES on a pool where it is not supported (#1857) * [Bugfix] Don't query SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES if it is not supported by a pool Currently, SAI_BUFFER_POOL_STAT_WATERMARK_BYTES and SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES are queried for buffer pools. However, the latter is not supported on all pools and all platforms, which will result in sairedis complaints To avoid that, we need to test whether it is supported before putting it into the FLEX_COUNTER table Signed-off-by: Stephen Sun How I verified it Run vstest and manually test. --- orchagent/bufferorch.cpp | 81 +++++++++++++++++++++++++++++++--------- 1 file changed, 64 insertions(+), 17 deletions(-) diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index 5aa42f4265c8..5e9eacba98b7 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -22,6 +22,16 @@ extern sai_object_id_t gSwitchId; static const vector bufferPoolWatermarkStatIds = +{ + SAI_BUFFER_POOL_STAT_WATERMARK_BYTES +}; + +static const vector bufferSharedHeadroomPoolWatermarkStatIds = +{ + SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES +}; + +static const vector bufferPoolAllWatermarkStatIds = { SAI_BUFFER_POOL_STAT_WATERMARK_BYTES, SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES } @@ -218,29 +228,60 @@ void BufferOrch::generateBufferPoolWatermarkCounterIdList(void) } // Detokenize the SAI watermark stats to a string, separated by comma - string statList; + string statListBufferPoolOnly; for (const auto &it : bufferPoolWatermarkStatIds) { - statList += (sai_serialize_buffer_pool_stat(it) + list_item_delimiter); + statListBufferPoolOnly += (sai_serialize_buffer_pool_stat(it) + list_item_delimiter); } - if (!statList.empty()) + string 
statListBufferPoolAndSharedHeadroomPool = statListBufferPoolOnly; + for (const auto &it : bufferSharedHeadroomPoolWatermarkStatIds) { - statList.pop_back(); + statListBufferPoolAndSharedHeadroomPool += (sai_serialize_buffer_pool_stat(it) + list_item_delimiter); + } + if (!statListBufferPoolOnly.empty()) + { + statListBufferPoolOnly.pop_back(); + } + if (!statListBufferPoolAndSharedHeadroomPool.empty()) + { + statListBufferPoolAndSharedHeadroomPool.pop_back(); } // Some platforms do not support buffer pool watermark clear operation on a particular pool // Invoke the SAI clear_stats API per pool to query the capability from the API call return status + // Some platforms do not support shared headroom pool watermark read operation on a particular pool + // Invoke the SAI get_buffer_pool_stats API per pool to query the capability from the API call return status. // We use bit mask to mark the clear watermark capability of each buffer pool. We use an unsigned int to place hold // these bits. This assumes the total number of buffer pools to be no greater than 32, which should satisfy all use cases. 
unsigned int noWmClrCapability = 0; + unsigned int noSharedHeadroomPoolWmRdCapability = 0; unsigned int bitMask = 1; + uint32_t size = static_cast(bufferSharedHeadroomPoolWatermarkStatIds.size()); + vector counterData(size); for (const auto &it : *(m_buffer_type_maps[APP_BUFFER_POOL_TABLE_NAME])) { - sai_status_t status = sai_buffer_api->clear_buffer_pool_stats( + sai_status_t status; + // Check whether shared headroom pool water mark is supported + status = sai_buffer_api->get_buffer_pool_stats( + it.second.m_saiObjectId, + size, + reinterpret_cast(bufferSharedHeadroomPoolWatermarkStatIds.data()), + counterData.data()); + if (SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status) + || status == SAI_STATUS_NOT_SUPPORTED || status == SAI_STATUS_NOT_IMPLEMENTED) + { + SWSS_LOG_NOTICE("Read shared headroom pool watermark failed on %s, rv: %s", it.first.c_str(), sai_serialize_status(status).c_str()); + noSharedHeadroomPoolWmRdCapability |= bitMask; + } + + const auto &watermarkStatIds = (noSharedHeadroomPoolWmRdCapability & bitMask) ? 
bufferPoolWatermarkStatIds : bufferPoolAllWatermarkStatIds; + + status = sai_buffer_api->clear_buffer_pool_stats( it.second.m_saiObjectId, - static_cast(bufferPoolWatermarkStatIds.size()), - reinterpret_cast(bufferPoolWatermarkStatIds.data())); - if (status == SAI_STATUS_NOT_SUPPORTED || status == SAI_STATUS_NOT_IMPLEMENTED) + static_cast(watermarkStatIds.size()), + reinterpret_cast(watermarkStatIds.data())); + if (SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status) + || status == SAI_STATUS_NOT_SUPPORTED || status == SAI_STATUS_NOT_IMPLEMENTED) { SWSS_LOG_NOTICE("Clear watermark failed on %s, rv: %s", it.first.c_str(), sai_serialize_status(status).c_str()); noWmClrCapability |= bitMask; @@ -259,11 +300,21 @@ void BufferOrch::generateBufferPoolWatermarkCounterIdList(void) // Push buffer pool watermark COUNTER_ID_LIST to FLEX_COUNTER_TABLE on a per buffer pool basis vector fvTuples; - fvTuples.emplace_back(BUFFER_POOL_COUNTER_ID_LIST, statList); + bitMask = 1; for (const auto &it : *(m_buffer_type_maps[APP_BUFFER_POOL_TABLE_NAME])) { string key = BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP ":" + sai_serialize_object_id(it.second.m_saiObjectId); + fvTuples.clear(); + + if (noSharedHeadroomPoolWmRdCapability & bitMask) + { + fvTuples.emplace_back(BUFFER_POOL_COUNTER_ID_LIST, statListBufferPoolOnly); + } + else + { + fvTuples.emplace_back(BUFFER_POOL_COUNTER_ID_LIST, statListBufferPoolAndSharedHeadroomPool); + } if (noWmClrCapability) { @@ -273,15 +324,11 @@ void BufferOrch::generateBufferPoolWatermarkCounterIdList(void) stats_mode = STATS_MODE_READ; } fvTuples.emplace_back(STATS_MODE_FIELD, stats_mode); - - m_flexCounterTable->set(key, fvTuples); - fvTuples.pop_back(); - bitMask <<= 1; - } - else - { - m_flexCounterTable->set(key, fvTuples); } + + m_flexCounterTable->set(key, fvTuples); + + bitMask <<= 1; } m_isBufferPoolWatermarkCounterIdListGenerated = true;