From ef0d5a9fefa96eda7a2dbe42d693c071823aa334 Mon Sep 17 00:00:00 2001 From: Tangyanzhao Date: Wed, 23 Dec 2020 00:48:17 +0800 Subject: [PATCH 01/19] format --- rdsn | 2 +- src/server/hotkey_collector.cpp | 17 +++++++++++++++++ src/server/hotkey_collector.h | 1 + src/server/pegasus_server_impl.cpp | 3 ++- src/server/test/hotkey_collector_test.cpp | 6 ++++++ 5 files changed, 27 insertions(+), 2 deletions(-) diff --git a/rdsn b/rdsn index 67e06d2c50..0b07a633aa 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit 67e06d2c5022e82984b0c7e6651876b5b0e7410d +Subproject commit 0b07a633aae13300f731558718fd279a0ff2d7ae diff --git a/src/server/hotkey_collector.cpp b/src/server/hotkey_collector.cpp index 60263f6a6f..3371278cd0 100644 --- a/src/server/hotkey_collector.cpp +++ b/src/server/hotkey_collector.cpp @@ -185,6 +185,9 @@ void hotkey_collector::handle_rpc(const dsn::replication::detect_hotkey_request case dsn::replication::detect_action::STOP: on_stop_detect(resp); return; + case dsn::replication::detect_action::QUERY: + query_result(resp); + return; default: std::string hint = fmt::format("{}: can't find this detect action", req.action); resp.err = dsn::ERR_INVALID_STATE; @@ -272,6 +275,20 @@ void hotkey_collector::on_stop_detect(dsn::replication::detect_hotkey_response & ddebug_replica(hint); } +void hotkey_collector::query_result(dsn::replication::detect_hotkey_response &resp) +{ + if (_state != hotkey_collector_state::FINISHED) { + resp.err = dsn::ERR_BUSY; + std::string hint = fmt::format("hotkey is detecting now, now state: {}", + dsn::enum_to_string(_hotkey_type)); + resp.err_hint = hint; + ddebug_replica(hint); + } else { + resp.err = dsn::ERR_OK; + resp.hotkey_result = _result.hot_hash_key; + } +} + bool hotkey_collector::terminate_if_timeout() { if (dsn_now_s() >= _collector_start_time_second.load() + FLAGS_max_seconds_to_detect_hotkey) { diff --git a/src/server/hotkey_collector.h b/src/server/hotkey_collector.h index a54f01941f..bdc7fcbd67 100644 --- a/src/server/hotkey_collector.h +++ b/src/server/hotkey_collector.h @@ -98,6 +98,7 @@ class hotkey_collector : public dsn::replication::replica_base private: void on_start_detect(dsn::replication::detect_hotkey_response &resp); void on_stop_detect(dsn::replication::detect_hotkey_response &resp); + void query_result(dsn::replication::detect_hotkey_response &resp); void change_state_to_stopped(); void change_state_to_coarse_detecting(); diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp index 949eead7e6..5c3d5c718d 100644 --- a/src/server/pegasus_server_impl.cpp +++ b/src/server/pegasus_server_impl.cpp @@ -2841,7 +2841,8 @@ void pegasus_server_impl::on_detect_hotkey(const dsn::replication::detect_hotkey { if (dsn_unlikely(req.action != dsn::replication::detect_action::START && - req.action != dsn::replication::detect_action::STOP)) { + req.action != dsn::replication::detect_action::STOP && + req.action != dsn::replication::detect_action::QUERY)) { resp.err = dsn::ERR_INVALID_PARAMETERS; resp.__set_err_hint("invalid detect_action"); return; diff --git a/src/server/test/hotkey_collector_test.cpp b/src/server/test/hotkey_collector_test.cpp index 5eea0d8aad..99913a45fe 100644 --- a/src/server/test/hotkey_collector_test.cpp +++ b/src/server/test/hotkey_collector_test.cpp @@ -304,6 +304,12 @@ TEST_F(hotkey_collector_test, state_transform) ASSERT_TRUE(result->if_find_result); ASSERT_EQ(result->hot_hash_key, "ThisisahotkeyThisisahotkey"); + 
on_detect_hotkey(generate_control_rpc(dsn::replication::hotkey_type::READ, + dsn::replication::detect_action::QUERY), + resp); + ASSERT_EQ(resp.err, dsn::ERR_OK); + ASSERT_EQ(resp.hotkey_result, "ThisisahotkeyThisisahotkey"); + on_detect_hotkey(generate_control_rpc(dsn::replication::hotkey_type::READ, dsn::replication::detect_action::STOP), resp); From 1304533e715cd45999c0543dfd8740b6c540c83c Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Wed, 23 Dec 2020 10:22:30 +0800 Subject: [PATCH 02/19] add log --- rdsn | 2 +- src/server/hotkey_collector.cpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/rdsn b/rdsn index 0b07a633aa..64b9e11703 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit 0b07a633aae13300f731558718fd279a0ff2d7ae +Subproject commit 64b9e11703aea7acfa771037cbc3ad8b363abfaa diff --git a/src/server/hotkey_collector.cpp b/src/server/hotkey_collector.cpp index 3371278cd0..a8b82f66df 100644 --- a/src/server/hotkey_collector.cpp +++ b/src/server/hotkey_collector.cpp @@ -168,6 +168,7 @@ inline void hotkey_collector::change_state_by_result() case hotkey_collector_state::FINE_DETECTING: if (!_result.hot_hash_key.empty()) { change_state_to_finished(); + derror_replica("Find the hotkey: {}", _result.hot_hash_key); } break; default: From 44f22091bcd1745c32efdca5c4d63b75d1f2b979 Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Thu, 24 Dec 2020 14:25:33 +0800 Subject: [PATCH 03/19] update rdsn --- rdsn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rdsn b/rdsn index 64b9e11703..29c6ccca84 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit 64b9e11703aea7acfa771037cbc3ad8b363abfaa +Subproject commit 29c6ccca84873f4735c690dc8a616b8ef93377eb From 7a3d764d0864e18ccec970e50d2acb6f101bc5de Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Fri, 25 Dec 2020 16:21:03 +0800 Subject: [PATCH 04/19] update --- src/server/hotkey_collector.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server/hotkey_collector.cpp b/src/server/hotkey_collector.cpp index a8b82f66df..23aa21a433 100644 --- a/src/server/hotkey_collector.cpp +++ b/src/server/hotkey_collector.cpp @@ -282,11 +282,11 @@ void hotkey_collector::query_result(dsn::replication::detect_hotkey_response &re resp.err = dsn::ERR_BUSY; std::string hint = fmt::format("hotkey is detecting now, now state: {}", dsn::enum_to_string(_hotkey_type)); - resp.err_hint = hint; + resp.__set_err_hint(hint); ddebug_replica(hint); } else { resp.err = dsn::ERR_OK; - resp.hotkey_result = _result.hot_hash_key; + resp.__set_hotkey_result(_result.hot_hash_key); } } From a5ae1fe1ed2d859266b30041976b8467a6aabdbf Mon Sep 17 00:00:00 2001 From: Tangyanzhao Date: Sun, 27 Dec 2020 22:52:03 +0800 Subject: [PATCH 05/19] add ft --- .rat-excludes | 19 - debug | 1584 +++++++++++++++++ rdsn | 2 +- src/server/config.min.ini | 1 + src/server/hotspot_partition_calculator.cpp | 4 +- src/server/test/hotspot_partition_test.cpp | 13 +- src/shell/commands/detect_hotkey.cpp | 2 + src/test/function_test/run.sh | 2 + .../function_test/test_detect_hotspot.cpp | 137 ++ 9 files changed, 1741 insertions(+), 23 deletions(-) delete mode 100644 .rat-excludes create mode 100644 debug create mode 100644 src/test/function_test/test_detect_hotspot.cpp diff --git a/.rat-excludes b/.rat-excludes deleted file mode 100644 index 7cd6ebafe5..0000000000 --- a/.rat-excludes +++ /dev/null @@ -1,19 +0,0 @@ -.*ini -.*json -.*in -.*md -carrot -.clang-format -sds/* -linenoise/* -argh.h -git_commit.h -ssh-no-interactive -scp-no-interactive 
-get_local_ip -DISCLAIMER-WIP -.travis.yml -.rat-excludes -.gitmodules -.gitignore -apache-rat-0.13.jar diff --git a/debug b/debug new file mode 100644 index 0000000000..d1869d4930 --- /dev/null +++ b/debug @@ -0,0 +1,1584 @@ +Test start time: 2020年 12月 27日 星期日 22:23:02 CST +JMX enabled by default +Using config: /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/bin/../conf/zoo.cfg +Stopping zookeeper ... no zookeeper to stop (could not find file /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/data/zookeeper_server.pid) +Clearing zookeeper ... CLEARED +JMX enabled by default +Using config: /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/bin/../conf/zoo.cfg +Starting zookeeper ... STARTED +Zookeeper started at port 22181 +starting server +cd /home/smilencer/Code/incubator-pegasus/onebox/meta1 && /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta &>result & +smilenc+ 361298 361236 0 22:23 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/meta2 && /home/smilencer/Code/incubator-pegasus/onebox/meta2/pegasus_server config.ini -app_list meta &>result & +smilenc+ 361306 361236 0 22:23 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta2/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/meta3 && /home/smilencer/Code/incubator-pegasus/onebox/meta3/pegasus_server config.ini -app_list meta &>result & +smilenc+ 361314 361236 0 22:23 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta3/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1 && /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica &>result & +smilenc+ 361384 361236 0 22:23 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica2 && /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica &>result & +smilenc+ 361431 361236 0 22:23 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica3 && /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica &>result & +smilenc+ 361489 361236 0 22:23 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +Wait cluster to become healthy... +Sleeped for 1 seconds +Sleeped for 2 seconds +Sleeped for 3 seconds +Sleeped for 4 seconds +Sleeped for 5 seconds +Sleeped for 6 seconds +Sleeped for 7 seconds +Sleeped for 8 seconds +Sleeped for 9 seconds +Sleeped for 10 seconds +Sleeped for 11 seconds +Sleeped for 12 seconds +Sleeped for 13 seconds +Sleeped for 14 seconds +Sleeped for 15 seconds +Sleeped for 16 seconds +Sleeped for 17 seconds +Sleeped for 18 seconds +Sleeped for 19 seconds +Sleeped for 20 seconds +Cluster becomes healthy. 
+~/Code/incubator-pegasus/src/builder/bin/pegasus_function_test ~/Code/incubator-pegasus +W2020-12-27 22:23:27.166 (1609079007166682457 58bfa) unknown.io-thrd.363514:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:23:27.166 (1609079007166700465 58bfa) unknown.io-thrd.363514:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:23:27.1685 (1609079007168678589 58bfa) unknown.io-thrd.363514: process(363514) start: 1609079007165, date: 2020-12-27 22:23:27.165 +D2020-12-27 22:23:27.168 (1609079007168956068 58bfa) unknown.io-thrd.363514: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:27.1681 (1609079007168960974 58bfa) unknown.io-thrd.363514: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... +D2020-12-27 22:23:27.169� (1609079007169022977 58bfa) unknown.io-thrd.363514: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:27.1691 (1609079007169113018 58bfa) unknown.io-thrd.363514: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... +D2020-12-27 22:23:27.169 (1609079007169251970 58bfa) unknown.io-thrd.363514: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:27.1691 (1609079007169255910 58bfa) unknown.io-thrd.363514: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:23:27.169� (1609079007169312669 58bfa) unknown.io-thrd.363514: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:27.1691 (1609079007169464227 58bfa) unknown.io-thrd.363514: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:23:27.169 (1609079007169582467 58bfa) unknown.io-thrd.363514: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:27.1691 (1609079007169586117 58bfa) unknown.io-thrd.363514: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:23:27.169� (1609079007169616988 58bfa) unknown.io-thrd.363514: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:27.1691 (1609079007169702939 58bfa) unknown.io-thrd.363514: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... +D2020-12-27 22:23:27.169 (1609079007169708477 58bfa) unknown.io-thrd.363514: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:23:27.1691 (1609079007169737993 58c13) mimic.io-thrd.363539: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:23:27.1708 (1609079007170010041 58bfa) unknown.io-thrd.363514: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:23:27.1701 (1609079007170042547 58c1c) mimic.io-thrd.363548: You may need priviledge to set thread priority. 
errno = 1 +D2020-12-27 22:23:27.1704 (1609079007170188022 58bfa) unknown.io-thrd.363514: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... +D2020-12-27 22:23:27.170 (1609079007170248001 58bfa) unknown.io-thrd.363514: MainThread: app_name=temp +Note: Google Test filter = basic.* +[==========] Running 8 tests from 1 test case. +[----------] Global test environment set-up. +[----------] 8 tests from basic +[ RUN ] basic.set_get_del +D2020-12-27 22:23:27.1701 (1609079007170382720 58bfa) mimic.io-thrd.363514: client session created, remote_server = 127.0.0.1:34602, current_count = 1 +D2020-12-27 22:23:27.1701 (1609079007170468415 58bfe) mimic.io-thrd.363518: client session connected, remote_server = 127.0.0.1:34602, current_count = 1 +D2020-12-27 22:23:27.1702 (1609079007170716319 58bfb) mimic.io-thrd.363515: client session created, remote_server = 192.168.123.119:34601, current_count = 2 +D2020-12-27 22:23:27.1702 (1609079007170776748 58bfd) mimic.io-thrd.363517: client session connected, remote_server = 192.168.123.119:34601, current_count = 2 +D2020-12-27 22:23:27.1703 (1609079007170935546 58c15) mimic.default1.01008bfa00030003: client session created, remote_server = 192.168.123.119:34801, current_count = 3 +D2020-12-27 22:23:27.1703 (1609079007170993301 58bfc) mimic.io-thrd.363516: client session connected, remote_server = 192.168.123.119:34801, current_count = 3 +[ OK ] basic.set_get_del (2 ms) +[ RUN ] basic.multi_get +D2020-12-27 22:23:27.1724 (1609079007172611814 58bfa) mimic.io-thrd.363514: client session created, remote_server = 192.168.123.119:34802, current_count = 4 +D2020-12-27 22:23:27.1724 (1609079007172666584 58bfc) mimic.io-thrd.363516: client session connected, remote_server = 192.168.123.119:34802, current_count = 4 +[ OK ] basic.multi_get (1007 ms) +[ RUN ] basic.multi_get_reverse +[ OK ] basic.multi_get_reverse (8 ms) +[ RUN ] basic.multi_set_get_del +E2020-12-27 22:23:28.188 (1609079008188285692 58bfa) mimic.io-thrd.363514: invalid sort keys: should not be empty +[ OK ] basic.multi_set_get_del (2 ms) +[ RUN ] basic.set_get_del_async +[ OK ] basic.set_get_del_async (1 ms) +[ RUN ] basic.multi_set_get_del_async +E2020-12-27 22:23:28.191 (1609079008191916945 58bfa) mimic.io-thrd.363514: invalid sort keys: should not be empty +[ OK ] basic.multi_set_get_del_async (2 ms) +[ RUN ] basic.scan_with_filter +[ OK ] basic.scan_with_filter (2 ms) +[ RUN ] basic.full_scan_with_filter +D2020-12-27 22:23:28.1945 (1609079008194912925 58bfa) mimic.io-thrd.363514: client session created, remote_server = 127.0.0.1:34601, current_count = 5 +D2020-12-27 22:23:28.1945 (1609079008194997217 58bfc) mimic.io-thrd.363516: client session connected, remote_server = 127.0.0.1:34601, current_count = 5 +D2020-12-27 22:23:28.1956 (1609079008195181003 58bfa) mimic.io-thrd.363514: client session created, remote_server = 192.168.123.119:34803, current_count = 6 +D2020-12-27 22:23:28.1956 (1609079008195225511 58bfd) mimic.io-thrd.363517: client session connected, remote_server = 192.168.123.119:34803, current_count = 6 +[ OK ] basic.full_scan_with_filter (4 ms) +[----------] 8 tests from basic (1028 ms total) + +[----------] Global test environment tear-down +[==========] 8 tests from 1 test case ran. (1028 ms total) +[ PASSED ] 8 tests. 
+dsn exit with code 0 +W2020-12-27 22:23:28.265 (1609079008265870797 58c21) unknown.io-thrd.363553:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:23:28.265 (1609079008265890784 58c21) unknown.io-thrd.363553:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:23:28.2695 (1609079008269333142 58c21) unknown.io-thrd.363553: process(363553) start: 1609079008265, date: 2020-12-27 22:23:28.265 +D2020-12-27 22:23:28.269 (1609079008269614652 58c21) unknown.io-thrd.363553: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:28.2691 (1609079008269621326 58c21) unknown.io-thrd.363553: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... +D2020-12-27 22:23:28.269� (1609079008269679551 58c21) unknown.io-thrd.363553: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:28.2691 (1609079008269790461 58c21) unknown.io-thrd.363553: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... +D2020-12-27 22:23:28.269 (1609079008269904657 58c21) unknown.io-thrd.363553: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:28.2691 (1609079008269909525 58c21) unknown.io-thrd.363553: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:23:28.269� (1609079008269948692 58c21) unknown.io-thrd.363553: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:28.2701 (1609079008270038480 58c21) unknown.io-thrd.363553: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:23:28.270 (1609079008270146831 58c21) unknown.io-thrd.363553: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:28.2701 (1609079008270151472 58c21) unknown.io-thrd.363553: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:23:28.270� (1609079008270189568 58c21) unknown.io-thrd.363553: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:28.2701 (1609079008270279616 58c21) unknown.io-thrd.363553: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... +D2020-12-27 22:23:28.270 (1609079008270287523 58c21) unknown.io-thrd.363553: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:23:28.2701 (1609079008270332116 58c3a) mimic.io-thrd.363578: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:23:28.2708 (1609079008270529675 58c21) unknown.io-thrd.363553: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:23:28.2701 (1609079008270571194 58c43) mimic.io-thrd.363587: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:23:28.2704 (1609079008270674438 58c21) unknown.io-thrd.363553: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... 
+D2020-12-27 22:23:28.270 (1609079008270745933 58c21) unknown.io-thrd.363553: MainThread: app_name=temp +Note: Google Test filter = incr.* +[==========] Running 11 tests from 1 test case. +[----------] Global test environment set-up. +[----------] 11 tests from incr +[ RUN ] incr.unexist_key +D2020-12-27 22:23:28.2701 (1609079008270898354 58c21) mimic.io-thrd.363553: client session created, remote_server = 127.0.0.1:34603, current_count = 1 +D2020-12-27 22:23:28.2711 (1609079008271005567 58c24) mimic.io-thrd.363556: client session connected, remote_server = 127.0.0.1:34603, current_count = 1 +D2020-12-27 22:23:28.2712 (1609079008271197830 58c25) mimic.io-thrd.363557: client session created, remote_server = 192.168.123.119:34601, current_count = 2 +D2020-12-27 22:23:28.2712 (1609079008271278586 58c22) mimic.io-thrd.363554: client session connected, remote_server = 192.168.123.119:34601, current_count = 2 +D2020-12-27 22:23:28.2713 (1609079008271463988 58c3c) mimic.default1.01008c2100030003: client session created, remote_server = 192.168.123.119:34801, current_count = 3 +D2020-12-27 22:23:28.2713 (1609079008271557214 58c24) mimic.io-thrd.363556: client session connected, remote_server = 192.168.123.119:34801, current_count = 3 +[ OK ] incr.unexist_key (3 ms) +[ RUN ] incr.empty_key +[ OK ] incr.empty_key (1 ms) +[ RUN ] incr.negative_value +[ OK ] incr.negative_value (1 ms) +[ RUN ] incr.increase_zero +[ OK ] incr.increase_zero (1 ms) +[ RUN ] incr.multiple_increment +D2020-12-27 22:23:28.2764 (1609079008276169198 58c21) mimic.io-thrd.363553: client session created, remote_server = 192.168.123.119:34803, current_count = 4 +D2020-12-27 22:23:28.2764 (1609079008276243128 58c22) mimic.io-thrd.363554: client session connected, remote_server = 192.168.123.119:34803, current_count = 4 +[ OK ] incr.multiple_increment (1 ms) +[ RUN ] incr.invalid_old_data +D2020-12-27 22:23:28.2775 (1609079008277580804 58c21) mimic.io-thrd.363553: client session created, remote_server = 192.168.123.119:34802, current_count = 5 +D2020-12-27 22:23:28.2775 (1609079008277641554 58c22) mimic.io-thrd.363554: client session connected, remote_server = 192.168.123.119:34802, current_count = 5 +[ OK ] incr.invalid_old_data (1 ms) +[ RUN ] incr.out_of_range_old_data +[ OK ] incr.out_of_range_old_data (1 ms) +[ RUN ] incr.up_overflow +[ OK ] incr.up_overflow (1 ms) +[ RUN ] incr.down_overflow +[ OK ] incr.down_overflow (1 ms) +[ RUN ] incr.preserve_ttl +[ OK ] incr.preserve_ttl (4003 ms) +[ RUN ] incr.reset_ttl +[ OK ] incr.reset_ttl (4009 ms) +[----------] 11 tests from incr (8023 ms total) + +[----------] Global test environment tear-down +[==========] 11 tests from 1 test case ran. (8023 ms total) +[ PASSED ] 11 tests. 
+dsn exit with code 0 +W2020-12-27 22:23:36.299 (1609079016299603556 58c48) unknown.io-thrd.363592:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:23:36.299 (1609079016299621996 58c48) unknown.io-thrd.363592:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:23:36.3018 (1609079016301662123 58c48) unknown.io-thrd.363592: process(363592) start: 1609079016298, date: 2020-12-27 22:23:36.298 +D2020-12-27 22:23:36.301 (1609079016301888071 58c48) unknown.io-thrd.363592: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3011 (1609079016301893655 58c48) unknown.io-thrd.363592: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... +D2020-12-27 22:23:36.301� (1609079016301937376 58c48) unknown.io-thrd.363592: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3021 (1609079016302018444 58c48) unknown.io-thrd.363592: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... +D2020-12-27 22:23:36.302 (1609079016302108867 58c48) unknown.io-thrd.363592: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3021 (1609079016302112743 58c48) unknown.io-thrd.363592: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:23:36.302� (1609079016302142197 58c48) unknown.io-thrd.363592: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3021 (1609079016302204591 58c48) unknown.io-thrd.363592: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:23:36.302 (1609079016302292504 58c48) unknown.io-thrd.363592: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3021 (1609079016302295940 58c48) unknown.io-thrd.363592: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:23:36.302� (1609079016302324886 58c48) unknown.io-thrd.363592: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3021 (1609079016302390964 58c48) unknown.io-thrd.363592: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... +D2020-12-27 22:23:36.302 (1609079016302396619 58c48) unknown.io-thrd.363592: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:23:36.3021 (1609079016302428263 58c61) mimic.io-thrd.363617: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:23:36.3028 (1609079016302609331 58c48) unknown.io-thrd.363592: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:23:36.3021 (1609079016302658043 58c6a) mimic.io-thrd.363626: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:23:36.3024 (1609079016302730289 58c48) unknown.io-thrd.363592: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... 
+D2020-12-27 22:23:36.302 (1609079016302797546 58c48) unknown.io-thrd.363592: MainThread: app_name=temp +Note: Google Test filter = check_and_set.* +[==========] Running 9 tests from 1 test case. +[----------] Global test environment set-up. +[----------] 9 tests from check_and_set +[ RUN ] check_and_set.value_not_exist +D2020-12-27 22:23:36.3021 (1609079016302922126 58c48) mimic.io-thrd.363592: client session created, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:23:36.3031 (1609079016303023192 58c49) mimic.io-thrd.363593: client session connected, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:23:36.3032 (1609079016303204092 58c63) mimic.default1.01008c4800030003: client session created, remote_server = 192.168.123.119:34803, current_count = 2 +D2020-12-27 22:23:36.3032 (1609079016303268248 58c4c) mimic.io-thrd.363596: client session connected, remote_server = 192.168.123.119:34803, current_count = 2 +[ OK ] check_and_set.value_not_exist (5 ms) +[ RUN ] check_and_set.value_exist +D2020-12-27 22:23:36.3073 (1609079016307573709 58c48) mimic.io-thrd.363592: client session created, remote_server = 192.168.123.119:34802, current_count = 3 +D2020-12-27 22:23:36.3073 (1609079016307639553 58c4b) mimic.io-thrd.363595: client session connected, remote_server = 192.168.123.119:34802, current_count = 3 +[ OK ] check_and_set.value_exist (3 ms) +[ RUN ] check_and_set.value_not_empty +[ OK ] check_and_set.value_not_empty (3 ms) +[ RUN ] check_and_set.value_match_anywhere +[ OK ] check_and_set.value_match_anywhere (6 ms) +[ RUN ] check_and_set.value_match_prefix +D2020-12-27 22:23:36.3194 (1609079016319216241 58c48) mimic.io-thrd.363592: client session created, remote_server = 192.168.123.119:34801, current_count = 4 +D2020-12-27 22:23:36.3194 (1609079016319293893 58c4b) mimic.io-thrd.363595: client session connected, remote_server = 192.168.123.119:34801, current_count = 4 +[ OK ] check_and_set.value_match_prefix (5 ms) +[ RUN ] check_and_set.value_match_postfix +[ OK ] check_and_set.value_match_postfix (5 ms) +[ RUN ] check_and_set.value_bytes_compare +[ OK ] check_and_set.value_bytes_compare (6 ms) +[ RUN ] check_and_set.value_int_compare +[ OK ] check_and_set.value_int_compare (7 ms) +[ RUN ] check_and_set.invalid_type +E2020-12-27 22:23:36.3420 (1609079016342867385 58c48) mimic.io-thrd.363592: invalid check type: 100 +[ OK ] check_and_set.invalid_type (0 ms) +[----------] 9 tests from check_and_set (40 ms total) + +[----------] Global test environment tear-down +[==========] 9 tests from 1 test case ran. (40 ms total) +[ PASSED ] 9 tests. +dsn exit with code 0 +W2020-12-27 22:23:36.349 (1609079016349051185 58c6f) unknown.io-thrd.363631:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:23:36.349 (1609079016349070379 58c6f) unknown.io-thrd.363631:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:23:36.3518 (1609079016351115790 58c6f) unknown.io-thrd.363631: process(363631) start: 1609079016348, date: 2020-12-27 22:23:36.348 +D2020-12-27 22:23:36.351 (1609079016351360841 58c6f) unknown.io-thrd.363631: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3511 (1609079016351366329 58c6f) unknown.io-thrd.363631: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... 
+D2020-12-27 22:23:36.351� (1609079016351416582 58c6f) unknown.io-thrd.363631: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3511 (1609079016351528192 58c6f) unknown.io-thrd.363631: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... +D2020-12-27 22:23:36.351 (1609079016351623082 58c6f) unknown.io-thrd.363631: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3511 (1609079016351626921 58c6f) unknown.io-thrd.363631: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:23:36.351� (1609079016351657303 58c6f) unknown.io-thrd.363631: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3511 (1609079016351739887 58c6f) unknown.io-thrd.363631: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:23:36.351 (1609079016351833083 58c6f) unknown.io-thrd.363631: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3511 (1609079016351836698 58c6f) unknown.io-thrd.363631: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:23:36.351� (1609079016351867079 58c6f) unknown.io-thrd.363631: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:23:36.3511 (1609079016351949742 58c6f) unknown.io-thrd.363631: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... +D2020-12-27 22:23:36.351 (1609079016351955551 58c6f) unknown.io-thrd.363631: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:23:36.3521 (1609079016352009733 58c88) mimic.io-thrd.363656: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:23:36.3528 (1609079016352175221 58c6f) unknown.io-thrd.363631: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:23:36.3521 (1609079016352225699 58c91) mimic.io-thrd.363665: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:23:36.3524 (1609079016352308490 58c6f) unknown.io-thrd.363631: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... +D2020-12-27 22:23:36.352 (1609079016352374389 58c6f) unknown.io-thrd.363631: MainThread: app_name=temp +Note: Google Test filter = check_and_mutate.* +[==========] Running 12 tests from 1 test case. +[----------] Global test environment set-up. 
+[----------] 12 tests from check_and_mutate +[ RUN ] check_and_mutate.value_not_exist +D2020-12-27 22:23:36.3521 (1609079016352510705 58c6f) mimic.io-thrd.363631: client session created, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:23:36.3521 (1609079016352595957 58c70) mimic.io-thrd.363632: client session connected, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:23:36.3522 (1609079016352797644 58c8a) mimic.default1.01008c6f00030003: client session created, remote_server = 192.168.123.119:34803, current_count = 2 +D2020-12-27 22:23:36.3522 (1609079016352863146 58c72) mimic.io-thrd.363634: client session connected, remote_server = 192.168.123.119:34803, current_count = 2 +[ OK ] check_and_mutate.value_not_exist (5 ms) +[ RUN ] check_and_mutate.value_exist +D2020-12-27 22:23:36.3573 (1609079016357250669 58c6f) mimic.io-thrd.363631: client session created, remote_server = 192.168.123.119:34802, current_count = 3 +D2020-12-27 22:23:36.3573 (1609079016357325799 58c70) mimic.io-thrd.363632: client session connected, remote_server = 192.168.123.119:34802, current_count = 3 +[ OK ] check_and_mutate.value_exist (3 ms) +[ RUN ] check_and_mutate.value_not_empty +[ OK ] check_and_mutate.value_not_empty (3 ms) +[ RUN ] check_and_mutate.value_match_anywhere +[ OK ] check_and_mutate.value_match_anywhere (5 ms) +[ RUN ] check_and_mutate.value_match_prefix +[ OK ] check_and_mutate.value_match_prefix (6 ms) +[ RUN ] check_and_mutate.value_match_postfix +[ OK ] check_and_mutate.value_match_postfix (6 ms) +[ RUN ] check_and_mutate.value_bytes_compare +[ OK ] check_and_mutate.value_bytes_compare (6 ms) +[ RUN ] check_and_mutate.value_int_compare +[ OK ] check_and_mutate.value_int_compare (7 ms) +[ RUN ] check_and_mutate.invalid_type +E2020-12-27 22:23:36.3930 (1609079016393573232 58c6f) mimic.io-thrd.363631: invalid check type: 100 +[ OK ] check_and_mutate.invalid_type (0 ms) +[ RUN ] check_and_mutate.set_del +[ OK ] check_and_mutate.set_del (1 ms) +[ RUN ] check_and_mutate.multi_get_mutations +[ OK ] check_and_mutate.multi_get_mutations (12003 ms) +[ RUN ] check_and_mutate.expire_seconds +D2020-12-27 22:23:48.3974 (1609079028397858175 58c6f) mimic.io-thrd.363631: client session created, remote_server = 192.168.123.119:34801, current_count = 4 +D2020-12-27 22:23:48.3984 (1609079028398062309 58c73) mimic.io-thrd.363635: client session connected, remote_server = 192.168.123.119:34801, current_count = 4 +[ OK ] check_and_mutate.expire_seconds (12004 ms) +[----------] 12 tests from check_and_mutate (24049 ms total) + +[----------] Global test environment tear-down +[==========] 12 tests from 1 test case ran. (24049 ms total) +[ PASSED ] 12 tests. 
+dsn exit with code 0 +W2020-12-27 22:24:00.412 (1609079040412124515 58c9d) unknown.io-thrd.363677:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:24:00.412 (1609079040412162033 58c9d) unknown.io-thrd.363677:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:24:00.4161 (1609079040416666847 58c9d) unknown.io-thrd.363677: process(363677) start: 1609079040411, date: 2020-12-27 22:24:00.411 +D2020-12-27 22:24:00.417 (1609079040417057348 58c9d) unknown.io-thrd.363677: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:24:00.4171 (1609079040417068189 58c9d) unknown.io-thrd.363677: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... +D2020-12-27 22:24:00.417� (1609079040417157221 58c9d) unknown.io-thrd.363677: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:24:00.4171 (1609079040417338452 58c9d) unknown.io-thrd.363677: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... +D2020-12-27 22:24:00.417 (1609079040417510886 58c9d) unknown.io-thrd.363677: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:24:00.4171 (1609079040417519010 58c9d) unknown.io-thrd.363677: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:24:00.417� (1609079040417582096 58c9d) unknown.io-thrd.363677: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:24:00.4171 (1609079040417721828 58c9d) unknown.io-thrd.363677: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:24:00.417 (1609079040417883275 58c9d) unknown.io-thrd.363677: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:24:00.4171 (1609079040417890731 58c9d) unknown.io-thrd.363677: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:24:00.417� (1609079040417951605 58c9d) unknown.io-thrd.363677: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:24:00.4181 (1609079040418083835 58c9d) unknown.io-thrd.363677: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... +D2020-12-27 22:24:00.418 (1609079040418095270 58c9d) unknown.io-thrd.363677: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:24:00.4181 (1609079040418174923 58cb6) mimic.io-thrd.363702: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:24:00.4188 (1609079040418479164 58c9d) unknown.io-thrd.363677: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:24:00.4181 (1609079040418525809 58cbf) mimic.io-thrd.363711: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:24:00.4184 (1609079040418785806 58c9d) unknown.io-thrd.363677: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... 
+D2020-12-27 22:24:00.418 (1609079040418875541 58c9d) unknown.io-thrd.363677: MainThread: app_name=temp +Note: Google Test filter = scan.* +[==========] Running 8 tests from 1 test case. +[----------] Global test environment set-up. +[----------] 8 tests from scan +D2020-12-27 22:24:00.418 (1609079040418994144 58c9d) unknown.io-thrd.363677: SetUp... +D2020-12-27 22:24:00.4180 (1609079040418999807 58c9d) unknown.io-thrd.363677: CLEARING_DATABASE... +D2020-12-27 22:24:00.4191 (1609079040419063872 58c9d) mimic.io-thrd.363677: client session created, remote_server = 127.0.0.1:34603, current_count = 1 +D2020-12-27 22:24:00.4191 (1609079040419176315 58c9e) mimic.io-thrd.363678: client session connected, remote_server = 127.0.0.1:34603, current_count = 1 +D2020-12-27 22:24:00.4192 (1609079040419386405 58ca1) mimic.io-thrd.363681: client session created, remote_server = 192.168.123.119:34601, current_count = 2 +D2020-12-27 22:24:00.4192 (1609079040419498173 58ca0) mimic.io-thrd.363680: client session connected, remote_server = 192.168.123.119:34601, current_count = 2 +D2020-12-27 22:24:00.4203 (1609079040420100757 58cb9) mimic.default2.01008c9d00030005: client session created, remote_server = 192.168.123.119:34803, current_count = 3 +D2020-12-27 22:24:00.4203 (1609079040420200564 58c9f) mimic.io-thrd.363679: client session connected, remote_server = 192.168.123.119:34803, current_count = 3 +D2020-12-27 22:24:00.4204 (1609079040420444308 58cba) mimic.default3.01008c9d00030003: client session created, remote_server = 192.168.123.119:34802, current_count = 4 +D2020-12-27 22:24:00.4204 (1609079040420523830 58ca1) mimic.io-thrd.363681: client session connected, remote_server = 192.168.123.119:34802, current_count = 4 +D2020-12-27 22:24:00.4205 (1609079040420755632 58cbb) mimic.default4.0101000300000001: client session created, remote_server = 192.168.123.119:34801, current_count = 5 +D2020-12-27 22:24:00.4205 (1609079040420834961 58c9e) mimic.io-thrd.363678: client session connected, remote_server = 192.168.123.119:34801, current_count = 5 +D2020-12-27 22:24:00.425 (1609079040425047498 58c9d) mimic.io-thrd.363677: Database cleared. +D2020-12-27 22:24:00.425 (1609079040425056523 58c9d) mimic.io-thrd.363677: FILLING_DATABASE... +D2020-12-27 22:24:03.110 (1609079043110702124 58c9d) mimic.io-thrd.363677: Database filled. +[ RUN ] scan.ALL_SORT_KEY +D2020-12-27 22:24:03.110 (1609079043110723372 58c9d) mimic.io-thrd.363677: TESTING_HASH_SCAN, ALL SORT_KEYS .... +[ OK ] scan.ALL_SORT_KEY (3 ms) +[ RUN ] scan.BOUND_INCLUSIVE +D2020-12-27 22:24:03.113 (1609079043113865723 58c9d) mimic.io-thrd.363677: TESTING_HASH_SCAN, [start, stop]... +[ OK ] scan.BOUND_INCLUSIVE (1 ms) +[ RUN ] scan.BOUND_EXCLUSIVE +D2020-12-27 22:24:03.114 (1609079043114944300 58c9d) mimic.io-thrd.363677: TESTING_HASH_SCAN, (start, stop)... +[ OK ] scan.BOUND_EXCLUSIVE (1 ms) +[ RUN ] scan.ONE_POINT +D2020-12-27 22:24:03.115 (1609079043115396183 58c9d) mimic.io-thrd.363677: TESTING_HASH_SCAN, [start, start]... +[ OK ] scan.ONE_POINT (0 ms) +[ RUN ] scan.HALF_INCLUSIVE +D2020-12-27 22:24:03.115 (1609079043115511652 58c9d) mimic.io-thrd.363677: TESTING_HASH_SCAN, [start, start)... +[ OK ] scan.HALF_INCLUSIVE (0 ms) +[ RUN ] scan.VOID_SPAN +D2020-12-27 22:24:03.115 (1609079043115527255 58c9d) mimic.io-thrd.363677: TESTING_HASH_SCAN, [stop, start]... +[ OK ] scan.VOID_SPAN (0 ms) +[ RUN ] scan.OVERALL +D2020-12-27 22:24:03.115 (1609079043115544139 58c9d) mimic.io-thrd.363677: TEST OVERALL_SCAN... 
+[ OK ] scan.OVERALL (32 ms) +[ RUN ] scan.ITERATION_TIME_LIMIT +[ OK ] scan.ITERATION_TIME_LIMIT (62735 ms) +D2020-12-27 22:25:05.882 (1609079105882988152 58c9d) mimic.io-thrd.363677: TearDown... +D2020-12-27 22:25:05.882d (1609079105882992743 58c9d) mimic.io-thrd.363677: CLEARING_DATABASE... +D2020-12-27 22:25:12.210 (1609079112210433934 58c9d) mimic.io-thrd.363677: Database cleared. +[----------] 8 tests from scan (62772 ms total) + +[----------] Global test environment tear-down +[==========] 8 tests from 1 test case ran. (71792 ms total) +[ PASSED ] 8 tests. +dsn exit with code 0 +W2020-12-27 22:25:12.218 (1609079112218283551 58cf6) unknown.io-thrd.363766:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:25:12.218 (1609079112218302991 58cf6) unknown.io-thrd.363766:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:25:12.2207 (1609079112220416976 58cf6) unknown.io-thrd.363766: process(363766) start: 1609079112217, date: 2020-12-27 22:25:12.217 +D2020-12-27 22:25:12.220 (1609079112220655622 58cf6) unknown.io-thrd.363766: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:25:12.2201 (1609079112220661002 58cf6) unknown.io-thrd.363766: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... +D2020-12-27 22:25:12.220� (1609079112220705353 58cf6) unknown.io-thrd.363766: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:25:12.2201 (1609079112220786884 58cf6) unknown.io-thrd.363766: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... +D2020-12-27 22:25:12.220 (1609079112220875385 58cf6) unknown.io-thrd.363766: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:25:12.2201 (1609079112220878926 58cf6) unknown.io-thrd.363766: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:25:12.220� (1609079112220918364 58cf6) unknown.io-thrd.363766: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:25:12.2211 (1609079112221000416 58cf6) unknown.io-thrd.363766: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:25:12.221 (1609079112221097498 58cf6) unknown.io-thrd.363766: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:25:12.2211 (1609079112221101007 58cf6) unknown.io-thrd.363766: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:25:12.221� (1609079112221130243 58cf6) unknown.io-thrd.363766: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:25:12.2211 (1609079112221197501 58cf6) unknown.io-thrd.363766: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... +D2020-12-27 22:25:12.221 (1609079112221203603 58cf6) unknown.io-thrd.363766: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:25:12.2211 (1609079112221236244 58d0f) mimic.io-thrd.363791: You may need priviledge to set thread priority. 
errno = 1 +D2020-12-27 22:25:12.2218 (1609079112221391764 58cf6) unknown.io-thrd.363766: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:25:12.2211 (1609079112221427351 58d18) mimic.io-thrd.363800: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:25:12.2214 (1609079112221491762 58cf6) unknown.io-thrd.363766: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... +D2020-12-27 22:25:12.221 (1609079112221549526 58cf6) unknown.io-thrd.363766: MainThread: app_name=temp +Note: Google Test filter = ttl.* +[==========] Running 2 tests from 1 test case. +[----------] Global test environment set-up. +[----------] 2 tests from ttl +[ RUN ] ttl.set_without_default_ttl +D2020-12-27 22:25:12.2211 (1609079112221679364 58cf6) mimic.io-thrd.363766: client session created, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:25:12.2211 (1609079112221757379 58cf8) mimic.io-thrd.363768: client session connected, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:26:17.2502 (1609079177250566108 58cf6) mimic.io-thrd.363766: client session created, remote_server = 127.0.0.1:34602, current_count = 2 +D2020-12-27 22:26:17.2502 (1609079177250798106 58cf9) mimic.io-thrd.363769: client session connected, remote_server = 127.0.0.1:34602, current_count = 2 +D2020-12-27 22:26:17.2513 (1609079177251144890 58cf8) mimic.io-thrd.363768: client session created, remote_server = 192.168.123.119:34601, current_count = 3 +D2020-12-27 22:26:17.2513 (1609079177251300715 58cf7) mimic.io-thrd.363767: client session connected, remote_server = 192.168.123.119:34601, current_count = 3 +D2020-12-27 22:26:17.2514 (1609079177251764501 58d13) mimic.default3.01008cf600030008: client session created, remote_server = 192.168.123.119:34802, current_count = 4 +D2020-12-27 22:26:17.2514 (1609079177251922744 58cf8) mimic.io-thrd.363768: client session connected, remote_server = 192.168.123.119:34802, current_count = 4 +[ OK ] ttl.set_without_default_ttl (140044 ms) +[ RUN ] ttl.set_with_default_ttl +[ OK ] ttl.set_with_default_ttl (140020 ms) +[----------] 2 tests from ttl (280064 ms total) + +[----------] Global test environment tear-down +[==========] 2 tests from 1 test case ran. (280064 ms total) +[ PASSED ] 2 tests. +dsn exit with code 0 +W2020-12-27 22:29:52.300 (1609079392300079767 59484) unknown.io-thrd.365700:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:29:52.300 (1609079392300127845 59484) unknown.io-thrd.365700:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:29:52.3038 (1609079392303184719 59484) unknown.io-thrd.365700: process(365700) start: 1609079392298, date: 2020-12-27 22:29:52.298 +D2020-12-27 22:29:52.303 (1609079392303411038 59484) unknown.io-thrd.365700: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:29:52.3031 (1609079392303416033 59484) unknown.io-thrd.365700: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... 
+D2020-12-27 22:29:52.303� (1609079392303461749 59484) unknown.io-thrd.365700: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:29:52.3031 (1609079392303585235 59484) unknown.io-thrd.365700: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... +D2020-12-27 22:29:52.303 (1609079392303728158 59484) unknown.io-thrd.365700: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:29:52.3031 (1609079392303731745 59484) unknown.io-thrd.365700: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:29:52.303� (1609079392303760426 59484) unknown.io-thrd.365700: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:29:52.3031 (1609079392303842828 59484) unknown.io-thrd.365700: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:29:52.303 (1609079392303974109 59484) unknown.io-thrd.365700: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:29:52.3031 (1609079392303977768 59484) unknown.io-thrd.365700: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:29:52.304� (1609079392304022333 59484) unknown.io-thrd.365700: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:29:52.3041 (1609079392304114157 59484) unknown.io-thrd.365700: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... +D2020-12-27 22:29:52.304 (1609079392304147445 59484) unknown.io-thrd.365700: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:29:52.3041 (1609079392304213446 5949d) mimic.io-thrd.365725: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:29:52.3048 (1609079392304700469 59484) unknown.io-thrd.365700: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:29:52.3041 (1609079392304736998 594a6) mimic.io-thrd.365734: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:29:52.3044 (1609079392304818522 59484) unknown.io-thrd.365700: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... +D2020-12-27 22:29:52.304 (1609079392304874490 59484) unknown.io-thrd.365700: MainThread: app_name=temp +Note: Google Test filter = lost_log.* +[==========] Running 1 test from 1 test case. +[----------] Global test environment set-up. +[----------] 1 test from lost_log +[ RUN ] lost_log.slog +create app table_for_lost_log +D2020-12-27 22:29:52.3051 (1609079392305007008 59484) mimic.io-thrd.365700: client session created, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:29:52.3051 (1609079392305087610 59485) mimic.io-thrd.365701: client session connected, remote_server = 127.0.0.1:34601, current_count = 1 +create app table_for_lost_log succeed, waiting for app ready +table_for_lost_log not ready yet, still waiting... (0/4) +table_for_lost_log not ready yet, still waiting... (0/4) +table_for_lost_log is ready now: (4/4) +table_for_lost_log is ready now! 
+D2020-12-27 22:29:58.3132 (1609079398313141207 5949e) mimic.default0.010094840003000f: client session created, remote_server = 192.168.123.119:34801, current_count = 2 +D2020-12-27 22:29:58.3132 (1609079398313432231 59486) mimic.io-thrd.365702: client session connected, remote_server = 192.168.123.119:34801, current_count = 2 +D2020-12-27 22:29:58.3143 (1609079398314632342 59484) mimic.io-thrd.365700: client session created, remote_server = 192.168.123.119:34802, current_count = 3 +D2020-12-27 22:29:58.3143 (1609079398314844490 59485) mimic.io-thrd.365701: client session connected, remote_server = 192.168.123.119:34802, current_count = 3 +D2020-12-27 22:29:58.3154 (1609079398315804875 59484) mimic.io-thrd.365700: client session created, remote_server = 192.168.123.119:34803, current_count = 4 +D2020-12-27 22:29:58.3154 (1609079398315983425 59486) mimic.io-thrd.365702: client session connected, remote_server = 192.168.123.119:34803, current_count = 4 +============ +start global_env() +meta1 pid: 361298 +meta1 dir: /home/smilencer/Code/incubator-pegasus/onebox/meta1 +project root: /home/smilencer/Code/incubator-pegasus +working dir: /home/smilencer/Code/incubator-pegasus/src/builder/test/function_test +D2020-12-27 22:30:01.745 (1609079401745664431 59484) mimic.io-thrd.365700: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +get ip: 192.168.123.119 +first stop the cluster +D2020-12-27 22:30:01.771 (1609079401771675637 59488) mimic.io-thrd.365704: asio read from 127.0.0.1:34601 failed: End of file +D2020-12-27 22:30:01.7713 (1609079401771692441 59488) mimic.io-thrd.365704: client session disconnected, remote_server = 127.0.0.1:34601, current_count = 3 +truncate slog for replica1 +ls -lcrt onebox/replica1/data/replica/slog | tail -n 1 | awk '{print $5,$9}' +truncate file with size: (log.1.0, 21350580) +truncate -s 7116860 onebox/replica1/data/replica/slog/log.1.0 +D2020-12-27 22:30:01.792 (1609079401792579400 59488) mimic.io-thrd.365704: asio read from 192.168.123.119:34803 failed: End of file +D2020-12-27 22:30:01.7922 (1609079401792589233 59488) mimic.io-thrd.365704: client session disconnected, remote_server = 192.168.123.119:34803, current_count = 2 +D2020-12-27 22:30:01.792 (1609079401792789929 59486) mimic.io-thrd.365702: asio read from 192.168.123.119:34802 failed: End of file +D2020-12-27 22:30:01.7921 (1609079401792795808 59486) mimic.io-thrd.365702: client session disconnected, remote_server = 192.168.123.119:34802, current_count = 1 +D2020-12-27 22:30:01.796 (1609079401796138502 59487) mimic.io-thrd.365703: asio read from 192.168.123.119:34801 failed: End of file +D2020-12-27 22:30:01.7960 (1609079401796151108 59487) mimic.io-thrd.365703: client session disconnected, remote_server = 192.168.123.119:34801, current_count = 0 +after truncated file size: 7116860 +restart onebox again +JMX enabled by default +Using config: /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/bin/../conf/zoo.cfg +Starting zookeeper ... already running as process 361261. 
+Zookeeper started at port 22181 +starting server +cd /home/smilencer/Code/incubator-pegasus/onebox/meta1 && /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta &>result & +smilenc+ 366045 366006 0 22:30 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/meta2 && /home/smilencer/Code/incubator-pegasus/onebox/meta2/pegasus_server config.ini -app_list meta &>result & +smilenc+ 366053 366006 0 22:30 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta2/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/meta3 && /home/smilencer/Code/incubator-pegasus/onebox/meta3/pegasus_server config.ini -app_list meta &>result & +smilenc+ 366095 366006 0 22:30 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta3/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1 && /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica &>result & +smilenc+ 366138 366006 0 22:30 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica2 && /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica &>result & +smilenc+ 366180 366006 0 22:30 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica3 && /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica &>result & +smilenc+ 366252 366006 0 22:30 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +D2020-12-27 22:30:04.9941 (1609079404994936436 59484) mimic.io-thrd.365700: client session created, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:30:04.9951 (1609079404995065255 59485) mimic.io-thrd.365701: client session connected, remote_server = 127.0.0.1:34601, current_count = 1 +table_for_lost_log not ready yet, still waiting... (2/4) +table_for_lost_log not ready yet, still waiting... (2/4) +table_for_lost_log not ready yet, still waiting... (2/4) +table_for_lost_log not ready yet, still waiting... (1/4) +table_for_lost_log not ready yet, still waiting... (1/4) +table_for_lost_log not ready yet, still waiting... (3/4) +table_for_lost_log not ready yet, still waiting... (3/4) +table_for_lost_log not ready yet, still waiting... (3/4) +table_for_lost_log not ready yet, still waiting... 
(3/4) +table_for_lost_log is ready now: (4/4) +check keys wrote before +D2020-12-27 22:30:23.1 (1609079423001953944 59484) mimic.io-thrd.365700: client session created, remote_server = 192.168.123.119:34801, current_count = 2 +D2020-12-27 22:30:23.2 (1609079423002124953 59485) mimic.io-thrd.365701: client session connected, remote_server = 192.168.123.119:34801, current_count = 2 +D2020-12-27 22:30:23.2 (1609079423002396009 594a4) mimic.default6.0100948400034e4d: clear partition configuration cache 3.3 due to access failure ERR_INVALID_STATE +D2020-12-27 22:30:23.11 (1609079423011099541 5949e) mimic.default0.0101000400000002: client session created, remote_server = 192.168.123.119:34802, current_count = 3 +D2020-12-27 22:30:23.11 (1609079423011292328 59486) mimic.io-thrd.365702: client session connected, remote_server = 192.168.123.119:34802, current_count = 3 +D2020-12-27 22:30:23.12 (1609079423012483380 59484) mimic.io-thrd.365700: client session created, remote_server = 192.168.123.119:34803, current_count = 4 +D2020-12-27 22:30:23.12 (1609079423012740715 59485) mimic.io-thrd.365701: client session connected, remote_server = 192.168.123.119:34803, current_count = 4 +D2020-12-27 22:30:23.13 (1609079423013551086 5949f) mimic.default1.0100948400034e53: clear partition configuration cache 3.0 due to access failure ERR_INVALID_STATE +[ OK ] lost_log.slog (31368 ms) +[----------] 1 test from lost_log (31368 ms total) + +[----------] Global test environment tear-down +[==========] 1 test from 1 test case ran. (31368 ms total) +[ PASSED ] 1 test. +dsn exit with code 0 +W2020-12-27 22:30:23.679 (1609079423679954654 59e6f) unknown.io-thrd.368239:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:30:23.679 (1609079423679974967 59e6f) unknown.io-thrd.368239:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:30:23.6829 (1609079423682079863 59e6f) unknown.io-thrd.368239: process(368239) start: 1609079423679, date: 2020-12-27 22:30:23.679 +D2020-12-27 22:30:23.682 (1609079423682366015 59e6f) unknown.io-thrd.368239: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:30:23.6821 (1609079423682372016 59e6f) unknown.io-thrd.368239: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... +D2020-12-27 22:30:23.682 (1609079423682424494 59e6f) unknown.io-thrd.368239: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:30:23.6821 (1609079423682522221 59e6f) unknown.io-thrd.368239: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... +D2020-12-27 22:30:23.682 (1609079423682630949 59e6f) unknown.io-thrd.368239: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:30:23.6821 (1609079423682634756 59e6f) unknown.io-thrd.368239: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:30:23.682 (1609079423682665185 59e6f) unknown.io-thrd.368239: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:30:23.6821 (1609079423682736957 59e6f) unknown.io-thrd.368239: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... 
+D2020-12-27 22:30:23.682 (1609079423682818860 59e6f) unknown.io-thrd.368239: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:30:23.6821 (1609079423682822359 59e6f) unknown.io-thrd.368239: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:30:23.682 (1609079423682851872 59e6f) unknown.io-thrd.368239: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:30:23.6821 (1609079423682934927 59e6f) unknown.io-thrd.368239: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... +D2020-12-27 22:30:23.682 (1609079423682942290 59e6f) unknown.io-thrd.368239: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:30:23.6821 (1609079423682977180 59e88) mimic.io-thrd.368264: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:30:23.6838 (1609079423683145887 59e6f) unknown.io-thrd.368239: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:30:23.6831 (1609079423683180891 59e91) mimic.io-thrd.368273: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:30:23.6834 (1609079423683254167 59e6f) unknown.io-thrd.368239: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... +D2020-12-27 22:30:23.683 (1609079423683314305 59e6f) unknown.io-thrd.368239: MainThread: app_name=temp +Note: Google Test filter = drop_and_recall.* +[==========] Running 1 test from 1 test case. +[----------] Global test environment set-up. +[----------] 1 test from drop_and_recall +[ RUN ] drop_and_recall.simple +create app simple_table +D2020-12-27 22:30:23.6831 (1609079423683454432 59e6f) mimic.io-thrd.368239: client session created, remote_server = 127.0.0.1:34602, current_count = 1 +D2020-12-27 22:30:23.6831 (1609079423683556560 59e70) mimic.io-thrd.368240: client session connected, remote_server = 127.0.0.1:34602, current_count = 1 +D2020-12-27 22:30:23.6832 (1609079423683789451 59e71) mimic.io-thrd.368241: client session created, remote_server = 192.168.123.119:34601, current_count = 2 +D2020-12-27 22:30:23.6832 (1609079423683860345 59e72) mimic.io-thrd.368242: client session connected, remote_server = 192.168.123.119:34601, current_count = 2 +create app simple_table succeed, waiting for app ready +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (3/4) +simple_table not ready yet, still waiting... (3/4) +simple_table not ready yet, still waiting... (3/4) +simple_table not ready yet, still waiting... (3/4) +simple_table not ready yet, still waiting... (3/4) +simple_table is ready now: (4/4) +simple_table is ready now! 
+app_id = 4 +write 10000 keys +D2020-12-27 22:30:43.6983 (1609079443698641575 59e6f) mimic.io-thrd.368239: client session created, remote_server = 127.0.0.1:34603, current_count = 3 +D2020-12-27 22:30:43.6983 (1609079443698861095 59e71) mimic.io-thrd.368241: client session connected, remote_server = 127.0.0.1:34603, current_count = 3 +D2020-12-27 22:30:43.6994 (1609079443699666700 59e8f) mimic.default6.01009e6f00030027: client session created, remote_server = 192.168.123.119:34801, current_count = 4 +D2020-12-27 22:30:43.6994 (1609079443699836891 59e72) mimic.io-thrd.368242: client session connected, remote_server = 192.168.123.119:34801, current_count = 4 +D2020-12-27 22:30:43.7015 (1609079443701107968 59e6f) mimic.io-thrd.368239: client session created, remote_server = 192.168.123.119:34802, current_count = 5 +D2020-12-27 22:30:43.7015 (1609079443701278065 59e70) mimic.io-thrd.368240: client session connected, remote_server = 192.168.123.119:34802, current_count = 5 +D2020-12-27 22:30:43.7026 (1609079443702319570 59e6f) mimic.io-thrd.368239: client session created, remote_server = 192.168.123.119:34803, current_count = 6 +D2020-12-27 22:30:43.7026 (1609079443702491021 59e72) mimic.io-thrd.368242: client session connected, remote_server = 192.168.123.119:34803, current_count = 6 +drop table simple_table +D2020-12-27 22:31:05.1710 (1609079465171015363 59e8e) mimic.default5.01009e6f00034e70: clear partition configuration cache 4.0 due to access failure ERR_OBJECT_NOT_FOUND +E2020-12-27 22:31:05.1790 (1609079465179714630 59e89) mimic.default0.0101000600000003: simple_table.client: query config reply, gpid = 4.0, err = ERR_OBJECT_NOT_FOUND +partition 0 is removed from server +D2020-12-27 22:31:05.1801 (1609079465180341510 59e8a) mimic.default1.01009e6f00034e72: clear partition configuration cache 4.1 due to access failure ERR_OBJECT_NOT_FOUND +E2020-12-27 22:31:05.1891 (1609079465189246031 59e8c) mimic.default3.0101000700000002: simple_table.client: query config reply, gpid = 4.1, err = ERR_OBJECT_NOT_FOUND +partition 1 is removed from server +D2020-12-27 22:31:05.1892 (1609079465189869989 59e8e) mimic.default5.01009e6f00034e74: clear partition configuration cache 4.2 due to access failure ERR_OBJECT_NOT_FOUND +E2020-12-27 22:31:05.1982 (1609079465198633917 59e89) mimic.default0.0101000600000006: simple_table.client: query config reply, gpid = 4.2, err = ERR_OBJECT_NOT_FOUND +partition 2 is removed from server +D2020-12-27 22:31:05.1993 (1609079465199257164 59e8a) mimic.default1.01009e6f00034e76: clear partition configuration cache 4.3 due to access failure ERR_OBJECT_NOT_FOUND +E2020-12-27 22:31:05.2083 (1609079465208106932 59e8c) mimic.default3.0101000700000005: simple_table.client: query config reply, gpid = 4.3, err = ERR_OBJECT_NOT_FOUND +partition 3 is removed from server +start to recall table +recall app ok, id(4), name(simple_table), partition_count(4), wait it ready +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... 
(0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (0/4) +simple_table not ready yet, still waiting... (1/4) +simple_table not ready yet, still waiting... (1/4) +simple_table not ready yet, still waiting... (1/4) +simple_table not ready yet, still waiting... (1/4) +simple_table not ready yet, still waiting... (3/4) +simple_table is ready now: (4/4) +[ OK ] drop_and_recall.simple (92210 ms) +[----------] 1 test from drop_and_recall (92210 ms total) + +[----------] Global test environment tear-down +[==========] 1 test from 1 test case ran. (92210 ms total) +[ PASSED ] 1 test. +dsn exit with code 0 +W2020-12-27 22:31:55.900 (1609079515900848076 5a4a5) unknown.io-thrd.369829:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:31:55.900 (1609079515900867957 5a4a5) unknown.io-thrd.369829:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:31:55.9030 (1609079515903015373 5a4a5) unknown.io-thrd.369829: process(369829) start: 1609079515900, date: 2020-12-27 22:31:55.900 +D2020-12-27 22:31:55.903 (1609079515903258686 5a4a5) unknown.io-thrd.369829: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:31:55.9031 (1609079515903263787 5a4a5) unknown.io-thrd.369829: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... +D2020-12-27 22:31:55.903 (1609079515903307109 5a4a5) unknown.io-thrd.369829: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:31:55.9031 (1609079515903391677 5a4a5) unknown.io-thrd.369829: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... +D2020-12-27 22:31:55.903 (1609079515903475251 5a4a5) unknown.io-thrd.369829: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:31:55.9031 (1609079515903479138 5a4a5) unknown.io-thrd.369829: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:31:55.903 (1609079515903523364 5a4a5) unknown.io-thrd.369829: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:31:55.9031 (1609079515903607063 5a4a5) unknown.io-thrd.369829: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:31:55.903 (1609079515903697709 5a4a5) unknown.io-thrd.369829: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:31:55.9031 (1609079515903701360 5a4a5) unknown.io-thrd.369829: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:31:55.903 (1609079515903731375 5a4a5) unknown.io-thrd.369829: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:31:55.9031 (1609079515903812465 5a4a5) unknown.io-thrd.369829: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... 
+D2020-12-27 22:31:55.903 (1609079515903818116 5a4a5) unknown.io-thrd.369829: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:31:55.9031 (1609079515903857634 5a4be) mimic.io-thrd.369854: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:31:55.9058 (1609079515905063700 5a4a5) unknown.io-thrd.369829: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:31:55.9051 (1609079515905318833 5a4c7) mimic.io-thrd.369863: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:31:55.9054 (1609079515905870446 5a4a5) unknown.io-thrd.369829: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... +D2020-12-27 22:31:55.905 (1609079515905946287 5a4a5) unknown.io-thrd.369829: MainThread: app_name=temp +Note: Google Test filter = restore_test.* +[==========] Running 1 test from 1 test case. +[----------] Global test environment set-up. +[----------] 1 test from restore_test +[ RUN ] restore_test.restore +============ +start global_env() +meta1 pid: 366045 +meta1 dir: /home/smilencer/Code/incubator-pegasus/onebox/meta1 +project root: /home/smilencer/Code/incubator-pegasus +working dir: /home/smilencer/Code/incubator-pegasus/src/builder/test/function_test +D2020-12-27 22:31:55.918 (1609079515918669294 5a4a5) unknown.io-thrd.369829: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +get ip: 192.168.123.119 +/home/smilencer/Code/incubator-pegasus +JMX enabled by default +Using config: /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/bin/../conf/zoo.cfg +Stopping zookeeper ... STOPPED +Clearing zookeeper ... CLEARED +JMX enabled by default +Using config: /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/bin/../conf/zoo.cfg +Starting zookeeper ... 
STARTED +Zookeeper started at port 22181 +starting server +cd /home/smilencer/Code/incubator-pegasus/onebox/meta1 && /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta &>result & +smilenc+ 369975 369913 0 22:31 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/meta2 && /home/smilencer/Code/incubator-pegasus/onebox/meta2/pegasus_server config.ini -app_list meta &>result & +smilenc+ 369983 369913 0 22:31 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta2/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/meta3 && /home/smilencer/Code/incubator-pegasus/onebox/meta3/pegasus_server config.ini -app_list meta &>result & +smilenc+ 370025 369913 0 22:31 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta3/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1 && /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica &>result & +smilenc+ 370068 369913 0 22:31 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica2 && /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica &>result & +smilenc+ 370110 369913 0 22:31 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica3 && /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica &>result & +smilenc+ 370166 369913 0 22:31 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +D2020-12-27 22:32:02.3091 (1609079522309836758 5a4a5) mimic.io-thrd.369829: client session created, remote_server = 127.0.0.1:34603, current_count = 1 +D2020-12-27 22:32:02.3101 (1609079522310055411 5a4a7) mimic.io-thrd.369831: client session connected, remote_server = 127.0.0.1:34603, current_count = 1 +D2020-12-27 22:32:02.3102 (1609079522310532557 5a4a8) mimic.io-thrd.369832: client session created, remote_server = 192.168.123.119:34601, current_count = 2 +D2020-12-27 22:32:02.3102 (1609079522310706894 5a4a6) mimic.io-thrd.369830: client session connected, remote_server = 192.168.123.119:34601, current_count = 2 +create app backup_test succeed, waiting for app ready +backup_test not ready yet, still waiting... (0/8) +backup_test not ready yet, still waiting... (0/8) +backup_test not ready yet, still waiting... (0/8) +backup_test not ready yet, still waiting... (3/8) +backup_test not ready yet, still waiting... (3/8) +backup_test not ready yet, still waiting... (3/8) +backup_test not ready yet, still waiting... (3/8) +backup_test not ready yet, still waiting... (3/8) +backup_test is ready now: (8/8) +backup_test is ready now! +start to write 10000 key-value pairs, using set().. 
+D2020-12-27 22:32:20.3293 (1609079540329648019 5a4a5) mimic.io-thrd.369829: client session created, remote_server = 127.0.0.1:34601, current_count = 3 +D2020-12-27 22:32:20.3293 (1609079540329867165 5a4a6) mimic.io-thrd.369830: client session connected, remote_server = 127.0.0.1:34601, current_count = 3 +D2020-12-27 22:32:20.3304 (1609079540330331499 5a4c4) mimic.default5.0100a4a500030024: client session created, remote_server = 192.168.123.119:34803, current_count = 4 +D2020-12-27 22:32:20.3304 (1609079540330586402 5a4a9) mimic.io-thrd.369833: client session connected, remote_server = 192.168.123.119:34803, current_count = 4 +D2020-12-27 22:32:20.3325 (1609079540332644553 5a4a5) mimic.io-thrd.369829: client session created, remote_server = 192.168.123.119:34801, current_count = 5 +D2020-12-27 22:32:20.3325 (1609079540332895048 5a4a8) mimic.io-thrd.369832: client session connected, remote_server = 192.168.123.119:34801, current_count = 5 +D2020-12-27 22:32:20.3346 (1609079540334544607 5a4a5) mimic.io-thrd.369829: client session created, remote_server = 192.168.123.119:34802, current_count = 6 +D2020-12-27 22:32:20.3346 (1609079540334696675 5a4a9) mimic.io-thrd.369833: client session connected, remote_server = 192.168.123.119:34802, current_count = 6 +write data complete, total time = 2s +add backup policy succeed, policy_name = policy_1 +add backup policy complete with err = ERR_OK +start testing restore... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +sleep 3s to wait backup complete... +first backup_timestamp = 1609079542808 +sleep 1 second to wait complete... + new app_id = 4 +sleep 3s to wait app become healthy... +partition[1] is unhealthy, coz replica_cont = 1, but max_replica_count = 3 +sleep 3s to wait app become healthy... +partition[1] is unhealthy, coz replica_cont = 1, but max_replica_count = 3 +sleep 3s to wait app become healthy... +partition[1] is unhealthy, coz replica_cont = 1, but max_replica_count = 3 +sleep 3s to wait app become healthy... +start to get 10000 key-value pairs, using get()... +verify data complete, total time = 0s +restore passed..... +JMX enabled by default +Using config: /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/bin/../conf/zoo.cfg +Stopping zookeeper ... STOPPED +Clearing zookeeper ... CLEARED +JMX enabled by default +Using config: /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/bin/../conf/zoo.cfg +Starting zookeeper ... 
STARTED +Zookeeper started at port 22181 +starting server +cd /home/smilencer/Code/incubator-pegasus/onebox/meta1 && /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta &>result & +smilenc+ 375332 375270 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/meta2 && /home/smilencer/Code/incubator-pegasus/onebox/meta2/pegasus_server config.ini -app_list meta &>result & +smilenc+ 375340 375270 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta2/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/meta3 && /home/smilencer/Code/incubator-pegasus/onebox/meta3/pegasus_server config.ini -app_list meta &>result & +smilenc+ 375382 375270 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta3/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1 && /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica &>result & +smilenc+ 375425 375270 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica2 && /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica &>result & +smilenc+ 375467 375270 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica3 && /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica &>result & +smilenc+ 375523 375270 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +Wait cluster to become healthy... +Sleeped for 1 seconds +Sleeped for 2 seconds +Sleeped for 3 seconds +Sleeped for 4 seconds +Sleeped for 5 seconds +Sleeped for 6 seconds +Sleeped for 7 seconds +Sleeped for 8 seconds +Sleeped for 9 seconds +Sleeped for 10 seconds +Sleeped for 11 seconds +Sleeped for 12 seconds +Sleeped for 13 seconds +Sleeped for 14 seconds +Sleeped for 15 seconds +Sleeped for 16 seconds +Sleeped for 17 seconds +Sleeped for 18 seconds +Sleeped for 19 seconds +Sleeped for 20 seconds +Cluster becomes healthy. 
+D2020-12-27 22:33:20.564 (1609079600564789770 5a4a9) mimic.io-thrd.369833: asio read from 127.0.0.1:34601 failed: End of file +D2020-12-27 22:33:20.5645 (1609079600564802484 5a4a9) mimic.io-thrd.369833: client session disconnected, remote_server = 127.0.0.1:34601, current_count = 5 +D2020-12-27 22:33:20.564 (1609079600564875813 5a4a9) mimic.io-thrd.369833: asio read from 192.168.123.119:34601 failed: End of file +D2020-12-27 22:33:20.5644 (1609079600564882032 5a4a9) mimic.io-thrd.369833: client session disconnected, remote_server = 192.168.123.119:34601, current_count = 4 +D2020-12-27 22:33:20.565 (1609079600565142154 5a4a9) mimic.io-thrd.369833: asio read from 127.0.0.1:34603 failed: End of file +D2020-12-27 22:33:20.5653 (1609079600565152340 5a4a9) mimic.io-thrd.369833: client session disconnected, remote_server = 127.0.0.1:34603, current_count = 3 +D2020-12-27 22:33:20.585 (1609079600585167297 5a4a9) mimic.io-thrd.369833: asio read from 192.168.123.119:34802 failed: End of file +D2020-12-27 22:33:20.5852 (1609079600585180010 5a4a9) mimic.io-thrd.369833: client session disconnected, remote_server = 192.168.123.119:34802, current_count = 2 +D2020-12-27 22:33:20.585 (1609079600585414540 5a4a7) mimic.io-thrd.369831: asio read from 192.168.123.119:34803 failed: End of file +D2020-12-27 22:33:20.5851 (1609079600585424725 5a4a7) mimic.io-thrd.369831: client session disconnected, remote_server = 192.168.123.119:34803, current_count = 1 +D2020-12-27 22:33:20.587 (1609079600587203859 5a4a6) mimic.io-thrd.369830: asio read from 192.168.123.119:34801 failed: End of file +D2020-12-27 22:33:20.5870 (1609079600587214228 5a4a6) mimic.io-thrd.369830: client session disconnected, remote_server = 192.168.123.119:34801, current_count = 0 +[ OK ] restore_test.restore (108926 ms) +[----------] 1 test from restore_test (108927 ms total) + +[----------] Global test environment tear-down +[==========] 1 test from 1 test case ran. (108927 ms total) +[ PASSED ] 1 test. +dsn exit with code 0 +W2020-12-27 22:33:44.839 (1609079624839785303 5c2cf) unknown.io-thrd.377551:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +W2020-12-27 22:33:44.839 (1609079624839807409 5c2cf) unknown.io-thrd.377551:overwrite default thread pool for task RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK from THREAD_POOL_META_SERVER to THREAD_POOL_DEFAULT +D2020-12-27 22:33:44.8419 (1609079624841958470 5c2cf) unknown.io-thrd.377551: process(377551) start: 1609079624839, date: 2020-12-27 22:33:44.839 +D2020-12-27 22:33:44.842 (1609079624842217275 5c2cf) unknown.io-thrd.377551: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:33:44.8421 (1609079624842222504 5c2cf) unknown.io-thrd.377551: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_DSN ... +D2020-12-27 22:33:44.842 (1609079624842267942 5c2cf) unknown.io-thrd.377551: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:33:44.8421 (1609079624842359261 5c2cf) unknown.io-thrd.377551: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_DSN ... 
+D2020-12-27 22:33:44.842 (1609079624842454582 5c2cf) unknown.io-thrd.377551: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:33:44.8421 (1609079624842458345 5c2cf) unknown.io-thrd.377551: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:33:44.842 (1609079624842488949 5c2cf) unknown.io-thrd.377551: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:33:44.8421 (1609079624842567629 5c2cf) unknown.io-thrd.377551: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_THRIFT ... +D2020-12-27 22:33:44.842 (1609079624842661498 5c2cf) unknown.io-thrd.377551: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:33:44.8421 (1609079624842665132 5c2cf) unknown.io-thrd.377551: [mimic] network client started at port 1, channel = RPC_CHANNEL_TCP, fmt = NET_HDR_RAW ... +D2020-12-27 22:33:44.842 (1609079624842694632 5c2cf) unknown.io-thrd.377551: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +D2020-12-27 22:33:44.8421 (1609079624842767581 5c2cf) unknown.io-thrd.377551: [mimic] network client started at port 1, channel = RPC_CHANNEL_UDP, fmt = NET_HDR_RAW ... +D2020-12-27 22:33:44.842 (1609079624842773371 5c2cf) unknown.io-thrd.377551: === service_node=[mimic], primary_address=[192.168.123.119:1] === +W2020-12-27 22:33:44.8431 (1609079624843347075 5c2e8) mimic.io-thrd.377576: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:33:44.8448 (1609079624844821546 5c2cf) unknown.io-thrd.377551: [mimic] thread pool [default] started, pool_code = THREAD_POOL_DEFAULT, worker_count = 8, worker_share_core = true, partitioned = false, ... +W2020-12-27 22:33:44.8441 (1609079624844875819 5c2f1) mimic.io-thrd.377585: You may need priviledge to set thread priority. errno = 1 +D2020-12-27 22:33:44.8454 (1609079624845026796 5c2cf) unknown.io-thrd.377551: [mimic] thread pool [THREAD_POOL_META_SERVER] started, pool_code = THREAD_POOL_META_SERVER, worker_count = 4, worker_share_core = true, partitioned = false, ... +D2020-12-27 22:33:44.845 (1609079624845105283 5c2cf) unknown.io-thrd.377551: MainThread: app_name=temp +Note: Google Test filter = recovery_test.* +[==========] Running 1 test from 1 test case. +[----------] Global test environment set-up. +[----------] 1 test from recovery_test +[ RUN ] recovery_test.recovery +============ +start global_env() +meta1 pid: 375332 +meta1 dir: /home/smilencer/Code/incubator-pegasus/onebox/meta1 +project root: /home/smilencer/Code/incubator-pegasus +working dir: /home/smilencer/Code/incubator-pegasus/src/builder/test/function_test +D2020-12-27 22:33:44.860 (1609079624860404351 5c2cf) unknown.io-thrd.377551: get ip address from network interface(wlp0s20f3), addr(192.168.123.119), input interface("") +get ip: 192.168.123.119 +JMX enabled by default +Using config: /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/bin/../conf/zoo.cfg +Stopping zookeeper ... STOPPED +Clearing zookeeper ... CLEARED +JMX enabled by default +Using config: /home/smilencer/Code/incubator-pegasus/.zk_install/zookeeper-3.4.6/bin/../conf/zoo.cfg +Starting zookeeper ... 
STARTED +Zookeeper started at port 22181 +starting server +cd /home/smilencer/Code/incubator-pegasus/onebox/meta1 && /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta &>result & +smilenc+ 377698 377636 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1 && /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica &>result & +smilenc+ 377707 377636 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica2 && /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica &>result & +smilenc+ 377747 377636 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +cd /home/smilencer/Code/incubator-pegasus/onebox/replica3 && /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica &>result & +smilenc+ 377805 377636 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +sleep for a while to wait the new onebox start +D2020-12-27 22:33:51.1401 (1609079631140949983 5c2cf) mimic.io-thrd.377551: client session created, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:33:51.1411 (1609079631141178464 5c2d1) mimic.io-thrd.377553: client session connected, remote_server = 127.0.0.1:34601, current_count = 1 +create app test_table succeed, waiting for app ready +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (2/4) +test_table not ready yet, still waiting... (2/4) +test_table not ready yet, still waiting... (2/4) +test_table not ready yet, still waiting... (2/4) +test_table not ready yet, still waiting... (2/4) +test_table is ready now: (4/4) +test_table is ready now! 
+write 2048 keys +D2020-12-27 22:34:09.1502 (1609079649150381965 5c2eb) mimic.default2.0100c2cf00030021: client session created, remote_server = 192.168.123.119:34803, current_count = 2 +D2020-12-27 22:34:09.1502 (1609079649150626197 5c2d1) mimic.io-thrd.377553: client session connected, remote_server = 192.168.123.119:34803, current_count = 2 +D2020-12-27 22:34:09.1513 (1609079649151685937 5c2cf) mimic.io-thrd.377551: client session created, remote_server = 192.168.123.119:34801, current_count = 3 +D2020-12-27 22:34:09.1513 (1609079649151956140 5c2d0) mimic.io-thrd.377552: client session connected, remote_server = 192.168.123.119:34801, current_count = 3 +D2020-12-27 22:34:09.1534 (1609079649153141092 5c2cf) mimic.io-thrd.377551: client session created, remote_server = 192.168.123.119:34802, current_count = 4 +D2020-12-27 22:34:09.1534 (1609079649153285664 5c2d1) mimic.io-thrd.377553: client session connected, remote_server = 192.168.123.119:34802, current_count = 4 +>>>>> test basic recovery <<<<< +smilenc+ 377698 1635 0 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +INFO: meta@1 stopped +smilenc+ 377707 1635 4 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +INFO: replica@1 stopped +smilenc+ 377747 1635 4 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +INFO: replica@2 stopped +smilenc+ 377805 1635 4 22:33 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +INFO: replica@3 stopped +D2020-12-27 22:34:09.676 (1609079649676394547 5c2d2) mimic.io-thrd.377554: asio read from 127.0.0.1:34601 failed: End of file +D2020-12-27 22:34:09.6763 (1609079649676409990 5c2d2) mimic.io-thrd.377554: client session disconnected, remote_server = 127.0.0.1:34601, current_count = 3 +D2020-12-27 22:34:09.714 (1609079649714954433 5c2d3) mimic.io-thrd.377555: asio read from 192.168.123.119:34801 failed: End of file +D2020-12-27 22:34:09.7142 (1609079649714962073 5c2d3) mimic.io-thrd.377555: client session disconnected, remote_server = 192.168.123.119:34801, current_count = 2 +D2020-12-27 22:34:09.748 (1609079649748081359 5c2d3) mimic.io-thrd.377555: asio read from 192.168.123.119:34802 failed: End of file +D2020-12-27 22:34:09.7481 (1609079649748096831 5c2d3) mimic.io-thrd.377555: client session disconnected, remote_server = 192.168.123.119:34802, current_count = 1 +sleep for a while to wait the socket to destroy +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1 && /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica &>result & +smilenc+ 378900 378891 0 22:34 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +INFO: replica@1 started +cd /home/smilencer/Code/incubator-pegasus/onebox/replica2 && /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica &>result & +smilenc+ 378962 378905 0 22:34 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +INFO: replica@2 started +cd /home/smilencer/Code/incubator-pegasus/onebox/replica3 && /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica &>result & +smilenc+ 379201 379045 0 22:34 pts/1 00:00:00 
/home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +INFO: replica@3 started +D2020-12-27 22:34:09.780 (1609079649780608715 5c2d3) mimic.io-thrd.377555: asio read from 192.168.123.119:34803 failed: End of file +D2020-12-27 22:34:09.7800 (1609079649780643471 5c2d3) mimic.io-thrd.377555: client session disconnected, remote_server = 192.168.123.119:34803, current_count = 0 +sleep for a while to wait the replica to start +cd /home/smilencer/Code/incubator-pegasus/onebox/meta1 && /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta &>result & +smilenc+ 380733 380724 0 22:34 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +INFO: meta@1 started +sleep for a while to wait the meta to come to alive +Wait seconds: 30 +Skip bad nodes: false +Skip lost partitions: false +Node list: +============================= +192.168.123.119:34801 +192.168.123.119:34802 +192.168.123.119:34803 +============================= +D2020-12-27 22:34:34.9411 (1609079674941331274 5c2cf) mimic.io-thrd.377551: client session created, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:34:34.9411 (1609079674941586737 5c2d1) mimic.io-thrd.377553: client session connected, remote_server = 127.0.0.1:34601, current_count = 1 +Recover result: ERR_OK +Wait seconds: 30 +Skip bad nodes: false +Skip lost partitions: false +Node list: +============================= +192.168.123.119:34801 +192.168.123.119:34802 +192.168.123.119:34803 +============================= +Recover result: ERR_SERVICE_ALREADY_RUNNING +create app test_table succeed, waiting for app ready +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table is ready now: (4/4) +test_table is ready now! 
+D2020-12-27 22:34:56.9962 (1609079696996090566 5c2cf) mimic.io-thrd.377551: client session created, remote_server = 192.168.123.119:34803, current_count = 2 +D2020-12-27 22:34:56.9962 (1609079696996294784 5c2d3) mimic.io-thrd.377555: client session connected, remote_server = 192.168.123.119:34803, current_count = 2 +D2020-12-27 22:34:56.9963 (1609079696996805809 5c2e9) mimic.default0.0100c2cf0003104b: clear partition configuration cache 3.3 due to access failure ERR_INVALID_STATE +D2020-12-27 22:34:57.5 (1609079697005905184 5c2ef) mimic.default6.0101000100000002: client session created, remote_server = 192.168.123.119:34801, current_count = 3 +D2020-12-27 22:34:57.6 (1609079697006118108 5c2d0) mimic.io-thrd.377552: client session connected, remote_server = 192.168.123.119:34801, current_count = 3 +D2020-12-27 22:34:57.7 (1609079697007329811 5c2cf) mimic.io-thrd.377551: client session created, remote_server = 192.168.123.119:34802, current_count = 4 +D2020-12-27 22:34:57.7 (1609079697007478100 5c2d3) mimic.io-thrd.377555: client session connected, remote_server = 192.168.123.119:34802, current_count = 4 +D2020-12-27 22:34:57.7 (1609079697007881847 5c2f0) mimic.default7.0100c2cf0003104f: clear partition configuration cache 3.1 due to access failure ERR_INVALID_STATE +D2020-12-27 22:34:57.17 (1609079697017736348 5c2ea) mimic.default1.0100c2cf00031051: clear partition configuration cache 3.0 due to access failure ERR_INVALID_STATE +>>>>> test recovery from subset of all nodes <<<<< +smilenc+ 380733 1635 0 22:34 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +INFO: meta@1 stopped +smilenc+ 378900 1635 1 22:34 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +INFO: replica@1 stopped +smilenc+ 378962 1635 1 22:34 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +INFO: replica@2 stopped +smilenc+ 379201 1635 1 22:34 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +INFO: replica@3 stopped +D2020-12-27 22:34:57.175 (1609079697175445985 5c2d2) mimic.io-thrd.377554: asio read from 127.0.0.1:34601 failed: End of file +D2020-12-27 22:34:57.1753 (1609079697175457940 5c2d2) mimic.io-thrd.377554: client session disconnected, remote_server = 127.0.0.1:34601, current_count = 3 +D2020-12-27 22:34:57.214 (1609079697214975569 5c2d1) mimic.io-thrd.377553: asio read from 192.168.123.119:34801 failed: End of file +D2020-12-27 22:34:57.2142 (1609079697214988333 5c2d1) mimic.io-thrd.377553: client session disconnected, remote_server = 192.168.123.119:34801, current_count = 2 +D2020-12-27 22:34:57.246 (1609079697246849843 5c2d1) mimic.io-thrd.377553: asio read from 192.168.123.119:34802 failed: End of file +D2020-12-27 22:34:57.2461 (1609079697246864491 5c2d1) mimic.io-thrd.377553: client session disconnected, remote_server = 192.168.123.119:34802, current_count = 1 +sleep for a while to wait the socket to destroy +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1 && /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica &>result & +smilenc+ 381076 381067 0 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +INFO: replica@1 started +cd /home/smilencer/Code/incubator-pegasus/onebox/replica2 && 
/home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica &>result & +smilenc+ 381156 381081 0 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +INFO: replica@2 started +cd /home/smilencer/Code/incubator-pegasus/onebox/replica3 && /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica &>result & +smilenc+ 381278 381221 0 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +INFO: replica@3 started +D2020-12-27 22:34:57.277 (1609079697277042965 5c2d2) mimic.io-thrd.377554: asio read from 192.168.123.119:34803 failed: End of file +D2020-12-27 22:34:57.2770 (1609079697277052737 5c2d2) mimic.io-thrd.377554: client session disconnected, remote_server = 192.168.123.119:34803, current_count = 0 +sleep for a while to wait the replica to start +cd /home/smilencer/Code/incubator-pegasus/onebox/meta1 && /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta &>result & +smilenc+ 382912 382903 0 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +INFO: meta@1 started +sleep for a while to wait the meta to come to alive +Wait seconds: 30 +Skip bad nodes: false +Skip lost partitions: false +Node list: +============================= +192.168.123.119:34801 +192.168.123.119:34802 +============================= +D2020-12-27 22:35:22.4191 (1609079722419452332 5c2cf) mimic.io-thrd.377551: client session created, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:35:22.4191 (1609079722419664931 5c2d1) mimic.io-thrd.377553: client session connected, remote_server = 127.0.0.1:34601, current_count = 1 +Recover result: ERR_OK +============================= +WARNING: partition(1.0) only collects 2/3 of replicas, may lost data +WARNING: partition(1.1) only collects 2/3 of replicas, may lost data +WARNING: partition(1.2) only collects 2/3 of replicas, may lost data +WARNING: partition(1.3) only collects 2/3 of replicas, may lost data +WARNING: partition(2.0) only collects 2/3 of replicas, may lost data +WARNING: partition(2.1) only collects 2/3 of replicas, may lost data +WARNING: partition(2.2) only collects 2/3 of replicas, may lost data +WARNING: partition(2.3) only collects 2/3 of replicas, may lost data +WARNING: partition(2.4) only collects 2/3 of replicas, may lost data +WARNING: partition(2.5) only collects 2/3 of replicas, may lost data +WARNING: partition(2.6) only collects 2/3 of replicas, may lost data +WARNING: partition(2.7) only collects 2/3 of replicas, may lost data +WARNING: partition(3.0) only collects 2/3 of replicas, may lost data +WARNING: partition(3.1) only collects 2/3 of replicas, may lost data +WARNING: partition(3.2) only collects 2/3 of replicas, may lost data +WARNING: partition(3.3) only collects 2/3 of replicas, may lost data +============================= +create app test_table succeed, waiting for app ready +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... 
(0/4) +test_table not ready yet, still waiting... (0/4) +test_table is ready now: (4/4) +test_table is ready now! +D2020-12-27 22:35:44.4312 (1609079744431119377 5c2cf) mimic.io-thrd.377551: client session created, remote_server = 192.168.123.119:34801, current_count = 2 +D2020-12-27 22:35:44.4312 (1609079744431244147 5c2d0) mimic.io-thrd.377552: client session connected, remote_server = 192.168.123.119:34801, current_count = 2 +D2020-12-27 22:35:44.4313 (1609079744431442354 5c2f0) mimic.default7.0100c2cf00032072: clear partition configuration cache 3.3 due to access failure ERR_INVALID_STATE +D2020-12-27 22:35:44.4403 (1609079744440145651 5c2ea) mimic.default1.0101000500000003: client session created, remote_server = 192.168.123.119:34802, current_count = 3 +D2020-12-27 22:35:44.4403 (1609079744440403101 5c2d3) mimic.io-thrd.377555: client session connected, remote_server = 192.168.123.119:34802, current_count = 3 +D2020-12-27 22:35:44.4412 (1609079744441128009 5c2ef) mimic.default6.0100c2cf00032074: clear partition configuration cache 3.2 due to access failure ERR_INVALID_STATE +D2020-12-27 22:35:44.4501 (1609079744450824641 5c2f0) mimic.default7.0100c2cf00032076: clear partition configuration cache 3.1 due to access failure ERR_INVALID_STATE +D2020-12-27 22:35:44.4600 (1609079744460592580 5c2ef) mimic.default6.0100c2cf00032078: clear partition configuration cache 3.0 due to access failure ERR_INVALID_STATE +>>>>> test recovery, some partitions have been lost <<<<< +smilenc+ 382912 1635 0 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +INFO: meta@1 stopped +smilenc+ 381076 1635 2 22:35 pts/1 00:00:01 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +INFO: replica@1 stopped +smilenc+ 381156 1635 2 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +INFO: replica@2 stopped +smilenc+ 381278 1635 1 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +INFO: replica@3 stopped +D2020-12-27 22:35:44.633 (1609079744633145901 5c2d2) mimic.io-thrd.377554: asio read from 127.0.0.1:34601 failed: End of file +D2020-12-27 22:35:44.6332 (1609079744633156714 5c2d2) mimic.io-thrd.377554: client session disconnected, remote_server = 127.0.0.1:34601, current_count = 2 +D2020-12-27 22:35:44.671 (1609079744671910112 5c2d3) mimic.io-thrd.377555: asio read from 192.168.123.119:34801 failed: End of file +D2020-12-27 22:35:44.6711 (1609079744671922176 5c2d3) mimic.io-thrd.377555: client session disconnected, remote_server = 192.168.123.119:34801, current_count = 1 +D2020-12-27 22:35:44.701 (1609079744701676533 5c2d3) mimic.io-thrd.377555: asio read from 192.168.123.119:34802 failed: End of file +D2020-12-27 22:35:44.7010 (1609079744701687375 5c2d3) mimic.io-thrd.377555: client session disconnected, remote_server = 192.168.123.119:34802, current_count = 0 +sleep for a while to wait the socket to destroy +cd /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/reps && rm -rf 2.0.pegasus +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1/data/replica/reps && rm -rf 2.1.pegasus +cd /home/smilencer/Code/incubator-pegasus/onebox/replica2/data/replica/reps && rm -rf 2.2.pegasus +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1/data/replica/reps && rm -rf 2.3.pegasus +cd /home/smilencer/Code/incubator-pegasus/onebox/replica1 && 
/home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica &>result & +smilenc+ 384017 384008 0 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica1/pegasus_server config.ini -app_list replica +INFO: replica@1 started +cd /home/smilencer/Code/incubator-pegasus/onebox/replica2 && /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica &>result & +smilenc+ 384079 384022 0 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica2/pegasus_server config.ini -app_list replica +INFO: replica@2 started +cd /home/smilencer/Code/incubator-pegasus/onebox/replica3 && /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica &>result & +smilenc+ 384219 384162 0 22:35 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/replica3/pegasus_server config.ini -app_list replica +INFO: replica@3 started +sleep for a while to wait the replica to start +cd /home/smilencer/Code/incubator-pegasus/onebox/meta1 && /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta &>result & +smilenc+ 385715 385706 0 22:36 pts/1 00:00:00 /home/smilencer/Code/incubator-pegasus/onebox/meta1/pegasus_server config.ini -app_list meta +INFO: meta@1 started +sleep for a while to wait the meta to come to alive +Wait seconds: 30 +Skip bad nodes: false +Skip lost partitions: false +Node list: +============================= +192.168.123.119:34801 +192.168.123.119:34802 +192.168.123.119:34803 +============================= +D2020-12-27 22:36:09.8961 (1609079769896832628 5c2cf) mimic.io-thrd.377551: client session created, remote_server = 127.0.0.1:34601, current_count = 1 +D2020-12-27 22:36:09.8971 (1609079769897026928 5c2d1) mimic.io-thrd.377553: client session connected, remote_server = 127.0.0.1:34601, current_count = 1 +Recover result: ERR_OK +============================= +WARNING: partition(2.0) only collects 2/3 of replicas, may lost data +WARNING: partition(2.1) only collects 2/3 of replicas, may lost data +WARNING: partition(2.2) only collects 2/3 of replicas, may lost data +WARNING: partition(2.3) only collects 2/3 of replicas, may lost data +============================= +create app test_table succeed, waiting for app ready +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (0/4) +test_table not ready yet, still waiting... (2/4) +test_table not ready yet, still waiting... (2/4) +test_table not ready yet, still waiting... (3/4) +test_table not ready yet, still waiting... (3/4) +test_table not ready yet, still waiting... (3/4) +test_table is ready now: (4/4) +test_table is ready now! 
[... onebox function-test console output elided here (repeated zookeeper/onebox restarts, "Sleeped for N seconds" waits, session logs); recovery_test.recovery and both bulk_load_test cases pass, then test_detect_hotspot.hotspot_exist fails its assertion at test_detect_hotspot.cpp:123 (find_hotkey is false), as summarized in the remaining log below ...]
+D2020-12-27 22:42:47.525 (1609080167525788870 60c16) mimic.io-thrd.396310: asio read from 127.0.0.1:34601 failed: End of file
+D2020-12-27 22:42:47.5253 (1609080167525802361 60c16) mimic.io-thrd.396310: client session disconnected, remote_server = 127.0.0.1:34601, current_count = 3
+D2020-12-27 22:42:47.537 (1609080167537883378 60c18) mimic.io-thrd.396312: asio read from 192.168.123.119:34801 failed: End of file
+D2020-12-27 22:42:47.5372 (1609080167537892746 60c18) mimic.io-thrd.396312: client session disconnected, remote_server = 192.168.123.119:34801, current_count = 2
+D2020-12-27 22:42:47.540 (1609080167540481894 60c18) mimic.io-thrd.396312: asio read from 192.168.123.119:34802 failed: End of file
+D2020-12-27 22:42:47.5401 (1609080167540489884 60c18) mimic.io-thrd.396312: client session disconnected, remote_server = 192.168.123.119:34802, current_count = 1
+D2020-12-27 22:42:47.540 (1609080167540516572 60c17) mimic.io-thrd.396311: asio read from 192.168.123.119:34803 failed: End of file
+D2020-12-27 22:42:47.5400 (1609080167540522552 60c17) mimic.io-thrd.396311: client session disconnected, remote_server = 192.168.123.119:34803, current_count = 0
+[ FAILED ] test_detect_hotspot.hotspot_exist (79085 ms)
+[----------] 1 test from test_detect_hotspot (79085 ms total)
+
+[----------] Global test environment tear-down
+[==========] 1 test from 1 test case ran. (79085 ms total)
+[ PASSED ] 0 tests.
+[ FAILED ] 1 test, listed below:
+[ FAILED ] test_detect_hotspot.hotspot_exist
+
+ 1 FAILED TEST
+dsn exit with code 1
+run test test_detect_hotspot load failed: pegasus_function_test config.ini temp
+run test "pegasus_function_test" in /home/smilencer/Code/incubator-pegasus/src/builder/bin/pegasus_function_test failed
diff --git a/rdsn b/rdsn
index 29c6ccca84..0b07a633aa 160000
--- a/rdsn
+++ b/rdsn
@@ -1 +1 @@
-Subproject commit 29c6ccca84873f4735c690dc8a616b8ef93377eb
+Subproject commit 0b07a633aae13300f731558718fd279a0ff2d7ae
diff --git a/src/server/config.min.ini b/src/server/config.min.ini
index 0094879713..76612b4ff7 100644
--- a/src/server/config.min.ini
+++ b/src/server/config.min.ini
@@ -142,6 +142,7 @@
 available_detect_app = @APP_NAME@
 available_detect_alert_script_dir = ./package/bin
 usage_stat_app = stat
+enable_detect_hotkey = false
 
 [pegasus.clusters]
 onebox = @LOCAL_IP@:34601,@LOCAL_IP@:34602,@LOCAL_IP@:34603
diff --git a/src/server/hotspot_partition_calculator.cpp b/src/server/hotspot_partition_calculator.cpp
index 2bd6b9d7eb..8c4f0485c2 100644
--- a/src/server/hotspot_partition_calculator.cpp
+++ b/src/server/hotspot_partition_calculator.cpp
@@ -50,7 +50,7 @@ DSN_DEFINE_int32("pegasus.collector",
 DSN_DEFINE_int32("pegasus.collector",
                  occurrence_threshold,
-                 100,
+                 3,
                  "hot paritiotion occurrence times' threshold to send rpc to detect hotkey");
 
 void hotspot_partition_calculator::data_aggregate(const std::vector<row_data> &partition_stats)
@@ -170,7 +170,7 @@ void hotspot_partition_calculator::detect_hotkey_in_hotpartition(int data_type)
     }
 }
 
-/*static*/ void hotspot_partition_calculator::send_detect_hotkey_request(
+void hotspot_partition_calculator::send_detect_hotkey_request(
     const std::string &app_name,
     const uint64_t partition_index,
     const dsn::replication::hotkey_type::type hotkey_type,
diff --git a/src/server/test/hotspot_partition_test.cpp b/src/server/test/hotspot_partition_test.cpp
index 645fa8abf1..31d1e87c76 100644
--- a/src/server/test/hotspot_partition_test.cpp
+++ b/src/server/test/hotspot_partition_test.cpp
@@ -25,6 +25,7 @@
 namespace pegasus {
 namespace server {
 DSN_DECLARE_int32(occurrence_threshold);
+DSN_DECLARE_int32(enable_detect_hotkey);
 
 class hotspot_partition_test : public pegasus_server_test_base
 {
@@ -33,8 +34,13 @@ class hotspot_partition_test : public pegasus_server_test_base
     {
         dsn::fail::setup();
         dsn::fail::cfg("send_detect_hotkey_request", "return()");
+        FLAGS_enable_detect_hotkey = true;
     };
-    ~hotspot_partition_test() { dsn::fail::teardown(); }
+    ~hotspot_partition_test()
+    {
+        FLAGS_enable_detect_hotkey = false;
+        dsn::fail::teardown();
+    }
 
     hotspot_partition_calculator calculator;
 
@@ -126,6 +132,9 @@ TEST_F(hotspot_partition_test, hotspot_partition_policy)
 
 TEST_F(hotspot_partition_test, send_detect_hotkey_request)
 {
+    auto default_occurrence_threshold = FLAGS_occurrence_threshold;
+    FLAGS_occurrence_threshold = 100;
+
     const int READ_HOT_PARTITION = 7;
     const int WRITE_HOT_PARTITION = 0;
     std::vector<row_data> test_rows = generate_row_data();
@@ -139,6 +148,8 @@ TEST_F(hotspot_partition_test, send_detect_hotkey_request)
     expect_result[READ_HOT_PARTITION][0] = FLAGS_occurrence_threshold - back_to_normal;
     expect_result[WRITE_HOT_PARTITION][1] = FLAGS_occurrence_threshold - back_to_normal;
     aggregate_analyse_data(generate_row_data(), expect_result, back_to_normal);
+
+    FLAGS_occurrence_threshold = default_occurrence_threshold;
 }
 
 } // namespace server
diff --git a/src/shell/commands/detect_hotkey.cpp b/src/shell/commands/detect_hotkey.cpp
index d2ffed7417..fd4bbe1845 100644
--- a/src/shell/commands/detect_hotkey.cpp
+++ b/src/shell/commands/detect_hotkey.cpp
@@ -40,6 +40,8 @@ bool generate_hotkey_request(dsn::replication::detect_hotkey_request &req,
         req.action = dsn::replication::detect_action::START;
     } else if (!strcasecmp(hotkey_action.c_str(), "stop")) {
         req.action = dsn::replication::detect_action::STOP;
+    } else if (!strcasecmp(hotkey_action.c_str(), "query")) {
+        req.action = dsn::replication::detect_action::QUERY;
     } else {
         err_info = fmt::format(
             "\"{}\" is an invalid hotkey detect action (should be 'start' or 'stop')\n",
diff --git a/src/test/function_test/run.sh b/src/test/function_test/run.sh
index 3aa5d923f7..334b590351 100755
--- a/src/test/function_test/run.sh
+++ b/src/test/function_test/run.sh
@@ -71,4 +71,6 @@ if [ $on_travis == "NO" ]; then
     exit_if_fail $? "run test recovery failed: $test_case $config_file $table_name"
     GTEST_OUTPUT="xml:$REPORT_DIR/bulk_load.xml" GTEST_FILTER="bulk_load_test.*" ./$test_case $config_file $table_name
     exit_if_fail $? "run test bulk load failed: $test_case $config_file $table_name"
+    GTEST_OUTPUT="xml:$REPORT_DIR/test_detect_hotspot.xml" GTEST_FILTER="test_detect_hotspot.*" ./$test_case $config_file $table_name
+    exit_if_fail $? "run test test_detect_hotspot load failed: $test_case $config_file $table_name"
 fi
diff --git a/src/test/function_test/test_detect_hotspot.cpp b/src/test/function_test/test_detect_hotspot.cpp
new file mode 100644
index 0000000000..be063353a3
--- /dev/null
+++ b/src/test/function_test/test_detect_hotspot.cpp
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "base/pegasus_const.h"
+#include "global_env.h"
+
+using namespace ::dsn;
+using namespace ::dsn::replication;
+using namespace pegasus;
+
+static std::string generate_hash_key_by_random(bool is_hotkey, int probability = 100)
+{
+    if (is_hotkey && (dsn::rand::next_u32(100) < probability)) {
+        return "ThisisahotkeyThisisahotkey";
+    }
+    static const std::string chars("abcdefghijklmnopqrstuvwxyz"
+                                    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                                    "1234567890"
+                                    "!@#$%^&*()"
+                                    "`~-_=+[{]{\\|;:'\",<.>/? ");
+    std::string result;
+    for (int i = 0; i < 20; i++) {
+        result += chars[dsn::rand::next_u32(chars.size())];
+    }
+    return result;
+}
+
+class test_detect_hotspot : public testing::Test
+{
+public:
+    virtual void SetUp() override
+    {
+        chdir(global_env::instance()._pegasus_root.c_str());
+        system("pwd");
+
+        system("./run.sh clear_onebox");
+        system("cp src/server/config.min.ini config-server-test-hotspot.ini");
+        system("sed -i \"/^\\s*enable_detect_hotkey/c enable_detect_hotkey = "
+               "true\" config-server-test-hotspot.ini");
+        system("./run.sh start_onebox -c -w --config_path config-server-test-hotspot.ini");
+        std::this_thread::sleep_for(std::chrono::seconds(3));
+
+        std::vector<dsn::rpc_address> meta_list;
+        replica_helper::load_meta_servers(
+            meta_list, PEGASUS_CLUSTER_SECTION_NAME.c_str(), "single_master_cluster");
+
+        ddl_client = std::make_shared<replication_ddl_client>(meta_list);
+        pg_client =
+            pegasus::pegasus_client_factory::get_client("single_master_cluster", app_name.c_str());
+
+        auto err = ddl_client->create_app(app_name.c_str(), "pegasus", 8, 3, {}, false);
+        ASSERT_EQ(dsn::ERR_OK, err);
+    }
+
+    virtual void TearDown() override
+    {
+        chdir(global_env::instance()._pegasus_root.c_str());
+        system("./run.sh clear_onebox");
+        system("./run.sh start_onebox -w");
+        chdir(global_env::instance()._working_dir.c_str());
+    }
+
+    void write_hotspot_data()
+    {
+        int64_t start = dsn_now_s();
+        int err = PERR_OK;
+        ASSERT_NE(pg_client, nullptr);
+
+        for (int i = 0; dsn_now_s() - start < max_detection_second; ++i %= 1000) {
+            std::string index = std::to_string(i);
+            std::string h_key = generate_hash_key_by_random(true, 50);
+            std::string s_key = "sortkey_" + index;
+            std::string value = "value_" + index;
+            err = pg_client->set(h_key, s_key, value);
+            ASSERT_EQ(err, PERR_OK);
+        }
+
+        int32_t app_id;
+        int32_t partition_count;
+        std::vector<dsn::partition_configuration> partitions;
+        ddl_client->list_app(app_name, app_id, partition_count, partitions);
+        dsn::replication::detect_hotkey_response resp;
+        dsn::replication::detect_hotkey_request req;
+        req.type = dsn::replication::hotkey_type::type::WRITE;
+        req.action = dsn::replication::detect_action::QUERY;
+        bool find_hotkey = false;
+        for (int partition_index = 0; partition_index < partitions.size(); partition_index++) {
+            req.pid = dsn::gpid(app_id, partition_index);
+            auto errinfo =
+                ddl_client->detect_hotkey(partitions[partition_index].primary, req, resp);
+            ASSERT_EQ(errinfo, dsn::ERR_OK);
+            std::cout << resp.err_hint << std::endl;
+            if (!resp.hotkey_result.empty()) {
+                find_hotkey = true;
+                break;
+            }
+        }
+
+        ASSERT_TRUE(find_hotkey);
+    }
+
+    const std::string app_name = "hotspot_test";
+    const int64_t max_detection_second = 180;
+    std::shared_ptr<replication_ddl_client> ddl_client;
+    pegasus::pegasus_client *pg_client;
+};
+
+TEST_F(test_detect_hotspot, hotspot_exist)
+{
+    std::cout << "start testing detecting hotspot..." << std::endl;
+    write_hotspot_data();
+    std::cout << "hotspot passed....." << std::endl;
+}

From e7edefecd5f531f771ba7b8c8d781dab098f2b16 Mon Sep 17 00:00:00 2001
From: tangyanzhao
Date: Tue, 29 Dec 2020 22:15:29 +0800
Subject: [PATCH 06/19] update

---
 debug.txt                                   |  796 +--
 debug2.txt                                  | 4786 +++--
 rdsn                                        |    2 +-
 src/server/hotkey_collector.cpp             |    4 +-
 src/server/hotspot_partition_calculator.cpp |    7 +-
 .../function_test/test_detect_hotspot.cpp   |   56 +-
 6 files changed, 907 insertions(+), 4744 deletions(-)

diff --git a/debug.txt b/debug.txt
index 29c8a146c5..ea0c39b2d3 100644
--- a/debug.txt
+++ b/debug.txt
@@ -1,634 +1,162 @@
[... collector debug-log lines deleted by this hunk elided (rpc/session setup, info_collector and available_detector stats, ending with hotspot_partition_calculator reporting "Find a write hot partition hotspot_test.3") ...]
3 1 4 3 -D2020-12-29 16:54:14.6155 (1609232054615918414 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start write hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 -D2020-12-29 16:54:14.615 (1609232054615956708 1c0745) collector.default0.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.39972, total_write_qps = 4120.29 -D2020-12-29 16:54:16.530 (1609232056530094910 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(119), recent_day_fail_times(0), recent_hour_detect_times(119), recent_hour_fail_times(0) recent_minute_detect_times(119), recent_minute_fail_times(0) -D2020-12-29 16:54:19.530 (1609232059530482028 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(127), recent_day_fail_times(0), recent_hour_detect_times(127), recent_hour_fail_times(0) recent_minute_detect_times(127), recent_minute_fail_times(0) -D2020-12-29 16:54:20.533 (1609232060533237368 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 313 MB, memused_res = 45MB -D2020-12-29 16:54:20.533 (1609232060533423345 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232060533), last_report_time_ms(1609232050532) -D2020-12-29 16:54:22.530 (1609232062530545250 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(135), recent_day_fail_times(0), recent_hour_detect_times(135), recent_hour_fail_times(0) recent_minute_detect_times(135), recent_minute_fail_times(0) -D2020-12-29 16:54:22.534 (1609232062534119162 1c0748) collector.default3.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:54:24.615 (1609232064615988492 1c0748) collector.default3.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -E2020-12-29 16:54:24.631 (1609232064631394589 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a write hot partition hotspot_test.3 -D2020-12-29 16:54:24.631 (1609232064631420100 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 
3 1 4 4 -D2020-12-29 16:54:24.6315 (1609232064631695440 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start write hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 -E2020-12-29 16:54:24.6315 (1609232064631704988 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY still detecting replication::hotkey_type::WRITE hotkey, state is hotkey_collector_state::FINE_DETECTING -D2020-12-29 16:54:24.631 (1609232064631755274 1c0748) collector.default3.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 3.19963, total_write_qps = 3911.42 -D2020-12-29 16:54:25.530 (1609232065530622665 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(143), recent_day_fail_times(0), recent_hour_detect_times(143), recent_hour_fail_times(0) recent_minute_detect_times(143), recent_minute_fail_times(0) -D2020-12-29 16:54:28.530 (1609232068530686241 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(151), recent_day_fail_times(0), recent_hour_detect_times(151), recent_hour_fail_times(0) recent_minute_detect_times(151), recent_minute_fail_times(0) -D2020-12-29 16:54:30.533 (1609232070533493342 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 313 MB, memused_res = 45MB -D2020-12-29 16:54:30.533 (1609232070533673481 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232070533), last_report_time_ms(1609232060533) -D2020-12-29 16:54:30.535 (1609232070535133845 1c0745) collector.default0.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:54:31.530 (1609232071530748664 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(159), recent_day_fail_times(0), recent_hour_detect_times(159), recent_hour_fail_times(0) recent_minute_detect_times(159), recent_minute_fail_times(0) -D2020-12-29 16:54:34.528 (1609232074528402971 1c0749) collector.default4.0101000000000005: available_detector.cpp:461:on_minute_report(): start to report on new minute, last_minute = 2020-12-29 16:52 -D2020-12-29 16:54:34.530 (1609232074530799244 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(167), recent_day_fail_times(0), recent_hour_detect_times(167), recent_hour_fail_times(0) recent_minute_detect_times(7), recent_minute_fail_times(0) -D2020-12-29 16:54:34.631 (1609232074631789092 1c0747) collector.default2.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -E2020-12-29 16:54:34.653 (1609232074653136406 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a write hot partition hotspot_test.3 -D2020-12-29 16:54:34.653 (1609232074653177130 1c0747) collector.default2.0101000000000001: 
hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 3 1 4 5 -D2020-12-29 16:54:34.6535 (1609232074653438698 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start write hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 -E2020-12-29 16:54:34.6535 (1609232074653449780 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY replication::hotkey_type::WRITE hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection -D2020-12-29 16:54:34.653 (1609232074653506287 1c0747) collector.default2.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.39973, total_write_qps = 3763.28 -D2020-12-29 16:54:37.530 (1609232077530847670 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(175), recent_day_fail_times(0), recent_hour_detect_times(175), recent_hour_fail_times(0) recent_minute_detect_times(15), recent_minute_fail_times(0) -D2020-12-29 16:54:38.535 (1609232078535837808 1c0748) collector.default3.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:54:40.530 (1609232080530932715 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(183), recent_day_fail_times(0), recent_hour_detect_times(183), recent_hour_fail_times(0) recent_minute_detect_times(23), recent_minute_fail_times(0) -D2020-12-29 16:54:40.533 (1609232080533728600 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 46MB -D2020-12-29 16:54:40.533 (1609232080533913403 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232080533), last_report_time_ms(1609232070533) -D2020-12-29 16:54:43.531 (1609232083531036644 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(191), recent_day_fail_times(0), recent_hour_detect_times(191), recent_hour_fail_times(0) recent_minute_detect_times(31), recent_minute_fail_times(0) -D2020-12-29 16:54:44.653 (1609232084653538096 1c0745) collector.default0.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -E2020-12-29 16:54:44.674 (1609232084674700655 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a write hot partition hotspot_test.3 -D2020-12-29 16:54:44.674 (1609232084674728152 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 
3 1 4 6 -D2020-12-29 16:54:44.6755 (1609232084675030797 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start write hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 -E2020-12-29 16:54:44.6755 (1609232084675039144 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY replication::hotkey_type::WRITE hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection -D2020-12-29 16:54:44.675 (1609232084675095839 1c0745) collector.default0.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.5997, total_write_qps = 4009.64 -D2020-12-29 16:54:46.531 (1609232086531091481 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(199), recent_day_fail_times(0), recent_hour_detect_times(199), recent_hour_fail_times(0) recent_minute_detect_times(39), recent_minute_fail_times(0) -D2020-12-29 16:54:46.536 (1609232086536525983 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:54:49.531 (1609232089531139964 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(207), recent_day_fail_times(0), recent_hour_detect_times(207), recent_hour_fail_times(0) recent_minute_detect_times(47), recent_minute_fail_times(0) -D2020-12-29 16:54:50.533 (1609232090533980651 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 46MB -D2020-12-29 16:54:50.534 (1609232090534176643 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232090533), last_report_time_ms(1609232080533) -D2020-12-29 16:54:52.531 (1609232092531199685 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(215), recent_day_fail_times(0), recent_hour_detect_times(215), recent_hour_fail_times(0) recent_minute_detect_times(55), recent_minute_fail_times(0) -D2020-12-29 16:54:54.537 (1609232094537323039 1c0746) collector.default1.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:54:54.675 (1609232094675131148 1c0745) collector.default0.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -E2020-12-29 16:54:54.691 (1609232094691537674 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a write hot partition hotspot_test.3 -D2020-12-29 16:54:54.691 (1609232094691589090 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 
3 1 4 7 -D2020-12-29 16:54:54.6915 (1609232094691882779 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start write hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 -D2020-12-29 16:54:54.691 (1609232094691923617 1c0745) collector.default0.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.99965, total_write_qps = 4081.03 -D2020-12-29 16:54:55.531 (1609232095531257935 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(223), recent_day_fail_times(0), recent_hour_detect_times(223), recent_hour_fail_times(0) recent_minute_detect_times(63), recent_minute_fail_times(0) -D2020-12-29 16:54:58.531 (1609232098531298094 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(231), recent_day_fail_times(0), recent_hour_detect_times(231), recent_hour_fail_times(0) recent_minute_detect_times(71), recent_minute_fail_times(0) -D2020-12-29 16:55:00.534 (1609232100534255160 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 47MB -D2020-12-29 16:55:00.534 (1609232100534465128 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232100534), last_report_time_ms(1609232090533) -D2020-12-29 16:55:01.531 (1609232101531336624 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(239), recent_day_fail_times(0), recent_hour_detect_times(239), recent_hour_fail_times(0) recent_minute_detect_times(79), recent_minute_fail_times(0) -D2020-12-29 16:55:02.538 (1609232102538043862 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:55:04.531 (1609232104531421419 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(247), recent_day_fail_times(0), recent_hour_detect_times(247), recent_hour_fail_times(0) recent_minute_detect_times(87), recent_minute_fail_times(0) -D2020-12-29 16:55:04.691 (1609232104691959939 1c0747) collector.default2.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:55:04.712 (1609232104712983849 1c0747) collector.default2.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.39973, total_write_qps = 4061.14 -D2020-12-29 16:55:07.531 (1609232107531507321 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(255), recent_day_fail_times(0), recent_hour_detect_times(255), recent_hour_fail_times(0) recent_minute_detect_times(95), recent_minute_fail_times(0) -D2020-12-29 16:55:10.531 (1609232110531566919 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(263), 
recent_day_fail_times(0), recent_hour_detect_times(263), recent_hour_fail_times(0) recent_minute_detect_times(103), recent_minute_fail_times(0) -D2020-12-29 16:55:10.534 (1609232110534532093 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 47MB -D2020-12-29 16:55:10.534 (1609232110534711930 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232110534), last_report_time_ms(1609232100534) -D2020-12-29 16:55:10.538 (1609232110538775155 1c0745) collector.default0.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:55:13.531 (1609232113531662579 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(271), recent_day_fail_times(0), recent_hour_detect_times(271), recent_hour_fail_times(0) recent_minute_detect_times(111), recent_minute_fail_times(0) -D2020-12-29 16:55:14.713 (1609232114713029157 1c0745) collector.default0.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:55:14.729 (1609232114729951888 1c0745) collector.default0.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.5997, total_write_qps = 3968.25 -D2020-12-29 16:55:16.531 (1609232116531723851 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(279), recent_day_fail_times(0), recent_hour_detect_times(279), recent_hour_fail_times(0) recent_minute_detect_times(119), recent_minute_fail_times(0) -D2020-12-29 16:55:18.539 (1609232118539503712 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:55:19.531 (1609232119531774000 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(287), recent_day_fail_times(0), recent_hour_detect_times(287), recent_hour_fail_times(0) recent_minute_detect_times(127), recent_minute_fail_times(0) -D2020-12-29 16:55:20.534 (1609232120534795876 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 47MB -D2020-12-29 16:55:20.534 (1609232120534976952 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232120534), last_report_time_ms(1609232110534) -D2020-12-29 16:55:22.531 (1609232122531852344 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(295), recent_day_fail_times(0), recent_hour_detect_times(295), recent_hour_fail_times(0) recent_minute_detect_times(135), recent_minute_fail_times(0) -D2020-12-29 16:55:24.729 (1609232124729989730 1c0748) collector.default3.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:55:24.751 (1609232124751337100 1c0748) collector.default3.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.99966, total_write_qps = 4066.75 -D2020-12-29 16:55:25.531 
(1609232125531906363 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(303), recent_day_fail_times(0), recent_hour_detect_times(303), recent_hour_fail_times(0) recent_minute_detect_times(143), recent_minute_fail_times(0) -D2020-12-29 16:55:26.540 (1609232126540166472 1c0747) collector.default2.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:55:28.531 (1609232128531956478 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(311), recent_day_fail_times(0), recent_hour_detect_times(311), recent_hour_fail_times(0) recent_minute_detect_times(151), recent_minute_fail_times(0) -D2020-12-29 16:55:30.535 (1609232130535054168 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 47MB -D2020-12-29 16:55:30.535 (1609232130535271452 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232130534), last_report_time_ms(1609232120534) -D2020-12-29 16:55:31.532 (1609232131532036690 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(319), recent_day_fail_times(0), recent_hour_detect_times(319), recent_hour_fail_times(0) recent_minute_detect_times(159), recent_minute_fail_times(0) -D2020-12-29 16:55:34.528 (1609232134528487393 1c0747) collector.default2.0101000000000005: available_detector.cpp:461:on_minute_report(): start to report on new minute, last_minute = 2020-12-29 16:54 -D2020-12-29 16:55:34.532 (1609232134532088627 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(327), recent_day_fail_times(0), recent_hour_detect_times(327), recent_hour_fail_times(0) recent_minute_detect_times(7), recent_minute_fail_times(0) -D2020-12-29 16:55:34.541 (1609232134541136931 1c0745) collector.default0.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:55:34.751 (1609232134751372879 1c0749) collector.default4.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:55:34.773 (1609232134773465133 1c0749) collector.default4.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.39973, total_write_qps = 3862.97 -D2020-12-29 16:55:37.532 (1609232137532146133 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(335), recent_day_fail_times(0), recent_hour_detect_times(335), recent_hour_fail_times(0) recent_minute_detect_times(15), recent_minute_fail_times(0) -D2020-12-29 16:55:40.532 (1609232140532220706 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(343), recent_day_fail_times(0), recent_hour_detect_times(343), recent_hour_fail_times(0) 
recent_minute_detect_times(23), recent_minute_fail_times(0) -D2020-12-29 16:55:40.535 (1609232140535365488 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 48MB -D2020-12-29 16:55:40.535 (1609232140535594179 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232140535), last_report_time_ms(1609232130534) -D2020-12-29 16:55:42.541 (1609232142541899364 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:55:43.532 (1609232143532292035 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(351), recent_day_fail_times(0), recent_hour_detect_times(351), recent_hour_fail_times(0) recent_minute_detect_times(31), recent_minute_fail_times(0) -D2020-12-29 16:55:44.773 (1609232144773500434 1c0746) collector.default1.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:55:44.789 (1609232144789031897 1c0746) collector.default1.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.59968, total_write_qps = 3909.4 -D2020-12-29 16:55:46.532 (1609232146532353723 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(359), recent_day_fail_times(0), recent_hour_detect_times(359), recent_hour_fail_times(0) recent_minute_detect_times(39), recent_minute_fail_times(0) -D2020-12-29 16:55:49.532 (1609232149532424067 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(367), recent_day_fail_times(0), recent_hour_detect_times(367), recent_hour_fail_times(0) recent_minute_detect_times(47), recent_minute_fail_times(0) -D2020-12-29 16:55:50.535 (1609232150535661145 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 315 MB, memused_res = 48MB -D2020-12-29 16:55:50.535 (1609232150535842419 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232150535), last_report_time_ms(1609232140535) -D2020-12-29 16:55:50.543 (1609232150543003250 1c0746) collector.default1.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:55:52.532 (1609232152532477820 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(375), recent_day_fail_times(0), recent_hour_detect_times(375), recent_hour_fail_times(0) recent_minute_detect_times(55), recent_minute_fail_times(0) -D2020-12-29 16:55:54.789 (1609232154789070106 1c0745) collector.default0.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:55:54.810 (1609232154810602110 1c0745) collector.default0.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.99962, total_write_qps = 3597.35 -D2020-12-29 16:55:55.532 (1609232155532539811 1c0747) collector.default2.0101000400000004: 
available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(383), recent_day_fail_times(0), recent_hour_detect_times(383), recent_hour_fail_times(0) recent_minute_detect_times(63), recent_minute_fail_times(0) -D2020-12-29 16:55:58.532 (1609232158532593368 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(391), recent_day_fail_times(0), recent_hour_detect_times(391), recent_hour_fail_times(0) recent_minute_detect_times(71), recent_minute_fail_times(0) -D2020-12-29 16:55:58.543 (1609232158543800839 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:56:00.535 (1609232160535919280 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 315 MB, memused_res = 48MB -D2020-12-29 16:56:00.536 (1609232160536167564 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232160535), last_report_time_ms(1609232150535) -D2020-12-29 16:56:01.532 (1609232161532649554 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(399), recent_day_fail_times(0), recent_hour_detect_times(399), recent_hour_fail_times(0) recent_minute_detect_times(79), recent_minute_fail_times(0) -D2020-12-29 16:56:04.532 (1609232164532699680 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(407), recent_day_fail_times(0), recent_hour_detect_times(407), recent_hour_fail_times(0) recent_minute_detect_times(87), recent_minute_fail_times(0) -D2020-12-29 16:56:04.810 (1609232164810643135 1c0745) collector.default0.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:56:04.827 (1609232164827938450 1c0745) collector.default0.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.39972, total_write_qps = 3877.44 -D2020-12-29 16:56:06.544 (1609232166544458159 1c0746) collector.default1.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:56:07.532 (1609232167532755472 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(415), recent_day_fail_times(0), recent_hour_detect_times(415), recent_hour_fail_times(0) recent_minute_detect_times(95), recent_minute_fail_times(0) -D2020-12-29 16:56:10.532 (1609232170532806018 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(423), recent_day_fail_times(0), recent_hour_detect_times(423), recent_hour_fail_times(0) recent_minute_detect_times(103), recent_minute_fail_times(0) -D2020-12-29 16:56:10.536 (1609232170536242585 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 315 MB, memused_res = 48MB -D2020-12-29 16:56:10.536 (1609232170536502012 1c074e) 
unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232170536), last_report_time_ms(1609232160535) -D2020-12-29 16:56:13.532 (1609232173532855829 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(431), recent_day_fail_times(0), recent_hour_detect_times(431), recent_hour_fail_times(0) recent_minute_detect_times(111), recent_minute_fail_times(0) -D2020-12-29 16:56:14.545 (1609232174545172849 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:56:14.827 (1609232174827976036 1c0746) collector.default1.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:56:14.848 (1609232174848789640 1c0746) collector.default1.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.59968, total_write_qps = 3649.75 -D2020-12-29 16:56:16.532 (1609232176532911199 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(439), recent_day_fail_times(0), recent_hour_detect_times(439), recent_hour_fail_times(0) recent_minute_detect_times(119), recent_minute_fail_times(0) -D2020-12-29 16:56:19.532 (1609232179532980105 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(447), recent_day_fail_times(0), recent_hour_detect_times(447), recent_hour_fail_times(0) recent_minute_detect_times(127), recent_minute_fail_times(0) -D2020-12-29 16:56:20.536 (1609232180536586422 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 315 MB, memused_res = 48MB -D2020-12-29 16:56:20.536 (1609232180536880481 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232180536), last_report_time_ms(1609232170536) -D2020-12-29 16:56:22.533 (1609232182533034428 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(455), recent_day_fail_times(0), recent_hour_detect_times(455), recent_hour_fail_times(0) recent_minute_detect_times(135), recent_minute_fail_times(0) -D2020-12-29 16:56:22.546 (1609232182546006483 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:56:24.848 (1609232184848836786 1c0749) collector.default4.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:56:24.867 (1609232184867546835 1c0749) collector.default4.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.99965, total_write_qps = 3909.14 -D2020-12-29 16:56:25.533 (1609232185533110796 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(463), recent_day_fail_times(0), recent_hour_detect_times(463), recent_hour_fail_times(0) recent_minute_detect_times(143), recent_minute_fail_times(0) -D2020-12-29 
16:56:28.533 (1609232188533184665 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(471), recent_day_fail_times(0), recent_hour_detect_times(471), recent_hour_fail_times(0) recent_minute_detect_times(151), recent_minute_fail_times(0) -D2020-12-29 16:56:30.536 (1609232190536961514 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 315 MB, memused_res = 49MB -D2020-12-29 16:56:30.537 (1609232190537193813 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232190536), last_report_time_ms(1609232180536) -D2020-12-29 16:56:30.546 (1609232190546781126 1c0747) collector.default2.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:56:31.533 (1609232191533278913 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(479), recent_day_fail_times(0), recent_hour_detect_times(479), recent_hour_fail_times(0) recent_minute_detect_times(159), recent_minute_fail_times(0) -D2020-12-29 16:56:34.528 (1609232194528577632 1c0746) collector.default1.0101000000000005: available_detector.cpp:461:on_minute_report(): start to report on new minute, last_minute = 2020-12-29 16:55 -D2020-12-29 16:56:34.533 (1609232194533342782 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(487), recent_day_fail_times(0), recent_hour_detect_times(487), recent_hour_fail_times(0) recent_minute_detect_times(7), recent_minute_fail_times(0) -D2020-12-29 16:56:34.867 (1609232194867584280 1c0749) collector.default4.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:56:34.882 (1609232194882817403 1c0749) collector.default4.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.39969, total_write_qps = 3510.14 -D2020-12-29 16:56:37.533 (1609232197533387566 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(495), recent_day_fail_times(0), recent_hour_detect_times(495), recent_hour_fail_times(0) recent_minute_detect_times(15), recent_minute_fail_times(0) -D2020-12-29 16:56:38.548 (1609232198548024543 1c0747) collector.default2.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:56:40.533 (1609232200533443059 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(503), recent_day_fail_times(0), recent_hour_detect_times(503), recent_hour_fail_times(0) recent_minute_detect_times(23), recent_minute_fail_times(0) -D2020-12-29 16:56:40.537 (1609232200537264455 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 315 MB, memused_res = 49MB -D2020-12-29 16:56:40.537 (1609232200537493530 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232200537), 
last_report_time_ms(1609232190536) -D2020-12-29 16:56:43.533 (1609232203533478777 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(511), recent_day_fail_times(0), recent_hour_detect_times(511), recent_hour_fail_times(0) recent_minute_detect_times(31), recent_minute_fail_times(0) -D2020-12-29 16:56:44.882 (1609232204882850860 1c0748) collector.default3.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:56:44.900 (1609232204900681277 1c0748) collector.default3.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.59966, total_write_qps = 3620.83 -D2020-12-29 16:56:46.533 (1609232206533540664 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(519), recent_day_fail_times(0), recent_hour_detect_times(519), recent_hour_fail_times(0) recent_minute_detect_times(39), recent_minute_fail_times(0) -D2020-12-29 16:56:46.548 (1609232206548708500 1c0747) collector.default2.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:56:49.533 (1609232209533589380 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(527), recent_day_fail_times(0), recent_hour_detect_times(527), recent_hour_fail_times(0) recent_minute_detect_times(47), recent_minute_fail_times(0) -D2020-12-29 16:56:50.537 (1609232210537564007 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 315 MB, memused_res = 49MB -D2020-12-29 16:56:50.537 (1609232210537751693 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232210537), last_report_time_ms(1609232200537) -D2020-12-29 16:56:52.533 (1609232212533645682 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(535), recent_day_fail_times(0), recent_hour_detect_times(535), recent_hour_fail_times(0) recent_minute_detect_times(55), recent_minute_fail_times(0) -D2020-12-29 16:56:54.549 (1609232214549480724 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:56:54.900 (1609232214900720251 1c0745) collector.default0.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:56:54.916 (1609232214916541628 1c0745) collector.default0.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.99964, total_write_qps = 3964.63 -D2020-12-29 16:56:55.533 (1609232215533695024 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(543), recent_day_fail_times(0), recent_hour_detect_times(543), recent_hour_fail_times(0) recent_minute_detect_times(63), recent_minute_fail_times(0) -D2020-12-29 16:56:58.533 (1609232218533749307 1c0748) collector.default3.0101000400000004: 
available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(551), recent_day_fail_times(0), recent_hour_detect_times(551), recent_hour_fail_times(0) recent_minute_detect_times(71), recent_minute_fail_times(0) -D2020-12-29 16:57:00.537 (1609232220537838863 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 316 MB, memused_res = 49MB -D2020-12-29 16:57:00.538 (1609232220538059703 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232220537), last_report_time_ms(1609232210537) -D2020-12-29 16:57:01.533 (1609232221533806863 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(559), recent_day_fail_times(0), recent_hour_detect_times(559), recent_hour_fail_times(0) recent_minute_detect_times(79), recent_minute_fail_times(0) -D2020-12-29 16:57:02.550 (1609232222550239676 1c0747) collector.default2.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:57:04.533 (1609232224533858537 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(567), recent_day_fail_times(0), recent_hour_detect_times(567), recent_hour_fail_times(0) recent_minute_detect_times(87), recent_minute_fail_times(0) -D2020-12-29 16:57:04.916 (1609232224916580556 1c0749) collector.default4.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 16:57:04.933 (1609232224933653861 1c0749) collector.default4.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 2.39972, total_write_qps = 3879.55 -D2020-12-29 16:57:07.533 (1609232227533914133 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(575), recent_day_fail_times(0), recent_hour_detect_times(575), recent_hour_fail_times(0) recent_minute_detect_times(95), recent_minute_fail_times(0) -D2020-12-29 16:57:10.533 (1609232230533954046 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(583), recent_day_fail_times(0), recent_hour_detect_times(583), recent_hour_fail_times(0) recent_minute_detect_times(103), recent_minute_fail_times(0) -D2020-12-29 16:57:10.538 (1609232230538130958 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 316 MB, memused_res = 49MB -D2020-12-29 16:57:10.538 (1609232230538310127 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232230538), last_report_time_ms(1609232220537) -D2020-12-29 16:57:10.550 (1609232230550921933 1c0745) collector.default0.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 16:57:13.533 (1609232233533999884 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(591), 
-E2020-12-29 17:00:35.378 (1609232435378573245 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a write hot partition hotspot_test.3
-D2020-12-29 17:00:35.378 (1609232435378598652 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 3 1 6 3
-D2020-12-29 17:00:35.378 (1609232435378888168 1c0745) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start write hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803
-E2020-12-29 17:00:45.394 (1609232445394026705 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a write hot partition hotspot_test.3
-D2020-12-29 17:00:45.394 (1609232445394053625 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 3 1 6 4
-D2020-12-29 17:00:45.394 (1609232445394324579 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start write hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803
-E2020-12-29 17:00:45.394 (1609232445394337875 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY still detecting replication::hotkey_type::WRITE hotkey, state is hotkey_collector_state::FINE_DETECTING
-E2020-12-29 17:00:55.414 (1609232455414954712 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a write hot partition hotspot_test.3
-D2020-12-29 17:00:55.414 (1609232455414978050 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 3 1 6 5
-D2020-12-29 17:00:55.415 (1609232455415272964 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start write hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803
-E2020-12-29 17:00:55.415 (1609232455415283052 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY replication::hotkey_type::WRITE hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection
-E2020-12-29 17:01:25.471 (1609232485471928227 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.0
-D2020-12-29 17:01:25.471 (1609232485471962549 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 0 0 5 3
-D2020-12-29 17:01:25.472 (1609232485472253371 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.0, server address: 10.232.52.144:34803
-E2020-12-29 17:01:25.472 (1609232485472258829 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.1
-D2020-12-29 17:01:25.472 (1609232485472506044 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.1, server address: 10.232.52.144:34802
-E2020-12-29 17:01:25.472 (1609232485472512850 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.2
-D2020-12-29 17:01:25.472 (1609232485472763829 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.2, server address: 10.232.52.144:34801
-E2020-12-29 17:01:25.472 (1609232485472772314 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.3
-D2020-12-29 17:01:25.472 (1609232485472972804 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803
-E2020-12-29 17:01:25.472 (1609232485472979798 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.4
-D2020-12-29 17:01:25.473 (1609232485473197019 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.4, server address: 10.232.52.144:34802
-E2020-12-29 17:01:25.473 (1609232485473202747 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.5
-D2020-12-29 17:01:25.473 (1609232485473413430 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.5, server address: 10.232.52.144:34801
-E2020-12-29 17:01:25.473 (1609232485473418230 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.6
-D2020-12-29 17:01:25.473 (1609232485473637713 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.6, server address: 10.232.52.144:34803
-E2020-12-29 17:01:25.473 (1609232485473643508 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.7
-D2020-12-29 17:01:25.473 (1609232485473859677 1c0749) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.7, server address: 10.232.52.144:34802
-E2020-12-29 17:01:35.490 (1609232495490264636 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.0
-D2020-12-29 17:01:35.490 (1609232495490295118 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 0 0 4 4
-D2020-12-29 17:01:35.490 (1609232495490544364 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.0, server address: 10.232.52.144:34803
-E2020-12-29 17:01:35.490 (1609232495490552031 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.0, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING
-E2020-12-29 17:01:35.491 (1609232495491077053 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.3
-D2020-12-29 17:01:35.491 (1609232495491083977 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!!
3 0 4 4 -D2020-12-29 17:01:35.4915 (1609232495491271457 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 -E2020-12-29 17:01:35.4915 (1609232495491277226 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:35.491 (1609232495491289597 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.4 -D2020-12-29 17:01:35.491 (1609232495491298258 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 4 0 4 4 -D2020-12-29 17:01:35.4915 (1609232495491495551 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.4, server address: 10.232.52.144:34802 -E2020-12-29 17:01:35.4915 (1609232495491501756 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.4, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:35.491 (1609232495491511861 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.5 -D2020-12-29 17:01:35.491 (1609232495491522717 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 5 0 4 4 -D2020-12-29 17:01:35.4915 (1609232495491704471 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.5, server address: 10.232.52.144:34801 -E2020-12-29 17:01:35.4915 (1609232495491711262 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.5, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:35.491 (1609232495491722764 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.6 -D2020-12-29 17:01:35.491 (1609232495491728785 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 
6 0 4 4 -D2020-12-29 17:01:35.4915 (1609232495491930618 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.6, server address: 10.232.52.144:34803 -E2020-12-29 17:01:35.4915 (1609232495491937127 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.6, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:35.491 (1609232495491947286 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.7 -D2020-12-29 17:01:35.491 (1609232495491961140 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 7 0 4 4 -D2020-12-29 17:01:35.4925 (1609232495492177871 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.7, server address: 10.232.52.144:34802 -E2020-12-29 17:01:35.4925 (1609232495492183720 1c0747) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.7, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -D2020-12-29 17:01:35.492 (1609232495492248731 1c0747) collector.default2.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 15740.6, total_write_qps = 2.6997 -D2020-12-29 17:01:37.540 (1609232497540827511 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1295), recent_day_fail_times(0), recent_hour_detect_times(175), recent_hour_fail_times(0) recent_minute_detect_times(15), recent_minute_fail_times(0) -D2020-12-29 17:01:40.540 (1609232500540883962 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1303), recent_day_fail_times(0), recent_hour_detect_times(183), recent_hour_fail_times(0) recent_minute_detect_times(23), recent_minute_fail_times(0) -D2020-12-29 17:01:40.545 (1609232500545816589 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 317 MB, memused_res = 53MB -D2020-12-29 17:01:40.545 (1609232500545994397 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232500545), last_report_time_ms(1609232490545) -D2020-12-29 17:01:42.579 (1609232502579018349 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 17:01:43.540 (1609232503540932891 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1311), recent_day_fail_times(0), recent_hour_detect_times(191), recent_hour_fail_times(0) recent_minute_detect_times(31), recent_minute_fail_times(0) -D2020-12-29 17:01:45.492 (1609232505492284308 1c0748) 
collector.default3.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -E2020-12-29 17:01:45.509 (1609232505509502984 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.0 -D2020-12-29 17:01:45.509 (1609232505509533570 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 0 0 4 5 -D2020-12-29 17:01:45.5095 (1609232505509779522 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.0, server address: 10.232.52.144:34803 -E2020-12-29 17:01:45.5095 (1609232505509789436 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.0, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:45.509 (1609232505509803418 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.1 -D2020-12-29 17:01:45.509 (1609232505509810603 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 1 0 4 5 -D2020-12-29 17:01:45.5105 (1609232505510030183 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.1, server address: 10.232.52.144:34802 -E2020-12-29 17:01:45.5105 (1609232505510037780 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.1, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:45.510 (1609232505510055023 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.2 -D2020-12-29 17:01:45.510 (1609232505510061523 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 2 0 4 5 -D2020-12-29 17:01:45.5105 (1609232505510262636 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.2, server address: 10.232.52.144:34801 -E2020-12-29 17:01:45.5105 (1609232505510270049 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.2, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:45.510 (1609232505510284544 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.3 -D2020-12-29 17:01:45.510 (1609232505510294301 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 
3 0 4 5 -D2020-12-29 17:01:45.5105 (1609232505510513678 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 -E2020-12-29 17:01:45.5105 (1609232505510520602 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:45.510 (1609232505510530592 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.4 -D2020-12-29 17:01:45.510 (1609232505510537542 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 4 0 4 5 -D2020-12-29 17:01:45.5105 (1609232505510724748 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.4, server address: 10.232.52.144:34802 -E2020-12-29 17:01:45.5105 (1609232505510733115 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.4, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:45.510 (1609232505510746187 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.5 -D2020-12-29 17:01:45.510 (1609232505510752332 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 5 0 4 5 -D2020-12-29 17:01:45.5105 (1609232505510958044 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.5, server address: 10.232.52.144:34801 -E2020-12-29 17:01:45.5105 (1609232505510965519 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.5, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:45.510 (1609232505510984788 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.6 -D2020-12-29 17:01:45.510 (1609232505510990755 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 
6 0 4 5 -D2020-12-29 17:01:45.5115 (1609232505511203752 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.6, server address: 10.232.52.144:34803 -E2020-12-29 17:01:45.5115 (1609232505511209905 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.6, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -E2020-12-29 17:01:45.511 (1609232505511220643 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:159:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.7 -D2020-12-29 17:01:45.511 (1609232505511226308 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): !!!!!! 7 0 4 5 -D2020-12-29 17:01:45.5115 (1609232505511437545 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:205:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.7, server address: 10.232.52.144:34802 -E2020-12-29 17:01:45.5115 (1609232505511446788 1c0748) collector.default3.0101000000000001: hotspot_partition_calculator.cpp:219:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.7, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::COARSE_DETECTING -D2020-12-29 17:01:45.511 (1609232505511514991 1c0748) collector.default3.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 16170, total_write_qps = 3.59958 -D2020-12-29 17:01:46.540 (1609232506540988054 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1319), recent_day_fail_times(0), recent_hour_detect_times(199), recent_hour_fail_times(0) recent_minute_detect_times(39), recent_minute_fail_times(0) -D2020-12-29 17:01:49.541 (1609232509541052232 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1327), recent_day_fail_times(0), recent_hour_detect_times(207), recent_hour_fail_times(0) recent_minute_detect_times(47), recent_minute_fail_times(0) -D2020-12-29 17:01:50.546 (1609232510546060808 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 317 MB, memused_res = 53MB -D2020-12-29 17:01:50.546 (1609232510546244538 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232510546), last_report_time_ms(1609232500545) -D2020-12-29 17:01:50.579 (1609232510579776030 1c0749) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 17:01:52.541 (1609232512541112544 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1335), recent_day_fail_times(0), recent_hour_detect_times(215), recent_hour_fail_times(0) recent_minute_detect_times(55), recent_minute_fail_times(0) -D2020-12-29 17:01:55.511 (1609232515511549337 1c0747) 
collector.default2.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 17:01:55.529 (1609232515529206773 1c0747) collector.default2.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 14937.4, total_write_qps = 2.6997 -D2020-12-29 17:01:55.541 (1609232515541171291 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1343), recent_day_fail_times(0), recent_hour_detect_times(223), recent_hour_fail_times(0) recent_minute_detect_times(63), recent_minute_fail_times(0) -D2020-12-29 17:01:58.541 (1609232518541235675 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1351), recent_day_fail_times(0), recent_hour_detect_times(231), recent_hour_fail_times(0) recent_minute_detect_times(71), recent_minute_fail_times(0) -D2020-12-29 17:01:58.580 (1609232518580502021 1c0747) collector.default2.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 17:02:00.546 (1609232520546322060 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 317 MB, memused_res = 53MB -D2020-12-29 17:02:00.546 (1609232520546513156 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232520546), last_report_time_ms(1609232510546) -D2020-12-29 17:02:01.541 (1609232521541317960 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1359), recent_day_fail_times(0), recent_hour_detect_times(239), recent_hour_fail_times(0) recent_minute_detect_times(79), recent_minute_fail_times(0) -D2020-12-29 17:02:04.541 (1609232524541373279 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1367), recent_day_fail_times(0), recent_hour_detect_times(247), recent_hour_fail_times(0) recent_minute_detect_times(87), recent_minute_fail_times(0) -D2020-12-29 17:02:05.529 (1609232525529249855 1c0747) collector.default2.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 17:02:05.546 (1609232525546640750 1c0747) collector.default2.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 14772, total_write_qps = 2.69969 -D2020-12-29 17:02:06.581 (1609232526581236487 1c0745) collector.default0.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 17:02:07.541 (1609232527541455613 1c0748) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1375), recent_day_fail_times(0), recent_hour_detect_times(255), recent_hour_fail_times(0) recent_minute_detect_times(95), recent_minute_fail_times(0) -D2020-12-29 17:02:10.541 (1609232530541504712 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on 
cluster[onebox], recent_day_detect_times(1383), recent_day_fail_times(0), recent_hour_detect_times(263), recent_hour_fail_times(0) recent_minute_detect_times(103), recent_minute_fail_times(0) -D2020-12-29 17:02:10.546 (1609232530546576228 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 317 MB, memused_res = 53MB -D2020-12-29 17:02:10.546 (1609232530546739869 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232530546), last_report_time_ms(1609232520546) -D2020-12-29 17:02:13.541 (1609232533541555844 1c0745) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1391), recent_day_fail_times(0), recent_hour_detect_times(271), recent_hour_fail_times(0) recent_minute_detect_times(111), recent_minute_fail_times(0) -D2020-12-29 17:02:14.582 (1609232534582051607 1c0746) collector.default1.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 17:02:15.546 (1609232535546678745 1c0748) collector.default3.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 17:02:15.570 (1609232535570097581 1c0748) collector.default3.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 15701.4, total_write_qps = 3.4996 -D2020-12-29 17:02:16.541 (1609232536541617190 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1399), recent_day_fail_times(0), recent_hour_detect_times(279), recent_hour_fail_times(0) recent_minute_detect_times(119), recent_minute_fail_times(0) -D2020-12-29 17:02:19.541 (1609232539541679896 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1407), recent_day_fail_times(0), recent_hour_detect_times(287), recent_hour_fail_times(0) recent_minute_detect_times(127), recent_minute_fail_times(0) -D2020-12-29 17:02:20.546 (1609232540546811099 1c074f) unknown.io-thrd.1836879: builtin_counters.cpp:36:update_counters(): memused_virt = 317 MB, memused_res = 53MB -D2020-12-29 17:02:20.547 (1609232540547004226 1c074f) unknown.io-thrd.1836879: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232540546), last_report_time_ms(1609232530546) -D2020-12-29 17:02:22.541 (1609232542541729720 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1415), recent_day_fail_times(0), recent_hour_detect_times(295), recent_hour_fail_times(0) recent_minute_detect_times(135), recent_minute_fail_times(0) -D2020-12-29 17:02:22.582 (1609232542582939422 1c0748) collector.default3.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 17:02:25.541 (1609232545541805156 1c0749) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1423), recent_day_fail_times(0), recent_hour_detect_times(303), recent_hour_fail_times(0) 
recent_minute_detect_times(143), recent_minute_fail_times(0) -D2020-12-29 17:02:25.570 (1609232545570131426 1c0746) collector.default1.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -D2020-12-29 17:02:25.586 (1609232545586595134 1c0746) collector.default1.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 15737.4, total_write_qps = 2.69971 -D2020-12-29 17:02:28.541 (1609232548541850441 1c0746) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1431), recent_day_fail_times(0), recent_hour_detect_times(311), recent_hour_fail_times(0) recent_minute_detect_times(151), recent_minute_fail_times(0) -D2020-12-29 17:02:30.547 (1609232550547077657 1c074e) unknown.io-thrd.1836878: builtin_counters.cpp:36:update_counters(): memused_virt = 317 MB, memused_res = 53MB -D2020-12-29 17:02:30.547 (1609232550547259509 1c074e) unknown.io-thrd.1836878: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232550547), last_report_time_ms(1609232540546) -D2020-12-29 17:02:30.583 (1609232550583770792 1c0745) collector.default0.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 -D2020-12-29 17:02:31.541 (1609232551541914053 1c0747) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(1439), recent_day_fail_times(0), recent_hour_detect_times(319), recent_hour_fail_times(0) recent_minute_detect_times(159), recent_minute_fail_times(0) +D2020-12-29 21:33:15.840 (1609248795840224105 1d62bc) collector.io-thrd.1925820: network.cpp:787:on_client_session_connected(): client session connected, remote_server = 10.232.52.144:34801, current_count = 4 +D2020-12-29 21:33:15.840 (1609248795840232986 1d62c7) collector.default0.010100040000000e: network.cpp:649:send_message(): client session created, remote_server = 10.232.52.144:34803, current_count = 5 +D2020-12-29 21:33:15.840 (1609248795840260095 1d62bc) collector.io-thrd.1925820: network.cpp:787:on_client_session_connected(): client session connected, remote_server = 10.232.52.144:34802, current_count = 5 +D2020-12-29 21:33:15.840 (1609248795840919072 1d62bc) collector.io-thrd.1925820: network.cpp:787:on_client_session_connected(): client session connected, remote_server = 10.232.52.144:34803, current_count = 5 +E2020-12-29 21:33:15.857 (1609248795857993212 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00010121 +E2020-12-29 21:33:15.858 (1609248795858031891 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:33:15.858 (1609248795858204752 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 +E2020-12-29 21:33:15.858 (1609248795858213054 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0000 +E2020-12-29 21:33:15.858 (1609248795858397676 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00000000 +E2020-12-29 21:33:15.858 (1609248795858405982 1d62c8) collector.default1.0101000000000001: 
hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +D2020-12-29 21:33:15.858 (1609248795858562859 1d62c8) collector.default1.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 6394.83, total_write_qps = 0 +D2020-12-29 21:33:18.839 (1609248798839979489 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(8), recent_day_fail_times(0), recent_hour_detect_times(8), recent_hour_fail_times(0) recent_minute_detect_times(8), recent_minute_fail_times(0) +D2020-12-29 21:33:21.840 (1609248801840025987 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(16), recent_day_fail_times(0), recent_hour_detect_times(16), recent_hour_fail_times(0) recent_minute_detect_times(16), recent_minute_fail_times(0) +D2020-12-29 21:33:21.842 (1609248801842909631 1d62d1) unknown.io-thrd.1925841: builtin_counters.cpp:36:update_counters(): memused_virt = 312 MB, memused_res = 44MB +D2020-12-29 21:33:21.843 (1609248801843121408 1d62d1) unknown.io-thrd.1925841: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248801842), last_report_time_ms(1609248791842) +D2020-12-29 21:33:23.841 (1609248803841907024 1d62c8) collector.default1.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:33:24.840 (1609248804840094178 1d62c8) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(24), recent_day_fail_times(0), recent_hour_detect_times(24), recent_hour_fail_times(0) recent_minute_detect_times(24), recent_minute_fail_times(0) +D2020-12-29 21:33:25.858 (1609248805858591358 1d62c9) collector.default2.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps +E2020-12-29 21:33:25.880 (1609248805880208379 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11111211 +E2020-12-29 21:33:25.880 (1609248805880245844 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:33:25.880 (1609248805880265924 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 +E2020-12-29 21:33:25.880 (1609248805880277967 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 3001 +E2020-12-29 21:33:25.880 (1609248805880294300 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11211211 +E2020-12-29 21:33:25.880 (1609248805880308612 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 11211211 +D2020-12-29 21:33:25.880 (1609248805880333855 1d62c9) collector.default2.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 15769.8, total_write_qps = 2.19974 +D2020-12-29 21:33:27.840 (1609248807840144013 1d62ca) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): 
detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(32), recent_day_fail_times(0), recent_hour_detect_times(32), recent_hour_fail_times(0) recent_minute_detect_times(32), recent_minute_fail_times(0) +D2020-12-29 21:33:30.840 (1609248810840258499 1d62cb) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(46), recent_day_fail_times(0), recent_hour_detect_times(46), recent_hour_fail_times(0) recent_minute_detect_times(46), recent_minute_fail_times(0) +D2020-12-29 21:33:31.842 (1609248811842663526 1d62cb) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:33:31.843 (1609248811843185984 1d62d0) unknown.io-thrd.1925840: builtin_counters.cpp:36:update_counters(): memused_virt = 312 MB, memused_res = 45MB +D2020-12-29 21:33:31.843 (1609248811843404044 1d62d0) unknown.io-thrd.1925840: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248811843), last_report_time_ms(1609248801842) +D2020-12-29 21:33:33.840 (1609248813840336373 1d62cb) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(55), recent_day_fail_times(0), recent_hour_detect_times(55), recent_hour_fail_times(0) recent_minute_detect_times(55), recent_minute_fail_times(0) +D2020-12-29 21:33:35.880 (1609248815880374985 1d62c9) collector.default2.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps +E2020-12-29 21:33:35.896 (1609248815896907285 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11111111 +E2020-12-29 21:33:35.896 (1609248815896946249 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:33:35.896 (1609248815896963591 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 +E2020-12-29 21:33:35.896 (1609248815896969407 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 3000 +E2020-12-29 21:33:35.896 (1609248815896980526 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 22122122 +E2020-12-29 21:33:35.896 (1609248815896987840 1d62c9) collector.default2.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 22122122 +D2020-12-29 21:33:35.897 (1609248815897015947 1d62c9) collector.default2.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 15591.5, total_write_qps = 3.29963 +D2020-12-29 21:33:36.840 (1609248816840408314 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(63), recent_day_fail_times(0), recent_hour_detect_times(63), recent_hour_fail_times(0) recent_minute_detect_times(63), recent_minute_fail_times(0) +D2020-12-29 21:33:39.840 (1609248819840671959 1d62c7) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and 
partition_count[8] on cluster[onebox], recent_day_detect_times(71), recent_day_fail_times(0), recent_hour_detect_times(71), recent_hour_fail_times(0) recent_minute_detect_times(71), recent_minute_fail_times(0) +D2020-12-29 21:33:39.843 (1609248819843713798 1d62c9) collector.default2.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:33:41.843 (1609248821843491489 1d62d1) unknown.io-thrd.1925841: builtin_counters.cpp:36:update_counters(): memused_virt = 313 MB, memused_res = 45MB +D2020-12-29 21:33:41.843 (1609248821843703431 1d62d1) unknown.io-thrd.1925841: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248821843), last_report_time_ms(1609248811843) +D2020-12-29 21:33:42.840 (1609248822840740370 1d62cb) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(79), recent_day_fail_times(0), recent_hour_detect_times(79), recent_hour_fail_times(0) recent_minute_detect_times(79), recent_minute_fail_times(0) +D2020-12-29 21:33:45.840 (1609248825840816889 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(87), recent_day_fail_times(0), recent_hour_detect_times(87), recent_hour_fail_times(0) recent_minute_detect_times(87), recent_minute_fail_times(0) +D2020-12-29 21:33:45.897 (1609248825897050041 1d62c8) collector.default1.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps +E2020-12-29 21:33:45.918 (1609248825918234957 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00050000 +E2020-12-29 21:33:45.918 (1609248825918279772 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:33:45.918 (1609248825918307924 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 +E2020-12-29 21:33:45.918 (1609248825918316003 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0200 +E2020-12-29 21:33:45.918 (1609248825918329705 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11111111 +E2020-12-29 21:33:45.918 (1609248825918340428 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 11111111 +D2020-12-29 21:33:45.918 (1609248825918361942 1d62c8) collector.default1.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 15668, total_write_qps = 2.69967 +D2020-12-29 21:33:47.844 (1609248827844447978 1d62c7) collector.default0.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:33:48.840 (1609248828840984078 1d62cb) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(95), recent_day_fail_times(0), recent_hour_detect_times(95), recent_hour_fail_times(0) recent_minute_detect_times(95), recent_minute_fail_times(0) +D2020-12-29 21:33:51.841 
(1609248831841082859 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(103), recent_day_fail_times(0), recent_hour_detect_times(103), recent_hour_fail_times(0) recent_minute_detect_times(103), recent_minute_fail_times(0) +D2020-12-29 21:33:51.843 (1609248831843765979 1d62d0) unknown.io-thrd.1925840: builtin_counters.cpp:36:update_counters(): memused_virt = 313 MB, memused_res = 45MB +D2020-12-29 21:33:51.843 (1609248831843946408 1d62d0) unknown.io-thrd.1925840: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248831843), last_report_time_ms(1609248821843) +D2020-12-29 21:33:54.841 (1609248834841216102 1d62ca) collector.default3.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(111), recent_day_fail_times(0), recent_hour_detect_times(111), recent_hour_fail_times(0) recent_minute_detect_times(111), recent_minute_fail_times(0) +D2020-12-29 21:33:55.845 (1609248835845189417 1d62c8) collector.default1.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:33:55.918 (1609248835918397038 1d62c7) collector.default0.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps +E2020-12-29 21:33:55.938 (1609248835938535620 1d62c7) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00060000 +E2020-12-29 21:33:55.938 (1609248835938577745 1d62c7) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:33:55.938 (1609248835938596515 1d62c7) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 +E2020-12-29 21:33:55.938 (1609248835938607891 1d62c7) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0002 +E2020-12-29 21:33:55.938 (1609248835938619332 1d62c7) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11211211 +E2020-12-29 21:33:55.938 (1609248835938626585 1d62c7) collector.default0.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 11211211 +D2020-12-29 21:33:55.938 (1609248835938646514 1d62c7) collector.default0.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 16489.6, total_write_qps = 2.89966 +D2020-12-29 21:33:57.841 (1609248837841268503 1d62c7) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(119), recent_day_fail_times(0), recent_hour_detect_times(119), recent_hour_fail_times(0) recent_minute_detect_times(119), recent_minute_fail_times(0) +D2020-12-29 21:34:00.841 (1609248840841353462 1d62c7) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(127), recent_day_fail_times(0), recent_hour_detect_times(127), recent_hour_fail_times(0) recent_minute_detect_times(127), recent_minute_fail_times(0) +D2020-12-29 21:34:01.844 (1609248841844008585 1d62d1) 
unknown.io-thrd.1925841: builtin_counters.cpp:36:update_counters(): memused_virt = 313 MB, memused_res = 46MB +D2020-12-29 21:34:01.844 (1609248841844189956 1d62d1) unknown.io-thrd.1925841: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248841843), last_report_time_ms(1609248831843) +D2020-12-29 21:34:03.841 (1609248843841424262 1d62c7) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(135), recent_day_fail_times(0), recent_hour_detect_times(135), recent_hour_fail_times(0) recent_minute_detect_times(135), recent_minute_fail_times(0) +D2020-12-29 21:34:03.845 (1609248843845911032 1d62c8) collector.default1.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:34:05.938 (1609248845938677241 1d62c8) collector.default1.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps +E2020-12-29 21:34:05.955 (1609248845955163416 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00050000 +E2020-12-29 21:34:05.955 (1609248845955211436 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:34:05.955 (1609248845955221581 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.3 +D2020-12-29 21:34:05.955 (1609248845955229843 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:169:detect_hotkey_in_hotpartition(): !!!!!! 3 0 5 3 +D2020-12-29 21:34:05.9555 (1609248845955515865 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:210:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 +E2020-12-29 21:34:05.955 (1609248845955535168 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 +E2020-12-29 21:34:05.955 (1609248845955551303 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0020 +E2020-12-29 21:34:05.955 (1609248845955565754 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11111111 +E2020-12-29 21:34:05.955 (1609248845955580228 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 11111111 +D2020-12-29 21:34:05.955 (1609248845955604530 1d62c8) collector.default1.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 16421.9, total_write_qps = 3.29963 +D2020-12-29 21:34:06.841 (1609248846841506658 1d62c8) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(143), recent_day_fail_times(0), recent_hour_detect_times(143), recent_hour_fail_times(0) recent_minute_detect_times(143), recent_minute_fail_times(0) +D2020-12-29 21:34:09.841 (1609248849841551696 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(151), 
recent_day_fail_times(0), recent_hour_detect_times(151), recent_hour_fail_times(0) recent_minute_detect_times(151), recent_minute_fail_times(0) +D2020-12-29 21:34:11.844 (1609248851844260881 1d62d0) unknown.io-thrd.1925840: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 46MB +D2020-12-29 21:34:11.844 (1609248851844443648 1d62d0) unknown.io-thrd.1925840: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248851844), last_report_time_ms(1609248841843) +D2020-12-29 21:34:11.846 (1609248851846706612 1d62cb) collector.default4.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:34:12.841 (1609248852841683150 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(159), recent_day_fail_times(0), recent_hour_detect_times(159), recent_hour_fail_times(0) recent_minute_detect_times(159), recent_minute_fail_times(0) +D2020-12-29 21:34:15.839 (1609248855839454245 1d62c9) collector.default2.0101000000000005: available_detector.cpp:461:on_minute_report(): start to report on new minute, last_minute = 2020-12-29 21:32 +D2020-12-29 21:34:15.841 (1609248855841722589 1d62cb) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(167), recent_day_fail_times(0), recent_hour_detect_times(167), recent_hour_fail_times(0) recent_minute_detect_times(7), recent_minute_fail_times(0) +D2020-12-29 21:34:15.955 (1609248855955634914 1d62cb) collector.default4.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps +E2020-12-29 21:34:15.972 (1609248855972122668 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00040000 +E2020-12-29 21:34:15.972 (1609248855972158462 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:34:15.972 (1609248855972168079 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.3 +D2020-12-29 21:34:15.972 (1609248855972176282 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:169:detect_hotkey_in_hotpartition(): !!!!!! 
3 0 4 4 +D2020-12-29 21:34:15.9725 (1609248855972454602 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:210:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 +E2020-12-29 21:34:15.9725 (1609248855972463232 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:224:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::FINE_DETECTING +E2020-12-29 21:34:15.972 (1609248855972502444 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 +E2020-12-29 21:34:15.972 (1609248855972524725 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0021 +E2020-12-29 21:34:15.972 (1609248855972540776 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11111111 +E2020-12-29 21:34:15.972 (1609248855972551357 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 11111111 +D2020-12-29 21:34:15.972 (1609248855972568010 1d62cb) collector.default4.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 16325.2, total_write_qps = 2.89967 +D2020-12-29 21:34:18.841 (1609248858841822896 1d62c8) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(175), recent_day_fail_times(0), recent_hour_detect_times(175), recent_hour_fail_times(0) recent_minute_detect_times(15), recent_minute_fail_times(0) +D2020-12-29 21:34:19.847 (1609248859847412760 1d62c9) collector.default2.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:34:21.841 (1609248861841885828 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(183), recent_day_fail_times(0), recent_hour_detect_times(183), recent_hour_fail_times(0) recent_minute_detect_times(23), recent_minute_fail_times(0) +D2020-12-29 21:34:21.844 (1609248861844508638 1d62d1) unknown.io-thrd.1925841: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 46MB +D2020-12-29 21:34:21.844 (1609248861844685735 1d62d1) unknown.io-thrd.1925841: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248861844), last_report_time_ms(1609248851844) +D2020-12-29 21:34:24.841 (1609248864841939984 1d62c7) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(191), recent_day_fail_times(0), recent_hour_detect_times(191), recent_hour_fail_times(0) recent_minute_detect_times(31), recent_minute_fail_times(0) +D2020-12-29 21:34:25.972 (1609248865972597958 1d62cb) collector.default4.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps +E2020-12-29 21:34:25.989 (1609248865989068658 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00040000 +E2020-12-29 
21:34:25.989 (1609248865989112487 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:34:25.989 (1609248865989119921 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.3 +D2020-12-29 21:34:25.989 (1609248865989127012 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:169:detect_hotkey_in_hotpartition(): !!!!!! 3 0 4 5 +D2020-12-29 21:34:25.9895 (1609248865989390581 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:210:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 +E2020-12-29 21:34:25.9895 (1609248865989402059 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:224:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY replication::hotkey_type::READ hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection +E2020-12-29 21:34:25.989 (1609248865989430605 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 +E2020-12-29 21:34:25.989 (1609248865989439345 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0001 +E2020-12-29 21:34:25.989 (1609248865989455758 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11211211 +E2020-12-29 21:34:25.989 (1609248865989468081 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 11111111 +D2020-12-29 21:34:25.989 (1609248865989484349 1d62cb) collector.default4.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 16351.8, total_write_qps = 2.79966 +D2020-12-29 21:34:27.841 (1609248867841993495 1d62cb) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(199), recent_day_fail_times(0), recent_hour_detect_times(199), recent_hour_fail_times(0) recent_minute_detect_times(39), recent_minute_fail_times(0) +D2020-12-29 21:34:27.848 (1609248867848105914 1d62c7) collector.default0.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:34:30.842 (1609248870842037088 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(207), recent_day_fail_times(0), recent_hour_detect_times(207), recent_hour_fail_times(0) recent_minute_detect_times(47), recent_minute_fail_times(0) +D2020-12-29 21:34:31.844 (1609248871844770812 1d62d0) unknown.io-thrd.1925840: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 46MB +D2020-12-29 21:34:31.844 (1609248871844946162 1d62d0) unknown.io-thrd.1925840: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248871844), last_report_time_ms(1609248861844) +D2020-12-29 21:34:33.842 (1609248873842095964 1d62c7) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting 
table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(215), recent_day_fail_times(0), recent_hour_detect_times(215), recent_hour_fail_times(0) recent_minute_detect_times(55), recent_minute_fail_times(0) +D2020-12-29 21:34:35.848 (1609248875848837038 1d62c9) collector.default2.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:34:35.989 (1609248875989514077 1d62c8) collector.default1.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps +E2020-12-29 21:34:36.10 (1609248876010431515 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00040000 +E2020-12-29 21:34:36.10 (1609248876010465724 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:34:36.10 (1609248876010472911 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.3 +D2020-12-29 21:34:36.10 (1609248876010486907 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:169:detect_hotkey_in_hotpartition(): !!!!!! 3 0 4 6 +D2020-12-29 21:34:36.10.5 (1609248876010775557 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:210:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 +E2020-12-29 21:34:36.10.5 (1609248876010787757 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:224:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY replication::hotkey_type::READ hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection +E2020-12-29 21:34:36.10 (1609248876010820580 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 +E2020-12-29 21:34:36.10 (1609248876010835431 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0002 +E2020-12-29 21:34:36.10 (1609248876010856338 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11111111 +E2020-12-29 21:34:36.10 (1609248876010867159 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 11111111 +D2020-12-29 21:34:36.10 (1609248876010881451 1d62c8) collector.default1.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 16509.2, total_write_qps = 3.29957 +D2020-12-29 21:34:36.842 (1609248876842158904 1d62c7) collector.default0.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(223), recent_day_fail_times(0), recent_hour_detect_times(223), recent_hour_fail_times(0) recent_minute_detect_times(63), recent_minute_fail_times(0) +D2020-12-29 21:34:39.842 (1609248879842214458 1d62c8) collector.default1.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(231), recent_day_fail_times(0), recent_hour_detect_times(231), 
recent_hour_fail_times(0) recent_minute_detect_times(71), recent_minute_fail_times(0) +D2020-12-29 21:34:41.845 (1609248881845006257 1d62d1) unknown.io-thrd.1925841: builtin_counters.cpp:36:update_counters(): memused_virt = 314 MB, memused_res = 47MB +D2020-12-29 21:34:41.845 (1609248881845185719 1d62d1) unknown.io-thrd.1925841: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248881844), last_report_time_ms(1609248871844) +D2020-12-29 21:34:42.842 (1609248882842300341 1d62cb) collector.default4.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(239), recent_day_fail_times(0), recent_hour_detect_times(239), recent_hour_fail_times(0) recent_minute_detect_times(79), recent_minute_fail_times(0) +D2020-12-29 21:34:43.849 (1609248883849502091 1d62c8) collector.default1.0101000000000002: info_collector.cpp:243:on_capacity_unit_stat(): start to stat capacity unit, remaining_retry_count = 3 +D2020-12-29 21:34:45.842 (1609248885842354161 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(247), recent_day_fail_times(0), recent_hour_detect_times(247), recent_hour_fail_times(0) recent_minute_detect_times(87), recent_minute_fail_times(0) +D2020-12-29 21:34:46.10 (1609248886010921470 1d62cb) collector.default4.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps +E2020-12-29 21:34:46.31 (1609248886031362369 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00040000 +E2020-12-29 21:34:46.31 (1609248886031397671 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 +E2020-12-29 21:34:46.31 (1609248886031424105 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.3 +D2020-12-29 21:34:46.31 (1609248886031439776 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:169:detect_hotkey_in_hotpartition(): !!!!!! 
3 0 4 7
+D2020-12-29 21:34:46.31.5 (1609248886031704678 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:210:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803
+E2020-12-29 21:34:46.31.5 (1609248886031714144 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:224:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY replication::hotkey_type::READ hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection
+E2020-12-29 21:34:46.31 (1609248886031746990 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000
+E2020-12-29 21:34:46.31 (1609248886031760704 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0020
+E2020-12-29 21:34:46.31 (1609248886031782687 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11111111
+E2020-12-29 21:34:46.31 (1609248886031792110 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 11111111
+D2020-12-29 21:34:46.31 (1609248886031809761 1d62cb) collector.default4.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 16514.9, total_write_qps = 2.69968
diff --git a/debug2.txt b/debug2.txt
index 68bddd5a81..9210c6820b 100644
--- a/debug2.txt
+++ b/debug2.txt
@@ -1,4077 +1,709 @@
-D2020-12-29 16:53:14.974 (1609231994974501525 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY
-D2020-12-29 16:53:14.980 (1609231994980163781 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check
-D2020-12-29 16:53:14.980 (1609231994980174920 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY
-D2020-12-29 16:53:14.980 (1609231994980206930 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY
-D2020-12-29 16:53:14.983 (1609231994983073423 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check
-D2020-12-29 16:53:14.983 (1609231994983081782 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY
-D2020-12-29 16:53:14.983 (1609231994983107520 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY
-D2020-12-29 16:53:14.989 (1609231994989994309 1c0719) replica.replica0.0300070f000101f6: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot
= 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 43 -D2020-12-29 16:53:14.9905 (1609231994990002360 1c0719) replica.replica0.0300070f000101f6: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 43, confirmed_decree = -1 -D2020-12-29 16:53:14.992 (1609231994992460580 1c071a) replica.replica1.0300070f00010201: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42 -D2020-12-29 16:53:14.9925 (1609231994992478292 1c071a) replica.replica1.0300070f00010201: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42, confirmed_decree = -1 -D2020-12-29 16:53:15.3 (1609231995003464881 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:15.3 (1609231995003475847 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:15.3 (1609231995003505064 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:16.518 (1609231996518499190 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609231996518] -D2020-12-29 16:53:16.518 (1609231996518635342 1c0734) replica. fd1.030c000000000013: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609231996518], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:53:16.518 (1609231996518649577 1c0734) replica. fd1.030c000000000013: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609231996518 -D2020-12-29 16:53:19.518 (1609231999518556927 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609231999518] -D2020-12-29 16:53:19.518 (1609231999518685950 1c0733) replica. fd0.030c00010000000f: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609231999518], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:53:19.518 (1609231999518695048 1c0733) replica. 
fd0.030c00010000000f: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609231999518 -D2020-12-29 16:53:20.523 (1609232000523486383 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 1460 MB, memused_res = 225MB -D2020-12-29 16:53:20.524 (1609232000524546969 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232000523), last_report_time_ms(1609231990519) -D2020-12-29 16:53:22.518 (1609232002518615071 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232002518] -D2020-12-29 16:53:22.518 (1609232002518740377 1c0733) replica. fd0.030c000100000011: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232002518], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:53:22.518 (1609232002518747898 1c0733) replica. fd0.030c000100000011: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232002518 -D2020-12-29 16:53:24.512 (1609232004512034152 1c0712) replica.default0.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:53:24.591 (1609232004591443183 1c0719) replica.replica0.0300070f00019c4a: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5026 -D2020-12-29 16:53:24.591 (1609232004591474626 1c0719) replica.replica0.0300070f00019c4a: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5026, confirmed_decree = -1 -D2020-12-29 16:53:24.599 (1609232004599613218 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:24.599 (1609232004599625244 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.599 (1609232004599654612 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.601 (1609232004601008450 1c0719) replica.replica0.0300070f00019c72: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4944 -D2020-12-29 16:53:24.6015 (1609232004601016354 1c0719) replica.replica0.0300070f00019c72: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4944, confirmed_decree = -1 -D2020-12-29 16:53:24.635 (1609232004635302768 1c071a) replica.replica1.0300070f00019cfd: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, 
primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 16:53:24.635 (1609232004635317595 1c071a) replica.replica1.0300070f00019cfd: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 16:53:24.656 (1609232004656800030 1c071a) replica.replica1.0300070f00019d54: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 16:53:24.6565 (1609232004656808553 1c071a) replica.replica1.0300070f00019d54: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 16:53:24.738 (1609232004738180348 1c0719) replica.replica0.0300070f00019ea8: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 16:53:24.7385 (1609232004738191206 1c0719) replica.replica0.0300070f00019ea8: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 16:53:24.768 (1609232004768505455 1c071a) replica.replica1.0300070f00019f28: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 16:53:24.7685 (1609232004768517196 1c071a) replica.replica1.0300070f00019f28: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 16:53:24.853 (1609232004853978593 1c0719) replica.replica0.0300070f0001a071: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 16:53:24.8535 (1609232004853988574 1c0719) replica.replica0.0300070f0001a071: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 16:53:24.896 (1609232004896965154 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:24.896 (1609232004896977702 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.897 (1609232004897000675 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: 
send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.898 (1609232004898500164 1c0719) replica.replica0.0300070f0001a129: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 16:53:24.8985 (1609232004898510448 1c0719) replica.replica0.0300070f0001a129: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 16:53:24.901 (1609232004901831952 1c071a) replica.replica1.0300070f0001a138: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 16:53:24.9015 (1609232004901841860 1c071a) replica.replica1.0300070f0001a138: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 16:53:24.935 (1609232004935582372 1c071a) replica.replica1.0300070f0001a1c6: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5165 -D2020-12-29 16:53:24.9355 (1609232004935591352 1c071a) replica.replica1.0300070f0001a1c6: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5165, confirmed_decree = -1 -D2020-12-29 16:53:24.973 (1609232004973506587 1c0719) replica.replica0.0300070f0001a267: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 16:53:24.9735 (1609232004973516822 1c0719) replica.replica0.0300070f0001a267: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 16:53:24.974 (1609232004974314712 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:24.974 (1609232004974323836 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.974 (1609232004974374881 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.974 (1609232004974591361 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:24.974 (1609232004974601184 1c071a) 
replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.974 (1609232004974629843 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.980 (1609232004980240449 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:24.980 (1609232004980249658 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.980 (1609232004980280894 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.983 (1609232004983206543 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:24.983 (1609232004983215841 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.983 (1609232004983249832 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:24.990 (1609232004990059528 1c0719) replica.replica0.0300070f0001a2a3: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5123 -D2020-12-29 16:53:24.9905 (1609232004990070792 1c0719) replica.replica0.0300070f0001a2a3: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5123, confirmed_decree = -1 -D2020-12-29 16:53:24.992 (1609232004992531409 1c071a) replica.replica1.0300070f0001a2ae: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5190 -D2020-12-29 16:53:24.9925 (1609232004992539454 1c071a) replica.replica1.0300070f0001a2ae: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5190, confirmed_decree = -1 -D2020-12-29 16:53:25.3 (1609232005003566292 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:25.3 (1609232005003581663 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state 
replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:25.3 (1609232005003630872 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:25.170 (1609232005170426499 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 16:53:25.170 (1609232005170447028 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1702 (1609232005170533073 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5119 -D2020-12-29 16:53:25.1709 (1609232005170540422 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1702 (1609232005170648993 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5119 -D2020-12-29 16:53:25.1709 (1609232005170657694 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1702 (1609232005170781536 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5119 -D2020-12-29 16:53:25.1709 (1609232005170797818 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1702 (1609232005170844459 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5119 -D2020-12-29 16:53:25.1709 (1609232005170916259 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5119 -D2020-12-29 16:53:25.1709 (1609232005170921055 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 
16:53:25.1702 (1609232005170924343 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1702 (1609232005170927606 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1702 (1609232005170943827 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1702 (1609232005170981454 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5375 -D2020-12-29 16:53:25.1705 (1609232005170996433 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5119 -D2020-12-29 16:53:25.1719 (1609232005171000133 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1712 (1609232005171003261 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1712 (1609232005171006604 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.1712 (1609232005171108077 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5119 -D2020-12-29 16:53:25.1719 (1609232005171116883 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 16:53:25.171 (1609232005171123446 1c0720) replica.rep_long1.0301000000000001: mutation_log.cpp:1364:garbage_collection(): gc_shared: too few files to delete, file_count_limit = 100, reserved_log_count = 1, reserved_log_size = 7295921, current_log_index = 1 -D2020-12-29 16:53:25.171 (1609232005171130690 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 712551 -D2020-12-29 16:53:25.518 (1609232005518674313 1c0733) replica. 
fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232005518] -D2020-12-29 16:53:25.518 (1609232005518801990 1c0734) replica. fd1.030c000000000015: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232005518], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:53:25.518 (1609232005518810217 1c0734) replica. fd1.030c000000000015: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232005518 -D2020-12-29 16:53:28.518 (1609232008518728422 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232008518] -D2020-12-29 16:53:28.518 (1609232008518861257 1c0734) replica. fd1.030c000000000017: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232008518], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:53:28.518 (1609232008518887278 1c0734) replica. fd1.030c000000000017: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232008518 -D2020-12-29 16:53:30.524 (1609232010524629730 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 1471 MB, memused_res = 243MB -D2020-12-29 16:53:30.525 (1609232010525741833 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232010524), last_report_time_ms(1609232000523) -D2020-12-29 16:53:31.518 (1609232011518791947 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232011518] -D2020-12-29 16:53:31.518 (1609232011518920444 1c0733) replica. fd0.030c000100000013: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232011518], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:53:31.518 (1609232011518928918 1c0733) replica. 
fd0.030c000100000013: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232011518 -D2020-12-29 16:53:33.945 (1609232013945513134 1c071a) replica.replica1.0300070f000230a1: replica_2pc.cpp:168:init_prepare(): 3.0@10.232.52.144:34803: mutation 3.0.3.10000 init_prepare, mutation_tid=77856 -D2020-12-29 16:53:34.511 (1609232014511137930 1c0715) replica.default3.0301000000000004: replica_stub.cpp:1256:query_configuration_by_node(): send query node partitions request to meta server, stored_replicas_count = 20 -D2020-12-29 16:53:34.511 (1609232014511318472 1c0713) replica.default1.030100030000000d: replica_stub.cpp:1287:on_node_query_reply(): query node partitions replied, err = ERR_OK -D2020-12-29 16:53:34.511 (1609232014511361221 1c0713) replica.default1.030100030000000d: replica_stub.cpp:1330:on_node_query_reply(): process query node partitions response for resp.err = ERR_OK, partitions_count(20), gc_replicas_count(0) -D2020-12-29 16:53:34.512 (1609232014512098755 1c0713) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:53:34.518 (1609232014518850024 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232014518] -D2020-12-29 16:53:34.518 (1609232014518959883 1c0733) replica. fd0.030c000100000015: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232014518], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:53:34.518 (1609232014518967174 1c0733) replica. fd0.030c000100000015: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232014518 -D2020-12-29 16:53:34.528 (1609232014528963994 1c070f) replica.io-thrd.1836815: network.cpp:690:on_server_session_accepted(): server session accepted, remote_client = 10.232.52.144:57934, current_count = 6 -D2020-12-29 16:53:34.528 (1609232014528971716 1c070f) replica.io-thrd.1836815: network.cpp:695:on_server_session_accepted(): ip session increased, remote_client = 10.232.52.144:57934, current_count = 6 -D2020-12-29 16:53:34.591 (1609232014591504941 1c0719) replica.replica0.0300070f00023aab: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10132 -D2020-12-29 16:53:34.5915 (1609232014591516821 1c0719) replica.replica0.0300070f00023aab: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10132, confirmed_decree = -1 -D2020-12-29 16:53:34.599 (1609232014599693491 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:34.599 (1609232014599704771 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:34.599 (1609232014599734797 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send 
group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:34.601 (1609232014601147363 1c0719) replica.replica0.0300070f00023ad4: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9975 -D2020-12-29 16:53:34.6015 (1609232014601154625 1c0719) replica.replica0.0300070f00023ad4: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9975, confirmed_decree = -1 -D2020-12-29 16:53:34.621 (1609232014621503773 1c0719) replica.replica0.0300070f00023b22: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.10000 init_prepare, mutation_tid=80538 -D2020-12-29 16:53:34.636 (1609232014636313753 1c071a) replica.replica1.0300070f00023b55: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 16:53:34.6365 (1609232014636330623 1c071a) replica.replica1.0300070f00023b55: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 16:53:34.654 (1609232014654876066 1c071a) replica.replica1.0300070f00023b8b: replica_2pc.cpp:168:init_prepare(): 3.6@10.232.52.144:34803: mutation 3.6.3.10000 init_prepare, mutation_tid=80642 -D2020-12-29 16:53:34.656 (1609232014656934875 1c071a) replica.replica1.0300070f00023b93: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 16:53:34.6565 (1609232014656944821 1c071a) replica.replica1.0300070f00023b93: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 16:53:34.738 (1609232014738324709 1c0719) replica.replica0.0300070f00023ce4: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 16:53:34.7385 (1609232014738337819 1c0719) replica.replica0.0300070f00023ce4: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 16:53:34.768 (1609232014768667766 1c071a) replica.replica1.0300070f00023d64: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 16:53:34.7685 (1609232014768693956 1c071a) replica.replica1.0300070f00023d64: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 16:53:34.854 (1609232014854158208 1c0719) replica.replica0.0300070f00023eb8: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 16:53:34.8545 (1609232014854166778 1c0719) replica.replica0.0300070f00023eb8: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 16:53:34.897 (1609232014897091353 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:53:34.897 (1609232014897104154 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:34.897 (1609232014897129376 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:53:34.898 (1609232014898630862 1c0719) replica.replica0.0300070f00023f72: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 7 -D2020-12-29 16:53:34.8985 (1609232014898642413 1c0719) replica.replica0.0300070f00023f72: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 7, confirmed_decree = -1 -D2020-12-29 16:53:34.901 (1609232014901959726 1c071a) replica.replica1.0300070f00023f81: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 16:53:34.9015 (1609232014901968180 1c071a) replica.replica1.0300070f00023f81: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 16:53:34.935 (1609232014935672321 1c071a) replica.replica1.0300070f00024010: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10301 -D2020-12-29 16:53:34.935 (1609232014935687177 1c071a) replica.replica1.0300070f00024010: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10301, confirmed_decree = -1 -D2020-12-29 16:53:34.973 (1609232014973637405 1c0719) replica.replica0.0300070f000240ae: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = 
check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10, confirmed_decree = -1 -D2020-12-29 16:54:14.902 (1609232054902276325 1c071a) replica.replica1.0300070f0004ba5e: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18 -D2020-12-29 16:54:14.9025 (1609232054902286481 1c071a) replica.replica1.0300070f0004ba5e: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18, confirmed_decree = -1 -D2020-12-29 16:54:14.936 (1609232054936080132 1c071a) replica.replica1.0300070f0004bae9: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22780 -D2020-12-29 16:54:14.9365 (1609232054936090841 1c071a) replica.replica1.0300070f0004bae9: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22780, confirmed_decree = -1 -D2020-12-29 16:54:14.974 (1609232054974067231 1c0719) replica.replica0.0300070f0004bb86: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18 -D2020-12-29 16:54:14.9745 (1609232054974091378 1c0719) replica.replica0.0300070f0004bb86: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18, confirmed_decree = -1 -D2020-12-29 16:54:14.974 (1609232054974835265 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:14.974 (1609232054974844549 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:14.974 (1609232054974873474 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:14.975 (1609232054975295904 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:14.975 (1609232054975306332 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:14.975 (1609232054975329535 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:14.980 (1609232054980681944 1c0719) replica.replica0.03040000000000ae: 
replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:14.980 (1609232054980692694 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:14.980 (1609232054980720842 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:14.983 (1609232054983628016 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:14.983 (1609232054983636630 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:14.983 (1609232054983662907 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:14.990 (1609232054990582842 1c0719) replica.replica0.0300070f0004bbc5: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22651 -D2020-12-29 16:54:14.9905 (1609232054990593553 1c0719) replica.replica0.0300070f0004bbc5: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22651, confirmed_decree = -1 -D2020-12-29 16:54:14.992 (1609232054992949572 1c071a) replica.replica1.0300070f0004bbcf: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22656 -D2020-12-29 16:54:14.9925 (1609232054992977365 1c071a) replica.replica1.0300070f0004bbcf: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22656, confirmed_decree = -1 -D2020-12-29 16:54:15.4 (1609232055004176313 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:15.4 (1609232055004193700 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:15.4 (1609232055004234494 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:16.519 (1609232056519676163 1c0733) replica. 
fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232056519] -D2020-12-29 16:54:16.519 (1609232056519806714 1c0734) replica. fd1.030c000000000027: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232056519], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:54:16.519 (1609232056519814456 1c0734) replica. fd1.030c000000000027: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232056519 -D2020-12-29 16:54:17.134 (1609232057134764889 1c0719) replica.replica0.0300070f0004dcff: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.90000 init_prepare, mutation_tid=252954 -D2020-12-29 16:54:19.519 (1609232059519740547 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232059519] -D2020-12-29 16:54:19.519 (1609232059519865144 1c0733) replica. fd0.030c000100000023: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232059519], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:54:19.519 (1609232059519873778 1c0733) replica. fd0.030c000100000023: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232059519 -D2020-12-29 16:54:20.530 (1609232060530740889 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 1631 MB, memused_res = 373MB -D2020-12-29 16:54:20.531 (1609232060531857401 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232060530), last_report_time_ms(1609232050529) -D2020-12-29 16:54:21.600 (1609232061600540402 1c0719) replica.replica0.0300070f000521ed: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.100000 init_prepare, mutation_tid=270598 -D2020-12-29 16:54:22.519 (1609232062519803166 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232062519] -D2020-12-29 16:54:22.519 (1609232062519976830 1c0733) replica. fd0.030c000100000025: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232062519], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:54:22.519 (1609232062519984451 1c0733) replica. 
fd0.030c000100000025: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232062519 -D2020-12-29 16:54:24.512 (1609232064512381323 1c0714) replica.default2.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:54:24.549 (1609232064549702457 1c071a) replica.replica1.0306000000000005: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.0@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:24.552 (1609232064552435581 1c0719) replica.replica0.0306000100000004: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.3@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:24.592 (1609232064592006044 1c0719) replica.replica0.0300070f0005498b: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25155 -D2020-12-29 16:54:24.5925 (1609232064592014555 1c0719) replica.replica0.0300070f0005498b: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25155, confirmed_decree = -1 -D2020-12-29 16:54:24.600 (1609232064600120100 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:24.600 (1609232064600131243 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.600 (1609232064600159156 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.601 (1609232064601604762 1c0719) replica.replica0.0300070f000549b3: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 24675 -D2020-12-29 16:54:24.6015 (1609232064601612653 1c0719) replica.replica0.0300070f000549b3: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 24675, confirmed_decree = -1 -D2020-12-29 16:54:24.631 (1609232064631629461 1c0714) replica.default2.0300070f00054a03: replica_stub.cpp:2800:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::WRITE, detect_action = replication::detect_action::START -W2020-12-29 16:54:24.631 (1609232064631642282 1c0714) replica.default2.0300070f00054a03: hotkey_collector.cpp:249:on_start_detect(): [3.3@10.232.52.144:34803] still detecting replication::hotkey_type::WRITE hotkey, state is hotkey_collector_state::FINE_DETECTING -D2020-12-29 16:54:24.636 (1609232064636219662 1c071a) replica.replica1.0300070f00054a17: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 13 -D2020-12-29 16:54:24.6365 
(1609232064636229365 1c071a) replica.replica1.0300070f00054a17: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 13, confirmed_decree = -1 -D2020-12-29 16:54:24.640 (1609232064640751673 1c0719) replica.replica0.030600000000000a: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.2@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:24.647 (1609232064647406718 1c071a) replica.replica1.0306000100000009: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.5@10.232.52.144:34803: trigger non-emergency checkpoint -E2020-12-29 16:54:24.656 (1609232064656093162 1c0714) replica.default2.0306000000000040: hotkey_collector.cpp:173:change_state_by_result(): [3.3@10.232.52.144:34803] Find the hotkey: ThisisahotkeyThisisahotkey -D2020-12-29 16:54:24.657 (1609232064657399083 1c071a) replica.replica1.0300070f00054a68: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 16:54:24.6575 (1609232064657412096 1c071a) replica.replica1.0300070f00054a68: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 16:54:24.713 (1609232064713634246 1c071a) replica.replica1.030600000000000f: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.2@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:24.717 (1609232064717820304 1c071a) replica.replica1.030600010000000e: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.1@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:24.738 (1609232064738800238 1c0719) replica.replica0.0300070f00054b9b: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 16:54:24.7385 (1609232064738811528 1c0719) replica.replica0.0300070f00054b9b: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 16:54:24.769 (1609232064769118013 1c071a) replica.replica1.0300070f00054c15: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 16:54:24.7695 (1609232064769130029 1c071a) replica.replica1.0300070f00054c15: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 16:54:24.785 (1609232064785563999 1c0719) replica.replica0.0306000000000014: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.6@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:24.854 (1609232064854595795 1c0719) replica.replica0.0300070f00054d5a: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, 
ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 16:54:24.8545 (1609232064854606349 1c0719) replica.replica0.0300070f00054d5a: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 16:54:24.892 (1609232064892215867 1c0719) replica.replica0.0306000000000019: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.1@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:24.895 (1609232064895772680 1c071a) replica.replica1.0306000100000016: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.7@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:24.897 (1609232064897674954 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:24.897 (1609232064897683709 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.897 (1609232064897707926 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.899 (1609232064899228109 1c0719) replica.replica0.0300070f00054e0e: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11 -D2020-12-29 16:54:24.8995 (1609232064899238711 1c0719) replica.replica0.0300070f00054e0e: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11, confirmed_decree = -1 -D2020-12-29 16:54:24.902 (1609232064902364945 1c071a) replica.replica1.0300070f00054e1c: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 16:54:24.9025 (1609232064902376220 1c071a) replica.replica1.0300070f00054e1c: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 16:54:24.936 (1609232064936188985 1c071a) replica.replica1.0300070f00054e98: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25175 -D2020-12-29 16:54:24.9365 (1609232064936202534 1c071a) replica.replica1.0300070f00054e98: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25175, confirmed_decree = -1 -D2020-12-29 16:54:24.974 (1609232064974088628 1c0719) replica.replica0.0300070f00054f2c: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: 
received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 16:54:24.9745 (1609232064974102012 1c0719) replica.replica0.0300070f00054f2c: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 16:54:24.974 (1609232064974915370 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:24.974 (1609232064974923987 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.974 (1609232064974948629 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.975 (1609232064975385523 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:24.975 (1609232064975397877 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.975 (1609232064975427008 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.980 (1609232064980753171 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:24.980 (1609232064980776515 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.980 (1609232064980802435 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.983 (1609232064983706452 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:24.983 (1609232064983716679 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.983 (1609232064983743645 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:24.990 (1609232064990676495 1c0719) replica.replica0.0300070f00054f6f: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 24956 -D2020-12-29 16:54:24.9905 (1609232064990686533 1c0719) replica.replica0.0300070f00054f6f: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 24956, confirmed_decree = -1 -D2020-12-29 16:54:24.993 (1609232064993049838 1c071a) replica.replica1.0300070f00054f78: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25008 -D2020-12-29 16:54:24.9935 (1609232064993060739 1c071a) replica.replica1.0300070f00054f78: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25008, confirmed_decree = -1 -D2020-12-29 16:54:25.4 (1609232065004279378 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:25.4 (1609232065004297262 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:25.4 (1609232065004328432 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:25.171 (1609232065171966711 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 16:54:25.171 (1609232065171978573 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 20 -D2020-12-29 16:54:25.1720 (1609232065172022611 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 24775 -D2020-12-29 16:54:25.1725 (1609232065172025232 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 20 -D2020-12-29 16:54:25.1720 (1609232065172183154 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 24831 -D2020-12-29 16:54:25.1721 (1609232065172189221 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 20 -D2020-12-29 16:54:25.1720 (1609232065172199969 1c0720) replica.rep_long1.0301000000000001: 
replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 25295 -D2020-12-29 16:54:25.1725 (1609232065172206324 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 10 -D2020-12-29 16:54:25.1720 (1609232065172286543 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 24870 -D2020-12-29 16:54:25.1720 (1609232065172302088 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 24881 -D2020-12-29 16:54:25.1721 (1609232065172308166 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 8 -D2020-12-29 16:54:25.1728 (1609232065172312525 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 13 -D2020-12-29 16:54:25.1723 (1609232065172314374 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 20 -D2020-12-29 16:54:25.1720 (1609232065172316292 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 20 -D2020-12-29 16:54:25.1720 (1609232065172430119 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 25212 -D2020-12-29 16:54:25.1722 (1609232065172466205 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 106955 -D2020-12-29 16:54:25.1725 (1609232065172468827 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 20 -D2020-12-29 16:54:25.1720 (1609232065172484525 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, 
last_durable_decree= 0, plog_max_commit_on_disk = 13 -D2020-12-29 16:54:25.1723 (1609232065172489059 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 20 -D2020-12-29 16:54:25.1720 (1609232065172556586 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 25113 -D2020-12-29 16:54:25.1723 (1609232065172562340 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 20 -D2020-12-29 16:54:25.172 (1609232065172583955 1c0720) replica.rep_long1.0301000000000001: mutation_log.cpp:1537:garbage_collection(): gc_shared: no file can be deleted, file_count_limit = 100, reserved_log_count = 2, reserved_log_size = 49377428, reserved_smallest_log = 1, reserved_largest_log = 2, stop_gc_log_index = 1, stop_gc_replica_count = 0, stop_gc_replica = 3.3, stop_gc_decree_gap = 56346, stop_gc_garbage_max_decree = 0, stop_gc_log_max_decree = 56346 -D2020-12-29 16:54:25.172 (1609232065172592320 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 633034 -D2020-12-29 16:54:25.519 (1609232065519863141 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232065519] -D2020-12-29 16:54:25.519 (1609232065519990849 1c0734) replica. fd1.030c000000000029: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232065519], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:54:25.519 (1609232065519999245 1c0734) replica. fd1.030c000000000029: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232065519 -D2020-12-29 16:54:26.556 (1609232066556603966 1c0719) replica.replica0.0300070f000566fc: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.110000 init_prepare, mutation_tid=288258 -D2020-12-29 16:54:28.519 (1609232068519929359 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232068519] -D2020-12-29 16:54:28.520 (1609232068520064968 1c0734) replica. fd1.030c00000000002b: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232068519], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:54:28.520 (1609232068520071022 1c0734) replica. 
fd1.030c00000000002b: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232068519 -D2020-12-29 16:54:30.531 (1609232070531938934 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 1668 MB, memused_res = 399MB -D2020-12-29 16:54:30.532 (1609232070532937044 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232070531), last_report_time_ms(1609232060530) -D2020-12-29 16:54:30.959 (1609232070959169869 1c0719) replica.replica0.0300070f0005ac9a: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.120000 init_prepare, mutation_tid=306077 -D2020-12-29 16:54:31.519 (1609232071519993885 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232071519] -D2020-12-29 16:54:31.520 (1609232071520111211 1c0733) replica. fd0.030c000100000027: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232071519], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:54:31.520 (1609232071520119687 1c0733) replica. fd0.030c000100000027: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232071519 -D2020-12-29 16:54:34.511 (1609232074511306142 1c0716) replica.default4.0301000000000004: replica_stub.cpp:1256:query_configuration_by_node(): send query node partitions request to meta server, stored_replicas_count = 20 -D2020-12-29 16:54:34.511 (1609232074511496083 1c0712) replica.default0.0301000400000009: replica_stub.cpp:1287:on_node_query_reply(): query node partitions replied, err = ERR_OK -D2020-12-29 16:54:34.511 (1609232074511542360 1c0712) replica.default0.0301000400000009: replica_stub.cpp:1330:on_node_query_reply(): process query node partitions response for resp.err = ERR_OK, partitions_count(20), gc_replicas_count(0) -D2020-12-29 16:54:34.512 (1609232074512438533 1c0713) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:54:34.520 (1609232074520046857 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232074520] -D2020-12-29 16:54:34.520 (1609232074520172094 1c0733) replica. fd0.030c000100000029: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232074520], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:54:34.520 (1609232074520178287 1c0733) replica. 
fd0.030c000100000029: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232074520 -D2020-12-29 16:54:34.592 (1609232074592059547 1c0719) replica.replica0.0300070f0005e3d1: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 27662 -D2020-12-29 16:54:34.5925 (1609232074592068643 1c0719) replica.replica0.0300070f0005e3d1: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 27662, confirmed_decree = -1 -D2020-12-29 16:54:34.600 (1609232074600257788 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:34.600 (1609232074600269551 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:34.600 (1609232074600294955 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:34.601 (1609232074601674946 1c0719) replica.replica0.0300070f0005e3f9: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 27126 -D2020-12-29 16:54:34.6015 (1609232074601689490 1c0719) replica.replica0.0300070f0005e3f9: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 27126, confirmed_decree = -1 -D2020-12-29 16:54:34.622 (1609232074622393633 1c0719) replica.replica0.030600000000001f: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.4@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:34.636 (1609232074636439464 1c071a) replica.replica1.0300070f0005e471: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 17 -D2020-12-29 16:54:34.6365 (1609232074636452423 1c071a) replica.replica1.0300070f0005e471: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 17, confirmed_decree = -1 -D2020-12-29 16:54:34.646 (1609232074646363983 1c0719) replica.replica0.030600010000001c: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.0@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:34.653 (1609232074653375733 1c0714) replica.default2.0300070f0005e48a: replica_stub.cpp:2800:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::WRITE, detect_action = replication::detect_action::START -W2020-12-29 16:54:34.653 (1609232074653386965 1c0714) replica.default2.0300070f0005e48a: hotkey_collector.cpp:258:on_start_detect(): 
[3.3@10.232.52.144:34803] replication::hotkey_type::WRITE hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection -D2020-12-29 16:54:34.657 (1609232074657498153 1c071a) replica.replica1.0300070f0005e49c: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 26 -D2020-12-29 16:54:34.6575 (1609232074657509612 1c071a) replica.replica1.0300070f0005e49c: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 26, confirmed_decree = -1 -D2020-12-29 16:54:34.738 (1609232074738893305 1c0719) replica.replica0.0300070f0005e5e5: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25 -D2020-12-29 16:54:34.738 (1609232074738904843 1c0719) replica.replica0.0300070f0005e5e5: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25, confirmed_decree = -1 -D2020-12-29 16:54:34.758 (1609232074758888350 1c071a) replica.replica1.0306000000000024: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.3@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:54:34.769 (1609232074769195691 1c071a) replica.replica1.0300070f0005e662: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25 -D2020-12-29 16:54:34.7695 (1609232074769207957 1c071a) replica.replica1.0300070f0005e662: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25, confirmed_decree = -1 -D2020-12-29 16:54:34.854 (1609232074854727075 1c0719) replica.replica0.0300070f0005e7c2: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25 -D2020-12-29 16:54:34.8545 (1609232074854750449 1c0719) replica.replica0.0300070f0005e7c2: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25, confirmed_decree = -1 -D2020-12-29 16:54:34.897 (1609232074897772770 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:54:34.897 (1609232074897787474 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:54:34.897 (1609232074897818992 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state 
[... raw replica-server debug log elided from the `debug` file diff hunk: routine group-check broadcasts and acknowledgements, failure-detector beacons, checkpoint/GC timers, and mutation-log maintenance on 10.232.52.144. The hotkey-detection traces in this span show, on replica 3.3@10.232.52.144:34803, a START answered with "hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection"; QUERY requests (printed as detect_action = Unknown) on replicas 3.0, 3.3 and 3.6 answered by query_result() with "Can't get hotkey now, now state: hotkey_collector_state::STOPPED"; a STOP ("hotkey stopped, cache cleared"); and a new START ("starting to detect replication::hotkey_type::WRITE hotkey"). ...]
fd0.030c000100000035: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232110622], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:55:10.622 (1609232110622625346 1c0733) replica. fd0.030c000100000035: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232110622 -D2020-12-29 16:55:13.622 (1609232113622566171 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232113622] -D2020-12-29 16:55:13.622 (1609232113622674838 1c0734) replica. fd1.030c000000000039: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232113622], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:55:13.622 (1609232113622681732 1c0734) replica. fd1.030c000000000039: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232113622 -D2020-12-29 16:55:14.512 (1609232114512743690 1c0716) replica.default4.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:55:14.602 (1609232114602021213 1c0719) replica.replica0.0300070f00085c72: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42433 -D2020-12-29 16:55:14.6025 (1609232114602030202 1c0719) replica.replica0.0300070f00085c72: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42433, confirmed_decree = -1 -D2020-12-29 16:55:14.619 (1609232114619123184 1c0719) replica.replica0.0300070f00085cb7: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42906 -D2020-12-29 16:55:14.6195 (1609232114619132387 1c0719) replica.replica0.0300070f00085cb7: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42906, confirmed_decree = -1 -D2020-12-29 16:55:14.622 (1609232114622637451 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:14.622 (1609232114622646364 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.622 (1609232114622676205 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.636 (1609232114636931765 1c071a) replica.replica1.0300070f00085cfd: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 20 -D2020-12-29 16:55:14.6365 (1609232114636945053 1c071a) replica.replica1.0300070f00085cfd: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 20, confirmed_decree = -1 -D2020-12-29 16:55:14.661 (1609232114661121389 1c071a) replica.replica1.0300070f00085d5f: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 39 -D2020-12-29 16:55:14.6615 (1609232114661130279 1c071a) replica.replica1.0300070f00085d5f: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 39, confirmed_decree = -1 -D2020-12-29 16:55:14.739 (1609232114739301442 1c0719) replica.replica0.0300070f00085e7d: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38 -D2020-12-29 16:55:14.7395 (1609232114739311458 1c0719) replica.replica0.0300070f00085e7d: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38, confirmed_decree = -1 -D2020-12-29 16:55:14.769 (1609232114769586961 1c071a) replica.replica1.0300070f00085efb: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38 -D2020-12-29 16:55:14.7695 (1609232114769597698 1c071a) replica.replica1.0300070f00085efb: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38, confirmed_decree = -1 -D2020-12-29 16:55:14.855 (1609232114855104516 1c0719) replica.replica0.0300070f00086055: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38 -D2020-12-29 16:55:14.8555 (1609232114855114591 1c0719) replica.replica0.0300070f00086055: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38, confirmed_decree = -1 -D2020-12-29 16:55:14.898 (1609232114898314660 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:14.898 (1609232114898330234 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.898 (1609232114898362188 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 
with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.899 (1609232114899789160 1c0719) replica.replica0.0300070f0008610c: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 16:55:14.8995 (1609232114899797943 1c0719) replica.replica0.0300070f0008610c: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 16:55:14.902 (1609232114902771948 1c071a) replica.replica1.0300070f00086119: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38 -D2020-12-29 16:55:14.9025 (1609232114902782118 1c071a) replica.replica1.0300070f00086119: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38, confirmed_decree = -1 -D2020-12-29 16:55:14.936 (1609232114936634771 1c071a) replica.replica1.0300070f000861a7: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42949 -D2020-12-29 16:55:14.9365 (1609232114936666597 1c071a) replica.replica1.0300070f000861a7: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42949, confirmed_decree = -1 -D2020-12-29 16:55:14.974 (1609232114974520130 1c0719) replica.replica0.0300070f00086239: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38 -D2020-12-29 16:55:14.9745 (1609232114974530435 1c0719) replica.replica0.0300070f00086239: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38, confirmed_decree = -1 -D2020-12-29 16:55:14.975 (1609232114975344462 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:14.975 (1609232114975352254 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.975 (1609232114975377461 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.975 (1609232114975925651 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:14.975 (1609232114975932501 1c071a) replica.replica1.0304000100000034: 
replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.975 (1609232114975955310 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.981 (1609232114981157697 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:14.981 (1609232114981166349 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.981 (1609232114981191471 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.984 (1609232114984135156 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:14.984 (1609232114984143848 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.984 (1609232114984167211 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:14.991 (1609232114991136798 1c0719) replica.replica0.0300070f0008627a: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42768 -D2020-12-29 16:55:14.9915 (1609232114991177617 1c0719) replica.replica0.0300070f0008627a: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42768, confirmed_decree = -1 -D2020-12-29 16:55:14.993 (1609232114993488094 1c071a) replica.replica1.0300070f00086285: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42823 -D2020-12-29 16:55:14.9935 (1609232114993496150 1c071a) replica.replica1.0300070f00086285: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42823, confirmed_decree = -1 -D2020-12-29 16:55:15.4U (1609232115004770274 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:15.4 (1609232115004787469 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY 
-D2020-12-29 16:55:15.4 (1609232115004816869 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:16.622 (1609232116622614574 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232116622] -D2020-12-29 16:55:16.622 (1609232116622717023 1c0734) replica. fd1.030c00000000003b: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232116622], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:55:16.622 (1609232116622724636 1c0734) replica. fd1.030c00000000003b: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232116622 -D2020-12-29 16:55:19.622 (1609232119622670646 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232119622] -D2020-12-29 16:55:19.622 (1609232119622787574 1c0733) replica. fd0.030c000100000037: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232119622], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:55:19.622 (1609232119622799426 1c0733) replica. fd0.030c000100000037: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232119622 -D2020-12-29 16:55:20.537 (1609232120537550945 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 1856 MB, memused_res = 521MB -D2020-12-29 16:55:20.538 (1609232120538588362 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232120537), last_report_time_ms(1609232110536) -D2020-12-29 16:55:22.622 (1609232122622727742 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232122622] -D2020-12-29 16:55:22.622 (1609232122622829589 1c0733) replica. fd0.030c000100000039: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232122622], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:55:22.622 (1609232122622860042 1c0733) replica. 
fd0.030c000100000039: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232122622 -D2020-12-29 16:55:24.512 (1609232124512804255 1c0716) replica.default4.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:55:24.602 (1609232124602098540 1c0719) replica.replica0.0300070f0008fb77: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47518 -D2020-12-29 16:55:24.6025 (1609232124602109001 1c0719) replica.replica0.0300070f0008fb77: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47518, confirmed_decree = -1 -D2020-12-29 16:55:24.619 (1609232124619216300 1c0719) replica.replica0.0300070f0008fbbf: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 48017 -D2020-12-29 16:55:24.6195 (1609232124619225785 1c0719) replica.replica0.0300070f0008fbbf: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 48017, confirmed_decree = -1 -D2020-12-29 16:55:24.622 (1609232124622719562 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:24.622 (1609232124622729246 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.622 (1609232124622758873 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.637 (1609232124637113220 1c071a) replica.replica1.0300070f0008fc0a: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 16:55:24.6375 (1609232124637123171 1c071a) replica.replica1.0300070f0008fc0a: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 16:55:24.661 (1609232124661204072 1c071a) replica.replica1.0300070f0008fc6f: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42 -D2020-12-29 16:55:24.6615 (1609232124661217078 1c071a) replica.replica1.0300070f0008fc6f: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 42, confirmed_decree = -1 -D2020-12-29 
16:55:24.743 (1609232124743063355 1c0719) replica.replica0.0300070f0008fd8e: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41 -D2020-12-29 16:55:24.7435 (1609232124743075619 1c0719) replica.replica0.0300070f0008fd8e: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41, confirmed_decree = -1 -D2020-12-29 16:55:24.769 (1609232124769661401 1c071a) replica.replica1.0300070f0008fdf0: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41 -D2020-12-29 16:55:24.769 (1609232124769672471 1c071a) replica.replica1.0300070f0008fdf0: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41, confirmed_decree = -1 -D2020-12-29 16:55:24.855 (1609232124855189908 1c0719) replica.replica0.0300070f0008ff38: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41 -D2020-12-29 16:55:24.8555 (1609232124855198732 1c0719) replica.replica0.0300070f0008ff38: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41, confirmed_decree = -1 -D2020-12-29 16:55:24.898 (1609232124898440492 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:24.898 (1609232124898453100 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.898 (1609232124898480553 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.899 (1609232124899904497 1c0719) replica.replica0.0300070f0008ffee: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 16:55:24.899 (1609232124899912438 1c0719) replica.replica0.0300070f0008ffee: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 16:55:24.902 (1609232124902841663 1c071a) replica.replica1.0300070f0008fffb: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41 -D2020-12-29 16:55:24.9025 (1609232124902849345 1c071a) 
replica.replica1.0300070f0008fffb: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41, confirmed_decree = -1 -D2020-12-29 16:55:24.936 (1609232124936712193 1c071a) replica.replica1.0300070f00090083: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 48012 -D2020-12-29 16:55:24.9365 (1609232124936720985 1c071a) replica.replica1.0300070f00090083: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 48012, confirmed_decree = -1 -D2020-12-29 16:55:24.974 (1609232124974599580 1c0719) replica.replica0.0300070f00090120: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41 -D2020-12-29 16:55:24.9745 (1609232124974625316 1c0719) replica.replica0.0300070f00090120: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 41, confirmed_decree = -1 -D2020-12-29 16:55:24.975 (1609232124975414169 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:24.975 (1609232124975423636 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.975 (1609232124975449432 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.976 (1609232124976124472 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:24.976 (1609232124976132165 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.976 (1609232124976159334 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.981 (1609232124981226603 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:24.981 (1609232124981236552 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.981 (1609232124981261859 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send 
group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.984 (1609232124984199912 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:24.984 (1609232124984208128 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.984 (1609232124984231205 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:24.991 (1609232124991242205 1c0719) replica.replica0.0300070f0009015e: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47772 -D2020-12-29 16:55:24.9915 (1609232124991252753 1c0719) replica.replica0.0300070f0009015e: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47772, confirmed_decree = -1 -D2020-12-29 16:55:24.993 (1609232124993578728 1c071a) replica.replica1.0300070f00090168: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47868 -D2020-12-29 16:55:24.9935 (1609232124993604913 1c071a) replica.replica1.0300070f00090168: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47868, confirmed_decree = -1 -D2020-12-29 16:55:25.4 (1609232125004866184 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:25.4 (1609232125004879979 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:25.4 (1609232125004912741 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:25.1730 (1609232125173429315 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 16:55:25.173 (1609232125173440827 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 40 -D2020-12-29 16:55:25.1730 (1609232125173478084 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 47742 
-D2020-12-29 16:55:25.1732 (1609232125173482012 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 40 -D2020-12-29 16:55:25.1730 (1609232125173578078 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 47787 -D2020-12-29 16:55:25.1737 (1609232125173582265 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 40 -D2020-12-29 16:55:25.1730 (1609232125173680341 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 48076 -D2020-12-29 16:55:25.1736 (1609232125173685019 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 21 -D2020-12-29 16:55:25.1731 (1609232125173798948 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 47657 -D2020-12-29 16:55:25.1737 (1609232125173914070 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 47457 -D2020-12-29 16:55:25.1737 (1609232125173932238 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 18 -D2020-12-29 16:55:25.1738 (1609232125173936014 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 20 -D2020-12-29 16:55:25.1730 (1609232125173937885 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 41 -D2020-12-29 16:55:25.1731 (1609232125173939758 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 40 -D2020-12-29 16:55:25.1740 (1609232125174026106 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 
3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 48068 -D2020-12-29 16:55:25.1748 (1609232125174097687 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 188462 -D2020-12-29 16:55:25.1742 (1609232125174100564 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 40 -D2020-12-29 16:55:25.1740 (1609232125174104006 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 20 -D2020-12-29 16:55:25.1740 (1609232125174105876 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 40 -D2020-12-29 16:55:25.1740 (1609232125174119028 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 48118 -D2020-12-29 16:55:25.1748 (1609232125174120793 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 40 -D2020-12-29 16:55:25.174 (1609232125174137466 1c0720) replica.rep_long1.0301000000000001: mutation_log.cpp:1537:garbage_collection(): gc_shared: no file can be deleted, file_count_limit = 100, reserved_log_count = 3, reserved_log_size = 91541607, reserved_smallest_log = 1, reserved_largest_log = 3, stop_gc_log_index = 1, stop_gc_replica_count = 0, stop_gc_replica = 3.3, stop_gc_decree_gap = 56346, stop_gc_garbage_max_decree = 0, stop_gc_log_max_decree = 56346 -D2020-12-29 16:55:25.174 (1609232125174143332 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 721201 -D2020-12-29 16:55:25.622 (1609232125622809252 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232125622] -D2020-12-29 16:55:25.622 (1609232125622912916 1c0734) replica. fd1.030c00000000003d: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232125622], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:55:25.622 (1609232125622919858 1c0734) replica. 
fd1.030c00000000003d: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232125622 -D2020-12-29 16:55:28.442 (1609232128442132963 1c0719) replica.replica0.0300070f000930a7: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.190000 init_prepare, mutation_tid=536362 -D2020-12-29 16:55:28.622 (1609232128622875799 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232128622] -D2020-12-29 16:55:28.623 (1609232128623002658 1c0734) replica. fd1.030c00000000003f: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232128622], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:55:28.623 (1609232128623010050 1c0734) replica. fd1.030c00000000003f: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232128622 -D2020-12-29 16:55:29.41 (1609232129041310100 1c071a) replica.replica1.0300070f00093a42: replica_2pc.cpp:168:init_prepare(): 3.0@10.232.52.144:34803: mutation 3.0.3.50000 init_prepare, mutation_tid=538819 -D2020-12-29 16:55:30.116 (1609232130116654846 1c071a) replica.replica1.0300070f00094b47: replica_2pc.cpp:168:init_prepare(): 3.6@10.232.52.144:34803: mutation 3.6.3.50000 init_prepare, mutation_tid=543176 -D2020-12-29 16:55:30.538 (1609232130538657252 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 1877 MB, memused_res = 548MB -D2020-12-29 16:55:30.539 (1609232130539741333 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232130538), last_report_time_ms(1609232120537) -D2020-12-29 16:55:31.622 (1609232131622924333 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232131622] -D2020-12-29 16:55:31.623 (1609232131623031178 1c0733) replica. fd0.030c00010000003b: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232131622], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:55:31.623 (1609232131623040039 1c0733) replica. 
fd0.030c00010000003b: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232131622 -D2020-12-29 16:55:34.511 (1609232134511546533 1c0715) replica.default3.0301000000000004: replica_stub.cpp:1256:query_configuration_by_node(): send query node partitions request to meta server, stored_replicas_count = 20 -D2020-12-29 16:55:34.511 (1609232134511730199 1c0712) replica.default0.0301000300000013: replica_stub.cpp:1287:on_node_query_reply(): query node partitions replied, err = ERR_OK -D2020-12-29 16:55:34.511 (1609232134511778204 1c0712) replica.default0.0301000300000013: replica_stub.cpp:1330:on_node_query_reply(): process query node partitions response for resp.err = ERR_OK, partitions_count(20), gc_replicas_count(0) -D2020-12-29 16:55:34.512 (1609232134512850462 1c0716) replica.default4.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:55:34.602 (1609232134602190774 1c0719) replica.replica0.0300070f000992f8: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 52430 -D2020-12-29 16:55:34.6025 (1609232134602219314 1c0719) replica.replica0.0300070f000992f8: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 52430, confirmed_decree = -1 -D2020-12-29 16:55:34.619 (1609232134619310974 1c0719) replica.replica0.0300070f0009933c: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 52901 -D2020-12-29 16:55:34.6195 (1609232134619322023 1c0719) replica.replica0.0300070f0009933c: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 52901, confirmed_decree = -1 -D2020-12-29 16:55:34.622 (1609232134622796956 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:34.622 (1609232134622806798 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:34.622 (1609232134622832408 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:55:34.622 (1609232134622976173 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232134622] -D2020-12-29 16:55:34.623 (1609232134623088702 1c0733) replica. fd0.030c00010000003d: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232134622], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:55:34.623 (1609232134623095285 1c0733) replica. 
fd0.030c00010000003d: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232134622 -D2020-12-29 16:55:34.637 (1609232134637258567 1c071a) replica.replica1.0300070f0009937d: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22 -D2020-12-29 16:55:34.6375 (1609232134637272906 1c071a) replica.replica1.0300070f0009937d: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22, confirmed_decree = -1 -D2020-12-29 16:55:34.661 (1609232134661295032 1c071a) replica.replica1.0300070f000993da: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47 -D2020-12-29 16:55:34.6615 (1609232134661304546 1c071a) replica.replica1.0300070f000993da: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47, confirmed_decree = -1 -D2020-12-29 16:55:34.739 (1609232134739467493 1c0719) replica.replica0.0300070f0009951a: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 45 -D2020-12-29 16:55:34.7395 (1609232134739495822 1c0719) replica.replica0.0300070f0009951a: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 45, confirmed_decree = -1 -D2020-12-29 16:55:34.769 (1609232134769829282 1c071a) replica.replica1.0300070f0009957e: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 45 -D2020-12-29 16:55:34.7695 (1609232134769842836 1c071a) replica.replica1.0300070f0009957e: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 45, confirmed_decree = -1 -D2020-12-29 16:55:34.855 (1609232134855311979 1c0719) replica.replica0.0300070f000996ce: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 45 -D2020-12-29 16:55:34.8555 (1609232134855323120 1c0719) replica.replica0.0300070f000996ce: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 45, confirmed_decree = -1 -D2020-12-29 16:55:34.898 (1609232134898583124 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:55:34.898 (1609232134898598198 1c0719) 
fd0.030c000100000047: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232167623 -D2020-12-29 16:56:08.170 (1609232168170668198 1c0719) replica.replica0.03040000001fc088: mutation_log.cpp:820:mark_new_offset(): switch log file by limit, old_file = /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/reps/3.3.pegasus/plog/log.1.0, size = 33585090 -D2020-12-29 16:56:08.170o (1609232168170721840 1c0719) replica.replica0.03040000001fc088: mutation_log.cpp:731:create_new_log_file(): create new log file /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/reps/3.3.pegasus/plog/log.2.33585090 succeed, time_used = 41037 ns -D2020-12-29 16:56:10.543 (1609232170543599593 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2004 MB, memused_res = 648MB -D2020-12-29 16:56:10.544 (1609232170544786865 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232170543), last_report_time_ms(1609232160542) -D2020-12-29 16:56:10.623 (1609232170623731033 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232170623] -D2020-12-29 16:56:10.623 (1609232170623868933 1c0733) replica. fd0.030c000100000049: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232170623], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:56:10.623 (1609232170623876344 1c0733) replica. fd0.030c000100000049: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232170623 -D2020-12-29 16:56:11.26 (1609232171026284776 1c0719) replica.replica0.0300070f000ba4d6: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.210000 init_prepare, mutation_tid=697092 -D2020-12-29 16:56:11.896 (1609232171896597224 1c071a) replica.replica1.0300070f000bb1c6: replica_2pc.cpp:168:init_prepare(): 3.0@10.232.52.144:34803: mutation 3.0.3.70000 init_prepare, mutation_tid=700404 -D2020-12-29 16:56:12.143 (1609232172143289859 1c071a) replica.replica1.0300070f000bb498: replica_2pc.cpp:168:init_prepare(): 3.6@10.232.52.144:34803: mutation 3.6.3.70000 init_prepare, mutation_tid=701126 -D2020-12-29 16:56:13.623 (1609232173623787404 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232173623] -D2020-12-29 16:56:13.623 (1609232173623903056 1c0734) replica. fd1.030c00000000004d: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232173623], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:56:13.623 (1609232173623910289 1c0734) replica. 
fd1.030c00000000004d: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232173623 -D2020-12-29 16:56:14.513 (1609232174513083132 1c0716) replica.default4.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:56:14.602 (1609232174602548473 1c0719) replica.replica0.0300070f000bdb75: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71315 -D2020-12-29 16:56:14.6025 (1609232174602561138 1c0719) replica.replica0.0300070f000bdb75: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71315, confirmed_decree = -1 -D2020-12-29 16:56:14.619 (1609232174619631104 1c0719) replica.replica0.0300070f000bdbbd: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71507 -D2020-12-29 16:56:14.6195 (1609232174619639234 1c0719) replica.replica0.0300070f000bdbbd: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71507, confirmed_decree = -1 -D2020-12-29 16:56:14.622 (1609232174622427948 1c0719) replica.replica0.030600000000001f: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.4@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:14.623 (1609232174623125381 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:14.623 (1609232174623158045 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.623 (1609232174623184266 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.637 (1609232174637946437 1c071a) replica.replica1.0300070f000bdc06: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 28 -D2020-12-29 16:56:14.637 (1609232174637960668 1c071a) replica.replica1.0300070f000bdc06: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 28, confirmed_decree = -1 -D2020-12-29 16:56:14.646 (1609232174646396569 1c0719) replica.replica0.030600010000001c: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.0@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:14.661 (1609232174661664958 1c071a) replica.replica1.0300070f000bdc65: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 60 -D2020-12-29 16:56:14.6615 (1609232174661679986 1c071a) replica.replica1.0300070f000bdc65: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 60, confirmed_decree = -1 -D2020-12-29 16:56:14.739 (1609232174739873320 1c0719) replica.replica0.0300070f000bdda3: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58 -D2020-12-29 16:56:14.7395 (1609232174739885212 1c0719) replica.replica0.0300070f000bdda3: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58, confirmed_decree = -1 -D2020-12-29 16:56:14.758 (1609232174758931012 1c071a) replica.replica1.0306000000000024: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.3@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:14.770 (1609232174770141085 1c071a) replica.replica1.0300070f000bde1e: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58 -D2020-12-29 16:56:14.7705 (1609232174770151638 1c071a) replica.replica1.0300070f000bde1e: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58, confirmed_decree = -1 -D2020-12-29 16:56:14.855 (1609232174855880348 1c0719) replica.replica0.0300070f000bdf66: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58 -D2020-12-29 16:56:14.8555 (1609232174855888703 1c0719) replica.replica0.0300070f000bdf66: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58, confirmed_decree = -1 -D2020-12-29 16:56:14.899 (1609232174899110298 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:14.899 (1609232174899124209 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.899 (1609232174899158889 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.900 (1609232174900521024 1c0719) replica.replica0.0300070f000be013: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 31 -D2020-12-29 16:56:14.9005 (1609232174900531233 1c0719) 
replica.replica0.0300070f000be013: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 31, confirmed_decree = -1 -D2020-12-29 16:56:14.903 (1609232174903281165 1c071a) replica.replica1.0300070f000be01f: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58 -D2020-12-29 16:56:14.9035 (1609232174903291706 1c071a) replica.replica1.0300070f000be01f: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58, confirmed_decree = -1 -D2020-12-29 16:56:14.937 (1609232174937176636 1c071a) replica.replica1.0300070f000be0a6: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71565 -D2020-12-29 16:56:14.9375 (1609232174937188373 1c071a) replica.replica1.0300070f000be0a6: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71565, confirmed_decree = -1 -D2020-12-29 16:56:14.975 (1609232174975076597 1c0719) replica.replica0.0300070f000be140: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58 -D2020-12-29 16:56:14.9755 (1609232174975085536 1c0719) replica.replica0.0300070f000be140: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 58, confirmed_decree = -1 -D2020-12-29 16:56:14.975 (1609232174975836124 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:14.975 (1609232174975845035 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.975 (1609232174975876731 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.976 (1609232174976676627 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:14.976 (1609232174976704045 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.976 (1609232174976735956 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state 
replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.981 (1609232174981666744 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:14.981 (1609232174981675203 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.981 (1609232174981700961 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.984 (1609232174984548158 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:14.984 (1609232174984557072 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.984 (1609232174984586176 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:14.991 (1609232174991693854 1c0719) replica.replica0.0300070f000be182: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71236 -D2020-12-29 16:56:14.9915 (1609232174991707586 1c0719) replica.replica0.0300070f000be182: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71236, confirmed_decree = -1 -D2020-12-29 16:56:14.993 (1609232174993941076 1c071a) replica.replica1.0300070f000be18c: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71101 -D2020-12-29 16:56:14.9935 (1609232174993952053 1c071a) replica.replica1.0300070f000be18c: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 71101, confirmed_decree = -1 -D2020-12-29 16:56:15.5 (1609232175005543441 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:15.5 (1609232175005554800 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:15.5 (1609232175005580316 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:16.623 (1609232176623845278 1c0733) replica. 
fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232176623] -D2020-12-29 16:56:16.623 (1609232176623973371 1c0734) replica. fd1.030c00000000004f: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232176623], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:56:16.623 (1609232176623982169 1c0734) replica. fd1.030c00000000004f: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232176623 -D2020-12-29 16:56:19.623 (1609232179623913604 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232179623] -D2020-12-29 16:56:19.624 (1609232179624008515 1c0733) replica. fd0.030c00010000004b: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232179623], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:56:19.624 (1609232179624014733 1c0733) replica. fd0.030c00010000004b: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232179623 -D2020-12-29 16:56:20.544 (1609232180544860694 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 2022 MB, memused_res = 671MB -D2020-12-29 16:56:20.545 (1609232180545904335 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232180544), last_report_time_ms(1609232170543) -D2020-12-29 16:56:22.623 (1609232182623971189 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232182623] -D2020-12-29 16:56:22.624 (1609232182624132510 1c0733) replica. fd0.030c00010000004d: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232182623], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:56:22.624 (1609232182624144448 1c0733) replica. 
fd0.030c00010000004d: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232182623 -D2020-12-29 16:56:24.513 (1609232184513156572 1c0713) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:56:24.602 (1609232184602641030 1c0719) replica.replica0.0300070f000c714d: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 76081 -D2020-12-29 16:56:24.6025 (1609232184602657163 1c0719) replica.replica0.0300070f000c714d: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 76081, confirmed_decree = -1 -D2020-12-29 16:56:24.619 (1609232184619749881 1c0719) replica.replica0.0300070f000c7180: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 76305 -D2020-12-29 16:56:24.6195 (1609232184619762484 1c0719) replica.replica0.0300070f000c7180: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 76305, confirmed_decree = -1 -D2020-12-29 16:56:24.623 (1609232184623238758 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:24.623 (1609232184623251275 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.623 (1609232184623307998 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.638 (1609232184638150394 1c071a) replica.replica1.0300070f000c71b5: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 29 -D2020-12-29 16:56:24.6385 (1609232184638172624 1c071a) replica.replica1.0300070f000c71b5: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 29, confirmed_decree = -1 -D2020-12-29 16:56:24.656 (1609232184656518732 1c071a) replica.replica1.0306000100000024: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.0@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:24.656 (1609232184656771861 1c0719) replica.replica0.0306000000000041: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.3@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:24.661 (1609232184661770335 1c071a) replica.replica1.0300070f000c71f7: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 63 -D2020-12-29 16:56:24.6615 (1609232184661783925 1c071a) replica.replica1.0300070f000c71f7: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 63, confirmed_decree = -1 -D2020-12-29 16:56:24.739 (1609232184739973729 1c0719) replica.replica0.0300070f000c72d7: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61 -D2020-12-29 16:56:24.739 (1609232184739988154 1c0719) replica.replica0.0300070f000c72d7: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61, confirmed_decree = -1 -D2020-12-29 16:56:24.770 (1609232184770244232 1c071a) replica.replica1.0300070f000c7335: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61 -D2020-12-29 16:56:24.7705 (1609232184770260616 1c071a) replica.replica1.0300070f000c7335: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61, confirmed_decree = -1 -D2020-12-29 16:56:24.808 (1609232184808913302 1c071a) replica.replica1.0306000100000029: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.6@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:24.809 (1609232184809186224 1c0719) replica.replica0.0306000000000046: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.5@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:24.863 (1609232184863093160 1c0719) replica.replica0.0300070f000c73f8: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61 -D2020-12-29 16:56:24.8635 (1609232184863105214 1c0719) replica.replica0.0300070f000c73f8: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61, confirmed_decree = -1 -D2020-12-29 16:56:24.899 (1609232184899239158 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:24.899 (1609232184899253125 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.899 (1609232184899277548 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.900 (1609232184900601658 1c0719) replica.replica0.0300070f000c7486: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 
10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 31 -D2020-12-29 16:56:24.9005 (1609232184900611284 1c0719) replica.replica0.0300070f000c7486: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 31, confirmed_decree = -1 -D2020-12-29 16:56:24.903 (1609232184903364830 1c071a) replica.replica1.0300070f000c7492: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61 -D2020-12-29 16:56:24.9035 (1609232184903373318 1c071a) replica.replica1.0300070f000c7492: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61, confirmed_decree = -1 -D2020-12-29 16:56:24.916 (1609232184916019281 1c071a) replica.replica1.030600010000002e: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.2@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:24.929 (1609232184929756785 1c071a) replica.replica1.030600000000004c: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.4@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:24.937 (1609232184937256062 1c071a) replica.replica1.0300070f000c7516: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 76274 -D2020-12-29 16:56:24.9375 (1609232184937266127 1c071a) replica.replica1.0300070f000c7516: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 76274, confirmed_decree = -1 -D2020-12-29 16:56:24.975 (1609232184975173999 1c0719) replica.replica0.0300070f000c75b2: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61 -D2020-12-29 16:56:24.9755 (1609232184975184052 1c0719) replica.replica0.0300070f000c75b2: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 61, confirmed_decree = -1 -D2020-12-29 16:56:24.975 (1609232184975919372 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:24.975 (1609232184975929404 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.975 (1609232184975976581 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.976 (1609232184976778060 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 
1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:24.976 (1609232184976785624 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.976 (1609232184976807187 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.981 (1609232184981740048 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:24.981 (1609232184981749212 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.981 (1609232184981776091 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.984 (1609232184984627654 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:24.984 (1609232184984636087 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.984 (1609232184984663269 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:24.986 (1609232184986818203 1c0719) replica.replica0.0306000100000034: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.7@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:56:24.991 (1609232184991871854 1c0719) replica.replica0.0300070f000c75e5: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 75924 -D2020-12-29 16:56:24.9915 (1609232184991885380 1c0719) replica.replica0.0300070f000c75e5: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 75924, confirmed_decree = -1 -D2020-12-29 16:56:24.994 (1609232184994054033 1c071a) replica.replica1.0300070f000c75ec: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 75780 -D2020-12-29 16:56:24.9945 (1609232184994068404 1c071a) replica.replica1.0300070f000c75ec: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 75780, confirmed_decree = -1 -D2020-12-29 16:56:25.5U (1609232185005625044 1c071a) replica.replica1.03040001000000c1: 
replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:56:25.5 (1609232185005673185 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:25.5 (1609232185005721183 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:56:25.175 (1609232185175120413 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 16:56:25.175 (1609232185175142805 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 60 -D2020-12-29 16:56:25.1750 (1609232185175295386 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 76145 -D2020-12-29 16:56:25.1755 (1609232185175301070 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 60 -D2020-12-29 16:56:25.1750 (1609232185175425862 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 75685 -D2020-12-29 16:56:25.1755 (1609232185175431179 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 60 -D2020-12-29 16:56:25.1750 (1609232185175556131 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 76318 -D2020-12-29 16:56:25.1758 (1609232185175564245 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 31 -D2020-12-29 16:56:25.1751 (1609232185175692006 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 75813 -D2020-12-29 16:56:25.1753 (1609232185175766759 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 
76263 -D2020-12-29 16:56:25.1753 (1609232185175774877 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 29 -D2020-12-29 16:56:25.1759 (1609232185175781512 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 28 -D2020-12-29 16:56:25.1758 (1609232185175784358 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 62 -D2020-12-29 16:56:25.1752 (1609232185175808564 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 60 -D2020-12-29 16:56:25.1750 (1609232185175868713 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 76309 -D2020-12-29 16:56:25.1759 (1609232185175910818 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 216695 -D2020-12-29 16:56:25.1755 (1609232185175913997 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 60 -D2020-12-29 16:56:25.1750 (1609232185175920966 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 28 -D2020-12-29 16:56:25.1758 (1609232185175923362 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 60 -D2020-12-29 16:56:25.1750 (1609232185175984289 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 76268 -D2020-12-29 16:56:25.1758 (1609232185175987885 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 60 -D2020-12-29 16:56:25.176 (1609232185176015128 1c0720) replica.rep_long1.0301000000000001: mutation_log.cpp:1537:garbage_collection(): gc_shared: no file can be deleted, 
file_count_limit = 100, reserved_log_count = 4, reserved_log_size = 130600441, reserved_smallest_log = 1, reserved_largest_log = 4, stop_gc_log_index = 1, stop_gc_replica_count = 0, stop_gc_replica = 3.3, stop_gc_decree_gap = 56346, stop_gc_garbage_max_decree = 0, stop_gc_log_max_decree = 56346 -D2020-12-29 16:56:25.176 (1609232185176024577 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 986514 -D2020-12-29 16:56:25.624 (1609232185624051705 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232185624] -D2020-12-29 16:56:25.624 (1609232185624183956 1c0734) replica. fd1.030c000000000051: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232185624], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:56:25.624 (1609232185624196006 1c0734) replica. fd1.030c000000000051: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232185624 -D2020-12-29 16:56:28.624 (1609232188624111500 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232188624] -D2020-12-29 16:56:28.624 (1609232188624224432 1c0734) replica. fd1.030c000000000053: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232188624], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:56:28.624 (1609232188624231324 1c0734) replica. fd1.030c000000000053: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232188624 -D2020-12-29 16:56:30.545 (1609232190545997097 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2043 MB, memused_res = 697MB -D2020-12-29 16:56:30.547 (1609232190547496148 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232190545), last_report_time_ms(1609232180544) -D2020-12-29 16:56:31.455 (1609232191455972422 1c071a) replica.replica1.0300070f000cc96c: mutation_log.cpp:820:mark_new_offset(): switch log file by limit, old_file = /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.4.100663439, size = 33554604 -D2020-12-29 16:56:31.456o (1609232191456029160 1c071a) replica.replica1.0300070f000cc96c: mutation_log.cpp:731:create_new_log_file(): create new log file /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.5.134218043 succeed, time_used = 43193 ns -D2020-12-29 16:56:31.624 (1609232191624181297 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232191624] -D2020-12-29 16:56:31.624 (1609232191624516058 1c0733) replica. fd0.030c00010000004f: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232191624], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:56:31.624 (1609232191624533710 1c0733) replica. 
[raw replica-server runtime log dump from the accidentally committed `debug` file elided: repeated failure_detector send_beacon/end_ping traces, replica_check group-check broadcasts and acknowledgements, mutation_log garbage-collection reports, duplication_sync and perf-counter updates, replica_2pc init_prepare entries, plus a few replica_stub on_detect_hotkey / hotkey_collector query_result messages such as "Can't get hotkey now, now state: hotkey_collector_state::STOPPED"]
2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:04.977 (1609232224977234228 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:04.977 (1609232224977244013 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:04.977 (1609232224977276534 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:04.982 (1609232224982040369 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:04.982 (1609232224982050832 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:04.982 (1609232224982079043 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:04.984 (1609232224984970035 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:04.984 (1609232224984978705 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:04.985 (1609232224985003000 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:04.992 (1609232224992174750 1c0719) replica.replica0.0300070f000ec3a1: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 95028 -D2020-12-29 16:57:04.9925 (1609232224992185401 1c0719) replica.replica0.0300070f000ec3a1: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 95028, confirmed_decree = -1 -D2020-12-29 16:57:04.994 (1609232224994333993 1c071a) replica.replica1.0300070f000ec3ab: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 94496 -D2020-12-29 16:57:04.9945 (1609232224994342522 1c071a) replica.replica1.0300070f000ec3ab: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 94496, confirmed_decree = -1 -D2020-12-29 16:57:05.6 (1609232225006050974 1c071a) 
replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:05.6 (1609232225006064167 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:05.6 (1609232225006092208 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:07.624 (1609232227624903369 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232227624] -D2020-12-29 16:57:07.625 (1609232227625014392 1c0733) replica. fd0.030c00010000005b: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232227624], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:57:07.625 (1609232227625022338 1c0733) replica. fd0.030c00010000005b: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232227624 -D2020-12-29 16:57:10.550 (1609232230550999742 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2123 MB, memused_res = 799MB -D2020-12-29 16:57:10.552 (1609232230552067854 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232230550), last_report_time_ms(1609232220549) -D2020-12-29 16:57:10.624 (1609232230624960945 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232230624] -D2020-12-29 16:57:10.625 (1609232230625075506 1c0733) replica. fd0.030c00010000005d: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232230624], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:57:10.625 (1609232230625085390 1c0733) replica. fd0.030c00010000005d: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232230624 -D2020-12-29 16:57:13.625 (1609232233625012309 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232233625] -D2020-12-29 16:57:13.625 (1609232233625112332 1c0734) replica. fd1.030c000000000061: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232233625], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:57:13.625 (1609232233625120266 1c0734) replica. 
fd1.030c000000000061: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232233625 -D2020-12-29 16:57:13.728 (1609232233728764336 1c0719) replica.replica0.0300070f000f4c51: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.240000 init_prepare, mutation_tid=936447 -D2020-12-29 16:57:14.427 (1609232234427126268 1c071a) replica.replica1.0300070f000f5750: replica_2pc.cpp:168:init_prepare(): 3.6@10.232.52.144:34803: mutation 3.6.3.100000 init_prepare, mutation_tid=939262 -D2020-12-29 16:57:14.513 (1609232234513452676 1c0713) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:57:14.603 (1609232234603056621 1c0719) replica.replica0.0300070f000f5a0c: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 99902 -D2020-12-29 16:57:14.6035 (1609232234603067793 1c0719) replica.replica0.0300070f000f5a0c: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 99902, confirmed_decree = -1 -D2020-12-29 16:57:14.620 (1609232234620166028 1c0719) replica.replica0.0300070f000f5a52: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 100041 -D2020-12-29 16:57:14.620 (1609232234620174943 1c0719) replica.replica0.0300070f000f5a52: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 100041, confirmed_decree = -1 -D2020-12-29 16:57:14.623 (1609232234623662510 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:14.623 (1609232234623672094 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.623 (1609232234623702883 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.639 (1609232234639092726 1c071a) replica.replica1.0300070f000f5a9f: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 39 -D2020-12-29 16:57:14.6395 (1609232234639100939 1c071a) replica.replica1.0300070f000f5a9f: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 39, confirmed_decree = -1 -D2020-12-29 16:57:14.662 (1609232234662138442 1c071a) replica.replica1.0300070f000f5afa: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, 
status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81 -D2020-12-29 16:57:14.6625 (1609232234662160797 1c071a) replica.replica1.0300070f000f5afa: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81, confirmed_decree = -1 -D2020-12-29 16:57:14.740 (1609232234740434349 1c0719) replica.replica0.0300070f000f5c29: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78 -D2020-12-29 16:57:14.7405 (1609232234740443859 1c0719) replica.replica0.0300070f000f5c29: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78, confirmed_decree = -1 -D2020-12-29 16:57:14.742 (1609232234742987607 1c071a) replica.replica1.0300070f000f5c33: replica_2pc.cpp:168:init_prepare(): 3.0@10.232.52.144:34803: mutation 3.0.3.100000 init_prepare, mutation_tid=940508 -D2020-12-29 16:57:14.770 (1609232234770738642 1c071a) replica.replica1.0300070f000f5c9d: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78 -D2020-12-29 16:57:14.7705 (1609232234770749868 1c071a) replica.replica1.0300070f000f5c9d: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78, confirmed_decree = -1 -D2020-12-29 16:57:14.856 (1609232234856465701 1c0719) replica.replica0.0300070f000f5de1: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78 -D2020-12-29 16:57:14.8565 (1609232234856475113 1c0719) replica.replica0.0300070f000f5de1: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78, confirmed_decree = -1 -D2020-12-29 16:57:14.899 (1609232234899822787 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:14.899 (1609232234899838577 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.899 (1609232234899866556 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.901 (1609232234901292182 1c0719) replica.replica0.0300070f000f5e84: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38 -D2020-12-29 16:57:14.9015 (1609232234901303724 1c0719) 
replica.replica0.0300070f000f5e84: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 38, confirmed_decree = -1 -D2020-12-29 16:57:14.903 (1609232234903782900 1c071a) replica.replica1.0300070f000f5e8f: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78 -D2020-12-29 16:57:14.9035 (1609232234903805212 1c071a) replica.replica1.0300070f000f5e8f: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78, confirmed_decree = -1 -D2020-12-29 16:57:14.939 (1609232234939149506 1c071a) replica.replica1.0300070f000f5f04: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 99959 -D2020-12-29 16:57:14.9395 (1609232234939160921 1c071a) replica.replica1.0300070f000f5f04: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 99959, confirmed_decree = -1 -D2020-12-29 16:57:14.975 (1609232234975592811 1c0719) replica.replica0.0300070f000f5f83: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78 -D2020-12-29 16:57:14.9755 (1609232234975602844 1c0719) replica.replica0.0300070f000f5f83: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 78, confirmed_decree = -1 -D2020-12-29 16:57:14.976 (1609232234976366155 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:14.976 (1609232234976375167 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.976 (1609232234976404987 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.977 (1609232234977378206 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:14.977 (1609232234977386497 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.977 (1609232234977407024 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state 
replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.982 (1609232234982120717 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:14.982 (1609232234982133057 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.982 (1609232234982158942 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.985 (1609232234985034842 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:14.985 (1609232234985044574 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.985 (1609232234985072005 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:14.992 (1609232234992258360 1c0719) replica.replica0.0300070f000f5fc3: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 99957 -D2020-12-29 16:57:14.9925 (1609232234992269187 1c0719) replica.replica0.0300070f000f5fc3: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 99957, confirmed_decree = -1 -D2020-12-29 16:57:14.994 (1609232234994416166 1c071a) replica.replica1.0300070f000f5fcc: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 99415 -D2020-12-29 16:57:14.9945 (1609232234994426401 1c071a) replica.replica1.0300070f000f5fcc: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 99415, confirmed_decree = -1 -D2020-12-29 16:57:15.6U (1609232235006134216 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:15.6 (1609232235006146832 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:15.6 (1609232235006170790 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:16.625 (1609232236625082404 1c0733) replica. 
fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232236625] -D2020-12-29 16:57:16.625 (1609232236625284067 1c0734) replica. fd1.030c000000000063: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232236625], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:57:16.625 (1609232236625293298 1c0734) replica. fd1.030c000000000063: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232236625 -D2020-12-29 16:57:19.625 (1609232239625204049 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232239625] -D2020-12-29 16:57:19.625 (1609232239625326501 1c0733) replica. fd0.030c00010000005f: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232239625], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:57:19.625 (1609232239625335642 1c0733) replica. fd0.030c00010000005f: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232239625 -D2020-12-29 16:57:20.552 (1609232240552163472 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 2138 MB, memused_res = 819MB -D2020-12-29 16:57:20.553 (1609232240553173570 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232240552), last_report_time_ms(1609232230550) -D2020-12-29 16:57:22.625 (1609232242625264778 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232242625] -D2020-12-29 16:57:22.625 (1609232242625382867 1c0733) replica. fd0.030c000100000061: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232242625], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:57:22.625 (1609232242625391152 1c0733) replica. 
fd0.030c000100000061: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232242625 -D2020-12-29 16:57:22.788 (1609232242788196829 1c0719) replica.replica0.0300070f000fc06f: mutation_log.cpp:820:mark_new_offset(): switch log file by limit, old_file = /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.5.134218043, size = 33554444 -D2020-12-29 16:57:22.788o (1609232242788277249 1c0719) replica.replica0.0300070f000fc06f: mutation_log.cpp:731:create_new_log_file(): create new log file /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.6.167772487 succeed, time_used = 46148 ns -D2020-12-29 16:57:24.513 (1609232244513504705 1c0713) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:57:24.603 (1609232244603153819 1c0719) replica.replica0.0300070f000fd646: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 103834 -D2020-12-29 16:57:24.6035 (1609232244603167949 1c0719) replica.replica0.0300070f000fd646: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 103834, confirmed_decree = -1 -D2020-12-29 16:57:24.620 (1609232244620265751 1c0719) replica.replica0.0300070f000fd67a: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 104036 -D2020-12-29 16:57:24.6205 (1609232244620278585 1c0719) replica.replica0.0300070f000fd67a: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 104036, confirmed_decree = -1 -D2020-12-29 16:57:24.623 (1609232244623750600 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:24.623 (1609232244623760568 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.623 (1609232244623794160 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.639 (1609232244639208605 1c071a) replica.replica1.0300070f000fd6b4: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 39 -D2020-12-29 16:57:24.6395 (1609232244639224561 1c071a) replica.replica1.0300070f000fd6b4: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 39, confirmed_decree = -1 -D2020-12-29 16:57:24.656 (1609232244656448945 1c0713) replica.default1.0306000000000040: 
hotkey_collector.cpp:302:terminate_if_timeout(): [3.3@10.232.52.144:34803] hotkey collector work time is exhausted but no hotkey has been found -D2020-12-29 16:57:24.662 (1609232244662336128 1c071a) replica.replica1.0300070f000fd6f7: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 84 -D2020-12-29 16:57:24.6625 (1609232244662357336 1c071a) replica.replica1.0300070f000fd6f7: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 84, confirmed_decree = -1 -D2020-12-29 16:57:24.740 (1609232244740552166 1c0719) replica.replica0.0300070f000fd7de: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81 -D2020-12-29 16:57:24.7405 (1609232244740568577 1c0719) replica.replica0.0300070f000fd7de: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81, confirmed_decree = -1 -D2020-12-29 16:57:24.770 (1609232244770843820 1c071a) replica.replica1.0300070f000fd83b: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81 -D2020-12-29 16:57:24.7705 (1609232244770855481 1c071a) replica.replica1.0300070f000fd83b: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81, confirmed_decree = -1 -D2020-12-29 16:57:24.856 (1609232244856588861 1c0719) replica.replica0.0300070f000fd948: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81 -D2020-12-29 16:57:24.8565 (1609232244856600844 1c0719) replica.replica0.0300070f000fd948: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81, confirmed_decree = -1 -D2020-12-29 16:57:24.899 (1609232244899961075 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:24.899 (1609232244899979354 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.900 (1609232244900011020 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.901 (1609232244901405827 1c0719) replica.replica0.0300070f000fd9dc: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 
10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 39 -D2020-12-29 16:57:24.9015 (1609232244901425694 1c0719) replica.replica0.0300070f000fd9dc: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 39, confirmed_decree = -1 -D2020-12-29 16:57:24.904 (1609232244904027140 1c071a) replica.replica1.0300070f000fd9e4: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81 -D2020-12-29 16:57:24.9045 (1609232244904040193 1c071a) replica.replica1.0300070f000fd9e4: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81, confirmed_decree = -1 -D2020-12-29 16:57:24.937 (1609232244937859926 1c071a) replica.replica1.0300070f000fda3b: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 103855 -D2020-12-29 16:57:24.9375 (1609232244937894741 1c071a) replica.replica1.0300070f000fda3b: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 103855, confirmed_decree = -1 -D2020-12-29 16:57:24.975 (1609232244975683753 1c0719) replica.replica0.0300070f000fdaaa: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81 -D2020-12-29 16:57:24.9755 (1609232244975695426 1c0719) replica.replica0.0300070f000fdaaa: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81, confirmed_decree = -1 -D2020-12-29 16:57:24.976 (1609232244976449265 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:24.976 (1609232244976458700 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.976 (1609232244976484510 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.977 (1609232244977493565 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:24.977 (1609232244977504656 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.977 (1609232244977538765 1c071a) 
replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.982 (1609232244982204541 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:24.982 (1609232244982216334 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.982 (1609232244982244715 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.985 (1609232244985113208 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:24.985 (1609232244985121528 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.985 (1609232244985145679 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:24.992 (1609232244992348878 1c0719) replica.replica0.0300070f000fdae4: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 103882 -D2020-12-29 16:57:24.9925 (1609232244992385144 1c0719) replica.replica0.0300070f000fdae4: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 103882, confirmed_decree = -1 -D2020-12-29 16:57:24.994 (1609232244994492181 1c071a) replica.replica1.0300070f000fdaed: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 103296 -D2020-12-29 16:57:24.9945 (1609232244994503294 1c071a) replica.replica1.0300070f000fdaed: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 103296, confirmed_decree = -1 -D2020-12-29 16:57:25.6U (1609232245006216688 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:57:25.6 (1609232245006225587 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:25.6 (1609232245006250088 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state 
replication::partition_status::PS_SECONDARY -D2020-12-29 16:57:25.1760 (1609232245176819736 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 16:57:25.176 (1609232245176832620 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 80 -D2020-12-29 16:57:25.1760 (1609232245176972530 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 103815 -D2020-12-29 16:57:25.1765 (1609232245176976499 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 80 -D2020-12-29 16:57:25.1770 (1609232245177061942 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 103210 -D2020-12-29 16:57:25.1770 (1609232245177064410 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 80 -D2020-12-29 16:57:25.1770 (1609232245177168178 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 104111 -D2020-12-29 16:57:25.1771 (1609232245177188948 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 38 -D2020-12-29 16:57:25.1778 (1609232245177316904 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 103781 -D2020-12-29 16:57:25.1771 (1609232245177463849 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 104142 -D2020-12-29 16:57:25.1772 (1609232245177469333 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 36 -D2020-12-29 16:57:25.1776 (1609232245177472920 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, 
last_durable_decree= 0, plog_max_commit_on_disk = 38 -D2020-12-29 16:57:25.1778 (1609232245177474936 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 83 -D2020-12-29 16:57:25.1773 (1609232245177476854 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 80 -D2020-12-29 16:57:25.1770 (1609232245177484666 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 104218 -D2020-12-29 16:57:25.1778 (1609232245177538885 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 244457 -D2020-12-29 16:57:25.1777 (1609232245177541319 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 80 -D2020-12-29 16:57:25.1770 (1609232245177543219 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 39 -D2020-12-29 16:57:25.1779 (1609232245177545741 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 80 -D2020-12-29 16:57:25.1770 (1609232245177555259 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 103935 -D2020-12-29 16:57:25.1775 (1609232245177557293 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 80 -D2020-12-29 16:57:25.177 (1609232245177580630 1c0720) replica.rep_long1.0301000000000001: mutation_log.cpp:1537:garbage_collection(): gc_shared: no file can be deleted, file_count_limit = 100, reserved_log_count = 6, reserved_log_size = 169052941, reserved_smallest_log = 1, reserved_largest_log = 6, stop_gc_log_index = 1, stop_gc_replica_count = 0, stop_gc_replica = 3.3, stop_gc_decree_gap = 56346, stop_gc_garbage_max_decree = 0, stop_gc_log_max_decree = 56346 -D2020-12-29 16:57:25.177 (1609232245177594034 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 781409 -D2020-12-29 16:57:25.625 (1609232245625340338 1c0733) replica. 
fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232245625] -D2020-12-29 16:57:25.625 (1609232245625460092 1c0734) replica. fd1.030c000000000065: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232245625], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:57:25.625 (1609232245625469587 1c0734) replica. fd1.030c000000000065: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232245625 -D2020-12-29 16:57:28.625 (1609232248625394687 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232248625] -D2020-12-29 16:57:28.625 (1609232248625511557 1c0734) replica. fd1.030c000000000067: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232248625], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:57:28.625 (1609232248625522345 1c0734) replica. fd1.030c000000000067: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232248625 -D2020-12-29 16:57:30.575 (1609232250575113199 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2217 MB, memused_res = 848MB -D2020-12-29 16:57:30.576 (1609232250576171855 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232250553), last_report_time_ms(1609232240552) -D2020-12-29 16:57:31.625 (1609232251625451576 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232251625] -D2020-12-29 16:57:31.625 (1609232251625552886 1c0733) replica. fd0.030c000100000063: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232251625], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:57:31.625 (1609232251625560111 1c0733) replica. 
fd0.030c000100000063: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232251625
[repetitive replica-server debug log entries from 10.232.52.144:34803, 2020-12-29 16:57:34 to 16:58:04: group-check broadcasts and acknowledgements (replica_check.cpp / replica_stub.cpp), failure-detector beacons to meta server 10.232.52.144:34601, duplication_sync timers, non-emergency checkpoint triggers (replica_chkpt.cpp), mutation init_prepare (replica_2pc.cpp), plog garbage collection (on_gc), and memory counter updates]
-D2020-12-29 16:58:04.986 (1609232284986851922 1c0719) replica.replica0.0306000100000034: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.7@10.232.52.144:34803:
trigger non-emergency checkpoint -D2020-12-29 16:58:04.992 (1609232284992665225 1c0719) replica.replica0.0300070f00123974: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 123304 -D2020-12-29 16:58:04.9925 (1609232284992673861 1c0719) replica.replica0.0300070f00123974: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 123304, confirmed_decree = -1 -D2020-12-29 16:58:04.994 (1609232284994832040 1c071a) replica.replica1.0300070f0012397e: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 122489 -D2020-12-29 16:58:04.9945 (1609232284994840266 1c071a) replica.replica1.0300070f0012397e: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 122489, confirmed_decree = -1 -D2020-12-29 16:58:05.6 (1609232285006525369 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:05.6 (1609232285006542909 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:05.6 (1609232285006596682 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:07.626 (1609232287626094544 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232287626] -D2020-12-29 16:58:07.626 (1609232287626198632 1c0733) replica. fd0.030c00010000006f: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232287626], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:58:07.626 (1609232287626206518 1c0733) replica. fd0.030c00010000006f: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232287626 -D2020-12-29 16:58:10.579 (1609232290579787777 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2306 MB, memused_res = 949MB -D2020-12-29 16:58:10.580 (1609232290580906900 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232290579), last_report_time_ms(1609232280578) -D2020-12-29 16:58:10.626 (1609232290626155664 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232290626] -D2020-12-29 16:58:10.626 (1609232290626277756 1c0733) replica. 
fd0.030c000100000071: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232290626], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:58:10.626 (1609232290626285261 1c0733) replica. fd0.030c000100000071: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232290626 -D2020-12-29 16:58:13.500 (1609232293500675199 1c0719) replica.replica0.0300070f0012b76f: mutation_log.cpp:820:mark_new_offset(): switch log file by limit, old_file = /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.6.167772487, size = 33554457 -D2020-12-29 16:58:13.500o (1609232293500717893 1c0719) replica.replica0.0300070f0012b76f: mutation_log.cpp:731:create_new_log_file(): create new log file /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.7.201326944 succeed, time_used = 34189 ns -D2020-12-29 16:58:13.626 (1609232293626230351 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232293626] -D2020-12-29 16:58:13.626 (1609232293626363956 1c0734) replica. fd1.030c000000000075: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232293626], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:58:13.626 (1609232293626372700 1c0734) replica. fd1.030c000000000075: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232293626 -D2020-12-29 16:58:14.513 (1609232294513786420 1c0712) replica.default0.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:58:14.593 (1609232294593093611 1c0719) replica.replica0.0306000100000039: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.1@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:58:14.603 (1609232294603603876 1c0719) replica.replica0.0300070f0012c6e9: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 127720 -D2020-12-29 16:58:14.6035 (1609232294603648486 1c0719) replica.replica0.0300070f0012c6e9: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 127720, confirmed_decree = -1 -D2020-12-29 16:58:14.620 (1609232294620754594 1c0719) replica.replica0.0300070f0012c726: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 128122 -D2020-12-29 16:58:14.6205 (1609232294620767208 1c0719) replica.replica0.0300070f0012c726: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 128122, confirmed_decree = -1 -D2020-12-29 16:58:14.624 (1609232294624108633 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check 
-D2020-12-29 16:58:14.624 (1609232294624118283 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.624 (1609232294624147930 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.639 (1609232294639795792 1c071a) replica.replica1.0300070f0012c768: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 46 -D2020-12-29 16:58:14.6395 (1609232294639811987 1c071a) replica.replica1.0300070f0012c768: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 46, confirmed_decree = -1 -D2020-12-29 16:58:14.662 (1609232294662705495 1c071a) replica.replica1.0300070f0012c7b7: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 102 -D2020-12-29 16:58:14.6625 (1609232294662718230 1c071a) replica.replica1.0300070f0012c7b7: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 102, confirmed_decree = -1 -D2020-12-29 16:58:14.741 (1609232294741035992 1c0719) replica.replica0.0300070f0012c8c8: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 98 -D2020-12-29 16:58:14.7415 (1609232294741047011 1c0719) replica.replica0.0300070f0012c8c8: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 98, confirmed_decree = -1 -D2020-12-29 16:58:14.771 (1609232294771240707 1c071a) replica.replica1.0300070f0012c934: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 98 -D2020-12-29 16:58:14.7715 (1609232294771251835 1c071a) replica.replica1.0300070f0012c934: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 98, confirmed_decree = -1 -D2020-12-29 16:58:14.856 (1609232294856992376 1c0719) replica.replica0.0300070f0012ca67: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 98 -D2020-12-29 16:58:14.8575 (1609232294857005310 1c0719) replica.replica0.0300070f0012ca67: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 98, confirmed_decree = -1 -D2020-12-29 16:58:14.900 (1609232294900640471 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:14.900 (1609232294900656467 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.900 (1609232294900687865 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.902 (1609232294902081054 1c0719) replica.replica0.0300070f0012cb06: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 49 -D2020-12-29 16:58:14.9025 (1609232294902090197 1c0719) replica.replica0.0300070f0012cb06: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 49, confirmed_decree = -1 -D2020-12-29 16:58:14.904 (1609232294904455218 1c071a) replica.replica1.0300070f0012cb0f: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 98 -D2020-12-29 16:58:14.9045 (1609232294904468229 1c071a) replica.replica1.0300070f0012cb0f: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 98, confirmed_decree = -1 -D2020-12-29 16:58:14.938 (1609232294938307204 1c071a) replica.replica1.0300070f0012cb88: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 128147 -D2020-12-29 16:58:14.9385 (1609232294938320154 1c071a) replica.replica1.0300070f0012cb88: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 128147, confirmed_decree = -1 -D2020-12-29 16:58:14.976 (1609232294976119933 1c0719) replica.replica0.0300070f0012cc10: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 98 -D2020-12-29 16:58:14.9765 (1609232294976129341 1c0719) replica.replica0.0300070f0012cc10: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 98, confirmed_decree = -1 -D2020-12-29 16:58:14.976 (1609232294976874971 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:14.976 (1609232294976904210 
1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.976 (1609232294976931168 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.978 (1609232294978105348 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:14.978 (1609232294978116479 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.978 (1609232294978147231 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.982 (1609232294982632389 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:14.982 (1609232294982642678 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.982 (1609232294982672009 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.985 (1609232294985490168 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:14.985 (1609232294985500971 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.985 (1609232294985526725 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:14.992 (1609232294992758700 1c0719) replica.replica0.0300070f0012cc4c: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 127946 -D2020-12-29 16:58:14.9925 (1609232294992769815 1c0719) replica.replica0.0300070f0012cc4c: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 127946, confirmed_decree = -1 -D2020-12-29 16:58:14.994 (1609232294994982291 1c071a) replica.replica1.0300070f0012cc55: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 127200 -D2020-12-29 16:58:14.9945 
(1609232294994993581 1c071a) replica.replica1.0300070f0012cc55: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 127200, confirmed_decree = -1 -D2020-12-29 16:58:15.6U (1609232295006654103 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:15.6 (1609232295006688435 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:15.6 (1609232295006724750 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:16.626 (1609232296626287760 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232296626] -D2020-12-29 16:58:16.626 (1609232296626396818 1c0734) replica. fd1.030c000000000077: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232296626], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:58:16.626 (1609232296626403948 1c0734) replica. fd1.030c000000000077: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232296626 -D2020-12-29 16:58:18.287 (1609232298287892518 1c0719) replica.replica0.0300070f0012fce6: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.270000 init_prepare, mutation_tid=1178122 -D2020-12-29 16:58:18.361 (1609232298361270264 1c071a) replica.replica1.0300070f0012fe0b: replica_2pc.cpp:168:init_prepare(): 3.6@10.232.52.144:34803: mutation 3.6.3.130000 init_prepare, mutation_tid=1178415 -D2020-12-29 16:58:18.505 (1609232298505861707 1c071a) replica.replica1.0300070f00130048: replica_2pc.cpp:168:init_prepare(): 3.0@10.232.52.144:34803: mutation 3.0.3.130000 init_prepare, mutation_tid=1178988 -D2020-12-29 16:58:19.626 (1609232299626344279 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232299626] -D2020-12-29 16:58:19.626 (1609232299626458565 1c0733) replica. fd0.030c000100000073: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232299626], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:58:19.626 (1609232299626465932 1c0733) replica. fd0.030c000100000073: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232299626 -D2020-12-29 16:58:20.581 (1609232300581001819 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 2381 MB, memused_res = 975MB -D2020-12-29 16:58:20.582 (1609232300582021499 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232300580), last_report_time_ms(1609232290579) -D2020-12-29 16:58:22.626 (1609232302626401818 1c0734) replica. 
fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232302626] -D2020-12-29 16:58:22.626 (1609232302626511342 1c0733) replica. fd0.030c000100000075: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232302626], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:58:22.626 (1609232302626519054 1c0733) replica. fd0.030c000100000075: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232302626 -D2020-12-29 16:58:24.513 (1609232304513847341 1c0714) replica.default2.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:58:24.603 (1609232304603668491 1c0719) replica.replica0.0300070f00135df8: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 132577 -D2020-12-29 16:58:24.6035 (1609232304603701536 1c0719) replica.replica0.0300070f00135df8: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 132577, confirmed_decree = -1 -D2020-12-29 16:58:24.620 (1609232304620869786 1c0719) replica.replica0.0300070f00135e3e: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 132853 -D2020-12-29 16:58:24.6205 (1609232304620879301 1c0719) replica.replica0.0300070f00135e3e: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 132853, confirmed_decree = -1 -D2020-12-29 16:58:24.624 (1609232304624224306 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:24.624 (1609232304624232081 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.624 (1609232304624250979 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.639 (1609232304639942356 1c071a) replica.replica1.0300070f00135e8b: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47 -D2020-12-29 16:58:24.6395 (1609232304639957512 1c071a) replica.replica1.0300070f00135e8b: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 47, confirmed_decree = -1 -D2020-12-29 16:58:24.662 (1609232304662810978 1c071a) replica.replica1.0300070f00135ee7: 
replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 105 -D2020-12-29 16:58:24.6625 (1609232304662822601 1c071a) replica.replica1.0300070f00135ee7: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 105, confirmed_decree = -1 -D2020-12-29 16:58:24.741 (1609232304741118735 1c0719) replica.replica0.0300070f00136020: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101 -D2020-12-29 16:58:24.7415 (1609232304741132320 1c0719) replica.replica0.0300070f00136020: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101, confirmed_decree = -1 -D2020-12-29 16:58:24.771 (1609232304771354853 1c071a) replica.replica1.0300070f00136099: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101 -D2020-12-29 16:58:24.7715 (1609232304771362867 1c071a) replica.replica1.0300070f00136099: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101, confirmed_decree = -1 -D2020-12-29 16:58:24.857 (1609232304857084553 1c0719) replica.replica0.0300070f001361e7: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101 -D2020-12-29 16:58:24.8575 (1609232304857096412 1c0719) replica.replica0.0300070f001361e7: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101, confirmed_decree = -1 -D2020-12-29 16:58:24.900 (1609232304900784088 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:24.900 (1609232304900796544 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.900 (1609232304900821493 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.902 (1609232304902118447 1c0719) replica.replica0.0300070f0013629d: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 52 -D2020-12-29 16:58:24.9025 (1609232304902130372 1c0719) replica.replica0.0300070f0013629d: 
replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 52, confirmed_decree = -1 -D2020-12-29 16:58:24.904 (1609232304904527420 1c071a) replica.replica1.0300070f001362a8: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101 -D2020-12-29 16:58:24.9045 (1609232304904537165 1c071a) replica.replica1.0300070f001362a8: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101, confirmed_decree = -1 -D2020-12-29 16:58:24.938 (1609232304938396643 1c071a) replica.replica1.0300070f00136331: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 133065 -D2020-12-29 16:58:24.9385 (1609232304938406139 1c071a) replica.replica1.0300070f00136331: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 133065, confirmed_decree = -1 -D2020-12-29 16:58:24.976 (1609232304976203680 1c0719) replica.replica0.0300070f001363c9: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101 -D2020-12-29 16:58:24.9765 (1609232304976213104 1c0719) replica.replica0.0300070f001363c9: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 101, confirmed_decree = -1 -D2020-12-29 16:58:24.976 (1609232304976988171 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:24.977 (1609232304977012756 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.977 (1609232304977037160 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.978 (1609232304978193523 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:24.978 (1609232304978201024 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.978 (1609232304978222428 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 
16:58:24.982 (1609232304982730661 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:24.982 (1609232304982738597 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.982 (1609232304982757392 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.985 (1609232304985564507 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:24.985 (1609232304985572678 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.985 (1609232304985593526 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:24.992 (1609232304992836760 1c0719) replica.replica0.0300070f0013640b: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 132822 -D2020-12-29 16:58:24.9925 (1609232304992845178 1c0719) replica.replica0.0300070f0013640b: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 132822, confirmed_decree = -1 -D2020-12-29 16:58:24.995 (1609232304995058267 1c071a) replica.replica1.0300070f00136415: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 132050 -D2020-12-29 16:58:24.9955 (1609232304995066789 1c071a) replica.replica1.0300070f00136415: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 132050, confirmed_decree = -1 -D2020-12-29 16:58:25.6U (1609232305006797086 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:58:25.6 (1609232305006824130 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:25.6 (1609232305006847694 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:58:25.179 (1609232305179553716 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 
16:58:25.179 (1609232305179565871 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 100 -D2020-12-29 16:58:25.1790 (1609232305179677093 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 132704 -D2020-12-29 16:58:25.1794 (1609232305179680936 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 100 -D2020-12-29 16:58:25.1790 (1609232305179741607 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 132037 -D2020-12-29 16:58:25.1797 (1609232305179743982 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 100 -D2020-12-29 16:58:25.1790 (1609232305179814248 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 132997 -D2020-12-29 16:58:25.1797 (1609232305179823834 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 49 -D2020-12-29 16:58:25.1799 (1609232305179865494 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 132842 -D2020-12-29 16:58:25.1792 (1609232305179875582 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 133356 -D2020-12-29 16:58:25.1796 (1609232305179877584 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 46 -D2020-12-29 16:58:25.1796 (1609232305179880707 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 46 -D2020-12-29 16:58:25.1796 (1609232305179899110 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 
2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 104 -D2020-12-29 16:58:25.1794 (1609232305179901683 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 100 -D2020-12-29 16:58:25.1790 (1609232305179970302 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 133181 -D2020-12-29 16:58:25.1801 (1609232305180025818 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 273191 -D2020-12-29 16:58:25.1801 (1609232305180028039 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 100 -D2020-12-29 16:58:25.1800 (1609232305180031560 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 46 -D2020-12-29 16:58:25.1806 (1609232305180033502 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 100 -D2020-12-29 16:58:25.1800 (1609232305180100571 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 133022 -D2020-12-29 16:58:25.1802 (1609232305180102838 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 100 -D2020-12-29 16:58:25.180 (1609232305180127194 1c0720) replica.rep_long1.0301000000000001: mutation_log.cpp:1537:garbage_collection(): gc_shared: no file can be deleted, file_count_limit = 100, reserved_log_count = 7, reserved_log_size = 209084271, reserved_smallest_log = 1, reserved_largest_log = 7, stop_gc_log_index = 1, stop_gc_replica_count = 0, stop_gc_replica = 3.3, stop_gc_decree_gap = 56346, stop_gc_garbage_max_decree = 0, stop_gc_log_max_decree = 56346 -D2020-12-29 16:58:25.180 (1609232305180132663 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 586107 -D2020-12-29 16:58:25.626 (1609232305626458689 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232305626] -D2020-12-29 16:58:25.626 (1609232305626561669 1c0734) replica. 
fd1.030c000000000079: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232305626], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:58:25.626 (1609232305626568919 1c0734) replica. fd1.030c000000000079: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232305626 -D2020-12-29 16:58:28.626 (1609232308626513674 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232308626] -D2020-12-29 16:58:28.626 (1609232308626618303 1c0734) replica. fd1.030c00000000007b: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232308626], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:58:28.626 (1609232308626626613 1c0734) replica. fd1.030c00000000007b: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232308626 -D2020-12-29 16:58:30.582 (1609232310582129703 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2401 MB, memused_res = 1000MB -D2020-12-29 16:58:30.583 (1609232310583173769 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232310582), last_report_time_ms(1609232300580) -D2020-12-29 16:58:31.626 (1609232311626566378 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232311626] -D2020-12-29 16:58:31.626 (1609232311626667885 1c0733) replica. fd0.030c000100000077: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232311626], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:58:31.626 (1609232311626677861 1c0733) replica. 
-D2020-12-29 16:59:05.7 (1609232345007205635 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:05.7 (1609232345007223806 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:05.7 (1609232345007257901 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:05.666 (1609232345666425168 1c0719) replica.replica0.0300070f0015ae7f: mutation_log.cpp:820:mark_new_offset(): switch log file by limit, old_file = /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.7.201326944, size = 33554462 -D2020-12-29 16:59:05.666o (1609232345666467897 1c0719) replica.replica0.0300070f0015ae7f: mutation_log.cpp:731:create_new_log_file(): create new log file /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.8.234881406 succeed, time_used = 21157 ns -D2020-12-29 16:59:07.627 (1609232347627333436 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232347627] -D2020-12-29 16:59:07.627 (1609232347627515850 1c0733) replica. fd0.030c000100000083: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232347627], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:59:07.627 (1609232347627524712 1c0733) replica. fd0.030c000100000083: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232347627 -D2020-12-29 16:59:10.587 (1609232350587228535 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2470 MB, memused_res = 1088MB -D2020-12-29 16:59:10.588 (1609232350588296636 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232350587), last_report_time_ms(1609232340585) -D2020-12-29 16:59:10.627 (1609232350627400724 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232350627] -D2020-12-29 16:59:10.627 (1609232350627499970 1c0733) replica. fd0.030c000100000085: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232350627], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:59:10.627 (1609232350627507125 1c0733) replica. fd0.030c000100000085: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232350627 -D2020-12-29 16:59:13.627 (1609232353627469386 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232353627] -D2020-12-29 16:59:13.627 (1609232353627600975 1c0734) replica. 
fd1.030c000000000089: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232353627], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:59:13.627 (1609232353627612919 1c0734) replica. fd1.030c000000000089: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232353627 -D2020-12-29 16:59:14.514 (1609232354514185825 1c0714) replica.default2.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:59:14.604 (1609232354604227450 1c0719) replica.replica0.0300070f0016203c: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155386 -D2020-12-29 16:59:14.6045 (1609232354604246221 1c0719) replica.replica0.0300070f0016203c: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155386, confirmed_decree = -1 -D2020-12-29 16:59:14.621 (1609232354621378017 1c0719) replica.replica0.0300070f0016206c: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155257 -D2020-12-29 16:59:14.6215 (1609232354621421253 1c0719) replica.replica0.0300070f0016206c: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155257, confirmed_decree = -1 -D2020-12-29 16:59:14.624 (1609232354624709809 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:14.624 (1609232354624721594 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.624 (1609232354624764765 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.640 (1609232354640621107 1c071a) replica.replica1.0300070f001620a3: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 56 -D2020-12-29 16:59:14.6405 (1609232354640633799 1c071a) replica.replica1.0300070f001620a3: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 56, confirmed_decree = -1 -D2020-12-29 16:59:14.663 (1609232354663338796 1c071a) replica.replica1.0300070f001620e6: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 123 -D2020-12-29 16:59:14.6635 
(1609232354663353989 1c071a) replica.replica1.0300070f001620e6: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 123, confirmed_decree = -1 -D2020-12-29 16:59:14.741 (1609232354741837381 1c0719) replica.replica0.0300070f001621af: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118 -D2020-12-29 16:59:14.7415 (1609232354741867552 1c0719) replica.replica0.0300070f001621af: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118, confirmed_decree = -1 -D2020-12-29 16:59:14.771 (1609232354771915608 1c071a) replica.replica1.0300070f001621e4: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118 -D2020-12-29 16:59:14.7715 (1609232354771931040 1c071a) replica.replica1.0300070f001621e4: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118, confirmed_decree = -1 -D2020-12-29 16:59:14.857 (1609232354857764619 1c0719) replica.replica0.0300070f0016228c: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118 -D2020-12-29 16:59:14.8575 (1609232354857783184 1c0719) replica.replica0.0300070f0016228c: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118, confirmed_decree = -1 -D2020-12-29 16:59:14.901 (1609232354901424985 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:14.901 (1609232354901446512 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.901 (1609232354901492613 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.902 (1609232354902751737 1c0719) replica.replica0.0300070f001622ec: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 56 -D2020-12-29 16:59:14.9025 (1609232354902767801 1c0719) replica.replica0.0300070f001622ec: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 56, confirmed_decree = -1 -D2020-12-29 16:59:14.904 
(1609232354904916623 1c071a) replica.replica1.0300070f001622f3: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118 -D2020-12-29 16:59:14.904 (1609232354904927794 1c071a) replica.replica1.0300070f001622f3: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118, confirmed_decree = -1 -D2020-12-29 16:59:14.938 (1609232354938862231 1c071a) replica.replica1.0300070f0016233c: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155453 -D2020-12-29 16:59:14.9385 (1609232354938881315 1c071a) replica.replica1.0300070f0016233c: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155453, confirmed_decree = -1 -D2020-12-29 16:59:14.976 (1609232354976839332 1c0719) replica.replica0.0300070f0016238c: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118 -D2020-12-29 16:59:14.9765 (1609232354976857654 1c0719) replica.replica0.0300070f0016238c: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 118, confirmed_decree = -1 -D2020-12-29 16:59:14.977 (1609232354977515252 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:14.977 (1609232354977529196 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.977 (1609232354977571436 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.978 (1609232354978884639 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:14.978 (1609232354978920476 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.978 (1609232354978955699 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.983 (1609232354983162387 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:14.983 (1609232354983183531 1c0719) 
replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.983 (1609232354983223299 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.985 (1609232354985978564 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:14.985 (1609232354985994609 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.986 (1609232354986042459 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:14.993 (1609232354993298341 1c0719) replica.replica0.0300070f001623ae: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155200 -D2020-12-29 16:59:14.9935 (1609232354993322946 1c0719) replica.replica0.0300070f001623ae: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155200, confirmed_decree = -1 -D2020-12-29 16:59:14.995 (1609232354995571947 1c071a) replica.replica1.0300070f001623b4: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 154568 -D2020-12-29 16:59:14.9955 (1609232354995582919 1c071a) replica.replica1.0300070f001623b4: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 154568, confirmed_decree = -1 -D2020-12-29 16:59:15.7 (1609232355007831141 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:15.7 (1609232355007861782 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:15.7 (1609232355007919872 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:16.627 (1609232356627527064 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232356627] -D2020-12-29 16:59:16.627 (1609232356627671808 1c0734) replica. 
fd1.030c00000000008b: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232356627], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:59:16.627 (1609232356627679331 1c0734) replica. fd1.030c00000000008b: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232356627 -D2020-12-29 16:59:19.627 (1609232359627602586 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232359627] -D2020-12-29 16:59:19.627 (1609232359627697818 1c0733) replica. fd0.030c000100000087: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232359627], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:59:19.627 (1609232359627704890 1c0733) replica. fd0.030c000100000087: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232359627 -D2020-12-29 16:59:20.588 (1609232360588381204 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 2476 MB, memused_res = 1099MB -D2020-12-29 16:59:20.589 (1609232360589411202 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232360588), last_report_time_ms(1609232350587) -D2020-12-29 16:59:22.627 (1609232362627655374 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232362627] -D2020-12-29 16:59:22.627 (1609232362627767676 1c0733) replica. fd0.030c000100000089: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232362627], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:59:22.627 (1609232362627774581 1c0733) replica. 
fd0.030c000100000089: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232362627 -D2020-12-29 16:59:23.909 (1609232363909290262 1c0719) replica.replica0.0300070f0016a54a: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.300000 init_prepare, mutation_tid=1417710 -D2020-12-29 16:59:24.276 (1609232364276940809 1c071a) replica.replica1.0300070f0016aa5d: replica_2pc.cpp:168:init_prepare(): 3.6@10.232.52.144:34803: mutation 3.6.3.160000 init_prepare, mutation_tid=1419009 -D2020-12-29 16:59:24.469 (1609232364469923199 1c071a) replica.replica1.0300070f0016ad3e: replica_2pc.cpp:168:init_prepare(): 3.0@10.232.52.144:34803: mutation 3.0.3.160000 init_prepare, mutation_tid=1419746 -D2020-12-29 16:59:24.514 (1609232364514248088 1c0715) replica.default3.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 16:59:24.549 (1609232364549821119 1c071a) replica.replica1.0306000000000005: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.0@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:59:24.552 (1609232364552565889 1c0719) replica.replica0.0306000100000004: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.3@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:59:24.604 (1609232364604318282 1c0719) replica.replica0.0300070f0016af50: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 159977 -D2020-12-29 16:59:24.6045 (1609232364604331555 1c0719) replica.replica0.0300070f0016af50: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 159977, confirmed_decree = -1 -D2020-12-29 16:59:24.621 (1609232364621448573 1c0719) replica.replica0.0300070f0016af96: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 159795 -D2020-12-29 16:59:24.6215 (1609232364621456526 1c0719) replica.replica0.0300070f0016af96: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 159795, confirmed_decree = -1 -D2020-12-29 16:59:24.624 (1609232364624818385 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:24.624 (1609232364624830740 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.624 (1609232364624858577 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.640 (1609232364640772327 1c071a) replica.replica1.0300070f0016afe3: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 57 -D2020-12-29 16:59:24.6405 (1609232364640783317 1c071a) replica.replica1.0300070f0016afe3: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 57, confirmed_decree = -1 -D2020-12-29 16:59:24.640 (1609232364640856176 1c0719) replica.replica0.030600000000000a: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.2@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:59:24.647 (1609232364647560030 1c071a) replica.replica1.0306000100000009: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.5@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:59:24.663 (1609232364663400605 1c071a) replica.replica1.0300070f0016b040: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 126 -D2020-12-29 16:59:24.6635 (1609232364663410089 1c071a) replica.replica1.0300070f0016b040: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 126, confirmed_decree = -1 -D2020-12-29 16:59:24.713 (1609232364713844580 1c071a) replica.replica1.030600000000000f: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.2@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:59:24.717 (1609232364717946331 1c071a) replica.replica1.030600010000000e: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.1@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:59:24.750 (1609232364750314573 1c0719) replica.replica0.0300070f0016b19b: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 121 -D2020-12-29 16:59:24.7505 (1609232364750323936 1c0719) replica.replica0.0300070f0016b19b: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 121, confirmed_decree = -1 -D2020-12-29 16:59:24.771 (1609232364771998130 1c071a) replica.replica1.0300070f0016b1ee: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 121 -D2020-12-29 16:59:24.7725 (1609232364772007930 1c071a) replica.replica1.0300070f0016b1ee: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 121, confirmed_decree = -1 -D2020-12-29 16:59:24.785 (1609232364785680589 1c0719) replica.replica0.0306000000000014: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.6@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:59:24.857 (1609232364857848082 1c0719) replica.replica0.0300070f0016b33b: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 121 
-D2020-12-29 16:59:24.8575 (1609232364857859015 1c0719) replica.replica0.0300070f0016b33b: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 121, confirmed_decree = -1 -D2020-12-29 16:59:24.892 (1609232364892339759 1c0719) replica.replica0.0306000000000019: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.1@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:59:24.895 (1609232364895913770 1c071a) replica.replica1.0306000100000016: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.7@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 16:59:24.901 (1609232364901579574 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:24.901 (1609232364901591474 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.901 (1609232364901615374 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.902 (1609232364902904501 1c0719) replica.replica0.0300070f0016b3f1: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 57 -D2020-12-29 16:59:24.9025 (1609232364902915677 1c0719) replica.replica0.0300070f0016b3f1: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 57, confirmed_decree = -1 -D2020-12-29 16:59:24.904 (1609232364904998045 1c071a) replica.replica1.0300070f0016b3fa: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 121 -D2020-12-29 16:59:24.9055 (1609232364905007691 1c071a) replica.replica1.0300070f0016b3fa: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 121, confirmed_decree = -1 -D2020-12-29 16:59:24.938 (1609232364938964155 1c071a) replica.replica1.0300070f0016b480: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 160135 -D2020-12-29 16:59:24.9385 (1609232364938988966 1c071a) replica.replica1.0300070f0016b480: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 160135, confirmed_decree = -1 -D2020-12-29 16:59:24.976 (1609232364976938105 1c0719) replica.replica0.0300070f0016b50c: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 121 -D2020-12-29 16:59:24.9765 (1609232364976951303 1c0719) replica.replica0.0300070f0016b50c: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 121, confirmed_decree = -1 -D2020-12-29 16:59:24.977 (1609232364977636623 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:24.977 (1609232364977646213 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.977 (1609232364977670476 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.979 (1609232364979066047 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:24.979 (1609232364979074811 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.979 (1609232364979097170 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.983 (1609232364983268213 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:24.983 (1609232364983278063 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.983 (1609232364983301766 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.986 (1609232364986092471 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:24.986 (1609232364986100587 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.986 (1609232364986131242 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:24.993 (1609232364993356336 1c0719) replica.replica0.0300070f0016b54b: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 159896 -D2020-12-29 16:59:24.9935 
(1609232364993383797 1c0719) replica.replica0.0300070f0016b54b: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 159896, confirmed_decree = -1 -D2020-12-29 16:59:24.995 (1609232364995634085 1c071a) replica.replica1.0300070f0016b555: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 159313 -D2020-12-29 16:59:24.9955 (1609232364995642069 1c071a) replica.replica1.0300070f0016b555: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 159313, confirmed_decree = -1 -D2020-12-29 16:59:25.7 (1609232365007983609 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 16:59:25.7 (1609232365007994502 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:25.8 (1609232365008019839 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 16:59:25.180 (1609232365180886199 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 16:59:25.180 (1609232365180894090 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 120 -D2020-12-29 16:59:25.1800 (1609232365180931941 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160218 -D2020-12-29 16:59:25.1808 (1609232365180936954 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 120 -D2020-12-29 16:59:25.1800 (1609232365180970695 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 159350 -D2020-12-29 16:59:25.1800 (1609232365180974742 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 120 -D2020-12-29 16:59:25.1800 (1609232365180987966 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = 
replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160054 -D2020-12-29 16:59:25.1814 (1609232365181002519 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 56 -D2020-12-29 16:59:25.1816 (1609232365181059312 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 159888 -D2020-12-29 16:59:25.1818 (1609232365181092910 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160427 -D2020-12-29 16:59:25.1817 (1609232365181102989 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 52 -D2020-12-29 16:59:25.1812 (1609232365181109613 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 55 -D2020-12-29 16:59:25.1815 (1609232365181113002 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 125 -D2020-12-29 16:59:25.1815 (1609232365181116221 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 120 -D2020-12-29 16:59:25.1810 (1609232365181164809 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160242 -D2020-12-29 16:59:25.1812 (1609232365181240524 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 300419 -D2020-12-29 16:59:25.1819 (1609232365181244398 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 120 -D2020-12-29 16:59:25.1810 (1609232365181250717 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 56 -D2020-12-29 16:59:25.1816 
(1609232365181253723 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 120 -D2020-12-29 16:59:25.1810 (1609232365181332493 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160120 -D2020-12-29 16:59:25.1810 (1609232365181337612 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 120 -D2020-12-29 16:59:25.181 (1609232365181374110 1c0720) replica.rep_long1.0301000000000001: mutation_log.cpp:1537:garbage_collection(): gc_shared: no file can be deleted, file_count_limit = 100, reserved_log_count = 8, reserved_log_size = 246619429, reserved_smallest_log = 1, reserved_largest_log = 8, stop_gc_log_index = 1, stop_gc_replica_count = 0, stop_gc_replica = 3.3, stop_gc_decree_gap = 56346, stop_gc_garbage_max_decree = 0, stop_gc_log_max_decree = 56346 -D2020-12-29 16:59:25.181 (1609232365181393965 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 515016 -D2020-12-29 16:59:25.627 (1609232365627705798 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232365627] -D2020-12-29 16:59:25.627 (1609232365627826600 1c0734) replica. fd1.030c00000000008d: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232365627], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:59:25.627 (1609232365627834733 1c0734) replica. fd1.030c00000000008d: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232365627 -D2020-12-29 16:59:28.627 (1609232368627765464 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232368627] -D2020-12-29 16:59:28.627 (1609232368627914939 1c0734) replica. fd1.030c00000000008f: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232368627], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 16:59:28.627 (1609232368627923291 1c0734) replica. fd1.030c00000000008f: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232368627 -D2020-12-29 16:59:30.589 (1609232370589517364 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2485 MB, memused_res = 1113MB -D2020-12-29 16:59:30.590 (1609232370590834431 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232370589), last_report_time_ms(1609232360588) -D2020-12-29 16:59:31.627 (1609232371627837083 1c0734) replica. 
replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:04.978 (1609232404978801980 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:04.979 (1609232404979576396 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:04.979 (1609232404979586402 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:04.979 (1609232404979613804 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:04.983 (1609232404983719423 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:04.983 (1609232404983727211 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:04.983 (1609232404983765641 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:04.986 (1609232404986470941 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:04.986 (1609232404986480454 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:04.986 (1609232404986505929 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:04.993 (1609232404993852733 1c0719) replica.replica0.0300070f00189c7c: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 171723 -D2020-12-29 17:00:04.9935 (1609232404993862168 1c0719) replica.replica0.0300070f00189c7c: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 171723, confirmed_decree = -1 -D2020-12-29 17:00:04.996 (1609232404996033985 1c071a) replica.replica1.0300070f00189c85: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 171244 -D2020-12-29 17:00:04.9965 (1609232404996042069 1c071a) 
replica.replica1.0300070f00189c85: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 171244, confirmed_decree = -1 -D2020-12-29 17:00:05.8U (1609232405008363650 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:05.8 (1609232405008376365 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:05.8 (1609232405008406111 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:06.291 (1609232406291694294 1c0719) replica.replica0.0300070f0018ae10: mutation_log.cpp:820:mark_new_offset(): switch log file by limit, old_file = /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.8.234881406, size = 33554553 -D2020-12-29 17:00:06.291o (1609232406291727806 1c0719) replica.replica0.0300070f0018ae10: mutation_log.cpp:731:create_new_log_file(): create new log file /home/smilencer/Code/incubator-pegasus/onebox/replica3/data/replica/slog/log.9.268435959 succeed, time_used = 25995 ns -D2020-12-29 17:00:07.628 (1609232407628674319 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232407628] -D2020-12-29 17:00:07.628 (1609232407628781374 1c0733) replica. fd0.030c000100000097: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232407628], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:00:07.628 (1609232407628806107 1c0733) replica. fd0.030c000100000097: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232407628 -D2020-12-29 17:00:08.843 (1609232408843709419 1c0719) replica.replica0.0300070f0018d22d: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.350000 init_prepare, mutation_tid=1560174 -D2020-12-29 17:00:10.595 (1609232410595060445 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2534 MB, memused_res = 1170MB -D2020-12-29 17:00:10.596 (1609232410596025539 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232410594), last_report_time_ms(1609232400593) -D2020-12-29 17:00:10.628 (1609232410628743323 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232410628] -D2020-12-29 17:00:10.628 (1609232410628851120 1c0733) replica. fd0.030c000100000099: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232410628], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:00:10.628 (1609232410628859414 1c0733) replica. 
fd0.030c000100000099: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232410628 -D2020-12-29 17:00:13.226 (1609232413226369545 1c0719) replica.replica0.0300070f001916d0: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.360000 init_prepare, mutation_tid=1577743 -D2020-12-29 17:00:13.628 (1609232413628798919 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232413628] -D2020-12-29 17:00:13.628 (1609232413628906393 1c0734) replica. fd1.030c00000000009d: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232413628], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:00:13.628 (1609232413628913627 1c0734) replica. fd1.030c00000000009d: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232413628 -D2020-12-29 17:00:14.514 (1609232414514536331 1c0714) replica.default2.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 17:00:14.604 (1609232414604974117 1c0719) replica.replica0.0300070f00192c53: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 174186 -D2020-12-29 17:00:14.6045 (1609232414604988056 1c0719) replica.replica0.0300070f00192c53: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 174186, confirmed_decree = -1 -D2020-12-29 17:00:14.624 (1609232414624554388 1c0719) replica.replica0.0300070f00192ca5: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 174127 -D2020-12-29 17:00:14.6245 (1609232414624563975 1c0719) replica.replica0.0300070f00192ca5: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 174127, confirmed_decree = -1 -D2020-12-29 17:00:14.625 (1609232414625336542 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:14.625 (1609232414625365838 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.625 (1609232414625404495 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.641 (1609232414641572453 1c071a) replica.replica1.0300070f00192ceb: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 67 -D2020-12-29 
17:00:14.6415 (1609232414641581385 1c071a) replica.replica1.0300070f00192ceb: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 67, confirmed_decree = -1 -D2020-12-29 17:00:14.667 (1609232414667247462 1c071a) replica.replica1.0300070f00192d3c: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 144 -D2020-12-29 17:00:14.6675 (1609232414667267108 1c071a) replica.replica1.0300070f00192d3c: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 144, confirmed_decree = -1 -D2020-12-29 17:00:14.751 (1609232414751012522 1c0719) replica.replica0.0300070f00192e4e: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138 -D2020-12-29 17:00:14.7515 (1609232414751023972 1c0719) replica.replica0.0300070f00192e4e: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138, confirmed_decree = -1 -D2020-12-29 17:00:14.772 (1609232414772613559 1c071a) replica.replica1.0300070f00192ea3: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138 -D2020-12-29 17:00:14.7725 (1609232414772625946 1c071a) replica.replica1.0300070f00192ea3: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138, confirmed_decree = -1 -D2020-12-29 17:00:14.858 (1609232414858280484 1c0719) replica.replica0.0300070f00192ff7: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138 -D2020-12-29 17:00:14.8585 (1609232414858294280 1c0719) replica.replica0.0300070f00192ff7: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138, confirmed_decree = -1 -D2020-12-29 17:00:14.902 (1609232414902281748 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:14.902 (1609232414902293808 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.902 (1609232414902335743 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 
17:00:14.903 (1609232414903480030 1c0719) replica.replica0.0300070f001930ae: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 63 -D2020-12-29 17:00:14.9035 (1609232414903488347 1c0719) replica.replica0.0300070f001930ae: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 63, confirmed_decree = -1 -D2020-12-29 17:00:14.905 (1609232414905529859 1c071a) replica.replica1.0300070f001930b8: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138 -D2020-12-29 17:00:14.9055 (1609232414905540170 1c071a) replica.replica1.0300070f001930b8: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138, confirmed_decree = -1 -D2020-12-29 17:00:14.939 (1609232414939504966 1c071a) replica.replica1.0300070f0019313c: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 174199 -D2020-12-29 17:00:14.9395 (1609232414939518206 1c071a) replica.replica1.0300070f0019313c: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 174199, confirmed_decree = -1 -D2020-12-29 17:00:14.977 (1609232414977605821 1c0719) replica.replica0.0300070f001931d5: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138 -D2020-12-29 17:00:14.9775 (1609232414977615405 1c0719) replica.replica0.0300070f001931d5: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 138, confirmed_decree = -1 -D2020-12-29 17:00:14.978 (1609232414978841042 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:14.978 (1609232414978848959 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.978 (1609232414978874355 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.979 (1609232414979717254 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:14.979 (1609232414979729514 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 
1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.979 (1609232414979751938 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.983 (1609232414983854830 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:14.983 (1609232414983865745 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.983 (1609232414983897939 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.986 (1609232414986543931 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:14.986 (1609232414986552792 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.986 (1609232414986577970 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:14.993 (1609232414993901791 1c0719) replica.replica0.0300070f00193216: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 174045 -D2020-12-29 17:00:14.9935 (1609232414993910222 1c0719) replica.replica0.0300070f00193216: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 174045, confirmed_decree = -1 -D2020-12-29 17:00:14.996 (1609232414996120173 1c071a) replica.replica1.0300070f00193220: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 173627 -D2020-12-29 17:00:14.9965 (1609232414996130159 1c071a) replica.replica1.0300070f00193220: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 173627, confirmed_decree = -1 -D2020-12-29 17:00:15.8 (1609232415008449528 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:15.8 (1609232415008471945 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:15.8 (1609232415008494622 1c071a) 
replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:16.628 (1609232416628854721 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232416628] -D2020-12-29 17:00:16.628 (1609232416628976078 1c0734) replica. fd1.030c00000000009f: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232416628], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:00:16.628 (1609232416628982431 1c0734) replica. fd1.030c00000000009f: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232416628 -D2020-12-29 17:00:17.717 (1609232417717073663 1c0719) replica.replica0.0300070f00195bb2: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.370000 init_prepare, mutation_tid=1595359 -D2020-12-29 17:00:19.628 (1609232419628921158 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232419628] -D2020-12-29 17:00:19.629 (1609232419629017226 1c0733) replica. fd0.030c00010000009b: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232419628], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:00:19.629 (1609232419629025785 1c0733) replica. fd0.030c00010000009b: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232419628 -D2020-12-29 17:00:20.596 (1609232420596102651 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 2543 MB, memused_res = 1184MB -D2020-12-29 17:00:20.597 (1609232420597186967 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232420596), last_report_time_ms(1609232410594) -D2020-12-29 17:00:22.170 (1609232422170608304 1c0719) replica.replica0.0300070f0019a10f: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.380000 init_prepare, mutation_tid=1613114 -D2020-12-29 17:00:22.628 (1609232422628971415 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232422628] -D2020-12-29 17:00:22.629 (1609232422629074564 1c0733) replica. fd0.030c00010000009d: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232422628], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:00:22.629 (1609232422629080480 1c0733) replica. 
fd0.030c00010000009d: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232422628 -D2020-12-29 17:00:24.514 (1609232424514601349 1c0712) replica.default0.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 17:00:24.605 (1609232424605098312 1c0719) replica.replica0.0300070f0019c682: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176582 -D2020-12-29 17:00:24.6055 (1609232424605114859 1c0719) replica.replica0.0300070f0019c682: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176582, confirmed_decree = -1 -D2020-12-29 17:00:24.624 (1609232424624638135 1c0719) replica.replica0.0300070f0019c6b1: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176581 -D2020-12-29 17:00:24.6245 (1609232424624652390 1c0719) replica.replica0.0300070f0019c6b1: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176581, confirmed_decree = -1 -D2020-12-29 17:00:24.625 (1609232424625455164 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:24.625 (1609232424625468190 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.625 (1609232424625513305 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.641 (1609232424641690783 1c071a) replica.replica1.0300070f0019c6e8: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 70 -D2020-12-29 17:00:24.6415 (1609232424641702325 1c071a) replica.replica1.0300070f0019c6e8: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 70, confirmed_decree = -1 -D2020-12-29 17:00:24.667 (1609232424667335869 1c071a) replica.replica1.0300070f0019c740: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 147 -D2020-12-29 17:00:24.6675 (1609232424667346506 1c071a) replica.replica1.0300070f0019c740: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 147, confirmed_decree = -1 
-D2020-12-29 17:00:24.751 (1609232424751137036 1c0719) replica.replica0.0300070f0019c859: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141 -D2020-12-29 17:00:24.7515 (1609232424751150637 1c0719) replica.replica0.0300070f0019c859: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141, confirmed_decree = -1 -D2020-12-29 17:00:24.772 (1609232424772793515 1c071a) replica.replica1.0300070f0019c8a4: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141 -D2020-12-29 17:00:24.7725 (1609232424772805856 1c071a) replica.replica1.0300070f0019c8a4: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141, confirmed_decree = -1 -D2020-12-29 17:00:24.858 (1609232424858428032 1c0719) replica.replica0.0300070f0019c9be: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141 -D2020-12-29 17:00:24.8585 (1609232424858439423 1c0719) replica.replica0.0300070f0019c9be: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141, confirmed_decree = -1 -D2020-12-29 17:00:24.902 (1609232424902374817 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:24.902 (1609232424902387663 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.902 (1609232424902407526 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.903 (1609232424903629117 1c0719) replica.replica0.0300070f0019ca3e: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 64 -D2020-12-29 17:00:24.9035 (1609232424903643250 1c0719) replica.replica0.0300070f0019ca3e: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 64, confirmed_decree = -1 -D2020-12-29 17:00:24.905 (1609232424905616605 1c071a) replica.replica1.0300070f0019ca46: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141 -D2020-12-29 17:00:24.9055 
(1609232424905628650 1c071a) replica.replica1.0300070f0019ca46: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141, confirmed_decree = -1 -D2020-12-29 17:00:24.939 (1609232424939617833 1c071a) replica.replica1.0300070f0019cab0: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176631 -D2020-12-29 17:00:24.9395 (1609232424939628817 1c071a) replica.replica1.0300070f0019cab0: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176631, confirmed_decree = -1 -D2020-12-29 17:00:24.977 (1609232424977707955 1c0719) replica.replica0.0300070f0019cb2b: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141 -D2020-12-29 17:00:24.9775 (1609232424977721392 1c0719) replica.replica0.0300070f0019cb2b: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 141, confirmed_decree = -1 -D2020-12-29 17:00:24.978 (1609232424978955546 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:24.978 (1609232424978964751 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.978 (1609232424978993808 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.979 (1609232424979850469 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:24.979 (1609232424979860981 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.979 (1609232424979895976 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.983 (1609232424983975255 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:24.983 (1609232424983987342 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.984 (1609232424984019057 1c0719) replica.replica0.03040000000000ae: 
replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.986 (1609232424986624053 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:24.986 (1609232424986639734 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.986 (1609232424986675668 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:24.994 (1609232424994007586 1c0719) replica.replica0.0300070f0019cb60: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176475 -D2020-12-29 17:00:24.9945 (1609232424994018092 1c0719) replica.replica0.0300070f0019cb60: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176475, confirmed_decree = -1 -D2020-12-29 17:00:24.996 (1609232424996194053 1c071a) replica.replica1.0300070f0019cb68: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176145 -D2020-12-29 17:00:24.9965 (1609232424996206147 1c071a) replica.replica1.0300070f0019cb68: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 176145, confirmed_decree = -1 -D2020-12-29 17:00:25.8U (1609232425008520140 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:00:25.8 (1609232425008538908 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:25.8 (1609232425008578659 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:00:25.1822 (1609232425182416884 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 17:00:25.182 (1609232425182433681 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 140 -D2020-12-29 17:00:25.1820 (1609232425182533754 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, 
garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 176571 -D2020-12-29 17:00:25.1821 (1609232425182561256 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 140 -D2020-12-29 17:00:25.1820 (1609232425182731222 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 175918 -D2020-12-29 17:00:25.1828 (1609232425182736589 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 140 -D2020-12-29 17:00:25.1820 (1609232425182861199 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 176491 -D2020-12-29 17:00:25.1821 (1609232425182874844 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 63 -D2020-12-29 17:00:25.1823 (1609232425182935802 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 176415 -D2020-12-29 17:00:25.1825 (1609232425182980433 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 176865 -D2020-12-29 17:00:25.1825 (1609232425182989696 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 60 -D2020-12-29 17:00:25.1820 (1609232425182994169 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 66 -D2020-12-29 17:00:25.1826 (1609232425182997371 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 146 -D2020-12-29 17:00:25.1836 (1609232425183002737 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 140 -D2020-12-29 17:00:25.1830 (1609232425183154228 1c0720) 
replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 176794 -D2020-12-29 17:00:25.1834 (1609232425183210487 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 386338 -D2020-12-29 17:00:25.1838 (1609232425183216223 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 140 -D2020-12-29 17:00:25.1830 (1609232425183244671 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 67 -D2020-12-29 17:00:25.1837 (1609232425183249781 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 140 -D2020-12-29 17:00:25.1830 (1609232425183301210 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 176582 -D2020-12-29 17:00:25.1832 (1609232425183305613 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 140 -D2020-12-29 17:00:25.183 (1609232425183344312 1c0720) replica.rep_long1.0301000000000001: mutation_log.cpp:1537:garbage_collection(): gc_shared: no file can be deleted, file_count_limit = 100, reserved_log_count = 9, reserved_log_size = 280704557, reserved_smallest_log = 1, reserved_largest_log = 9, stop_gc_log_index = 1, stop_gc_replica_count = 0, stop_gc_replica = 3.3, stop_gc_decree_gap = 56346, stop_gc_garbage_max_decree = 0, stop_gc_log_max_decree = 56346 -D2020-12-29 17:00:25.183 (1609232425183353640 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 946696 -D2020-12-29 17:00:25.629 (1609232425629034746 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232425629] -D2020-12-29 17:00:25.629 (1609232425629160773 1c0734) replica. fd1.030c0000000000a1: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232425629], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:00:25.629 (1609232425629168016 1c0734) replica. 
fd1.030c0000000000a1: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232425629 -D2020-12-29 17:00:27.3 (1609232427003010459 1c0719) replica.replica0.0300070f0019e606: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.390000 init_prepare, mutation_tid=1630752 -D2020-12-29 17:00:28.629 (1609232428629091317 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232428629] -D2020-12-29 17:00:28.629 (1609232428629215083 1c0734) replica. fd1.030c0000000000a3: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232428629], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:00:28.629 (1609232428629224383 1c0734) replica. fd1.030c0000000000a3: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232428629 -D2020-12-29 17:00:30.597 (1609232430597277007 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2557 MB, memused_res = 1195MB -D2020-12-29 17:00:30.598 (1609232430598407113 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232430597), last_report_time_ms(1609232420596) -D2020-12-29 17:00:31.470 (1609232431470141569 1c0719) replica.replica0.0300070f001a2bd5: replica_2pc.cpp:168:init_prepare(): 3.3@10.232.52.144:34803: mutation 3.3.3.400000 init_prepare, mutation_tid=1648620 -D2020-12-29 17:00:31.629 (1609232431629146508 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232431629] -D2020-12-29 17:00:31.629 (1609232431629245805 1c0733) replica. fd0.030c00010000009f: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232431629], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:00:31.629 (1609232431629253274 1c0733) replica. 
-D2020-12-29 17:00:35.378 (1609232435378824723 1c0712) replica.default0.0300070f001a687c: replica_stub.cpp:2800:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::WRITE, detect_action = replication::detect_action::START
-D2020-12-29 17:00:35.378 (1609232435378832996 1c0712) replica.default0.0300070f001a687c: hotkey_collector.cpp:265:on_start_detect(): [3.3@10.232.52.144:34803] starting to detect replication::hotkey_type::WRITE hotkey
-D2020-12-29 17:00:45.394 (1609232445394267658 1c0712) replica.default0.0300070f001b002e: replica_stub.cpp:2800:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::WRITE, detect_action = replication::detect_action::START
-W2020-12-29 17:00:45.394 (1609232445394275773 1c0712) replica.default0.0300070f001b002e: hotkey_collector.cpp:249:on_start_detect(): [3.3@10.232.52.144:34803] still detecting replication::hotkey_type::WRITE hotkey, state is hotkey_collector_state::FINE_DETECTING
-D2020-12-29 17:00:54.000 (1609232454000307548 1c0712) replica.default0.0300070f001b7947: replica_stub.cpp:2800:on_detect_hotkey(): [3.0@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::WRITE, detect_action = Unknown
-D2020-12-29 17:00:54.000 (1609232454000318301 1c0712) replica.default0.0300070f001b7947: hotkey_collector.cpp:292:query_result(): [3.0@10.232.52.144:34803] Can't get hotkey now, now state: hotkey_collector_state::STOPPED
-D2020-12-29 17:00:54.000 (1609232454000630910 1c0713) replica.default1.0300070f001b7948: replica_stub.cpp:2800:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::WRITE, detect_action = Unknown
-D2020-12-29 17:00:54.000 (1609232454000638668 1c0713) replica.default1.0300070f001b7948: hotkey_collector.cpp:292:query_result(): [3.3@10.232.52.144:34803] Can't get hotkey now, now state: hotkey_collector_state::FINE_DETECTING
-D2020-12-29 17:00:54.000 (1609232454000905665 1c0714) replica.default2.0300070f001b7949: replica_stub.cpp:2800:on_detect_hotkey(): [3.6@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::WRITE, detect_action = Unknown
-D2020-12-29 17:00:54.000 (1609232454000915449 1c0714) replica.default2.0300070f001b7949: hotkey_collector.cpp:292:query_result(): [3.6@10.232.52.144:34803] Can't get hotkey now, now state: hotkey_collector_state::STOPPED
-E2020-12-29 17:00:54.657 (1609232454657152131 1c0715) replica.default3.0306000000000040: hotkey_collector.cpp:173:change_state_by_result(): [3.3@10.232.52.144:34803] Find the hotkey: ThisisahotkeyThisisahotkey
-D2020-12-29 17:00:55.415 (1609232455415204427 1c0713) replica.default1.0300070f001b9849: replica_stub.cpp:2800:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::WRITE, detect_action = replication::detect_action::START
-W2020-12-29 17:00:55.415 (1609232455415215017 1c0713) replica.default1.0300070f001b9849: hotkey_collector.cpp:258:on_start_detect(): [3.3@10.232.52.144:34803] replication::hotkey_type::WRITE hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection
check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183416, confirmed_decree = -1 -D2020-12-29 17:01:04.978 (1609232464978168557 1c0719) replica.replica0.0300070f001c60d5: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155 -D2020-12-29 17:01:04.9785 (1609232464978179918 1c0719) replica.replica0.0300070f001c60d5: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 155, confirmed_decree = -1 -D2020-12-29 17:01:04.979 (1609232464979365237 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:04.979 (1609232464979378842 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:04.979 (1609232464979413915 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:04.980 (1609232464980369558 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:04.980 (1609232464980378714 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:04.980 (1609232464980416319 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:04.984 (1609232464984343176 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:04.984 (1609232464984353906 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:04.984 (1609232464984381602 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:04.986 (1609232464986960429 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:04.986 (1609232464986970267 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:04.987 (1609232464987001075 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 
with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:04.994 (1609232464994392006 1c0719) replica.replica0.0300070f001c6125: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183487 -D2020-12-29 17:01:04.9945 (1609232464994402134 1c0719) replica.replica0.0300070f001c6125: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183487, confirmed_decree = -1 -D2020-12-29 17:01:04.996 (1609232464996532756 1c071a) replica.replica1.0300070f001c6130: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182963 -D2020-12-29 17:01:04.9965 (1609232464996547529 1c071a) replica.replica1.0300070f001c6130: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182963, confirmed_decree = -1 -D2020-12-29 17:01:05.8U (1609232465008929889 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:05.8 (1609232465008945018 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:05.8 (1609232465008982160 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:07.629 (1609232467629839564 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232467629] -D2020-12-29 17:01:07.629 (1609232467629966168 1c0733) replica. fd0.030c0001000000ab: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232467629], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:01:07.629 (1609232467629976575 1c0733) replica. fd0.030c0001000000ab: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232467629 -D2020-12-29 17:01:10.602 (1609232470602106525 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2589 MB, memused_res = 1234MB -D2020-12-29 17:01:10.603 (1609232470603202065 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232470602), last_report_time_ms(1609232460600) -D2020-12-29 17:01:10.629 (1609232470629904879 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232470629] -D2020-12-29 17:01:10.630 (1609232470630041868 1c0733) replica. 
fd0.030c0001000000ad: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232470629], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:01:10.630 (1609232470630048603 1c0733) replica. fd0.030c0001000000ad: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232470629 -D2020-12-29 17:01:13.629 (1609232473629997575 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232473629] -D2020-12-29 17:01:13.630 (1609232473630192465 1c0734) replica. fd1.030c0000000000b1: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232473629], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:01:13.630 (1609232473630205248 1c0734) replica. fd1.030c0000000000b1: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232473629 -D2020-12-29 17:01:14.514 (1609232474514909941 1c0716) replica.default4.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 17:01:14.605 (1609232474605729166 1c0719) replica.replica0.0300070f001d371c: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183602 -D2020-12-29 17:01:14.6055 (1609232474605742481 1c0719) replica.replica0.0300070f001d371c: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183602, confirmed_decree = -1 -D2020-12-29 17:01:14.622 (1609232474622590766 1c0719) replica.replica0.030600000000001f: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.4@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:14.625 (1609232474625114844 1c0719) replica.replica0.0300070f001d378e: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183603 -D2020-12-29 17:01:14.6255 (1609232474625125721 1c0719) replica.replica0.0300070f001d378e: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183603, confirmed_decree = -1 -D2020-12-29 17:01:14.625 (1609232474625958399 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:14.625 (1609232474625973119 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.626 (1609232474626009104 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.642 
(1609232474642342327 1c071a) replica.replica1.0300070f001d3807: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 74 -D2020-12-29 17:01:14.642 (1609232474642357568 1c071a) replica.replica1.0300070f001d3807: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 74, confirmed_decree = -1 -D2020-12-29 17:01:14.646 (1609232474646547593 1c0719) replica.replica0.030600010000001c: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.0@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:14.667 (1609232474667823829 1c071a) replica.replica1.0300070f001d38a7: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 166 -D2020-12-29 17:01:14.6675 (1609232474667832762 1c071a) replica.replica1.0300070f001d38a7: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 166, confirmed_decree = -1 -D2020-12-29 17:01:14.751 (1609232474751664638 1c0719) replica.replica0.0300070f001d3a9f: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158 -D2020-12-29 17:01:14.7515 (1609232474751686896 1c0719) replica.replica0.0300070f001d3a9f: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158, confirmed_decree = -1 -D2020-12-29 17:01:14.759 (1609232474759041059 1c071a) replica.replica1.0306000000000024: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.3@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:14.773 (1609232474773241122 1c071a) replica.replica1.0300070f001d3b13: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158 -D2020-12-29 17:01:14.7735 (1609232474773250615 1c071a) replica.replica1.0300070f001d3b13: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158, confirmed_decree = -1 -D2020-12-29 17:01:14.859 (1609232474859087959 1c0719) replica.replica0.0300070f001d3d1b: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158 -D2020-12-29 17:01:14.8595 (1609232474859102524 1c0719) replica.replica0.0300070f001d3d1b: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158, confirmed_decree = -1 -D2020-12-29 17:01:14.903 (1609232474903111430 1c0719) 
replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:14.903 (1609232474903127912 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.903 (1609232474903164913 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.904 (1609232474904319968 1c0719) replica.replica0.0300070f001d3e4f: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 73 -D2020-12-29 17:01:14.9045 (1609232474904332399 1c0719) replica.replica0.0300070f001d3e4f: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 73, confirmed_decree = -1 -D2020-12-29 17:01:14.906 (1609232474906105237 1c071a) replica.replica1.0300070f001d3e59: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158 -D2020-12-29 17:01:14.9065 (1609232474906114275 1c071a) replica.replica1.0300070f001d3e59: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158, confirmed_decree = -1 -D2020-12-29 17:01:14.940 (1609232474940220438 1c071a) replica.replica1.0300070f001d3f20: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183417 -D2020-12-29 17:01:14.9405 (1609232474940237481 1c071a) replica.replica1.0300070f001d3f20: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183417, confirmed_decree = -1 -D2020-12-29 17:01:14.978 (1609232474978270411 1c0719) replica.replica0.0300070f001d400a: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158 -D2020-12-29 17:01:14.9785 (1609232474978280268 1c0719) replica.replica0.0300070f001d400a: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 158, confirmed_decree = -1 -D2020-12-29 17:01:14.979 (1609232474979491526 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:14.979 (1609232474979500379 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 
10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.979 (1609232474979534231 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.980 (1609232474980542579 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:14.980 (1609232474980550132 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.980 (1609232474980576026 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.984 (1609232474984489890 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:14.984 (1609232474984501071 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.984 (1609232474984532632 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.987 (1609232474987050145 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:14.987 (1609232474987059181 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.987 (1609232474987083805 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:14.994 (1609232474994559636 1c0719) replica.replica0.0300070f001d4076: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183488 -D2020-12-29 17:01:14.9945 (1609232474994567864 1c0719) replica.replica0.0300070f001d4076: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183488, confirmed_decree = -1 -D2020-12-29 17:01:14.996 (1609232474996703791 1c071a) replica.replica1.0300070f001d4081: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182964 -D2020-12-29 17:01:14.9965 (1609232474996716436 1c071a) replica.replica1.0300070f001d4081: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process 
group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182964, confirmed_decree = -1 -D2020-12-29 17:01:15.9 (1609232475009140444 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:15.9 (1609232475009149482 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:15.9 (1609232475009176276 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:16.630 (1609232476630084237 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232476630] -D2020-12-29 17:01:16.630 (1609232476630180749 1c0734) replica. fd1.030c0000000000b3: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232476630], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:01:16.630 (1609232476630187354 1c0734) replica. fd1.030c0000000000b3: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232476630 -D2020-12-29 17:01:19.630 (1609232479630137276 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232479630] -D2020-12-29 17:01:19.630 (1609232479630252192 1c0733) replica. fd0.030c0001000000af: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232479630], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:01:19.630 (1609232479630258872 1c0733) replica. fd0.030c0001000000af: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232479630 -D2020-12-29 17:01:20.603 (1609232480603278416 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 2589 MB, memused_res = 1234MB -D2020-12-29 17:01:20.604 (1609232480604292122 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232480603), last_report_time_ms(1609232470602) -D2020-12-29 17:01:22.630 (1609232482630199125 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232482630] -D2020-12-29 17:01:22.630 (1609232482630310269 1c0733) replica. fd0.030c0001000000b1: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232482630], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:01:22.630 (1609232482630316013 1c0733) replica. 
fd0.030c0001000000b1: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232482630 -D2020-12-29 17:01:24.514 (1609232484514973020 1c0712) replica.default0.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 17:01:24.605 (1609232484605923049 1c0719) replica.replica0.0300070f001e256f: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183603 -D2020-12-29 17:01:24.6055 (1609232484605937422 1c0719) replica.replica0.0300070f001e256f: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183603, confirmed_decree = -1 -D2020-12-29 17:01:24.625 (1609232484625280789 1c0719) replica.replica0.0300070f001e25f4: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183604 -D2020-12-29 17:01:24.6255 (1609232484625292716 1c0719) replica.replica0.0300070f001e25f4: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183604, confirmed_decree = -1 -D2020-12-29 17:01:24.626 (1609232484626102088 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:24.626 (1609232484626112921 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.626 (1609232484626147870 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.642 (1609232484642478907 1c071a) replica.replica1.0300070f001e2657: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 75 -D2020-12-29 17:01:24.6425 (1609232484642489649 1c071a) replica.replica1.0300070f001e2657: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 75, confirmed_decree = -1 -D2020-12-29 17:01:24.656 (1609232484656616562 1c071a) replica.replica1.0306000100000024: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.0@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:24.656 (1609232484656856787 1c0719) replica.replica0.0306000000000041: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.3@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:24.667 (1609232484667909380 1c071a) replica.replica1.0300070f001e26f7: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 169 -D2020-12-29 17:01:24.6675 (1609232484667919043 1c071a) replica.replica1.0300070f001e26f7: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 169, confirmed_decree = -1 -D2020-12-29 17:01:24.751 (1609232484751762111 1c0719) replica.replica0.0300070f001e28f3: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161 -D2020-12-29 17:01:24.7515 (1609232484751780394 1c0719) replica.replica0.0300070f001e28f3: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161, confirmed_decree = -1 -D2020-12-29 17:01:24.773 (1609232484773319144 1c071a) replica.replica1.0300070f001e296e: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161 -D2020-12-29 17:01:24.7735 (1609232484773330942 1c071a) replica.replica1.0300070f001e296e: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161, confirmed_decree = -1 -D2020-12-29 17:01:24.809 (1609232484809018395 1c071a) replica.replica1.0306000100000029: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.6@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:24.809 (1609232484809306658 1c0719) replica.replica0.0306000000000046: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.5@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:24.859 (1609232484859220150 1c0719) replica.replica0.0300070f001e2ac3: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161 -D2020-12-29 17:01:24.8595 (1609232484859274912 1c0719) replica.replica0.0300070f001e2ac3: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161, confirmed_decree = -1 -D2020-12-29 17:01:24.903 (1609232484903269080 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:24.903 (1609232484903288547 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.903 (1609232484903330055 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.904 (1609232484904649457 1c0719) replica.replica0.0300070f001e2b54: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, 
primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 74 -D2020-12-29 17:01:24.9045 (1609232484904660914 1c0719) replica.replica0.0300070f001e2b54: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 74, confirmed_decree = -1 -D2020-12-29 17:01:24.906 (1609232484906219959 1c071a) replica.replica1.0300070f001e2b5b: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161 -D2020-12-29 17:01:24.9065 (1609232484906231450 1c071a) replica.replica1.0300070f001e2b5b: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161, confirmed_decree = -1 -D2020-12-29 17:01:24.916 (1609232484916127342 1c071a) replica.replica1.030600010000002e: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.2@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:24.929 (1609232484929892547 1c071a) replica.replica1.030600000000004c: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.4@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:24.940 (1609232484940407689 1c071a) replica.replica1.0300070f001e2c12: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183418 -D2020-12-29 17:01:24.9405 (1609232484940422030 1c071a) replica.replica1.0300070f001e2c12: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183418, confirmed_decree = -1 -D2020-12-29 17:01:24.978 (1609232484978353415 1c0719) replica.replica0.0300070f001e2cdb: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161 -D2020-12-29 17:01:24.9785 (1609232484978364839 1c0719) replica.replica0.0300070f001e2cdb: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 161, confirmed_decree = -1 -D2020-12-29 17:01:24.979 (1609232484979609469 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:24.979 (1609232484979620688 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.979 (1609232484979653438 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.980 (1609232484980618418 1c071a) replica.replica1.0304000100000034: 
replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:24.980 (1609232484980628318 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.980 (1609232484980657949 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.984 (1609232484984645842 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:24.984 (1609232484984654602 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.984 (1609232484984685421 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.986 (1609232484986909445 1c0719) replica.replica0.0306000100000034: replica_chkpt.cpp:67:on_checkpoint_timer(): 3.7@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 17:01:24.987 (1609232484987122931 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:24.987 (1609232484987131430 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.987 (1609232484987160447 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:24.994 (1609232484994703328 1c0719) replica.replica0.0300070f001e2d3d: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183489 -D2020-12-29 17:01:24.9945 (1609232484994714001 1c0719) replica.replica0.0300070f001e2d3d: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183489, confirmed_decree = -1 -D2020-12-29 17:01:24.996 (1609232484996862122 1c071a) replica.replica1.0300070f001e2d49: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182965 -D2020-12-29 17:01:24.9965 (1609232484996870438 1c071a) replica.replica1.0300070f001e2d49: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182965, confirmed_decree = -1 -D2020-12-29 17:01:25.9U (1609232485009298547 1c071a) 
replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:01:25.9 (1609232485009315608 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:25.9 (1609232485009355640 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:01:25.184 (1609232485184164458 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 17:01:25.184 (1609232485184179473 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160 -D2020-12-29 17:01:25.1840 (1609232485184195799 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183602 -D2020-12-29 17:01:25.1842 (1609232485184198928 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 160 -D2020-12-29 17:01:25.1840 (1609232485184207279 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 182964 -D2020-12-29 17:01:25.1844 (1609232485184209242 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160 -D2020-12-29 17:01:25.1840 (1609232485184213176 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183603 -D2020-12-29 17:01:25.1843 (1609232485184220997 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 73 -D2020-12-29 17:01:25.1843 (1609232485184226231 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183488 -D2020-12-29 17:01:25.1848 (1609232485184229889 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, 
last_durable_decree= 0, plog_max_commit_on_disk = 183708 -D2020-12-29 17:01:25.1848 (1609232485184240549 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 69 -D2020-12-29 17:01:25.1849 (1609232485184270953 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 72 -D2020-12-29 17:01:25.1842 (1609232485184277893 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 168 -D2020-12-29 17:01:25.1848 (1609232485184281387 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160 -D2020-12-29 17:01:25.1840 (1609232485184289558 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183821 -D2020-12-29 17:01:25.1841 (1609232485184297733 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 447848 -D2020-12-29 17:01:25.1848 (1609232485184300252 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160 -D2020-12-29 17:01:25.1840 (1609232485184304391 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 74 -D2020-12-29 17:01:25.1844 (1609232485184307653 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160 -D2020-12-29 17:01:25.1840 (1609232485184313019 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183417 -D2020-12-29 17:01:25.1847 (1609232485184316613 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 160 -D2020-12-29 17:01:25.184 (1609232485184359942 1c0720) replica.rep_long1.0301000000000001: 
mutation_log.cpp:1537:garbage_collection(): gc_shared: no file can be deleted, file_count_limit = 100, reserved_log_count = 9, reserved_log_size = 299000753, reserved_smallest_log = 1, reserved_largest_log = 9, stop_gc_log_index = 1, stop_gc_replica_count = 0, stop_gc_replica = 3.3, stop_gc_decree_gap = 56346, stop_gc_garbage_max_decree = 0, stop_gc_log_max_decree = 56346 -D2020-12-29 17:01:25.184 (1609232485184368382 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 213980 -D2020-12-29 17:01:25.472 (1609232485472177559 1c0714) replica.default2.0300070f001e3760: replica_stub.cpp:2800:on_detect_hotkey(): [3.0@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::START -D2020-12-29 17:01:25.472 (1609232485472185983 1c0714) replica.default2.0300070f001e3760: hotkey_collector.cpp:265:on_start_detect(): [3.0@10.232.52.144:34803] starting to detect replication::hotkey_type::READ hotkey -D2020-12-29 17:01:25.472 (1609232485472920623 1c0712) replica.default0.0300070f001e3764: replica_stub.cpp:2800:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::START -D2020-12-29 17:01:25.472 (1609232485472927111 1c0712) replica.default0.0300070f001e3764: hotkey_collector.cpp:265:on_start_detect(): [3.3@10.232.52.144:34803] starting to detect replication::hotkey_type::READ hotkey -D2020-12-29 17:01:25.473 (1609232485473580319 1c0716) replica.default4.0300070f001e3768: replica_stub.cpp:2800:on_detect_hotkey(): [3.6@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::START -D2020-12-29 17:01:25.473 (1609232485473587297 1c0716) replica.default4.0300070f001e3768: hotkey_collector.cpp:265:on_start_detect(): [3.6@10.232.52.144:34803] starting to detect replication::hotkey_type::READ hotkey -D2020-12-29 17:01:25.630 (1609232485630268625 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232485630] -D2020-12-29 17:01:25.630 (1609232485630382924 1c0734) replica. fd1.030c0000000000b5: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232485630], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:01:25.630 (1609232485630394354 1c0734) replica. fd1.030c0000000000b5: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232485630 -D2020-12-29 17:01:28.630 (1609232488630342591 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232488630] -D2020-12-29 17:01:28.630 (1609232488630461187 1c0734) replica. fd1.030c0000000000b7: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232488630], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:01:28.630 (1609232488630471431 1c0734) replica. 
last_committed_decree = 183422 -D2020-12-29 17:02:04.9415 (1609232524941181286 1c071a) replica.replica1.0300070f0021ae43: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183422, confirmed_decree = -1 -D2020-12-29 17:02:04.978 (1609232524978797273 1c0719) replica.replica0.0300070f0021af08: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 175 -D2020-12-29 17:02:04.9785 (1609232524978809491 1c0719) replica.replica0.0300070f0021af08: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 175, confirmed_decree = -1 -D2020-12-29 17:02:04.979 (1609232524979997312 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:04.980 (1609232524980008706 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:04.980 (1609232524980040061 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:04.981 (1609232524981068694 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:04.981 (1609232524981101964 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:04.981 (1609232524981134220 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:04.985 (1609232524985279649 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:04.985 (1609232524985288722 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:04.985 (1609232524985316443 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:04.987 (1609232524987499366 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:04.987 (1609232524987508328 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY 
-D2020-12-29 17:02:04.987 (1609232524987538204 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:04.995 (1609232524995279970 1c0719) replica.replica0.0300070f0021af5c: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183493 -D2020-12-29 17:02:04.9955 (1609232524995292255 1c0719) replica.replica0.0300070f0021af5c: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183493, confirmed_decree = -1 -D2020-12-29 17:02:04.997 (1609232524997508939 1c071a) replica.replica1.0300070f0021af69: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182969 -D2020-12-29 17:02:04.9975 (1609232524997520389 1c071a) replica.replica1.0300070f0021af69: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182969, confirmed_decree = -1 -D2020-12-29 17:02:05.9U (1609232525009934135 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:05.9 (1609232525009951705 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:05.9 (1609232525009992752 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:07.631 (1609232527631207781 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232527631] -D2020-12-29 17:02:07.631 (1609232527631325241 1c0733) replica. fd0.030c0001000000bf: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232527631], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:02:07.631 (1609232527631331461 1c0733) replica. fd0.030c0001000000bf: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232527631 -D2020-12-29 17:02:10.609 (1609232530609043459 1c0736) unknown.io-thrd.1836854: builtin_counters.cpp:36:update_counters(): memused_virt = 2589 MB, memused_res = 1234MB -D2020-12-29 17:02:10.610 (1609232530610052151 1c0736) unknown.io-thrd.1836854: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232530608), last_report_time_ms(1609232520607) -D2020-12-29 17:02:10.631 (1609232530631282810 1c0734) replica. 
fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232530631] -D2020-12-29 17:02:10.631 (1609232530631377895 1c0733) replica. fd0.030c0001000000c1: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232530631], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:02:10.631 (1609232530631386101 1c0733) replica. fd0.030c0001000000c1: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232530631 -D2020-12-29 17:02:13.631 (1609232533631335632 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232533631] -D2020-12-29 17:02:13.631 (1609232533631473749 1c0734) replica. fd1.030c0000000000c5: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232533631], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:02:13.631 (1609232533631484096 1c0734) replica. fd1.030c0000000000c5: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232533631 -D2020-12-29 17:02:14.515 (1609232534515302539 1c0714) replica.default2.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 17:02:14.606 (1609232534606804745 1c0719) replica.replica0.0300070f00228d9a: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183608 -D2020-12-29 17:02:14.6065 (1609232534606818383 1c0719) replica.replica0.0300070f00228d9a: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183608, confirmed_decree = -1 -D2020-12-29 17:02:14.626 (1609232534626075731 1c0719) replica.replica0.0300070f00228df8: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183609 -D2020-12-29 17:02:14.6265 (1609232534626089877 1c0719) replica.replica0.0300070f00228df8: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183609, confirmed_decree = -1 -D2020-12-29 17:02:14.626 (1609232534626906806 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:14.626 (1609232534626917587 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.626 (1609232534626986717 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY 
-D2020-12-29 17:02:14.643 (1609232534643079879 1c071a) replica.replica1.0300070f00228e53: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81 -D2020-12-29 17:02:14.6435 (1609232534643089837 1c071a) replica.replica1.0300070f00228e53: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 81, confirmed_decree = -1 -D2020-12-29 17:02:14.668 (1609232534668403737 1c071a) replica.replica1.0300070f00228ee5: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 187 -D2020-12-29 17:02:14.6685 (1609232534668425601 1c071a) replica.replica1.0300070f00228ee5: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 187, confirmed_decree = -1 -D2020-12-29 17:02:14.752 (1609232534752264833 1c0719) replica.replica0.0300070f002290cf: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178 -D2020-12-29 17:02:14.7525 (1609232534752280101 1c0719) replica.replica0.0300070f002290cf: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178, confirmed_decree = -1 -D2020-12-29 17:02:14.773 (1609232534773798919 1c071a) replica.replica1.0300070f0022914f: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178 -D2020-12-29 17:02:14.7735 (1609232534773814452 1c071a) replica.replica1.0300070f0022914f: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178, confirmed_decree = -1 -D2020-12-29 17:02:14.859 (1609232534859946236 1c0719) replica.replica0.0300070f002292c3: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178 -D2020-12-29 17:02:14.8595 (1609232534859964123 1c0719) replica.replica0.0300070f002292c3: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178, confirmed_decree = -1 -D2020-12-29 17:02:14.904 (1609232534904108420 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:14.904 (1609232534904127544 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state 
replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.904 (1609232534904176349 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.905 (1609232534905225414 1c0719) replica.replica0.0300070f00229364: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 84 -D2020-12-29 17:02:14.9055 (1609232534905239020 1c0719) replica.replica0.0300070f00229364: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 84, confirmed_decree = -1 -D2020-12-29 17:02:14.906 (1609232534906736497 1c071a) replica.replica1.0300070f00229369: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178 -D2020-12-29 17:02:14.9065 (1609232534906750277 1c071a) replica.replica1.0300070f00229369: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178, confirmed_decree = -1 -D2020-12-29 17:02:14.941 (1609232534941353249 1c071a) replica.replica1.0300070f00229410: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183423 -D2020-12-29 17:02:14.9415 (1609232534941372247 1c071a) replica.replica1.0300070f00229410: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183423, confirmed_decree = -1 -D2020-12-29 17:02:14.978 (1609232534978908932 1c0719) replica.replica0.0300070f002294d7: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178 -D2020-12-29 17:02:14.9785 (1609232534978922111 1c0719) replica.replica0.0300070f002294d7: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 178, confirmed_decree = -1 -D2020-12-29 17:02:14.980 (1609232534980113998 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:14.980 (1609232534980124718 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.980 (1609232534980159581 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.981 
(1609232534981260647 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:14.981 (1609232534981273568 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.981 (1609232534981304997 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.985 (1609232534985426306 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:14.985 (1609232534985441200 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.985 (1609232534985479909 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.987 (1609232534987597589 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:14.987 (1609232534987608637 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.987 (1609232534987635638 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:14.995 (1609232534995450339 1c0719) replica.replica0.0300070f00229530: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183494 -D2020-12-29 17:02:14.9955 (1609232534995464744 1c0719) replica.replica0.0300070f00229530: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183494, confirmed_decree = -1 -D2020-12-29 17:02:14.997 (1609232534997664964 1c071a) replica.replica1.0300070f0022953e: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182970 -D2020-12-29 17:02:14.9975 (1609232534997678105 1c071a) replica.replica1.0300070f0022953e: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182970, confirmed_decree = -1 -D2020-12-29 17:02:15.10 (1609232535010126473 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check 
-D2020-12-29 17:02:15.10 (1609232535010145881 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:15.10 (1609232535010184706 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:16.631 (1609232536631386210 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232536631] -D2020-12-29 17:02:16.631 (1609232536631496794 1c0734) replica. fd1.030c0000000000c7: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232536631], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:02:16.631 (1609232536631502360 1c0734) replica. fd1.030c0000000000c7: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232536631 -D2020-12-29 17:02:19.631 (1609232539631444351 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232539631] -D2020-12-29 17:02:19.631 (1609232539631558141 1c0733) replica. fd0.030c0001000000c3: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232539631], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:02:19.631 (1609232539631565346 1c0733) replica. fd0.030c0001000000c3: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232539631 -D2020-12-29 17:02:20.610 (1609232540610123048 1c0735) unknown.io-thrd.1836853: builtin_counters.cpp:36:update_counters(): memused_virt = 2589 MB, memused_res = 1234MB -D2020-12-29 17:02:20.611 (1609232540611137850 1c0735) unknown.io-thrd.1836853: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609232540610), last_report_time_ms(1609232530608) -D2020-12-29 17:02:22.631 (1609232542631509849 1c0734) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232542631] -D2020-12-29 17:02:22.631 (1609232542631650622 1c0733) replica. fd0.030c0001000000c5: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232542631], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:02:22.631 (1609232542631658125 1c0733) replica. 
fd0.030c0001000000c5: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232542631 -D2020-12-29 17:02:24.515 (1609232544515388364 1c0713) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 17:02:24.607 (1609232544607022512 1c0719) replica.replica0.0300070f0023714f: replica_stub.cpp:1095:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183609 -D2020-12-29 17:02:24.6075 (1609232544607042028 1c0719) replica.replica0.0300070f0023714f: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183609, confirmed_decree = -1 -D2020-12-29 17:02:24.626 (1609232544626251089 1c0719) replica.replica0.0300070f002371b6: replica_stub.cpp:1095:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183610 -D2020-12-29 17:02:24.6265 (1609232544626261463 1c0719) replica.replica0.0300070f002371b6: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183610, confirmed_decree = -1 -D2020-12-29 17:02:24.627 (1609232544627118995 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:24.627 (1609232544627134208 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.627 (1609232544627163982 1c071a) replica.replica1.03040001000000c8: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.643 (1609232544643274104 1c071a) replica.replica1.0300070f00237220: replica_stub.cpp:1095:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 82 -D2020-12-29 17:02:24.6435 (1609232544643306630 1c071a) replica.replica1.0300070f00237220: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 82, confirmed_decree = -1 -D2020-12-29 17:02:24.668 (1609232544668504834 1c071a) replica.replica1.0300070f002372b8: replica_stub.cpp:1095:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 190 -D2020-12-29 17:02:24.6685 (1609232544668517005 1c071a) replica.replica1.0300070f002372b8: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 190, confirmed_decree = -1 
-D2020-12-29 17:02:24.752 (1609232544752397978 1c0719) replica.replica0.0300070f002374aa: replica_stub.cpp:1095:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181 -D2020-12-29 17:02:24.7525 (1609232544752409833 1c0719) replica.replica0.0300070f002374aa: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181, confirmed_decree = -1 -D2020-12-29 17:02:24.773 (1609232544773920779 1c071a) replica.replica1.0300070f0023752c: replica_stub.cpp:1095:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181 -D2020-12-29 17:02:24.7735 (1609232544773932984 1c071a) replica.replica1.0300070f0023752c: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181, confirmed_decree = -1 -D2020-12-29 17:02:24.860 (1609232544860040261 1c0719) replica.replica0.0300070f00237721: replica_stub.cpp:1095:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181 -D2020-12-29 17:02:24.8605 (1609232544860056676 1c0719) replica.replica0.0300070f00237721: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181, confirmed_decree = -1 -D2020-12-29 17:02:24.904 (1609232544904372564 1c0719) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:24.904 (1609232544904390691 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.904 (1609232544904425955 1c0719) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.905 (1609232544905322361 1c0719) replica.replica0.0300070f00237820: replica_stub.cpp:1095:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 87 -D2020-12-29 17:02:24.9055 (1609232544905334774 1c0719) replica.replica0.0300070f00237820: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 87, confirmed_decree = -1 -D2020-12-29 17:02:24.906 (1609232544906796131 1c071a) replica.replica1.0300070f0023782c: replica_stub.cpp:1095:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181 -D2020-12-29 17:02:24.9065 
(1609232544906806426 1c071a) replica.replica1.0300070f0023782c: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181, confirmed_decree = -1 -D2020-12-29 17:02:24.941 (1609232544941533300 1c071a) replica.replica1.0300070f00237904: replica_stub.cpp:1095:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183424 -D2020-12-29 17:02:24.9415 (1609232544941542524 1c071a) replica.replica1.0300070f00237904: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183424, confirmed_decree = -1 -D2020-12-29 17:02:24.978 (1609232544978994338 1c0719) replica.replica0.0300070f002379ed: replica_stub.cpp:1095:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181 -D2020-12-29 17:02:24.9795 (1609232544979009333 1c0719) replica.replica0.0300070f002379ed: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 181, confirmed_decree = -1 -D2020-12-29 17:02:24.980 (1609232544980203602 1c0719) replica.replica0.0304000000000032: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:24.980 (1609232544980211577 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.980 (1609232544980243803 1c0719) replica.replica0.0304000000000032: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.981 (1609232544981375251 1c071a) replica.replica1.0304000100000034: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:24.981 (1609232544981389731 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.981 (1609232544981419129 1c071a) replica.replica1.0304000100000034: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.985 (1609232544985607996 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:24.985 (1609232544985616850 1c0719) replica.replica0.03040000000000ae: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.985 (1609232544985640260 1c0719) replica.replica0.03040000000000ae: 
replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.987 (1609232544987691505 1c071a) replica.replica1.030400010000003b: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:24.987 (1609232544987699738 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.987 (1609232544987730429 1c071a) replica.replica1.030400010000003b: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:24.995 (1609232544995601826 1c0719) replica.replica0.0300070f00237a4b: replica_stub.cpp:1095:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183495 -D2020-12-29 17:02:24.9955 (1609232544995612869 1c0719) replica.replica0.0300070f00237a4b: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 183495, confirmed_decree = -1 -D2020-12-29 17:02:24.997 (1609232544997799647 1c071a) replica.replica1.0300070f00237a5d: replica_stub.cpp:1095:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182971 -D2020-12-29 17:02:24.9975 (1609232544997808989 1c071a) replica.replica1.0300070f00237a5d: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 182971, confirmed_decree = -1 -D2020-12-29 17:02:25.10 (1609232545010311200 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 17:02:25.10 (1609232545010320333 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:25.10 (1609232545010346005 1c071a) replica.replica1.03040001000000c1: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 17:02:25.1846 (1609232545184645465 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1620:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 17:02:25.184 (1609232545184659771 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 180 -D2020-12-29 17:02:25.1840 (1609232545184674129 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, 
garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183608 -D2020-12-29 17:02:25.1848 (1609232545184678731 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 1, last_durable_decree= 1, plog_max_commit_on_disk = 180 -D2020-12-29 17:02:25.1840 (1609232545184688199 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 182970 -D2020-12-29 17:02:25.1840 (1609232545184717789 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 180 -D2020-12-29 17:02:25.1840 (1609232545184729354 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183609 -D2020-12-29 17:02:25.1849 (1609232545184740019 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 84 -D2020-12-29 17:02:25.1844 (1609232545184756268 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183494 -D2020-12-29 17:02:25.1844 (1609232545184763048 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183714 -D2020-12-29 17:02:25.1844 (1609232545184769206 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 80 -D2020-12-29 17:02:25.1840 (1609232545184774096 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 80 -D2020-12-29 17:02:25.1840 (1609232545184776289 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 189 -D2020-12-29 17:02:25.1849 (1609232545184778456 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 180 -D2020-12-29 17:02:25.1840 (1609232545184785087 1c0720) 
replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183827 -D2020-12-29 17:02:25.1847 (1609232545184792345 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 447854 -D2020-12-29 17:02:25.1844 (1609232545184796473 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 180 -D2020-12-29 17:02:25.1840 (1609232545184801633 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 81 -D2020-12-29 17:02:25.1841 (1609232545184881636 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 180 -D2020-12-29 17:02:25.1840 (1609232545184893554 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 183423 -D2020-12-29 17:02:25.1843 (1609232545184897155 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1660:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 180 -D2020-12-29 17:02:25.184 (1609232545184934797 1c0720) replica.rep_long1.0301000000000001: mutation_log.cpp:1537:garbage_collection(): gc_shared: no file can be deleted, file_count_limit = 100, reserved_log_count = 9, reserved_log_size = 299040369, reserved_smallest_log = 1, reserved_largest_log = 9, stop_gc_log_index = 1, stop_gc_replica_count = 0, stop_gc_replica = 3.3, stop_gc_decree_gap = 56346, stop_gc_garbage_max_decree = 0, stop_gc_log_max_decree = 56346 -D2020-12-29 17:02:25.184 (1609232545184944144 1c0720) replica.rep_long1.0301000000000001: replica_stub.cpp:1771:on_gc(): finish to garbage collection, time_used_ns = 308629 -D2020-12-29 17:02:25.631 (1609232545631573114 1c0733) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609232545631] -D2020-12-29 17:02:25.631 (1609232545631675708 1c0734) replica. fd1.030c0000000000c9: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609232545631], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 17:02:25.631 (1609232545631685334 1c0734) replica. fd1.030c0000000000c9: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609232545631 -D2020-12-29 17:02:28.631 (1609232548631629356 1c0733) replica. 
1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9 +D2020-12-29 21:33:48.79.5 (1609248828079130318 1d629b) replica.replica0.0300629100057ce6: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9, confirmed_decree = -1 +D2020-12-29 21:33:48.82 (1609248828082133651 1d629b) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:33:48.82 (1609248828082141693 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:48.82 (1609248828082201228 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:48.176 (1609248828176564296 1d629c) replica.replica1.0304000100000024: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:33:48.176 (1609248828176577718 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:48.176 (1609248828176613804 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:48.176 (1609248828176646504 1d629b) replica.replica0.030062910005815d: replica_stub.cpp:1096:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15 +D2020-12-29 21:33:48.1765 (1609248828176657780 1d629b) replica.replica0.030062910005815d: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15, confirmed_decree = -1 +D2020-12-29 21:33:48.198 (1609248828198049443 1d629c) replica.replica1.030062910005824d: replica_stub.cpp:1096:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15 +D2020-12-29 21:33:48.1985 (1609248828198064513 1d629c) replica.replica1.030062910005824d: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15, confirmed_decree = -1 +D2020-12-29 21:33:48.198 (1609248828198273920 1d629c) replica.replica1.030062910005824f: replica_stub.cpp:1096:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15 +D2020-12-29 21:33:48.1985 (1609248828198283026 1d629c) replica.replica1.030062910005824f: 
replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15, confirmed_decree = -1 +D2020-12-29 21:33:48.841 (1609248828841405261 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248828841] +D2020-12-29 21:33:48.841 (1609248828841525005 1d62b6) replica. fd1.030c000000000020: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248828841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] +D2020-12-29 21:33:48.841 (1609248828841534004 1d62b6) replica. fd1.030c000000000020: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248828841 +D2020-12-29 21:33:51.841 (1609248831841460196 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248831841] +D2020-12-29 21:33:51.841 (1609248831841582473 1d62b6) replica. fd1.030c000000000022: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248831841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] +D2020-12-29 21:33:51.841 (1609248831841611972 1d62b6) replica. fd1.030c000000000022: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248831841 +D2020-12-29 21:33:51.850 (1609248831850527887 1d62bb) unknown.io-thrd.1925819: builtin_counters.cpp:36:update_counters(): memused_virt = 1341 MB, memused_res = 224MB +D2020-12-29 21:33:51.851 (1609248831851585151 1d62bb) unknown.io-thrd.1925819: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248831850), last_report_time_ms(1609248821849) +D2020-12-29 21:33:54.841 (1609248834841523371 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248834841] +D2020-12-29 21:33:54.841 (1609248834841638674 1d62b5) replica. fd0.030c000100000024: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248834841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] +D2020-12-29 21:33:54.841 (1609248834841649236 1d62b5) replica. 
fd0.030c000100000024: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248834841 +D2020-12-29 21:33:55.833 (1609248835833360617 1d6295) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) +D2020-12-29 21:33:55.871 (1609248835871294749 1d629c) replica.replica1.030062910006e324: replica_stub.cpp:1096:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18 +D2020-12-29 21:33:55.871 (1609248835871306117 1d629c) replica.replica1.030062910006e324: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18, confirmed_decree = -1 +D2020-12-29 21:33:55.873 (1609248835873071463 1d629c) replica.replica1.0304000100000043: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:33:55.873 (1609248835873081328 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:55.873 (1609248835873113042 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:55.875 (1609248835875616930 1d629b) replica.replica0.0304000000000068: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:33:55.875 (1609248835875628908 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:55.875 (1609248835875660190 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:55.876 (1609248835876960601 1d629c) replica.replica1.030062910006e362: replica_stub.cpp:1096:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11 +D2020-12-29 21:33:55.8765 (1609248835876967819 1d629c) replica.replica1.030062910006e362: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11, confirmed_decree = -1 +D2020-12-29 21:33:55.880 (1609248835880457627 1d629b) replica.replica0.030062910006e38e: replica_stub.cpp:1096:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 +D2020-12-29 21:33:55.8805 (1609248835880467764 1d629b) replica.replica0.030062910006e38e: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 +D2020-12-29 21:33:55.886 (1609248835886978792 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:33:55.886 (1609248835886987760 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:55.887 (1609248835887012911 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:56.8 (1609248836008966591 1d629b) replica.replica0.030062910006e907: replica_stub.cpp:1096:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18 +D2020-12-29 21:33:56.82.5 (1609248836008976130 1d629b) replica.replica0.030062910006e907: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18, confirmed_decree = -1 +D2020-12-29 21:33:56.265 (1609248836265736320 1d629c) replica.replica1.030062910006f4d9: replica_stub.cpp:1096:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 +D2020-12-29 21:33:56.2655 (1609248836265749134 1d629c) replica.replica1.030062910006f4d9: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 +D2020-12-29 21:33:56.294 (1609248836294512966 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:33:56.294 (1609248836294528642 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:56.294 (1609248836294564858 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:56.296 (1609248836296175098 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:33:56.296 (1609248836296182817 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:56.296 (1609248836296209150 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:56.388 (1609248836388262320 1d629b) replica.replica0.030062910006fa7a: 
replica_stub.cpp:1096:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 +D2020-12-29 21:33:56.3885 (1609248836388273936 1d629b) replica.replica0.030062910006fa7a: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 +D2020-12-29 21:33:56.420 (1609248836420358562 1d629c) replica.replica1.030062910006fbe2: replica_stub.cpp:1096:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 +D2020-12-29 21:33:56.4205 (1609248836420370080 1d629c) replica.replica1.030062910006fbe2: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 +D2020-12-29 21:33:56.500 (1609248836500044906 1d629b) replica.replica0.030062910006ff1b: replica_stub.cpp:1096:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 +D2020-12-29 21:33:56.5005 (1609248836500054607 1d629b) replica.replica0.030062910006ff1b: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 +D2020-12-29 21:33:57.841 (1609248837841583251 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248837841] +D2020-12-29 21:33:57.841 (1609248837841757682 1d62b5) replica. fd0.030c000100000026: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248837841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] +D2020-12-29 21:33:57.841 (1609248837841768223 1d62b5) replica. 
fd0.030c000100000026: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248837841 +D2020-12-29 21:33:58.78 (1609248838078746101 1d629b) replica.replica0.0300629100074063: replica_stub.cpp:1096:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19 +D2020-12-29 21:33:58.78.5 (1609248838078757992 1d629b) replica.replica0.0300629100074063: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19, confirmed_decree = -1 +D2020-12-29 21:33:58.79 (1609248838079200414 1d629b) replica.replica0.0300629100074069: replica_stub.cpp:1096:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9 +D2020-12-29 21:33:58.79.5 (1609248838079207854 1d629b) replica.replica0.0300629100074069: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9, confirmed_decree = -1 +D2020-12-29 21:33:58.82 (1609248838082243085 1d629b) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:33:58.82 (1609248838082251433 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:58.82 (1609248838082297653 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:58.176 (1609248838176749217 1d629c) replica.replica1.0304000100000024: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:33:58.176 (1609248838176762733 1d629b) replica.replica0.03006291000744f7: replica_stub.cpp:1096:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19 +D2020-12-29 21:33:58.1765 (1609248838176780559 1d629b) replica.replica0.03006291000744f7: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19, confirmed_decree = -1 +D2020-12-29 21:33:58.176 (1609248838176869310 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:58.176 (1609248838176904072 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:33:58.198 (1609248838198136769 1d629c) replica.replica1.03006291000745ee: 
replica_stub.cpp:1096:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19 +D2020-12-29 21:33:58.1985 (1609248838198150873 1d629c) replica.replica1.03006291000745ee: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19, confirmed_decree = -1 +D2020-12-29 21:33:58.198 (1609248838198348603 1d629c) replica.replica1.03006291000745f1: replica_stub.cpp:1096:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19 +D2020-12-29 21:33:58.1985 (1609248838198355198 1d629c) replica.replica1.03006291000745f1: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19, confirmed_decree = -1 +D2020-12-29 21:34:00.841 (1609248840841646324 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248840841] +D2020-12-29 21:34:00.841 (1609248840841762956 1d62b6) replica. fd1.030c000000000024: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248840841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] +D2020-12-29 21:34:00.841 (1609248840841773304 1d62b6) replica. fd1.030c000000000024: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248840841 +D2020-12-29 21:34:01.851 (1609248841851649201 1d62ba) unknown.io-thrd.1925818: builtin_counters.cpp:36:update_counters(): memused_virt = 1397 MB, memused_res = 232MB +D2020-12-29 21:34:01.852 (1609248841852758624 1d62ba) unknown.io-thrd.1925818: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248841851), last_report_time_ms(1609248831850) +D2020-12-29 21:34:03.841 (1609248843841824451 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248843841] +D2020-12-29 21:34:03.841 (1609248843841969298 1d62b6) replica. fd1.030c000000000026: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248843841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] +D2020-12-29 21:34:03.841 (1609248843841979505 1d62b6) replica. 
fd1.030c000000000026: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248843841 +D2020-12-29 21:34:05.833 (1609248845833428290 1d6298) replica.default4.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) +D2020-12-29 21:34:05.871 (1609248845871390545 1d629c) replica.replica1.030062910008911b: replica_stub.cpp:1096:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 +D2020-12-29 21:34:05.8715 (1609248845871403698 1d629c) replica.replica1.030062910008911b: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 +D2020-12-29 21:34:05.873 (1609248845873156813 1d629c) replica.replica1.0304000100000043: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:34:05.873 (1609248845873169262 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:05.873 (1609248845873195806 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:05.875 (1609248845875698200 1d629b) replica.replica0.0304000000000068: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:34:05.875 (1609248845875708025 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:05.875 (1609248845875749036 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:05.877 (1609248845877043001 1d629c) replica.replica1.030062910008915d: replica_stub.cpp:1096:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 14 +D2020-12-29 21:34:05.8775 (1609248845877053788 1d629c) replica.replica1.030062910008915d: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 14, confirmed_decree = -1 +D2020-12-29 21:34:05.880 (1609248845880606117 1d629b) replica.replica0.0300629100089181: replica_stub.cpp:1096:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 +D2020-12-29 21:34:05.8805 (1609248845880618997 1d629b) replica.replica0.0300629100089181: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 +D2020-12-29 21:34:05.887 (1609248845887117970 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:34:05.887 (1609248845887127050 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:05.887 (1609248845887156432 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:05.955 (1609248845955443037 1d6294) replica.default0.03006291000894c0: replica_stub.cpp:2806:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::START +D2020-12-29 21:34:05.955 (1609248845955453356 1d6294) replica.default0.03006291000894c0: hotkey_collector.cpp:265:on_start_detect(): [3.3@10.232.52.144:34803] starting to detect replication::hotkey_type::READ hotkey +D2020-12-29 21:34:06.9 (1609248846009056507 1d629b) replica.replica0.0300629100089736: replica_stub.cpp:1096:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 +D2020-12-29 21:34:06.92.5 (1609248846009067176 1d629b) replica.replica0.0300629100089736: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 +D2020-12-29 21:34:06.265 (1609248846265912552 1d629c) replica.replica1.030062910008a2ef: replica_stub.cpp:1096:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 +D2020-12-29 21:34:06.2655 (1609248846265926397 1d629c) replica.replica1.030062910008a2ef: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 +D2020-12-29 21:34:06.294 (1609248846294685058 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:34:06.294 (1609248846294697604 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:06.294 (1609248846294733828 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:06.296 (1609248846296337430 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:34:06.296 (1609248846296345501 1d629c) replica.replica1.03040001000000c7: 
replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:06.296 (1609248846296372424 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:06.388 (1609248846388417813 1d629b) replica.replica0.030062910008a84f: replica_stub.cpp:1096:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 +D2020-12-29 21:34:06.3885 (1609248846388457565 1d629b) replica.replica0.030062910008a84f: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 +D2020-12-29 21:34:06.420 (1609248846420559683 1d629c) replica.replica1.030062910008a9a6: replica_stub.cpp:1096:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 +D2020-12-29 21:34:06.4205 (1609248846420570016 1d629c) replica.replica1.030062910008a9a6: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 +D2020-12-29 21:34:06.500 (1609248846500230168 1d629b) replica.replica0.030062910008ad5a: replica_stub.cpp:1096:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 +D2020-12-29 21:34:06.5005 (1609248846500242835 1d629b) replica.replica0.030062910008ad5a: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 +D2020-12-29 21:34:06.841 (1609248846841889699 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248846841] +D2020-12-29 21:34:06.842 (1609248846842007294 1d62b5) replica. fd0.030c000100000028: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248846841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] +D2020-12-29 21:34:06.842 (1609248846842017972 1d62b5) replica. 
fd0.030c000100000028: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248846841 +D2020-12-29 21:34:07.573 (1609248847573060687 1d629b) replica.replica0.0306000100000005: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.3@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:07.580 (1609248847580131317 1d629c) replica.replica1.0306000000000004: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.0@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:07.735 (1609248847735877288 1d629b) replica.replica0.030600010000000a: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.2@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:07.754 (1609248847754682017 1d629c) replica.replica1.0306000000000009: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.5@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:07.880 (1609248847880357190 1d629c) replica.replica1.030600010000000f: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.1@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:07.898 (1609248847898283928 1d629b) replica.replica0.030600000000000e: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.6@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:08.13 (1609248848013505316 1d629b) replica.replica0.0306000100000014: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.4@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:08.62 (1609248848062942648 1d629b) replica.replica0.0306000000000015: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.1@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:08.78 (1609248848078839530 1d629b) replica.replica0.030062910008f5d8: replica_stub.cpp:1096:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22 +D2020-12-29 21:34:08.78.5 (1609248848078871582 1d629b) replica.replica0.030062910008f5d8: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22, confirmed_decree = -1 +D2020-12-29 21:34:08.79 (1609248848079357045 1d629b) replica.replica0.030062910008f5df: replica_stub.cpp:1096:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10 +D2020-12-29 21:34:08.79.5 (1609248848079366933 1d629b) replica.replica0.030062910008f5df: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10, confirmed_decree = -1 +D2020-12-29 21:34:08.82 (1609248848082346804 1d629b) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:34:08.82 (1609248848082362206 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:08.82 (1609248848082403026 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group 
check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:08.176 (1609248848176876004 1d629b) replica.replica0.030062910008f9bf: replica_stub.cpp:1096:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22 +D2020-12-29 21:34:08.1765 (1609248848176889416 1d629b) replica.replica0.030062910008f9bf: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22, confirmed_decree = -1 +D2020-12-29 21:34:08.177 (1609248848177007327 1d629c) replica.replica1.0304000100000024: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:34:08.177 (1609248848177017940 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:08.177 (1609248848177062411 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:08.189 (1609248848189074560 1d629c) replica.replica1.030600000000001a: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.3@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:08.189 (1609248848189132327 1d629c) replica.replica1.030600010000001b: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.7@10.232.52.144:34803: trigger non-emergency checkpoint +D2020-12-29 21:34:08.198 (1609248848198221310 1d629c) replica.replica1.030062910008fa99: replica_stub.cpp:1096:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22 +D2020-12-29 21:34:08.1985 (1609248848198233969 1d629c) replica.replica1.030062910008fa99: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22, confirmed_decree = -1 +D2020-12-29 21:34:08.198 (1609248848198428518 1d629c) replica.replica1.030062910008fa9b: replica_stub.cpp:1096:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22 +D2020-12-29 21:34:08.1985 (1609248848198439662 1d629c) replica.replica1.030062910008fa9b: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22, confirmed_decree = -1 +D2020-12-29 21:34:09.841 (1609248849841939718 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248849841] +D2020-12-29 21:34:09.842 (1609248849842048457 1d62b5) replica. 
fd0.030c00010000002a: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248849841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] +D2020-12-29 21:34:09.842 (1609248849842056457 1d62b5) replica. fd0.030c00010000002a: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248849841 +D2020-12-29 21:34:11.852 (1609248851852867666 1d62bb) unknown.io-thrd.1925819: builtin_counters.cpp:36:update_counters(): memused_virt = 1421 MB, memused_res = 240MB +D2020-12-29 21:34:11.853 (1609248851853854582 1d62bb) unknown.io-thrd.1925819: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248851852), last_report_time_ms(1609248841851) +D2020-12-29 21:34:12.841 (1609248852841998575 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248852841] +D2020-12-29 21:34:12.842 (1609248852842132763 1d62b6) replica. fd1.030c000000000028: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248852841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] +D2020-12-29 21:34:12.842 (1609248852842143924 1d62b6) replica. fd1.030c000000000028: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248852841 +D2020-12-29 21:34:13.859 (1609248853859193670 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1623:on_gc(): start to garbage collection, replica_count = 20 +D2020-12-29 21:34:13.859 (1609248853859221843 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 11 +D2020-12-29 21:34:13.8591 (1609248853859228743 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 +D2020-12-29 21:34:13.8593 (1609248853859232551 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 +D2020-12-29 21:34:13.8593 (1609248853859245313 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 +D2020-12-29 21:34:13.8595 (1609248853859251843 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 +D2020-12-29 21:34:13.8593 (1609248853859292972 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 
+D2020-12-29 21:34:13.8595 (1609248853859297884 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 10 +D2020-12-29 21:34:13.8590 (1609248853859307404 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 +D2020-12-29 21:34:13.8595 (1609248853859316754 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 +D2020-12-29 21:34:13.8595 (1609248853859324289 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 11 +D2020-12-29 21:34:13.8591 (1609248853859329210 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 +D2020-12-29 21:34:13.8593 (1609248853859334412 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 12 +D2020-12-29 21:34:13.8592 (1609248853859338027 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 +D2020-12-29 21:34:13.8593 (1609248853859344794 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 +D2020-12-29 21:34:13.8595 (1609248853859352314 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 +D2020-12-29 21:34:13.8595 (1609248853859356083 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 22 +D2020-12-29 21:34:13.8592 (1609248853859365256 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 +D2020-12-29 21:34:13.8595 (1609248853859368662 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = 
replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23
+D2020-12-29 21:34:15.972 (1609248855972378007 1d6297) replica.default3.03006291000a49b7: replica_stub.cpp:2806:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::START
+W2020-12-29 21:34:15.972 (1609248855972392369 1d6297) replica.default3.03006291000a49b7: hotkey_collector.cpp:249:on_start_detect(): [3.3@10.232.52.144:34803] still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::FINE_DETECTING
+E2020-12-29 21:34:25.864 (1609248865864866778 1d6297) replica.default3.0306000100000024: hotkey_collector.cpp:173:change_state_by_result(): [3.3@10.232.52.144:34803] Find the hotkey: ThisisahotkeyThisisahotkey
+D2020-12-29 21:34:25.989 (1609248865989297524 1d6294) replica.default0.03006291000c04e2: replica_stub.cpp:2806:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::START
+W2020-12-29 21:34:25.989 (1609248865989324883 1d6294) replica.default0.03006291000c04e2: hotkey_collector.cpp:258:on_start_detect(): [3.3@10.232.52.144:34803] replication::hotkey_type::READ hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection
+D2020-12-29 21:34:46.2665
(1609248886266644620 1d629c) replica.replica1.03006291000f8fad: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10, confirmed_decree = -1 +D2020-12-29 21:34:46.295 (1609248886295315966 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:34:46.295 (1609248886295331579 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:46.295 (1609248886295372497 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:46.296 (1609248886296929198 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check +D2020-12-29 21:34:46.296 (1609248886296940302 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:46.296 (1609248886296991205 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY +D2020-12-29 21:34:46.389 (1609248886389095602 1d629b) replica.replica0.03006291000f9551: replica_stub.cpp:1096:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10 +D2020-12-29 21:34:46.3895 (1609248886389114209 1d629b) replica.replica0.03006291000f9551: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10, confirmed_decree = -1 +D2020-12-29 21:34:46.421 (1609248886421222281 1d629c) replica.replica1.03006291000f96d8: replica_stub.cpp:1096:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10 +D2020-12-29 21:34:46.4215 (1609248886421233954 1d629c) replica.replica1.03006291000f96d8: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10, confirmed_decree = -1 +D2020-12-29 21:34:46.500 (1609248886500924178 1d629b) replica.replica0.03006291000f9a91: replica_stub.cpp:1096:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10 +D2020-12-29 21:34:46.5005 (1609248886500936011 1d629b) replica.replica0.03006291000f9a91: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, 
last_committed_decree = 10, confirmed_decree = -1 +D2020-12-29 21:34:47.0V (1609248887000211230 1d6295) replica.default1.03006291000fb195: replica_stub.cpp:2806:on_detect_hotkey(): [3.0@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::QUERY +D2020-12-29 21:34:47.0 (1609248887000220301 1d6295) replica.default1.03006291000fb195: hotkey_collector.cpp:292:query_result(): [3.0@10.232.52.144:34803] Can't get hotkey now, now state: hotkey_collector_state::STOPPED diff --git a/rdsn b/rdsn index 1fd9cd5711..6851f33f5b 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit 1fd9cd57117de8dc67d42f6b8a5fb05ea82e122f +Subproject commit 6851f33f5ba739ab67eb36e2c1fe35b15b1f8c77 diff --git a/src/server/hotkey_collector.cpp b/src/server/hotkey_collector.cpp index 2f60c69649..b5912dcbb3 100644 --- a/src/server/hotkey_collector.cpp +++ b/src/server/hotkey_collector.cpp @@ -31,13 +31,13 @@ namespace server { DSN_DEFINE_uint32( "pegasus.server", hot_bucket_variance_threshold, - 4, + 7, "the variance threshold to detect hot bucket during coarse analysis of hotkey detection"); DSN_DEFINE_uint32( "pegasus.server", hot_key_variance_threshold, - 4, + 5, "the variance threshold to detect hot key during fine analysis of hotkey detection"); DSN_DEFINE_uint32("pegasus.server", diff --git a/src/server/hotspot_partition_calculator.cpp b/src/server/hotspot_partition_calculator.cpp index 32f9b8e1c0..1950e2a001 100644 --- a/src/server/hotspot_partition_calculator.cpp +++ b/src/server/hotspot_partition_calculator.cpp @@ -55,7 +55,6 @@ DSN_DEFINE_int32("pegasus.collector", void hotspot_partition_calculator::data_aggregate(const std::vector &partition_stats) { - std::cout << "data_aggregate" << std::endl; while (_partitions_stat_histories.size() >= FLAGS_max_hotspot_store_size) { _partitions_stat_histories.pop_front(); } @@ -117,6 +116,12 @@ void hotspot_partition_calculator::stat_histories_analyse(int data_type, // use ceil to guarantee conversion results hot_points[i] = ceil(std::max(hot_point, double(0))); } + // test + std::string result = ""; + for (int i = 0; i < hot_point_size; i++) { + result += std::to_string(hot_points[i]); + } + derror_f("{} hot_points: {}", data_type, result); } void hotspot_partition_calculator::update_hot_point(int data_type, std::vector &hot_points) diff --git a/src/test/function_test/test_detect_hotspot.cpp b/src/test/function_test/test_detect_hotspot.cpp index 1eb07dfaa8..119bf476b3 100644 --- a/src/test/function_test/test_detect_hotspot.cpp +++ b/src/test/function_test/test_detect_hotspot.cpp @@ -268,13 +268,11 @@ class test_detect_hotspot : public testing::Test auto errinfo = ddl_client->detect_hotkey(partitions[partition_index].primary, req, resp); ASSERT_EQ(errinfo, dsn::ERR_OK); - if (resp.hotkey_result.empty()) { + if (!resp.hotkey_result.empty()) { find_hotkey = true; break; } } - ASSERT_EQ(resp.err_hint, - "Can't get hotkey now, now state: hotkey_collector_state::FINISHED"); ASSERT_TRUE(find_hotkey); ASSERT_EQ(resp.err, dsn::ERR_OK); @@ -330,26 +328,26 @@ class test_detect_hotspot : public testing::Test pegasus::pegasus_client *pg_client; }; -// TEST_F(test_detect_hotspot, write_hotspot_data) -// { -// std::cout << "start testing write_hotspot_data..." << std::endl; -// write_hotspot_data(); -// std::cout << "hotspot passed....." << std::endl; -// } - -// TEST_F(test_detect_hotspot, write_random_data) -// { -// std::cout << "start testing write_random_data..." 
<< std::endl; -// write_random_data(); -// std::cout << "hotspot passed....." << std::endl; -// } - -// TEST_F(test_detect_hotspot, capture_until_maxtime) -// { -// std::cout << "start testing capture_until_maxtime..." << std::endl; -// capture_until_maxtime(); -// std::cout << "hotspot passed....." << std::endl; -// } +TEST_F(test_detect_hotspot, write_hotspot_data) +{ + std::cout << "start testing write_hotspot_data..." << std::endl; + write_hotspot_data(); + std::cout << "hotspot passed....." << std::endl; +} + +TEST_F(test_detect_hotspot, write_random_data) +{ + std::cout << "start testing write_random_data..." << std::endl; + write_random_data(); + std::cout << "hotspot passed....." << std::endl; +} + +TEST_F(test_detect_hotspot, capture_until_maxtime) +{ + std::cout << "start testing capture_until_maxtime..." << std::endl; + capture_until_maxtime(); + std::cout << "hotspot passed....." << std::endl; +} TEST_F(test_detect_hotspot, read_hotspot_data) { @@ -358,9 +356,9 @@ TEST_F(test_detect_hotspot, read_hotspot_data) std::cout << "hotspot passed....." << std::endl; } -// TEST_F(test_detect_hotspot, read_random_data) -// { -// std::cout << "start testing read_random_data..." << std::endl; -// read_random_data(); -// std::cout << "hotspot passed....." << std::endl; -// } +TEST_F(test_detect_hotspot, read_random_data) +{ + std::cout << "start testing read_random_data..." << std::endl; + read_random_data(); + std::cout << "hotspot passed....." << std::endl; +} From 0c7c65705189fcb4b1b6e9689ff6c5f9063daaa4 Mon Sep 17 00:00:00 2001 From: Tangyanzhao Date: Wed, 30 Dec 2020 00:15:51 +0800 Subject: [PATCH 07/19] delete useless --- debug.txt | 162 ------------ debug2.txt | 709 ----------------------------------------------------- rdsn | 2 +- 3 files changed, 1 insertion(+), 872 deletions(-) delete mode 100644 debug.txt delete mode 100644 debug2.txt diff --git a/debug.txt b/debug.txt deleted file mode 100644 index ea0c39b2d3..0000000000 --- a/debug.txt +++ /dev/null @@ -1,162 +0,0 @@ -D2020-12-29 21:33:15.840 (1609248795840224105 1d62bc) collector.io-thrd.1925820: network.cpp:787:on_client_session_connected(): client session connected, remote_server = 10.232.52.144:34801, current_count = 4 -D2020-12-29 21:33:15.840 (1609248795840232986 1d62c7) collector.default0.010100040000000e: network.cpp:649:send_message(): client session created, remote_server = 10.232.52.144:34803, current_count = 5 -D2020-12-29 21:33:15.840 (1609248795840260095 1d62bc) collector.io-thrd.1925820: network.cpp:787:on_client_session_connected(): client session connected, remote_server = 10.232.52.144:34802, current_count = 5 -D2020-12-29 21:33:15.840 (1609248795840919072 1d62bc) collector.io-thrd.1925820: network.cpp:787:on_client_session_connected(): client session connected, remote_server = 10.232.52.144:34803, current_count = 5 -E2020-12-29 21:33:15.857 (1609248795857993212 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00010121 -E2020-12-29 21:33:15.858 (1609248795858031891 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 -E2020-12-29 21:33:15.858 (1609248795858204752 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 -E2020-12-29 21:33:15.858 (1609248795858213054 1d62c8) collector.default1.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0000 
-D2020-12-29 21:34:45.842 (1609248885842354161 1d62c9) collector.default2.0101000400000004: available_detector.cpp:286:on_detect(): detecting table[temp] with app_id[2] and partition_count[8] on cluster[onebox], recent_day_detect_times(247), recent_day_fail_times(0), recent_hour_detect_times(247),
recent_hour_fail_times(0) recent_minute_detect_times(87), recent_minute_fail_times(0) -D2020-12-29 21:34:46.10 (1609248886010921470 1d62cb) collector.default4.0101000000000001: info_collector.cpp:145:on_app_stat(): start to stat apps -E2020-12-29 21:34:46.31 (1609248886031362369 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 00040000 -E2020-12-29 21:34:46.31 (1609248886031397671 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 00000000 -E2020-12-29 21:34:46.31 (1609248886031424105 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:164:detect_hotkey_in_hotpartition(): Find a read hot partition hotspot_test.3 -D2020-12-29 21:34:46.31 (1609248886031439776 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:169:detect_hotkey_in_hotpartition(): !!!!!! 3 0 4 7 -D2020-12-29 21:34:46.31.5 (1609248886031704678 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:210:send_detect_hotkey_request(): Start read hotkey detection in hotspot_test.3, server address: 10.232.52.144:34803 -E2020-12-29 21:34:46.31.5 (1609248886031714144 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:224:send_detect_hotkey_request(): Hotkey detect rpc executing failed, in hotspot_test.3, error_hint:ERR_BUSY replication::hotkey_type::READ hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection -E2020-12-29 21:34:46.31 (1609248886031746990 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 0000 -E2020-12-29 21:34:46.31 (1609248886031760704 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 0020 -E2020-12-29 21:34:46.31 (1609248886031782687 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 0 hot_points: 11111111 -E2020-12-29 21:34:46.31 (1609248886031792110 1d62cb) collector.default4.0101000000000001: hotspot_partition_calculator.cpp:124:stat_histories_analyse(): 1 hot_points: 11111111 -D2020-12-29 21:34:46.31 (1609248886031809761 1d62cb) collector.default4.0101000000000001: info_collector.cpp:174:on_app_stat(): stat apps succeed, app_count = 3, total_read_qps = 16514.9, total_write_qps = 2.69968 diff --git a/debug2.txt b/debug2.txt deleted file mode 100644 index 9210c6820b..0000000000 --- a/debug2.txt +++ /dev/null @@ -1,709 +0,0 @@ -D2020-12-29 21:33:13.8580 (1609248793858799264 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 21:33:13.8582 (1609248793858811470 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 0 -D2020-12-29 21:33:13.8580 (1609248793858819794 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk 
= 2
-D2020-12-29 21:33:18.176 (1609248798176142265 1d629c) replica.replica1.0304000100000024: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:18.176 (1609248798176162272 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:18.176 (1609248798176204363 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:18.176 (1609248798176277998 1d629b) replica.replica0.030062910001f066: replica_stub.cpp:1096:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:18.1765 (1609248798176287847 1d629b) replica.replica0.030062910001f066: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:18.197 (1609248798197745790 1d629c) replica.replica1.030062910001f0dc: replica_stub.cpp:1096:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:18.1975 (1609248798197755680 1d629c) replica.replica1.030062910001f0dc: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:18.197 (1609248798197928192 1d629c) replica.replica1.030062910001f0dd: replica_stub.cpp:1096:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:18.1975 (1609248798197937987 1d629c) replica.replica1.030062910001f0dd: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:18.840 (1609248798840550051 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248798840] -D2020-12-29 21:33:18.840 (1609248798840680585 1d62b5) replica. fd0.030c000100000018: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248798840], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:18.840 (1609248798840689679 1d62b5) replica. fd0.030c000100000018: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248798840 -D2020-12-29 21:33:21.840 (1609248801840615265 1d62b6) replica. 
fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248801840] -D2020-12-29 21:33:21.840 (1609248801840752479 1d62b5) replica. fd0.030c00010000001a: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248801840], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:21.840 (1609248801840759870 1d62b5) replica. fd0.030c00010000001a: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248801840 -D2020-12-29 21:33:21.847 (1609248801847216476 1d62ba) unknown.io-thrd.1925818: builtin_counters.cpp:36:update_counters(): memused_virt = 1319 MB, memused_res = 201MB -D2020-12-29 21:33:21.848 (1609248801848182667 1d62ba) unknown.io-thrd.1925818: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248801847), last_report_time_ms(1609248791845) -D2020-12-29 21:33:24.840 (1609248804840694545 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248804840] -D2020-12-29 21:33:24.840 (1609248804840849252 1d62b6) replica. fd1.030c000000000018: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248804840], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:24.840 (1609248804840859933 1d62b6) replica. fd1.030c000000000018: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248804840 -D2020-12-29 21:33:25.833 (1609248805833144909 1d6297) replica.default3.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 21:33:25.870 (1609248805870992897 1d629c) replica.replica1.030062910002a41f: replica_stub.cpp:1096:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 8 -D2020-12-29 21:33:25.8715 (1609248805871008891 1d629c) replica.replica1.030062910002a41f: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 8, confirmed_decree = -1 -D2020-12-29 21:33:25.872 (1609248805872778988 1d629c) replica.replica1.0304000100000043: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:25.872 (1609248805872787485 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:25.872 (1609248805872823460 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:25.875 (1609248805875121246 1d629b) replica.replica0.0304000000000068: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:25.875 (1609248805875132412 1d629b) replica.replica0.0304000000000068: 
replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:25.875 (1609248805875162626 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:25.876 (1609248805876511965 1d629c) replica.replica1.030062910002a435: replica_stub.cpp:1096:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:25.8765 (1609248805876523838 1d629c) replica.replica1.030062910002a435: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:25.880 (1609248805880014802 1d629b) replica.replica0.030062910002a449: replica_stub.cpp:1096:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 2 -D2020-12-29 21:33:25.8805 (1609248805880025361 1d629b) replica.replica0.030062910002a449: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 2, confirmed_decree = -1 -D2020-12-29 21:33:25.886 (1609248805886415336 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:25.886 (1609248805886443676 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:25.886 (1609248805886484473 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:26.8 (1609248806008636345 1d629b) replica.replica0.030062910002a744: replica_stub.cpp:1096:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 8 -D2020-12-29 21:33:26.82.5 (1609248806008646027 1d629b) replica.replica0.030062910002a744: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 8, confirmed_decree = -1 -D2020-12-29 21:33:26.265 (1609248806265143178 1d629c) replica.replica1.030062910002ad4a: replica_stub.cpp:1096:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 2 -D2020-12-29 21:33:26.2655 (1609248806265155625 1d629c) replica.replica1.030062910002ad4a: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 2, confirmed_decree = -1 -D2020-12-29 21:33:26.294 (1609248806294038311 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:26.294 (1609248806294053967 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:26.294 (1609248806294087866 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:26.295 (1609248806295630662 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:26.295 (1609248806295641110 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:26.295 (1609248806295666337 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:26.387 (1609248806387693284 1d629b) replica.replica0.030062910002b03c: replica_stub.cpp:1096:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 2 -D2020-12-29 21:33:26.3875 (1609248806387707201 1d629b) replica.replica0.030062910002b03c: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 2, confirmed_decree = -1 -D2020-12-29 21:33:26.419 (1609248806419844240 1d629c) replica.replica1.030062910002b0ef: replica_stub.cpp:1096:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 2 -D2020-12-29 21:33:26.4195 (1609248806419854325 1d629c) replica.replica1.030062910002b0ef: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 2, confirmed_decree = -1 -D2020-12-29 21:33:26.499 (1609248806499396252 1d629b) replica.replica0.030062910002b2ac: replica_stub.cpp:1096:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 2 -D2020-12-29 21:33:26.4995 (1609248806499408215 1d629b) replica.replica0.030062910002b2ac: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 2, confirmed_decree = -1 -D2020-12-29 21:33:27.840 (1609248807840752401 1d62b5) replica. 
fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248807840] -D2020-12-29 21:33:27.840 (1609248807840983476 1d62b6) replica. fd1.030c00000000001a: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248807840], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:27.840 (1609248807840995416 1d62b6) replica. fd1.030c00000000001a: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248807840 -D2020-12-29 21:33:28.78 (1609248808078396218 1d629b) replica.replica0.030062910002d5a3: replica_stub.cpp:1096:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9 -D2020-12-29 21:33:28.78.5 (1609248808078406115 1d629b) replica.replica0.030062910002d5a3: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9, confirmed_decree = -1 -D2020-12-29 21:33:28.78 (1609248808078856165 1d629b) replica.replica0.030062910002d5a8: replica_stub.cpp:1096:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:28.78.5 (1609248808078867962 1d629b) replica.replica0.030062910002d5a8: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:28.81 (1609248808081929332 1d629b) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:28.81 (1609248808081939309 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:28.81 (1609248808081993930 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:28.176 (1609248808176250881 1d629c) replica.replica1.0304000100000024: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:28.176 (1609248808176267944 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:28.176 (1609248808176317958 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:28.176 (1609248808176380079 1d629b) replica.replica0.030062910002d7ed: replica_stub.cpp:1096:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 9 -D2020-12-29 21:33:28.1765 (1609248808176390520 1d629b) replica.replica0.030062910002d7ed: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9, confirmed_decree = -1 -D2020-12-29 21:33:28.197 (1609248808197859045 1d629c) replica.replica1.030062910002d870: replica_stub.cpp:1096:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9 -D2020-12-29 21:33:28.1975 (1609248808197883103 1d629c) replica.replica1.030062910002d870: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9, confirmed_decree = -1 -D2020-12-29 21:33:28.198 (1609248808198037691 1d629c) replica.replica1.030062910002d871: replica_stub.cpp:1096:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9 -D2020-12-29 21:33:28.1985 (1609248808198061863 1d629c) replica.replica1.030062910002d871: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9, confirmed_decree = -1 -D2020-12-29 21:33:30.840 (1609248810840885203 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248810840] -D2020-12-29 21:33:30.841 (1609248810841091553 1d62b5) replica. fd0.030c00010000001c: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248810840], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:30.841 (1609248810841101783 1d62b5) replica. fd0.030c00010000001c: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248810840 -D2020-12-29 21:33:31.848 (1609248811848265752 1d62bb) unknown.io-thrd.1925819: builtin_counters.cpp:36:update_counters(): memused_virt = 1328 MB, memused_res = 209MB -D2020-12-29 21:33:31.849 (1609248811849257854 1d62bb) unknown.io-thrd.1925819: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248811848), last_report_time_ms(1609248801847) -D2020-12-29 21:33:33.840 (1609248813840946777 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248813840] -D2020-12-29 21:33:33.841 (1609248813841099260 1d62b5) replica. fd0.030c00010000001e: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248813840], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:33.841 (1609248813841109832 1d62b5) replica. 
fd0.030c00010000001e: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248813840 -D2020-12-29 21:33:35.833 (1609248815833226604 1d6295) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 21:33:35.871 (1609248815871132160 1d629c) replica.replica1.030062910003846e: replica_stub.cpp:1096:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11 -D2020-12-29 21:33:35.8715 (1609248815871143107 1d629c) replica.replica1.030062910003846e: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11, confirmed_decree = -1 -D2020-12-29 21:33:35.872 (1609248815872872793 1d629c) replica.replica1.0304000100000043: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:35.872 (1609248815872895806 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:35.872 (1609248815872928604 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:35.875 (1609248815875283296 1d629b) replica.replica0.0304000000000068: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:35.875 (1609248815875292324 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:35.875 (1609248815875327904 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:35.876 (1609248815876659023 1d629c) replica.replica1.0300629100038494: replica_stub.cpp:1096:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 -D2020-12-29 21:33:35.8765 (1609248815876669627 1d629c) replica.replica1.0300629100038494: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 -D2020-12-29 21:33:35.880 (1609248815880178321 1d629b) replica.replica0.03006291000384aa: replica_stub.cpp:1096:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 21:33:35.8805 (1609248815880189251 1d629b) replica.replica0.03006291000384aa: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, 
last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 21:33:35.886 (1609248815886634258 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:35.886 (1609248815886647726 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:35.886 (1609248815886702614 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:36.8 (1609248816008747664 1d629b) replica.replica0.0300629100038765: replica_stub.cpp:1096:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11 -D2020-12-29 21:33:36.82.5 (1609248816008760095 1d629b) replica.replica0.0300629100038765: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11, confirmed_decree = -1 -D2020-12-29 21:33:36.265 (1609248816265333071 1d629c) replica.replica1.0300629100038d5b: replica_stub.cpp:1096:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 21:33:36.2655 (1609248816265347253 1d629c) replica.replica1.0300629100038d5b: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 21:33:36.294 (1609248816294188541 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:36.294 (1609248816294208098 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:36.294 (1609248816294251798 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:36.295 (1609248816295774535 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:36.295 (1609248816295795378 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:36.295 (1609248816295831047 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:36.387 (1609248816387827185 1d629b) replica.replica0.030062910003903e: replica_stub.cpp:1096:on_group_check(): 
3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 21:33:36.3875 (1609248816387839566 1d629b) replica.replica0.030062910003903e: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 21:33:36.420 (1609248816420005136 1d629c) replica.replica1.03006291000390fd: replica_stub.cpp:1096:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 21:33:36.4205 (1609248816420018836 1d629c) replica.replica1.03006291000390fd: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 21:33:36.499 (1609248816499637464 1d629b) replica.replica0.0300629100039291: replica_stub.cpp:1096:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3 -D2020-12-29 21:33:36.4995 (1609248816499652009 1d629b) replica.replica0.0300629100039291: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 3, confirmed_decree = -1 -D2020-12-29 21:33:36.841 (1609248816841008965 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248816841] -D2020-12-29 21:33:36.841 (1609248816841174744 1d62b6) replica. fd1.030c00000000001c: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248816841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:36.841 (1609248816841182316 1d62b6) replica. 
fd1.030c00000000001c: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248816841 -D2020-12-29 21:33:38.78 (1609248818078510138 1d629b) replica.replica0.030062910003cb67: replica_stub.cpp:1096:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 12 -D2020-12-29 21:33:38.78.5 (1609248818078548624 1d629b) replica.replica0.030062910003cb67: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 12, confirmed_decree = -1 -D2020-12-29 21:33:38.79 (1609248818079029199 1d629b) replica.replica0.030062910003cb6c: replica_stub.cpp:1096:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 8 -D2020-12-29 21:33:38.79.5 (1609248818079040466 1d629b) replica.replica0.030062910003cb6c: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 8, confirmed_decree = -1 -D2020-12-29 21:33:38.82 (1609248818082050623 1d629b) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:38.82 (1609248818082062816 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:38.82 (1609248818082096332 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:38.176 (1609248818176380948 1d629c) replica.replica1.0304000100000024: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:38.176 (1609248818176398849 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:38.176 (1609248818176431445 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:38.176 (1609248818176491558 1d629b) replica.replica0.030062910003cf85: replica_stub.cpp:1096:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 12 -D2020-12-29 21:33:38.1765 (1609248818176501563 1d629b) replica.replica0.030062910003cf85: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 12, confirmed_decree = -1 -D2020-12-29 21:33:38.197 (1609248818197959917 1d629c) replica.replica1.030062910003d08a: 
replica_stub.cpp:1096:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 12 -D2020-12-29 21:33:38.1975 (1609248818197969549 1d629c) replica.replica1.030062910003d08a: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 12, confirmed_decree = -1 -D2020-12-29 21:33:38.198 (1609248818198179805 1d629c) replica.replica1.030062910003d08c: replica_stub.cpp:1096:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 12 -D2020-12-29 21:33:38.1985 (1609248818198204194 1d629c) replica.replica1.030062910003d08c: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 12, confirmed_decree = -1 -D2020-12-29 21:33:39.841 (1609248819841072869 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248819841] -D2020-12-29 21:33:39.841 (1609248819841231138 1d62b6) replica. fd1.030c00000000001e: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248819841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:39.841 (1609248819841239355 1d62b6) replica. fd1.030c00000000001e: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248819841 -D2020-12-29 21:33:41.849 (1609248821849327337 1d62ba) unknown.io-thrd.1925818: builtin_counters.cpp:36:update_counters(): memused_virt = 1335 MB, memused_res = 217MB -D2020-12-29 21:33:41.850 (1609248821850468406 1d62ba) unknown.io-thrd.1925818: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248821849), last_report_time_ms(1609248811848) -D2020-12-29 21:33:42.841 (1609248822841145147 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248822841] -D2020-12-29 21:33:42.841 (1609248822841283928 1d62b5) replica. fd0.030c000100000020: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248822841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:42.841 (1609248822841293150 1d62b5) replica. 
fd0.030c000100000020: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248822841 -D2020-12-29 21:33:43.8585 (1609248823858982290 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1623:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 21:33:43.859 (1609248823859009084 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:33:43.8595 (1609248823859015896 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 13 -D2020-12-29 21:33:43.8593 (1609248823859018388 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 13 -D2020-12-29 21:33:43.8593 (1609248823859024786 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 21:33:43.8592 (1609248823859026735 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 13 -D2020-12-29 21:33:43.8593 (1609248823859030388 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 21:33:43.8592 (1609248823859074517 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:33:43.8595 (1609248823859082606 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 21:33:43.8592 (1609248823859087952 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 21:33:43.8592 (1609248823859097765 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:33:43.8595 (1609248823859101275 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 
2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 13 -D2020-12-29 21:33:43.8593 (1609248823859104197 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 10 -D2020-12-29 21:33:43.8590 (1609248823859107084 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 13 -D2020-12-29 21:33:43.8593 (1609248823859111891 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 21:33:43.8592 (1609248823859117670 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 21:33:43.8592 (1609248823859120291 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 12 -D2020-12-29 21:33:43.8592 (1609248823859124242 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 21:33:43.8592 (1609248823859126910 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 13 -D2020-12-29 21:33:43.8593 (1609248823859132101 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 2 -D2020-12-29 21:33:43.8592 (1609248823859134470 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 13 -D2020-12-29 21:33:43.859 (1609248823859150557 1d62a1) replica.rep_long0.0301000000000001: mutation_log.cpp:1364:garbage_collection(): gc_shared: too few files to delete, file_count_limit = 100, reserved_log_count = 1, reserved_log_size = 26235, current_log_index = 1 -D2020-12-29 21:33:43.859 (1609248823859158922 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1774:on_gc(): finish to garbage collection, time_used_ns = 188451 -D2020-12-29 21:33:45.831 (1609248825831480945 1d6294) replica.default0.0301000000000004: replica_stub.cpp:1257:query_configuration_by_node(): send query node partitions request to meta server, 
stored_replicas_count = 20 -D2020-12-29 21:33:45.831 (1609248825831658386 1d6296) replica.default2.0301000000000009: replica_stub.cpp:1288:on_node_query_reply(): query node partitions replied, err = ERR_OK -D2020-12-29 21:33:45.831 (1609248825831713164 1d6296) replica.default2.0301000000000009: replica_stub.cpp:1332:on_node_query_reply(): process query node partitions response for resp.err = ERR_OK, partitions_count(20), gc_replicas_count(0) -D2020-12-29 21:33:45.833 (1609248825833295307 1d6295) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 21:33:45.841 (1609248825841337043 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248825841] -D2020-12-29 21:33:45.841 (1609248825841480791 1d62b5) replica. fd0.030c000100000022: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248825841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:45.841 (1609248825841488294 1d62b5) replica. fd0.030c000100000022: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248825841 -D2020-12-29 21:33:45.871 (1609248825871206204 1d629c) replica.replica1.0300629100051ee3: replica_stub.cpp:1096:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15 -D2020-12-29 21:33:45.8715 (1609248825871215834 1d629c) replica.replica1.0300629100051ee3: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15, confirmed_decree = -1 -D2020-12-29 21:33:45.872 (1609248825872987983 1d629c) replica.replica1.0304000100000043: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:45.872 (1609248825872998890 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:45.873 (1609248825873033856 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:45.875 (1609248825875450881 1d629b) replica.replica0.0304000000000068: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:45.875 (1609248825875459141 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:45.875 (1609248825875484540 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:45.876 (1609248825876886854 1d629c) replica.replica1.0300629100051f27: replica_stub.cpp:1096:on_group_check(): 1.2@10.232.52.144:34803: received group 
check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 7 -D2020-12-29 21:33:45.8765 (1609248825876894855 1d629c) replica.replica1.0300629100051f27: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 7, confirmed_decree = -1 -D2020-12-29 21:33:45.880 (1609248825880351950 1d629b) replica.replica0.0300629100051f4e: replica_stub.cpp:1096:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4 -D2020-12-29 21:33:45.8805 (1609248825880363436 1d629b) replica.replica0.0300629100051f4e: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4, confirmed_decree = -1 -D2020-12-29 21:33:45.886 (1609248825886821765 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:45.886 (1609248825886833669 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:45.886 (1609248825886873613 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:46.8 (1609248826008861627 1d629b) replica.replica0.0300629100052466: replica_stub.cpp:1096:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15 -D2020-12-29 21:33:46.82.5 (1609248826008873279 1d629b) replica.replica0.0300629100052466: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15, confirmed_decree = -1 -D2020-12-29 21:33:46.265 (1609248826265543519 1d629c) replica.replica1.0300629100052f3e: replica_stub.cpp:1096:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4 -D2020-12-29 21:33:46.2655 (1609248826265558622 1d629c) replica.replica1.0300629100052f3e: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4, confirmed_decree = -1 -D2020-12-29 21:33:46.294 (1609248826294367537 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:46.294 (1609248826294384783 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:46.294 (1609248826294417879 1d629b) 
replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:46.295 (1609248826295968716 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:46.295 (1609248826295998602 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:46.296 (1609248826296038968 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:46.388 (1609248826388059334 1d629b) replica.replica0.030062910005346d: replica_stub.cpp:1096:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4 -D2020-12-29 21:33:46.3885 (1609248826388071691 1d629b) replica.replica0.030062910005346d: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4, confirmed_decree = -1 -D2020-12-29 21:33:46.420 (1609248826420160681 1d629c) replica.replica1.03006291000535ba: replica_stub.cpp:1096:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4 -D2020-12-29 21:33:46.4205 (1609248826420170031 1d629c) replica.replica1.03006291000535ba: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4, confirmed_decree = -1 -D2020-12-29 21:33:46.499 (1609248826499835884 1d629b) replica.replica0.0300629100053939: replica_stub.cpp:1096:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4 -D2020-12-29 21:33:46.4995 (1609248826499846649 1d629b) replica.replica0.0300629100053939: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 4, confirmed_decree = -1 -D2020-12-29 21:33:48.78 (1609248828078609096 1d629b) replica.replica0.0300629100057cdf: replica_stub.cpp:1096:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15 -D2020-12-29 21:33:48.78.5 (1609248828078622832 1d629b) replica.replica0.0300629100057cdf: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15, confirmed_decree = -1 -D2020-12-29 21:33:48.79 (1609248828079118051 1d629b) replica.replica0.0300629100057ce6: replica_stub.cpp:1096:on_group_check(): 
1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9 -D2020-12-29 21:33:48.79.5 (1609248828079130318 1d629b) replica.replica0.0300629100057ce6: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9, confirmed_decree = -1 -D2020-12-29 21:33:48.82 (1609248828082133651 1d629b) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:48.82 (1609248828082141693 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:48.82 (1609248828082201228 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:48.176 (1609248828176564296 1d629c) replica.replica1.0304000100000024: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:48.176 (1609248828176577718 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:48.176 (1609248828176613804 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:48.176 (1609248828176646504 1d629b) replica.replica0.030062910005815d: replica_stub.cpp:1096:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15 -D2020-12-29 21:33:48.1765 (1609248828176657780 1d629b) replica.replica0.030062910005815d: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15, confirmed_decree = -1 -D2020-12-29 21:33:48.198 (1609248828198049443 1d629c) replica.replica1.030062910005824d: replica_stub.cpp:1096:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15 -D2020-12-29 21:33:48.1985 (1609248828198064513 1d629c) replica.replica1.030062910005824d: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15, confirmed_decree = -1 -D2020-12-29 21:33:48.198 (1609248828198273920 1d629c) replica.replica1.030062910005824f: replica_stub.cpp:1096:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15 -D2020-12-29 21:33:48.1985 (1609248828198283026 1d629c) replica.replica1.030062910005824f: 
replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 15, confirmed_decree = -1 -D2020-12-29 21:33:48.841 (1609248828841405261 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248828841] -D2020-12-29 21:33:48.841 (1609248828841525005 1d62b6) replica. fd1.030c000000000020: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248828841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:48.841 (1609248828841534004 1d62b6) replica. fd1.030c000000000020: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248828841 -D2020-12-29 21:33:51.841 (1609248831841460196 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248831841] -D2020-12-29 21:33:51.841 (1609248831841582473 1d62b6) replica. fd1.030c000000000022: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248831841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:51.841 (1609248831841611972 1d62b6) replica. fd1.030c000000000022: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248831841 -D2020-12-29 21:33:51.850 (1609248831850527887 1d62bb) unknown.io-thrd.1925819: builtin_counters.cpp:36:update_counters(): memused_virt = 1341 MB, memused_res = 224MB -D2020-12-29 21:33:51.851 (1609248831851585151 1d62bb) unknown.io-thrd.1925819: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248831850), last_report_time_ms(1609248821849) -D2020-12-29 21:33:54.841 (1609248834841523371 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248834841] -D2020-12-29 21:33:54.841 (1609248834841638674 1d62b5) replica. fd0.030c000100000024: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248834841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:54.841 (1609248834841649236 1d62b5) replica. 
fd0.030c000100000024: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248834841 -D2020-12-29 21:33:55.833 (1609248835833360617 1d6295) replica.default1.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 21:33:55.871 (1609248835871294749 1d629c) replica.replica1.030062910006e324: replica_stub.cpp:1096:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18 -D2020-12-29 21:33:55.871 (1609248835871306117 1d629c) replica.replica1.030062910006e324: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18, confirmed_decree = -1 -D2020-12-29 21:33:55.873 (1609248835873071463 1d629c) replica.replica1.0304000100000043: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:55.873 (1609248835873081328 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:55.873 (1609248835873113042 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:55.875 (1609248835875616930 1d629b) replica.replica0.0304000000000068: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:55.875 (1609248835875628908 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:55.875 (1609248835875660190 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:55.876 (1609248835876960601 1d629c) replica.replica1.030062910006e362: replica_stub.cpp:1096:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11 -D2020-12-29 21:33:55.8765 (1609248835876967819 1d629c) replica.replica1.030062910006e362: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 11, confirmed_decree = -1 -D2020-12-29 21:33:55.880 (1609248835880457627 1d629b) replica.replica0.030062910006e38e: replica_stub.cpp:1096:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:55.8805 (1609248835880467764 1d629b) replica.replica0.030062910006e38e: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:55.886 (1609248835886978792 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:55.886 (1609248835886987760 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:55.887 (1609248835887012911 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:56.8 (1609248836008966591 1d629b) replica.replica0.030062910006e907: replica_stub.cpp:1096:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18 -D2020-12-29 21:33:56.82.5 (1609248836008976130 1d629b) replica.replica0.030062910006e907: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 18, confirmed_decree = -1 -D2020-12-29 21:33:56.265 (1609248836265736320 1d629c) replica.replica1.030062910006f4d9: replica_stub.cpp:1096:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:56.2655 (1609248836265749134 1d629c) replica.replica1.030062910006f4d9: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:56.294 (1609248836294512966 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:56.294 (1609248836294528642 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:56.294 (1609248836294564858 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:56.296 (1609248836296175098 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:56.296 (1609248836296182817 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:56.296 (1609248836296209150 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:56.388 (1609248836388262320 1d629b) replica.replica0.030062910006fa7a: 
replica_stub.cpp:1096:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:56.3885 (1609248836388273936 1d629b) replica.replica0.030062910006fa7a: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:56.420 (1609248836420358562 1d629c) replica.replica1.030062910006fbe2: replica_stub.cpp:1096:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:56.4205 (1609248836420370080 1d629c) replica.replica1.030062910006fbe2: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:56.500 (1609248836500044906 1d629b) replica.replica0.030062910006ff1b: replica_stub.cpp:1096:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5 -D2020-12-29 21:33:56.5005 (1609248836500054607 1d629b) replica.replica0.030062910006ff1b: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 5, confirmed_decree = -1 -D2020-12-29 21:33:57.841 (1609248837841583251 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248837841] -D2020-12-29 21:33:57.841 (1609248837841757682 1d62b5) replica. fd0.030c000100000026: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248837841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:33:57.841 (1609248837841768223 1d62b5) replica. 
fd0.030c000100000026: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248837841 -D2020-12-29 21:33:58.78 (1609248838078746101 1d629b) replica.replica0.0300629100074063: replica_stub.cpp:1096:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19 -D2020-12-29 21:33:58.78.5 (1609248838078757992 1d629b) replica.replica0.0300629100074063: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19, confirmed_decree = -1 -D2020-12-29 21:33:58.79 (1609248838079200414 1d629b) replica.replica0.0300629100074069: replica_stub.cpp:1096:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9 -D2020-12-29 21:33:58.79.5 (1609248838079207854 1d629b) replica.replica0.0300629100074069: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 9, confirmed_decree = -1 -D2020-12-29 21:33:58.82 (1609248838082243085 1d629b) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:58.82 (1609248838082251433 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:58.82 (1609248838082297653 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:58.176 (1609248838176749217 1d629c) replica.replica1.0304000100000024: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:33:58.176 (1609248838176762733 1d629b) replica.replica0.03006291000744f7: replica_stub.cpp:1096:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19 -D2020-12-29 21:33:58.1765 (1609248838176780559 1d629b) replica.replica0.03006291000744f7: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19, confirmed_decree = -1 -D2020-12-29 21:33:58.176 (1609248838176869310 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:58.176 (1609248838176904072 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:33:58.198 (1609248838198136769 1d629c) replica.replica1.03006291000745ee: 
replica_stub.cpp:1096:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19 -D2020-12-29 21:33:58.1985 (1609248838198150873 1d629c) replica.replica1.03006291000745ee: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19, confirmed_decree = -1 -D2020-12-29 21:33:58.198 (1609248838198348603 1d629c) replica.replica1.03006291000745f1: replica_stub.cpp:1096:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19 -D2020-12-29 21:33:58.1985 (1609248838198355198 1d629c) replica.replica1.03006291000745f1: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 19, confirmed_decree = -1 -D2020-12-29 21:34:00.841 (1609248840841646324 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248840841] -D2020-12-29 21:34:00.841 (1609248840841762956 1d62b6) replica. fd1.030c000000000024: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248840841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:34:00.841 (1609248840841773304 1d62b6) replica. fd1.030c000000000024: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248840841 -D2020-12-29 21:34:01.851 (1609248841851649201 1d62ba) unknown.io-thrd.1925818: builtin_counters.cpp:36:update_counters(): memused_virt = 1397 MB, memused_res = 232MB -D2020-12-29 21:34:01.852 (1609248841852758624 1d62ba) unknown.io-thrd.1925818: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248841851), last_report_time_ms(1609248831850) -D2020-12-29 21:34:03.841 (1609248843841824451 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248843841] -D2020-12-29 21:34:03.841 (1609248843841969298 1d62b6) replica. fd1.030c000000000026: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248843841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:34:03.841 (1609248843841979505 1d62b6) replica. 
fd1.030c000000000026: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248843841 -D2020-12-29 21:34:05.833 (1609248845833428290 1d6298) replica.default4.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 21:34:05.871 (1609248845871390545 1d629c) replica.replica1.030062910008911b: replica_stub.cpp:1096:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 21:34:05.8715 (1609248845871403698 1d629c) replica.replica1.030062910008911b: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 21:34:05.873 (1609248845873156813 1d629c) replica.replica1.0304000100000043: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:05.873 (1609248845873169262 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:05.873 (1609248845873195806 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:05.875 (1609248845875698200 1d629b) replica.replica0.0304000000000068: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:05.875 (1609248845875708025 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:05.875 (1609248845875749036 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:05.877 (1609248845877043001 1d629c) replica.replica1.030062910008915d: replica_stub.cpp:1096:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 14 -D2020-12-29 21:34:05.8775 (1609248845877053788 1d629c) replica.replica1.030062910008915d: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 14, confirmed_decree = -1 -D2020-12-29 21:34:05.880 (1609248845880606117 1d629b) replica.replica0.0300629100089181: replica_stub.cpp:1096:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 -D2020-12-29 21:34:05.8805 (1609248845880618997 1d629b) replica.replica0.0300629100089181: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = 
replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 -D2020-12-29 21:34:05.887 (1609248845887117970 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:05.887 (1609248845887127050 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:05.887 (1609248845887156432 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:05.955 (1609248845955443037 1d6294) replica.default0.03006291000894c0: replica_stub.cpp:2806:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::START -D2020-12-29 21:34:05.955 (1609248845955453356 1d6294) replica.default0.03006291000894c0: hotkey_collector.cpp:265:on_start_detect(): [3.3@10.232.52.144:34803] starting to detect replication::hotkey_type::READ hotkey -D2020-12-29 21:34:06.9 (1609248846009056507 1d629b) replica.replica0.0300629100089736: replica_stub.cpp:1096:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21 -D2020-12-29 21:34:06.92.5 (1609248846009067176 1d629b) replica.replica0.0300629100089736: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 21, confirmed_decree = -1 -D2020-12-29 21:34:06.265 (1609248846265912552 1d629c) replica.replica1.030062910008a2ef: replica_stub.cpp:1096:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 -D2020-12-29 21:34:06.2655 (1609248846265926397 1d629c) replica.replica1.030062910008a2ef: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 -D2020-12-29 21:34:06.294 (1609248846294685058 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:06.294 (1609248846294697604 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:06.294 (1609248846294733828 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:06.296 (1609248846296337430 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:77:broadcast_group_check(): 3.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:06.296 (1609248846296345501 1d629c) replica.replica1.03040001000000c7: 
replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:06.296 (1609248846296372424 1d629c) replica.replica1.03040001000000c7: replica_check.cpp:124:broadcast_group_check(): 3.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:06.388 (1609248846388417813 1d629b) replica.replica0.030062910008a84f: replica_stub.cpp:1096:on_group_check(): 3.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 -D2020-12-29 21:34:06.3885 (1609248846388457565 1d629b) replica.replica0.030062910008a84f: replica_check.cpp:161:on_group_check(): [3.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 -D2020-12-29 21:34:06.420 (1609248846420559683 1d629c) replica.replica1.030062910008a9a6: replica_stub.cpp:1096:on_group_check(): 3.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 -D2020-12-29 21:34:06.4205 (1609248846420570016 1d629c) replica.replica1.030062910008a9a6: replica_check.cpp:161:on_group_check(): [3.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 -D2020-12-29 21:34:06.500 (1609248846500230168 1d629b) replica.replica0.030062910008ad5a: replica_stub.cpp:1096:on_group_check(): 3.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6 -D2020-12-29 21:34:06.5005 (1609248846500242835 1d629b) replica.replica0.030062910008ad5a: replica_check.cpp:161:on_group_check(): [3.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 6, confirmed_decree = -1 -D2020-12-29 21:34:06.841 (1609248846841889699 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248846841] -D2020-12-29 21:34:06.842 (1609248846842007294 1d62b5) replica. fd0.030c000100000028: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248846841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:34:06.842 (1609248846842017972 1d62b5) replica. 
fd0.030c000100000028: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248846841 -D2020-12-29 21:34:07.573 (1609248847573060687 1d629b) replica.replica0.0306000100000005: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.3@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:07.580 (1609248847580131317 1d629c) replica.replica1.0306000000000004: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.0@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:07.735 (1609248847735877288 1d629b) replica.replica0.030600010000000a: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.2@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:07.754 (1609248847754682017 1d629c) replica.replica1.0306000000000009: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.5@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:07.880 (1609248847880357190 1d629c) replica.replica1.030600010000000f: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.1@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:07.898 (1609248847898283928 1d629b) replica.replica0.030600000000000e: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.6@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:08.13 (1609248848013505316 1d629b) replica.replica0.0306000100000014: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.4@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:08.62 (1609248848062942648 1d629b) replica.replica0.0306000000000015: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.1@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:08.78 (1609248848078839530 1d629b) replica.replica0.030062910008f5d8: replica_stub.cpp:1096:on_group_check(): 2.6@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22 -D2020-12-29 21:34:08.78.5 (1609248848078871582 1d629b) replica.replica0.030062910008f5d8: replica_check.cpp:161:on_group_check(): [2.6@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22, confirmed_decree = -1 -D2020-12-29 21:34:08.79 (1609248848079357045 1d629b) replica.replica0.030062910008f5df: replica_stub.cpp:1096:on_group_check(): 1.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10 -D2020-12-29 21:34:08.79.5 (1609248848079366933 1d629b) replica.replica0.030062910008f5df: replica_check.cpp:161:on_group_check(): [1.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 10, confirmed_decree = -1 -D2020-12-29 21:34:08.82 (1609248848082346804 1d629b) replica.replica0.030400000000002b: replica_check.cpp:77:broadcast_group_check(): 2.2@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:08.82 (1609248848082362206 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:08.82 (1609248848082403026 1d629b) replica.replica0.030400000000002b: replica_check.cpp:124:broadcast_group_check(): 2.2@10.232.52.144:34803: send group 
check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:08.176 (1609248848176876004 1d629b) replica.replica0.030062910008f9bf: replica_stub.cpp:1096:on_group_check(): 2.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22 -D2020-12-29 21:34:08.1765 (1609248848176889416 1d629b) replica.replica0.030062910008f9bf: replica_check.cpp:161:on_group_check(): [2.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22, confirmed_decree = -1 -D2020-12-29 21:34:08.177 (1609248848177007327 1d629c) replica.replica1.0304000100000024: replica_check.cpp:77:broadcast_group_check(): 1.0@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:08.177 (1609248848177017940 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:08.177 (1609248848177062411 1d629c) replica.replica1.0304000100000024: replica_check.cpp:124:broadcast_group_check(): 1.0@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:08.189 (1609248848189074560 1d629c) replica.replica1.030600000000001a: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.3@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:08.189 (1609248848189132327 1d629c) replica.replica1.030600010000001b: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.7@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:08.198 (1609248848198221310 1d629c) replica.replica1.030062910008fa99: replica_stub.cpp:1096:on_group_check(): 2.7@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22 -D2020-12-29 21:34:08.1985 (1609248848198233969 1d629c) replica.replica1.030062910008fa99: replica_check.cpp:161:on_group_check(): [2.7@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22, confirmed_decree = -1 -D2020-12-29 21:34:08.198 (1609248848198428518 1d629c) replica.replica1.030062910008fa9b: replica_stub.cpp:1096:on_group_check(): 2.3@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22 -D2020-12-29 21:34:08.1985 (1609248848198439662 1d629c) replica.replica1.030062910008fa9b: replica_check.cpp:161:on_group_check(): [2.3@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 22, confirmed_decree = -1 -D2020-12-29 21:34:09.841 (1609248849841939718 1d62b6) replica. fd1.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248849841] -D2020-12-29 21:34:09.842 (1609248849842048457 1d62b5) replica. 
fd0.030c00010000002a: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248849841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:34:09.842 (1609248849842056457 1d62b5) replica. fd0.030c00010000002a: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248849841 -D2020-12-29 21:34:11.852 (1609248851852867666 1d62bb) unknown.io-thrd.1925819: builtin_counters.cpp:36:update_counters(): memused_virt = 1421 MB, memused_res = 240MB -D2020-12-29 21:34:11.853 (1609248851853854582 1d62bb) unknown.io-thrd.1925819: pegasus_counter_reporter.cpp:293:update(): update now_ms(1609248851852), last_report_time_ms(1609248841851) -D2020-12-29 21:34:12.841 (1609248852841998575 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248852841] -D2020-12-29 21:34:12.842 (1609248852842132763 1d62b6) replica. fd1.030c000000000028: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248852841], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:34:12.842 (1609248852842143924 1d62b6) replica. fd1.030c000000000028: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248852841 -D2020-12-29 21:34:13.859 (1609248853859193670 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1623:on_gc(): start to garbage collection, replica_count = 20 -D2020-12-29 21:34:13.859 (1609248853859221843 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 11 -D2020-12-29 21:34:13.8591 (1609248853859228743 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.0@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 -D2020-12-29 21:34:13.8593 (1609248853859232551 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 -D2020-12-29 21:34:13.8593 (1609248853859245313 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.5@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:34:13.8595 (1609248853859251843 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.3@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 -D2020-12-29 21:34:13.8593 (1609248853859292972 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 
-D2020-12-29 21:34:13.8595 (1609248853859297884 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 10 -D2020-12-29 21:34:13.8590 (1609248853859307404 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.7@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:34:13.8595 (1609248853859316754 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.6@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:34:13.8595 (1609248853859324289 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 11 -D2020-12-29 21:34:13.8591 (1609248853859329210 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.1@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 -D2020-12-29 21:34:13.8593 (1609248853859334412 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 1.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 12 -D2020-12-29 21:34:13.8592 (1609248853859338027 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.2@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 -D2020-12-29 21:34:13.8593 (1609248853859344794 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.0@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:34:13.8595 (1609248853859352314 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.3@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:34:13.8595 (1609248853859356083 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.5@10.232.52.144:34803, status = replication::partition_status::PS_PRIMARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 22 -D2020-12-29 21:34:13.8592 (1609248853859365256 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:34:13.8595 (1609248853859368662 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.6@10.232.52.144:34803, status = 
replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 -D2020-12-29 21:34:13.8593 (1609248853859458619 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 3.2@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 5 -D2020-12-29 21:34:13.8595 (1609248853859463225 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1663:on_gc(): gc_shared: gc condition for 2.4@10.232.52.144:34803, status = replication::partition_status::PS_SECONDARY, garbage_max_decree = 0, last_durable_decree= 0, plog_max_commit_on_disk = 23 -D2020-12-29 21:34:13.859 (1609248853859470817 1d62a1) replica.rep_long0.0301000000000001: mutation_log.cpp:1364:garbage_collection(): gc_shared: too few files to delete, file_count_limit = 100, reserved_log_count = 1, reserved_log_size = 46096, current_log_index = 1 -D2020-12-29 21:34:13.859 (1609248853859478692 1d62a1) replica.rep_long0.0301000000000001: replica_stub.cpp:1774:on_gc(): finish to garbage collection, time_used_ns = 293908 -D2020-12-29 21:34:15.831 (1609248855831561574 1d6295) replica.default1.0301000000000004: replica_stub.cpp:1257:query_configuration_by_node(): send query node partitions request to meta server, stored_replicas_count = 20 -D2020-12-29 21:34:15.831 (1609248855831719139 1d6297) replica.default3.030100010000001d: replica_stub.cpp:1288:on_node_query_reply(): query node partitions replied, err = ERR_OK -D2020-12-29 21:34:15.831 (1609248855831762218 1d6297) replica.default3.030100010000001d: replica_stub.cpp:1332:on_node_query_reply(): process query node partitions response for resp.err = ERR_OK, partitions_count(20), gc_replicas_count(0) -D2020-12-29 21:34:15.833 (1609248855833505475 1d6294) replica.default0.0301000000000006: duplication_sync_timer.cpp:54:run(): duplication_sync to meta(meta-servers) -D2020-12-29 21:34:15.842 (1609248855842062318 1d62b5) replica. fd0.030c000100000001: failure_detector.cpp:597:send_beacon(): send ping message, from[10.232.52.144:34803], to[10.232.52.144:34601], time[1609248855842] -D2020-12-29 21:34:15.842 (1609248855842192389 1d62b6) replica. fd1.030c00000000002a: failure_detector_multimaster.cpp:87:end_ping(): end ping result, error[ERR_OK], time[1609248855842], ack.this_node[10.232.52.144:34601], ack.primary_node[10.232.52.144:34601], ack.is_master[true], ack.allowed[true] -D2020-12-29 21:34:15.842 (1609248855842201734 1d62b6) replica. 
fd1.030c00000000002a: failure_detector.cpp:491:end_ping_internal(): worker 10.232.52.144:34601 send beacon succeed, update last_send_time=1609248855842 -D2020-12-29 21:34:15.860 (1609248855860842801 1d629b) replica.replica0.0306000000000020: replica_chkpt.cpp:67:on_checkpoint_timer(): 2.0@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:15.863 (1609248855863702539 1d629c) replica.replica1.0306000100000021: replica_chkpt.cpp:67:on_checkpoint_timer(): 1.2@10.232.52.144:34803: trigger non-emergency checkpoint -D2020-12-29 21:34:15.871 (1609248855871513159 1d629c) replica.replica1.03006291000a455f: replica_stub.cpp:1096:on_group_check(): 2.1@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 26 -D2020-12-29 21:34:15.8715 (1609248855871536283 1d629c) replica.replica1.03006291000a455f: replica_check.cpp:161:on_group_check(): [2.1@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 26, confirmed_decree = -1 -D2020-12-29 21:34:15.873 (1609248855873235126 1d629c) replica.replica1.0304000100000043: replica_check.cpp:77:broadcast_group_check(): 2.5@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:15.873 (1609248855873243257 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:15.873 (1609248855873306170 1d629c) replica.replica1.0304000100000043: replica_check.cpp:124:broadcast_group_check(): 2.5@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:15.875 (1609248855875899104 1d629b) replica.replica0.0304000000000068: replica_check.cpp:77:broadcast_group_check(): 1.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:15.875 (1609248855875907585 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:15.875 (1609248855875956018 1d629b) replica.replica0.0304000000000068: replica_check.cpp:124:broadcast_group_check(): 1.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:15.877 (1609248855877158531 1d629c) replica.replica1.03006291000a459e: replica_stub.cpp:1096:on_group_check(): 1.2@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 14 -D2020-12-29 21:34:15.8775 (1609248855877170739 1d629c) replica.replica1.03006291000a459e: replica_check.cpp:161:on_group_check(): [1.2@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 14, confirmed_decree = -1 -D2020-12-29 21:34:15.880 (1609248855880796051 1d629b) replica.replica0.03006291000a45c6: replica_stub.cpp:1096:on_group_check(): 3.5@10.232.52.144:34803: received group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 7 -D2020-12-29 21:34:15.8805 (1609248855880808341 
1d629b) replica.replica0.03006291000a45c6: replica_check.cpp:161:on_group_check(): [3.5@10.232.52.144:34803] process group check, primary = 10.232.52.144:34801, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 7, confirmed_decree = -1 -D2020-12-29 21:34:15.887 (1609248855887257648 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:77:broadcast_group_check(): 3.6@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:15.887 (1609248855887273221 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:15.887 (1609248855887315310 1d629c) replica.replica1.03040001000000f4: replica_check.cpp:124:broadcast_group_check(): 3.6@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:15.972 (1609248855972378007 1d6297) replica.default3.03006291000a49b7: replica_stub.cpp:2806:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::START -W2020-12-29 21:34:15.972 (1609248855972392369 1d6297) replica.default3.03006291000a49b7: hotkey_collector.cpp:249:on_start_detect(): [3.3@10.232.52.144:34803] still detecting replication::hotkey_type::READ hotkey, state is hotkey_collector_state::FINE_DETECTING -D2020-12-29 21:34:16.9 (1609248856009396590 1d629b) replica.replica0.03006291000a4b43: replica_stub.cpp:1096:on_group_check(): 2.0@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25 -D2020-12-29 21:34:16.92.5 (1609248856009410781 1d629b) replica.replica0.03006291000a4b43: replica_check.cpp:161:on_group_check(): [2.0@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 25, confirmed_decree = -1 -D2020-12-29 21:34:16.266 (1609248856266094982 1d629c) replica.replica1.03006291000a56d8: replica_stub.cpp:1096:on_group_check(): 3.4@10.232.52.144:34803: received group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 7 -D2020-12-29 21:34:16.2665 (1609248856266109078 1d629c) replica.replica1.03006291000a56d8: replica_check.cpp:161:on_group_check(): [3.4@10.232.52.144:34803] process group check, primary = 10.232.52.144:34802, ballot = 3, status = replication::partition_status::PS_SECONDARY, last_committed_decree = 7, confirmed_decree = -1 -D2020-12-29 21:34:16.294 (1609248856294842103 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:77:broadcast_group_check(): 3.3@10.232.52.144:34803: start to broadcast group check -D2020-12-29 21:34:16.294 (1609248856294855258 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34801 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:16.294 (1609248856294888098 1d629b) replica.replica0.03040000000000b3: replica_check.cpp:124:broadcast_group_check(): 3.3@10.232.52.144:34803: send group check to 10.232.52.144:34802 with state replication::partition_status::PS_SECONDARY -D2020-12-29 21:34:16.296 (1609248856296477459 1d629c) 
-E2020-12-29 21:34:25.864 (1609248865864866778 1d6297) replica.default3.0306000100000024: hotkey_collector.cpp:173:change_state_by_result(): [3.3@10.232.52.144:34803] Find the hotkey: ThisisahotkeyThisisahotkey
-D2020-12-29 21:34:25.989 (1609248865989297524 1d6294) replica.default0.03006291000c04e2: replica_stub.cpp:2806:on_detect_hotkey(): [3.3@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::START
-W2020-12-29 21:34:25.989 (1609248865989324883 1d6294) replica.default0.03006291000c04e2: hotkey_collector.cpp:258:on_start_detect(): [3.3@10.232.52.144:34803] replication::hotkey_type::READ hotkey result has been found: ThisisahotkeyThisisahotkey, you can send a stop rpc to restart hotkey detection
last_committed_decree = 10, confirmed_decree = -1 -D2020-12-29 21:34:47.0V (1609248887000211230 1d6295) replica.default1.03006291000fb195: replica_stub.cpp:2806:on_detect_hotkey(): [3.0@10.232.52.144:34803]: received detect hotkey request, hotkey_type = replication::hotkey_type::READ, detect_action = replication::detect_action::QUERY -D2020-12-29 21:34:47.0 (1609248887000220301 1d6295) replica.default1.03006291000fb195: hotkey_collector.cpp:292:query_result(): [3.0@10.232.52.144:34803] Can't get hotkey now, now state: hotkey_collector_state::STOPPED diff --git a/rdsn b/rdsn index 6851f33f5b..1fd9cd5711 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit 6851f33f5ba739ab67eb36e2c1fe35b15b1f8c77 +Subproject commit 1fd9cd57117de8dc67d42f6b8a5fb05ea82e122f From 7ee8672fc6dbd56f4553442b35fb368bccbb55d6 Mon Sep 17 00:00:00 2001 From: Tangyanzhao Date: Wed, 30 Dec 2020 00:17:26 +0800 Subject: [PATCH 08/19] update --- src/test/function_test/run.sh | 54 +++++++++++++++++------------------ 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/src/test/function_test/run.sh b/src/test/function_test/run.sh index e7c0cded1e..e24d982650 100755 --- a/src/test/function_test/run.sh +++ b/src/test/function_test/run.sh @@ -48,31 +48,29 @@ test_case=pegasus_function_test config_file=config.ini table_name=temp -# GTEST_OUTPUT="xml:$REPORT_DIR/basic.xml" GTEST_FILTER="basic.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test basic failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/incr.xml" GTEST_FILTER="incr.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test incr failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/check_and_set.xml" GTEST_FILTER="check_and_set.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test check_and_set failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/check_and_mutate.xml" GTEST_FILTER="check_and_mutate.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test check_and_mutate failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/scan.xml" GTEST_FILTER="scan.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test scan failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/ttl.xml" GTEST_FILTER="ttl.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test ttl failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/slog_log.xml" GTEST_FILTER="lost_log.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test slog_lost failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/recall.xml" GTEST_FILTER="drop_and_recall.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test recall failed: $test_case $config_file $table_name" -# if [ $on_travis == "NO" ]; then -# GTEST_OUTPUT="xml:$REPORT_DIR/restore.xml" GTEST_FILTER="restore_test.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test restore_test failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/recovery.xml" GTEST_FILTER="recovery_test.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test recovery failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/bulk_load.xml" GTEST_FILTER="bulk_load_test.*" ./$test_case $config_file $table_name -# exit_if_fail $? 
"run test bulk load failed: $test_case $config_file $table_name" -# GTEST_OUTPUT="xml:$REPORT_DIR/test_detect_hotspot.xml" GTEST_FILTER="test_detect_hotspot.*" ./$test_case $config_file $table_name -# exit_if_fail $? "run test test_detect_hotspot load failed: $test_case $config_file $table_name" -# fi -GTEST_OUTPUT="xml:$REPORT_DIR/test_detect_hotspot.xml" GTEST_FILTER="test_detect_hotspot.*" ./$test_case $config_file $table_name -exit_if_fail $? "run test test_detect_hotspot load failed: $test_case $config_file $table_name" \ No newline at end of file +GTEST_OUTPUT="xml:$REPORT_DIR/basic.xml" GTEST_FILTER="basic.*" ./$test_case $config_file $table_name +exit_if_fail $? "run test basic failed: $test_case $config_file $table_name" +GTEST_OUTPUT="xml:$REPORT_DIR/incr.xml" GTEST_FILTER="incr.*" ./$test_case $config_file $table_name +exit_if_fail $? "run test incr failed: $test_case $config_file $table_name" +GTEST_OUTPUT="xml:$REPORT_DIR/check_and_set.xml" GTEST_FILTER="check_and_set.*" ./$test_case $config_file $table_name +exit_if_fail $? "run test check_and_set failed: $test_case $config_file $table_name" +GTEST_OUTPUT="xml:$REPORT_DIR/check_and_mutate.xml" GTEST_FILTER="check_and_mutate.*" ./$test_case $config_file $table_name +exit_if_fail $? "run test check_and_mutate failed: $test_case $config_file $table_name" +GTEST_OUTPUT="xml:$REPORT_DIR/scan.xml" GTEST_FILTER="scan.*" ./$test_case $config_file $table_name +exit_if_fail $? "run test scan failed: $test_case $config_file $table_name" +GTEST_OUTPUT="xml:$REPORT_DIR/ttl.xml" GTEST_FILTER="ttl.*" ./$test_case $config_file $table_name +exit_if_fail $? "run test ttl failed: $test_case $config_file $table_name" +GTEST_OUTPUT="xml:$REPORT_DIR/slog_log.xml" GTEST_FILTER="lost_log.*" ./$test_case $config_file $table_name +exit_if_fail $? "run test slog_lost failed: $test_case $config_file $table_name" +GTEST_OUTPUT="xml:$REPORT_DIR/recall.xml" GTEST_FILTER="drop_and_recall.*" ./$test_case $config_file $table_name +exit_if_fail $? "run test recall failed: $test_case $config_file $table_name" +if [ $on_travis == "NO" ]; then + GTEST_OUTPUT="xml:$REPORT_DIR/restore.xml" GTEST_FILTER="restore_test.*" ./$test_case $config_file $table_name + exit_if_fail $? "run test restore_test failed: $test_case $config_file $table_name" + GTEST_OUTPUT="xml:$REPORT_DIR/recovery.xml" GTEST_FILTER="recovery_test.*" ./$test_case $config_file $table_name + exit_if_fail $? "run test recovery failed: $test_case $config_file $table_name" + GTEST_OUTPUT="xml:$REPORT_DIR/bulk_load.xml" GTEST_FILTER="bulk_load_test.*" ./$test_case $config_file $table_name + exit_if_fail $? "run test bulk load failed: $test_case $config_file $table_name" + GTEST_OUTPUT="xml:$REPORT_DIR/test_detect_hotspot.xml" GTEST_FILTER="test_detect_hotspot.*" ./$test_case $config_file $table_name + exit_if_fail $? 
"run test test_detect_hotspot load failed: $test_case $config_file $table_name" +fi \ No newline at end of file From bc79a4a40b1e975a774f5d39ea2325437163b903 Mon Sep 17 00:00:00 2001 From: Tangyanzhao Date: Wed, 30 Dec 2020 00:39:36 +0800 Subject: [PATCH 09/19] update --- src/server/hotspot_partition_calculator.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/server/hotspot_partition_calculator.cpp b/src/server/hotspot_partition_calculator.cpp index 1950e2a001..32d2cb1d34 100644 --- a/src/server/hotspot_partition_calculator.cpp +++ b/src/server/hotspot_partition_calculator.cpp @@ -162,11 +162,6 @@ void hotspot_partition_calculator::detect_hotkey_in_hotpartition(int data_type) (data_type == partition_qps_type::READ_HOTSPOT_DATA ? "read" : "write"), _app_name, index); - ddebug_f("!!!!!! {} {} {} {}", - index, - data_type, - _hot_points[index][data_type].get()->get_value(), - _hotpartition_counter[index][data_type]); send_detect_hotkey_request(_app_name, index, (data_type == dsn::replication::hotkey_type::type::READ) From dc85c7dbad98749ceac9c7784d40d03b066bb7ef Mon Sep 17 00:00:00 2001 From: Smilencer <527646889@qq.com> Date: Wed, 30 Dec 2020 00:54:25 +0800 Subject: [PATCH 10/19] Update run.sh --- src/test/function_test/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/function_test/run.sh b/src/test/function_test/run.sh index e24d982650..334b590351 100755 --- a/src/test/function_test/run.sh +++ b/src/test/function_test/run.sh @@ -73,4 +73,4 @@ if [ $on_travis == "NO" ]; then exit_if_fail $? "run test bulk load failed: $test_case $config_file $table_name" GTEST_OUTPUT="xml:$REPORT_DIR/test_detect_hotspot.xml" GTEST_FILTER="test_detect_hotspot.*" ./$test_case $config_file $table_name exit_if_fail $? "run test test_detect_hotspot load failed: $test_case $config_file $table_name" -fi \ No newline at end of file +fi From 8a4ba0e3319cd2be0320ff396328d74c5775c4fb Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Wed, 30 Dec 2020 11:23:15 +0800 Subject: [PATCH 11/19] fix --- rdsn | 2 +- src/server/hotspot_partition_calculator.cpp | 8 +----- .../function_test/test_detect_hotspot.cpp | 26 +------------------ 3 files changed, 3 insertions(+), 33 deletions(-) diff --git a/rdsn b/rdsn index 1fd9cd5711..d5a6f67e76 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit 1fd9cd57117de8dc67d42f6b8a5fb05ea82e122f +Subproject commit d5a6f67e7625994029e88375a21f33ba44f0ca2e diff --git a/src/server/hotspot_partition_calculator.cpp b/src/server/hotspot_partition_calculator.cpp index 32d2cb1d34..3b7086b695 100644 --- a/src/server/hotspot_partition_calculator.cpp +++ b/src/server/hotspot_partition_calculator.cpp @@ -44,7 +44,7 @@ DSN_DEFINE_bool("pegasus.collector", DSN_DEFINE_int32("pegasus.collector", hot_partition_threshold, - 4, + 3, "threshold of hotspot partition value, if app.stat.hotspots >= " "FLAGS_hotpartition_threshold, this partition is a hot partition"); @@ -116,12 +116,6 @@ void hotspot_partition_calculator::stat_histories_analyse(int data_type, // use ceil to guarantee conversion results hot_points[i] = ceil(std::max(hot_point, double(0))); } - // test - std::string result = ""; - for (int i = 0; i < hot_point_size; i++) { - result += std::to_string(hot_points[i]); - } - derror_f("{} hot_points: {}", data_type, result); } void hotspot_partition_calculator::update_hot_point(int data_type, std::vector &hot_points) diff --git a/src/test/function_test/test_detect_hotspot.cpp b/src/test/function_test/test_detect_hotspot.cpp index 119bf476b3..1da9d0e0a6 
100644 --- a/src/test/function_test/test_detect_hotspot.cpp +++ b/src/test/function_test/test_detect_hotspot.cpp @@ -332,33 +332,9 @@ TEST_F(test_detect_hotspot, write_hotspot_data) { std::cout << "start testing write_hotspot_data..." << std::endl; write_hotspot_data(); - std::cout << "hotspot passed....." << std::endl; -} - -TEST_F(test_detect_hotspot, write_random_data) -{ - std::cout << "start testing write_random_data..." << std::endl; write_random_data(); - std::cout << "hotspot passed....." << std::endl; -} - -TEST_F(test_detect_hotspot, capture_until_maxtime) -{ - std::cout << "start testing capture_until_maxtime..." << std::endl; capture_until_maxtime(); - std::cout << "hotspot passed....." << std::endl; -} - -TEST_F(test_detect_hotspot, read_hotspot_data) -{ - std::cout << "start testing read_hotspot_data..." << std::endl; read_hotspot_data(); - std::cout << "hotspot passed....." << std::endl; -} - -TEST_F(test_detect_hotspot, read_random_data) -{ - std::cout << "start testing read_random_data..." << std::endl; read_random_data(); std::cout << "hotspot passed....." << std::endl; -} +} \ No newline at end of file From 0e2281ca3790696754fc8b83f9a1bb75066b0f40 Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Wed, 30 Dec 2020 11:23:41 +0800 Subject: [PATCH 12/19] update --- src/test/function_test/test_detect_hotspot.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/function_test/test_detect_hotspot.cpp b/src/test/function_test/test_detect_hotspot.cpp index 1da9d0e0a6..d970e8d40c 100644 --- a/src/test/function_test/test_detect_hotspot.cpp +++ b/src/test/function_test/test_detect_hotspot.cpp @@ -337,4 +337,4 @@ TEST_F(test_detect_hotspot, write_hotspot_data) read_hotspot_data(); read_random_data(); std::cout << "hotspot passed....." 
<< std::endl; -} \ No newline at end of file +} From c40d0753265031badba4f518895a22c92a6b0cb9 Mon Sep 17 00:00:00 2001 From: Smilencer <527646889@qq.com> Date: Wed, 30 Dec 2020 11:44:21 +0800 Subject: [PATCH 13/19] Update test_detect_hotspot.cpp --- src/test/function_test/test_detect_hotspot.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/function_test/test_detect_hotspot.cpp b/src/test/function_test/test_detect_hotspot.cpp index d970e8d40c..32d26ced6b 100644 --- a/src/test/function_test/test_detect_hotspot.cpp +++ b/src/test/function_test/test_detect_hotspot.cpp @@ -81,8 +81,8 @@ class test_detect_hotspot : public testing::Test { chdir(global_env::instance()._pegasus_root.c_str()); system("./run.sh clear_onebox"); - // system("./run.sh start_onebox -w"); - // chdir(global_env::instance()._working_dir.c_str()); + system("./run.sh start_onebox -w"); + chdir(global_env::instance()._working_dir.c_str()); } void write_hotspot_data() From 14dad92c276ac2035ebc4665ddb3b71a5738caf2 Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Thu, 31 Dec 2020 15:35:00 +0800 Subject: [PATCH 14/19] update --- src/server/test/hotspot_partition_test.cpp | 9 +- .../function_test/test_detect_hotspot.cpp | 145 ++++++------------ 2 files changed, 46 insertions(+), 108 deletions(-) diff --git a/src/server/test/hotspot_partition_test.cpp b/src/server/test/hotspot_partition_test.cpp index 31d1e87c76..e21b84a27b 100644 --- a/src/server/test/hotspot_partition_test.cpp +++ b/src/server/test/hotspot_partition_test.cpp @@ -132,9 +132,6 @@ TEST_F(hotspot_partition_test, hotspot_partition_policy) TEST_F(hotspot_partition_test, send_detect_hotkey_request) { - auto default_occurrence_threshold = FLAGS_occurrence_threshold; - FLAGS_occurrence_threshold = 100; - const int READ_HOT_PARTITION = 7; const int WRITE_HOT_PARTITION = 0; std::vector test_rows = generate_row_data(); @@ -145,11 +142,9 @@ TEST_F(hotspot_partition_test, send_detect_hotkey_request) expect_result[WRITE_HOT_PARTITION][1] = FLAGS_occurrence_threshold; aggregate_analyse_data(test_rows, expect_result, FLAGS_occurrence_threshold); const int back_to_normal = 30; - expect_result[READ_HOT_PARTITION][0] = FLAGS_occurrence_threshold - back_to_normal; - expect_result[WRITE_HOT_PARTITION][1] = FLAGS_occurrence_threshold - back_to_normal; + expect_result[READ_HOT_PARTITION][0] = 0; + expect_result[WRITE_HOT_PARTITION][1] = 0; aggregate_analyse_data(generate_row_data(), expect_result, back_to_normal); - - FLAGS_occurrence_threshold = default_occurrence_threshold; } } // namespace server diff --git a/src/test/function_test/test_detect_hotspot.cpp b/src/test/function_test/test_detect_hotspot.cpp index 32d26ced6b..c4bd6b7fd8 100644 --- a/src/test/function_test/test_detect_hotspot.cpp +++ b/src/test/function_test/test_detect_hotspot.cpp @@ -50,6 +50,17 @@ static std::string generate_hash_key_by_random(bool is_hotkey, int probability = return result; } +enum detection_type +{ + read_data, + write_data +}; +enum key_type +{ + random_dataset, + hotspot_dataset +}; + class test_detect_hotspot : public testing::Test { public: @@ -57,7 +68,6 @@ class test_detect_hotspot : public testing::Test { chdir(global_env::instance()._pegasus_root.c_str()); system("pwd"); - system("./run.sh clear_onebox"); system("cp src/server/config.min.ini config-server-test-hotspot.ini"); system("sed -i \"/^\\s*enable_detect_hotkey/c enable_detect_hotkey = " @@ -75,6 +85,8 @@ class test_detect_hotspot : public testing::Test auto err = ddl_client->create_app(app_name.c_str(), 
"pegasus", 8, 3, {}, false); ASSERT_EQ(dsn::ERR_OK, err); + + ddl_client->list_app(app_name, app_id, partition_count, partitions); } virtual void TearDown() override @@ -85,37 +97,33 @@ class test_detect_hotspot : public testing::Test chdir(global_env::instance()._working_dir.c_str()); } - void write_hotspot_data() + void generate_dataset(int64_t time_duration, detection_type dt, key_type kt) { int64_t start = dsn_now_s(); int err = PERR_OK; ASSERT_NE(pg_client, nullptr); - const int64_t warmup_second = 30; - for (int i = 0; dsn_now_s() - start < warmup_second; ++i %= 1000) { + for (int i = 0; dsn_now_s() - start < time_duration; ++i %= 1000) { std::string index = std::to_string(i); - std::string h_key = generate_hash_key_by_random(false, 0); + std::string h_key = generate_hash_key_by_random(kt, 50); std::string s_key = "sortkey_" + index; std::string value = "value_" + index; - err = pg_client->set(h_key, s_key, value); - ASSERT_EQ(err, PERR_OK); + if (dt == detection_type::write_data) { + err = pg_client->set(h_key, s_key, value); + ASSERT_EQ(err, PERR_OK); + } else { + err = pg_client->get(h_key, s_key, value); + ASSERT_TRUE((err == PERR_OK) || err == (PERR_NOT_FOUND)); + } } + } - for (int i = 0; dsn_now_s() - start < max_detection_second; ++i %= 1000) { - std::string index = std::to_string(i); - std::string h_key = generate_hash_key_by_random(true, 50); - std::string s_key = "sortkey_" + index; - std::string value = "value_" + index; - err = pg_client->set(h_key, s_key, value); - ASSERT_EQ(err, PERR_OK); - } + void write_hotspot_data() + { + generate_dataset(warmup_second, detection_type::write_data, key_type::random_dataset); + generate_dataset( + max_detection_second, detection_type::write_data, key_type::hotspot_dataset); - int32_t app_id; - int32_t partition_count; - std::vector partitions; - ddl_client->list_app(app_name, app_id, partition_count, partitions); - dsn::replication::detect_hotkey_response resp; - dsn::replication::detect_hotkey_request req; req.type = dsn::replication::hotkey_type::type::WRITE; req.action = dsn::replication::detect_action::QUERY; bool find_hotkey = false; @@ -155,25 +163,9 @@ class test_detect_hotspot : public testing::Test void write_random_data() { - int64_t start = dsn_now_s(); - int err = PERR_OK; - ASSERT_NE(pg_client, nullptr); + generate_dataset( + max_detection_second, detection_type::write_data, key_type::random_dataset); - for (int i = 0; dsn_now_s() - start < max_detection_second; ++i %= 1000) { - std::string index = std::to_string(i); - std::string h_key = generate_hash_key_by_random(false, 0); - std::string s_key = "sortkey_" + index; - std::string value = "value_" + index; - err = pg_client->set(h_key, s_key, value); - ASSERT_EQ(err, PERR_OK); - } - - int32_t app_id; - int32_t partition_count; - std::vector partitions; - ddl_client->list_app(app_name, app_id, partition_count, partitions); - dsn::replication::detect_hotkey_response resp; - dsn::replication::detect_hotkey_request req; req.type = dsn::replication::hotkey_type::type::WRITE; req.action = dsn::replication::detect_action::QUERY; bool find_hotkey = false; @@ -190,17 +182,10 @@ class test_detect_hotspot : public testing::Test void capture_until_maxtime() { - int64_t start = dsn_now_s(); - dsn::replication::detect_hotkey_response resp; - dsn::replication::detect_hotkey_request req; int target_partition = 2; - int32_t app_id; - int32_t partition_count; - std::vector partitions; - req.type = dsn::replication::hotkey_type::type::WRITE; req.action = 
dsn::replication::detect_action::START; - ddl_client->list_app(app_name, app_id, partition_count, partitions); + req.pid = dsn::gpid(app_id, target_partition); auto errinfo = ddl_client->detect_hotkey(partitions[target_partition].primary, req, resp); ASSERT_EQ(errinfo, dsn::ERR_OK); @@ -213,14 +198,8 @@ class test_detect_hotspot : public testing::Test // max_detection_second > max_seconds_to_detect_hotkey int max_seconds_to_detect_hotkey = 160; - for (int i = 0; dsn_now_s() - start < max_seconds_to_detect_hotkey; ++i %= 1000) { - std::string index = std::to_string(i); - std::string h_key = generate_hash_key_by_random(false, 0); - std::string s_key = "sortkey_" + index; - std::string value = "value_" + index; - auto err = pg_client->set(h_key, s_key, value); - ASSERT_EQ(err, PERR_OK); - } + generate_dataset( + max_seconds_to_detect_hotkey, detection_type::write_data, key_type::random_dataset); req.action = dsn::replication::detect_action::QUERY; errinfo = ddl_client->detect_hotkey(partitions[target_partition].primary, req, resp); @@ -230,35 +209,10 @@ class test_detect_hotspot : public testing::Test void read_hotspot_data() { - int64_t start = dsn_now_s(); - int err = PERR_OK; - ASSERT_NE(pg_client, nullptr); - - const int64_t warmup_second = 30; - for (int i = 0; dsn_now_s() - start < warmup_second; ++i %= 1000) { - std::string index = std::to_string(i); - std::string h_key = generate_hash_key_by_random(false, 0); - std::string s_key = "sortkey_" + index; - std::string value = "value_" + index; - err = pg_client->get(h_key, s_key, value); - ASSERT_TRUE((err == PERR_OK) || err == (PERR_NOT_FOUND)); - } + generate_dataset(warmup_second, detection_type::read_data, key_type::random_dataset); + generate_dataset( + max_detection_second, detection_type::read_data, key_type::hotspot_dataset); - for (int i = 0; dsn_now_s() - start < max_detection_second; ++i %= 1000) { - std::string index = std::to_string(i); - std::string h_key = generate_hash_key_by_random(true, 50); - std::string s_key = "sortkey_" + index; - std::string value; - err = pg_client->get(h_key, s_key, value); - ASSERT_TRUE((err == PERR_OK) || err == (PERR_NOT_FOUND)); - } - - int32_t app_id; - int32_t partition_count; - std::vector partitions; - ddl_client->list_app(app_name, app_id, partition_count, partitions); - dsn::replication::detect_hotkey_response resp; - dsn::replication::detect_hotkey_request req; req.type = dsn::replication::hotkey_type::type::READ; req.action = dsn::replication::detect_action::QUERY; bool find_hotkey = false; @@ -289,25 +243,8 @@ class test_detect_hotspot : public testing::Test void read_random_data() { - int64_t start = dsn_now_s(); - int err = PERR_OK; - ASSERT_NE(pg_client, nullptr); + generate_dataset(max_detection_second, detection_type::read_data, key_type::random_dataset); - for (int i = 0; dsn_now_s() - start < max_detection_second; ++i %= 1000) { - std::string index = std::to_string(i); - std::string h_key = generate_hash_key_by_random(false, 0); - std::string s_key = "sortkey_" + index; - std::string value; - err = pg_client->get(h_key, s_key, value); - ASSERT_TRUE((err == PERR_OK) || err == (PERR_NOT_FOUND)); - } - - int32_t app_id; - int32_t partition_count; - std::vector partitions; - ddl_client->list_app(app_name, app_id, partition_count, partitions); - dsn::replication::detect_hotkey_response resp; - dsn::replication::detect_hotkey_request req; req.type = dsn::replication::hotkey_type::type::READ; req.action = dsn::replication::detect_action::QUERY; bool find_hotkey = false; @@ -324,6 
+261,12 @@ class test_detect_hotspot : public testing::Test const std::string app_name = "hotspot_test"; const int64_t max_detection_second = 100; + const int64_t warmup_second = 30; + int32_t app_id; + int32_t partition_count; + std::vector partitions; + dsn::replication::detect_hotkey_response resp; + dsn::replication::detect_hotkey_request req; std::shared_ptr ddl_client; pegasus::pegasus_client *pg_client; }; From 839bb3b6b14caf88ee916eda8a74c9bfd1b90484 Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Thu, 31 Dec 2020 15:36:09 +0800 Subject: [PATCH 15/19] update rdsn --- rdsn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rdsn b/rdsn index d5a6f67e76..00bb368678 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit d5a6f67e7625994029e88375a21f33ba44f0ca2e +Subproject commit 00bb36867828403f42009a50d8a75d2444853bb1 From d39bb8c5dcdcbcec955c7b0b68a96f832909f9c9 Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Thu, 31 Dec 2020 15:54:12 +0800 Subject: [PATCH 16/19] 1 --- rdsn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rdsn b/rdsn index 00bb368678..f27decf2e7 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit 00bb36867828403f42009a50d8a75d2444853bb1 +Subproject commit f27decf2e7015b4b75e2e40b53ad6b33fb752ecd From b42dac0894c72578d6162c78d395402a5ac3a8a9 Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Thu, 31 Dec 2020 16:08:54 +0800 Subject: [PATCH 17/19] 1 --- rdsn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rdsn b/rdsn index f27decf2e7..00bb368678 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit f27decf2e7015b4b75e2e40b53ad6b33fb752ecd +Subproject commit 00bb36867828403f42009a50d8a75d2444853bb1 From b76f8cb816a1c1330b0fe2a7f3176da9d6606221 Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Thu, 31 Dec 2020 18:48:07 +0800 Subject: [PATCH 18/19] update --- src/server/hotkey_collector.cpp | 8 +- src/server/test/hotspot_partition_test.cpp | 2 +- .../function_test/test_detect_hotspot.cpp | 110 +++++++----------- 3 files changed, 45 insertions(+), 75 deletions(-) diff --git a/src/server/hotkey_collector.cpp b/src/server/hotkey_collector.cpp index b5912dcbb3..6b91b7912e 100644 --- a/src/server/hotkey_collector.cpp +++ b/src/server/hotkey_collector.cpp @@ -245,8 +245,6 @@ void hotkey_collector::on_start_detect(dsn::replication::detect_hotkey_response hint = fmt::format("still detecting {} hotkey, state is {}", dsn::enum_to_string(_hotkey_type), enum_to_string(now_state)); - resp.__set_err_hint(hint); - dwarn_replica(hint); return; case hotkey_collector_state::FINISHED: resp.err = dsn::ERR_BUSY; @@ -254,15 +252,11 @@ void hotkey_collector::on_start_detect(dsn::replication::detect_hotkey_response "restart hotkey detection", dsn::enum_to_string(_hotkey_type), pegasus::utils::c_escape_string(_result.hot_hash_key)); - resp.__set_err_hint(hint); - dwarn_replica(hint); return; case hotkey_collector_state::STOPPED: change_state_to_coarse_detecting(); resp.err = dsn::ERR_OK; hint = fmt::format("starting to detect {} hotkey", dsn::enum_to_string(_hotkey_type)); - resp.__set_err_hint(hint); - ddebug_replica(hint); return; default: hint = "invalid collector state"; @@ -271,6 +265,8 @@ void hotkey_collector::on_start_detect(dsn::replication::detect_hotkey_response derror_replica(hint); dassert(false, "invalid collector state"); } + resp.__set_err_hint(hint); + dwarn_replica(hint); } void hotkey_collector::on_stop_detect(dsn::replication::detect_hotkey_response &resp) diff --git a/src/server/test/hotspot_partition_test.cpp 
b/src/server/test/hotspot_partition_test.cpp index e21b84a27b..d390f180be 100644 --- a/src/server/test/hotspot_partition_test.cpp +++ b/src/server/test/hotspot_partition_test.cpp @@ -25,7 +25,7 @@ namespace pegasus { namespace server { DSN_DECLARE_int32(occurrence_threshold); -DSN_DECLARE_int32(enable_detect_hotkey); +DSN_DECLARE_bool(enable_detect_hotkey); class hotspot_partition_test : public pegasus_server_test_base { diff --git a/src/test/function_test/test_detect_hotspot.cpp b/src/test/function_test/test_detect_hotspot.cpp index c4bd6b7fd8..d60b90a202 100644 --- a/src/test/function_test/test_detect_hotspot.cpp +++ b/src/test/function_test/test_detect_hotspot.cpp @@ -118,14 +118,15 @@ class test_detect_hotspot : public testing::Test } } - void write_hotspot_data() + void get_result(detection_type dt, key_type expect_hotspot) { - generate_dataset(warmup_second, detection_type::write_data, key_type::random_dataset); - generate_dataset( - max_detection_second, detection_type::write_data, key_type::hotspot_dataset); - - req.type = dsn::replication::hotkey_type::type::WRITE; + if (dt == detection_type::write_data) { + req.type = dsn::replication::hotkey_type::type::WRITE; + } else { + req.type = dsn::replication::hotkey_type::type::READ; + } req.action = dsn::replication::detect_action::QUERY; + bool find_hotkey = false; int partition_index; for (partition_index = 0; partition_index < partitions.size(); partition_index++) { @@ -138,17 +139,24 @@ class test_detect_hotspot : public testing::Test break; } } - ASSERT_TRUE(find_hotkey); - ASSERT_EQ(resp.err, dsn::ERR_OK); - ASSERT_EQ(resp.hotkey_result, "ThisisahotkeyThisisahotkey"); + if (expect_hotspot == key_type::hotspot_dataset) { + ASSERT_TRUE(find_hotkey); + ASSERT_EQ(resp.err, dsn::ERR_OK); + ASSERT_EQ(resp.hotkey_result, "ThisisahotkeyThisisahotkey"); + } else { + ASSERT_FALSE(find_hotkey); + } // Wait for collector sending the next start detecting command sleep(15); req.action = dsn::replication::detect_action::STOP; - auto errinfo = ddl_client->detect_hotkey(partitions[partition_index].primary, req, resp); - ASSERT_EQ(errinfo, dsn::ERR_OK); - ASSERT_EQ(resp.err, dsn::ERR_OK); + for (partition_index = 0; partition_index < partitions.size(); partition_index++) { + auto errinfo = + ddl_client->detect_hotkey(partitions[partition_index].primary, req, resp); + ASSERT_EQ(errinfo, dsn::ERR_OK); + ASSERT_EQ(resp.err, dsn::ERR_OK); + } req.action = dsn::replication::detect_action::QUERY; for (partition_index = 0; partition_index < partitions.size(); partition_index++) { @@ -161,23 +169,19 @@ class test_detect_hotspot : public testing::Test } } + void write_hotspot_data() + { + generate_dataset(warmup_second, detection_type::write_data, key_type::random_dataset); + generate_dataset( + max_detection_second, detection_type::write_data, key_type::hotspot_dataset); + get_result(detection_type::write_data, key_type::hotspot_dataset); + } + void write_random_data() { generate_dataset( max_detection_second, detection_type::write_data, key_type::random_dataset); - - req.type = dsn::replication::hotkey_type::type::WRITE; - req.action = dsn::replication::detect_action::QUERY; - bool find_hotkey = false; - int partition_index; - for (partition_index = 0; partition_index < partitions.size(); partition_index++) { - req.pid = dsn::gpid(app_id, partition_index); - auto errinfo = - ddl_client->detect_hotkey(partitions[partition_index].primary, req, resp); - ASSERT_EQ(errinfo, dsn::ERR_OK); - ASSERT_EQ(resp.err_hint, - "Can't get hotkey now, now state: 
hotkey_collector_state::STOPPED"); - } + get_result(detection_type::write_data, key_type::random_dataset); } void capture_until_maxtime() @@ -209,54 +213,16 @@ class test_detect_hotspot : public testing::Test void read_hotspot_data() { - generate_dataset(warmup_second, detection_type::read_data, key_type::random_dataset); + generate_dataset(warmup_second, detection_type::read_data, key_type::hotspot_dataset); generate_dataset( max_detection_second, detection_type::read_data, key_type::hotspot_dataset); - - req.type = dsn::replication::hotkey_type::type::READ; - req.action = dsn::replication::detect_action::QUERY; - bool find_hotkey = false; - int partition_index; - for (partition_index = 0; partition_index < partitions.size(); partition_index++) { - req.pid = dsn::gpid(app_id, partition_index); - auto errinfo = - ddl_client->detect_hotkey(partitions[partition_index].primary, req, resp); - ASSERT_EQ(errinfo, dsn::ERR_OK); - if (!resp.hotkey_result.empty()) { - find_hotkey = true; - break; - } - } - ASSERT_TRUE(find_hotkey); - ASSERT_EQ(resp.err, dsn::ERR_OK); - - ASSERT_EQ(resp.hotkey_result, "ThisisahotkeyThisisahotkey"); - - // Wait for collector sending the next start detecting command - sleep(15); - - req.action = dsn::replication::detect_action::STOP; - auto errinfo = ddl_client->detect_hotkey(partitions[partition_index].primary, req, resp); - ASSERT_EQ(errinfo, dsn::ERR_OK); - ASSERT_EQ(resp.err, dsn::ERR_OK); + get_result(detection_type::read_data, key_type::hotspot_dataset); } void read_random_data() { generate_dataset(max_detection_second, detection_type::read_data, key_type::random_dataset); - - req.type = dsn::replication::hotkey_type::type::READ; - req.action = dsn::replication::detect_action::QUERY; - bool find_hotkey = false; - int partition_index; - for (partition_index = 0; partition_index < partitions.size(); partition_index++) { - req.pid = dsn::gpid(app_id, partition_index); - auto errinfo = - ddl_client->detect_hotkey(partitions[partition_index].primary, req, resp); - ASSERT_EQ(errinfo, dsn::ERR_OK); - ASSERT_EQ(resp.err_hint, - "Can't get hotkey now, now state: hotkey_collector_state::STOPPED"); - } + get_result(detection_type::read_data, key_type::random_dataset); } const std::string app_name = "hotspot_test"; @@ -273,11 +239,19 @@ class test_detect_hotspot : public testing::Test TEST_F(test_detect_hotspot, write_hotspot_data) { - std::cout << "start testing write_hotspot_data..." << std::endl; + std::cout << "start testing write hotspot data..." << std::endl; write_hotspot_data(); + std::cout << "write hotspot data passed....." << std::endl; + std::cout << "start testing write random data..." << std::endl; write_random_data(); + std::cout << "write random data passed....." << std::endl; + std::cout << "start testing max detection time..." << std::endl; capture_until_maxtime(); + std::cout << "max detection time passed....." << std::endl; + std::cout << "start testing read hotspot data..." << std::endl; read_hotspot_data(); + std::cout << "read hotspot data passed....." << std::endl; + std::cout << "start testing read random data..." << std::endl; read_random_data(); - std::cout << "hotspot passed....." << std::endl; + std::cout << "read random data passed....." 
<< std::endl; } From a59d53ba4b12e989f8c24a573025be38ab05c709 Mon Sep 17 00:00:00 2001 From: tangyanzhao Date: Wed, 6 Jan 2021 11:48:56 +0800 Subject: [PATCH 19/19] fix --- rdsn | 2 +- src/server/hotkey_collector.cpp | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/rdsn b/rdsn index 00bb368678..5fe7ff16a5 160000 --- a/rdsn +++ b/rdsn @@ -1 +1 @@ -Subproject commit 00bb36867828403f42009a50d8a75d2444853bb1 +Subproject commit 5fe7ff16a5ecd7656f04a8689637d27865678e33 diff --git a/src/server/hotkey_collector.cpp b/src/server/hotkey_collector.cpp index 6b91b7912e..26ff59dee6 100644 --- a/src/server/hotkey_collector.cpp +++ b/src/server/hotkey_collector.cpp @@ -245,19 +245,19 @@ void hotkey_collector::on_start_detect(dsn::replication::detect_hotkey_response hint = fmt::format("still detecting {} hotkey, state is {}", dsn::enum_to_string(_hotkey_type), enum_to_string(now_state)); - return; + break; case hotkey_collector_state::FINISHED: resp.err = dsn::ERR_BUSY; hint = fmt::format("{} hotkey result has been found: {}, you can send a stop rpc to " "restart hotkey detection", dsn::enum_to_string(_hotkey_type), pegasus::utils::c_escape_string(_result.hot_hash_key)); - return; + break; case hotkey_collector_state::STOPPED: change_state_to_coarse_detecting(); resp.err = dsn::ERR_OK; hint = fmt::format("starting to detect {} hotkey", dsn::enum_to_string(_hotkey_type)); - return; + break; default: hint = "invalid collector state"; resp.err = dsn::ERR_INVALID_STATE;