From 08ce98c662e4372060e1984cc9c5d28172717bc0 Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Tue, 11 Jun 2024 22:14:57 +0800 Subject: [PATCH 01/16] fix: fix a bug of hmget and Zset --- src/cmd_hash.cc | 6 ++++++ src/cmd_zset.cc | 5 ++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/cmd_hash.cc b/src/cmd_hash.cc index 60f2e3b01..6f82c6666 100644 --- a/src/cmd_hash.cc +++ b/src/cmd_hash.cc @@ -7,6 +7,7 @@ #include "cmd_hash.h" #include +#include #include "pstd/pstd_string.h" #include "store.h" @@ -93,6 +94,10 @@ HMSetCmd::HMSetCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryHash) {} bool HMSetCmd::DoInitial(PClient* client) { + if (client->argv_.size() % 2 != 0) { + client->SetRes(CmdRes::kWrongNum, kCmdNameHMSet); + return false; + } client->SetKey(client->argv_[1]); client->ClearFvs(); // set fvs @@ -116,6 +121,7 @@ HMGetCmd::HMGetCmd(const std::string& name, int16_t arity) bool HMGetCmd::DoInitial(PClient* client) { client->SetKey(client->argv_[1]); + client->ClearFields(); for (size_t i = 2; i < client->argv_.size(); ++i) { client->Fields().push_back(client->argv_[i]); } diff --git a/src/cmd_zset.cc b/src/cmd_zset.cc index 50550cf32..def7ba633 100644 --- a/src/cmd_zset.cc +++ b/src/cmd_zset.cc @@ -726,7 +726,10 @@ void ZScoreCmd::DoCmd(PClient* client) { storage::Status s; s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZScore(client->Key(), client->argv_[2], &score); if (s.ok() || s.IsNotFound()) { - client->AppendString(std::to_string(score)); + char buf[32]; + int64_t len = pstd::D2string(buf, sizeof(buf), score); + client->AppendStringLenUint64(len); + client->AppendContent(buf); } else { client->SetRes(CmdRes::kErrOther, s.ToString()); } From 106a216968eb1a79f1a1852a26f6414852541ebc Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Tue, 11 Jun 2024 22:19:33 +0800 Subject: [PATCH 02/16] fix: fix a bug of hmget and Zset --- 
src/cmd_hash.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/src/cmd_hash.cc b/src/cmd_hash.cc index 6f82c6666..507bb2f97 100644 --- a/src/cmd_hash.cc +++ b/src/cmd_hash.cc @@ -7,7 +7,6 @@ #include "cmd_hash.h" #include -#include #include "pstd/pstd_string.h" #include "store.h" From a08635d4f92b0afac0e9ab8504efe045ec0fc8e1 Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Tue, 11 Jun 2024 22:47:04 +0800 Subject: [PATCH 03/16] fix: fix a bug of hmget and Zset --- src/cmd_zset.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd_zset.cc b/src/cmd_zset.cc index def7ba633..a585c5ca7 100644 --- a/src/cmd_zset.cc +++ b/src/cmd_zset.cc @@ -728,7 +728,7 @@ void ZScoreCmd::DoCmd(PClient* client) { if (s.ok() || s.IsNotFound()) { char buf[32]; int64_t len = pstd::D2string(buf, sizeof(buf), score); - client->AppendStringLenUint64(len); + client->AppendStringLen(len); client->AppendContent(buf); } else { client->SetRes(CmdRes::kErrOther, s.ToString()); From cd274a9a35383d14562ec8e3979dbb3b4df6110c Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Thu, 20 Jun 2024 13:59:19 +0800 Subject: [PATCH 04/16] feat: add sort command --- src/base_cmd.h | 1 + src/cmd_admin.cc | 215 +++++++++++++++++++++++++++++++++++++++ src/cmd_admin.h | 20 ++++ src/cmd_set.cc | 6 +- src/cmd_table_manager.cc | 1 + src/cmd_zset.cc | 4 +- 6 files changed, 243 insertions(+), 4 deletions(-) diff --git a/src/base_cmd.h b/src/base_cmd.h index 6bb77dfa2..1ecb7012e 100644 --- a/src/base_cmd.h +++ b/src/base_cmd.h @@ -88,6 +88,7 @@ const std::string kSubCmdNameDebugHelp = "help"; const std::string kSubCmdNameDebugOOM = "oom"; const std::string kSubCmdNameDebugSegfault = "segfault"; const std::string kCmdNameInfo = "info"; +const std::string kCmdNameSort = "sort"; // hash cmd const std::string kCmdNameHSet = "hset"; diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index 24be00f04..d0c3d798b 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -6,10 +6,17 @@ */ 
#include "cmd_admin.h" +#include +#include +#include +#include +#include #include "db.h" + #include "braft/raft.h" #include "rocksdb/version.h" +#include "pstd_string.h" #include "pikiwidb.h" #include "praft/praft.h" @@ -259,4 +266,212 @@ void CmdDebugSegfault::DoCmd(PClient* client) { *ptr = 0; } +SortCmd::SortCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsAdmin | kCmdFlagsWrite, kAclCategoryAdmin) {} + +bool SortCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void SortCmd::DoCmd(PClient* client) { + // const auto& argv = client->argv_; + int desc = 0; + int alpha = 0; + + size_t offset = 0; + size_t count = -1; + + int dontsort = 0; + int vectorlen; + + int getop = 0; + + std::string store_key; + std::string sortby; + + std::vector get_patterns; + size_t argc = client->argv_.size(); + DEBUG("argc: {}", argc); + for (int i = 2; i < argc; ++i) { + // const auto& arg = pstd::StringToLower(argv[i]); + int leftargs = argc - i - 1; + if (strcasecmp(client->argv_[i].data(), "asc") == 0) { + desc = 0; + } else if (strcasecmp(client->argv_[i].data(), "desc") == 0) { + desc = 1; + } else if (strcasecmp(client->argv_[i].data(), "alpha") == 0) { + alpha = 1; + } else if (strcasecmp(client->argv_[i].data(), "limit") == 0 && leftargs >= 2) { + if (pstd::String2int(client->argv_[i + 1], &offset) == 0 || pstd::String2int(client->argv_[i + 2], &count) == 0) { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + i += 2; + } else if (strcasecmp(client->argv_[i].data(), "store") == 0 && leftargs >= 1) { + store_key = client->argv_[i + 1]; + i++; + } else if (strcasecmp(client->argv_[i].data(), "by") == 0 && leftargs >= 1) { + sortby = client->argv_[i + 1]; + if (sortby.find('*') == std::string::npos) { + dontsort = 1; + } + i++; + } else if (strcasecmp(client->argv_[i].data(), "get") == 0 && leftargs >= 1) { + get_patterns.push_back(client->argv_[i + 1]); + getop++; + i++; + } else { + 
client->SetRes(CmdRes::kSyntaxErr); + return; + } + } + + DEBUG("finish parser "); + + std::vector types(1); + rocksdb::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->GetType(client->Key(), true, types); + + if (!s.ok()) { + client->SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + + std::vector ret; + if (types[0] == "list") { + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LRange(client->Key(), 0, -1, &ret); + } else if (types[0] == "set") { + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SMembers(client->Key(), &ret); + } else if (types[0] == "zset") { + std::vector score_members; + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZRange(client->Key(), 0, -1, &score_members); + char buf[32]; + int64_t score_len = 0; + + for (auto& c : score_members) { + ret.emplace_back(c.member); + } + } else { + client->SetRes(CmdRes::kErrOther, "WRONGTYPE Operation against a key holding the wrong kind of value"); + return; + } + DEBUG("finish collect ret "); + + std::vector sort_ret(ret.size()); + for (size_t i = 0; i < ret.size(); ++i) { + sort_ret[i].obj = ret[i]; + } + + if (!dontsort) { + for (size_t i = 0; i < ret.size(); ++i) { + std::string byval; + if (!sortby.empty()) { + auto lookup = lookupKeyByPattern(client, sortby, ret[i]); + if (!lookup.has_value()) { + byval = ret[i]; + } else { + byval = std::move(lookup.value()); + } + } else { + byval = ret[i]; + } + + if (alpha) { + sort_ret[i].u = byval; + } else { + // auto double_byval = pstd::String2d() + double double_byval; + if (pstd::String2d(byval, &double_byval)) { + sort_ret[i].u = double_byval; + } else { + client->SetRes(CmdRes::kErrOther, "One or more scores can't be converted into double"); + return; + } + } + } + + std::sort(sort_ret.begin(), sort_ret.end(), [&alpha, &desc](const RedisSortObject& a, const RedisSortObject& b) { + if (alpha) { + std::string score_a = std::get(a.u); + 
std::string score_b = std::get(b.u); + return !desc ? score_a < score_b : score_a > score_b; + } else { + double score_a = std::get(a.u); + double score_b = std::get(b.u); + return !desc ? score_a < score_b : score_a > score_b; + } + }); + + DEBUG("finish sort ret "); + size_t sort_size = sort_ret.size(); + + count = count >= 0 ? count : sort_size; + offset = (offset >= 0 && offset < sort_size) ? offset : sort_size; + count = (offset + count < sort_size) ? count : sort_size - offset; + + size_t m_start = offset; + size_t m_end = offset + count; + + ret.clear(); + if(get_patterns.empty()){ + get_patterns.emplace_back("#"); + } + + for (; m_start < m_end; m_start++) { + for (const std::string& pattern : get_patterns) { + std::optional val = lookupKeyByPattern(client, pattern, sort_ret[m_start].obj); + if (val.has_value()) { + ret.push_back(val.value()); + } else { + ret.emplace_back(""); + } + } + } + } + + client->AppendStringVector(ret); + + DEBUG("finish print "); + // if(dontsort && types[0] == "set"){ + // dontsort=0; + // alpha=1; + // sortby.clear(); + // } +} + +std::optional SortCmd::lookupKeyByPattern(PClient* client, const std::string& pattern, + const std::string& subst) { + if (pattern == "#") { + return subst; + } + + auto match_pos = pattern.find('*'); + if (match_pos == std::string::npos) { + return std::nullopt; + } + + std::string field; + auto arrow_pos = pattern.find("->", match_pos + 1); + if (arrow_pos != std::string::npos && arrow_pos + 2 < pattern.size()) { + field = pattern.substr(arrow_pos + 2); + } + + std::string key = pattern.substr(0, match_pos + 1); + key.replace(match_pos, 1, subst); + + std::string value; + storage::Status s; + if (!field.empty()) { + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HGet(key, field, &value); + } else { + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Get(key, &value); + } + + if (!s.ok()) { + return std::nullopt; + } + + return value; +} } // namespace pikiwidb diff --git 
a/src/cmd_admin.h b/src/cmd_admin.h index c78164093..b3a7c0ca5 100644 --- a/src/cmd_admin.h +++ b/src/cmd_admin.h @@ -7,6 +7,8 @@ #pragma once +#include +#include #include "base_cmd.h" #include "config.h" @@ -172,4 +174,22 @@ class CmdDebugSegfault : public BaseCmd { void DoCmd(PClient* client) override; }; +class SortCmd : public BaseCmd { + public: + SortCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; + + std::optional lookupKeyByPattern(PClient* client, const std::string& pattern, const std::string& subst); + + struct RedisSortObject { + std::string obj; + std::variant u; + }; +}; + } // namespace pikiwidb diff --git a/src/cmd_set.cc b/src/cmd_set.cc index 8a750dbb7..c2a76cc97 100644 --- a/src/cmd_set.cc +++ b/src/cmd_set.cc @@ -158,11 +158,11 @@ bool SCardCmd::DoInitial(PClient* client) { void SCardCmd::DoCmd(PClient* client) { int32_t reply_Num = 0; storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SCard(client->Key(), &reply_Num); - if (!s.ok()) { - client->SetRes(CmdRes::kSyntaxErr, "scard cmd error"); + if (s.ok() || s.IsNotFound()) { + client->AppendInteger(reply_Num); return; } - client->AppendInteger(reply_Num); + client->SetRes(CmdRes::kSyntaxErr, "scard cmd error"); } SMoveCmd::SMoveCmd(const std::string& name, int16_t arity) diff --git a/src/cmd_table_manager.cc b/src/cmd_table_manager.cc index 5a335465b..c5c667404 100644 --- a/src/cmd_table_manager.cc +++ b/src/cmd_table_manager.cc @@ -57,6 +57,7 @@ void CmdTableManager::InitCmdTable() { ADD_SUBCOMMAND(Debug, Help, 2); ADD_SUBCOMMAND(Debug, OOM, 2); ADD_SUBCOMMAND(Debug, Segfault, 2); + ADD_COMMAND(Sort, -2); // server ADD_COMMAND(Flushdb, 1); diff --git a/src/cmd_zset.cc b/src/cmd_zset.cc index a585c5ca7..9cf7e065c 100644 --- a/src/cmd_zset.cc +++ b/src/cmd_zset.cc @@ -364,7 +364,9 @@ bool ZRangebyscoreCmd::DoInitial(PClient* client) { void 
ZRangebyscoreCmd::DoCmd(PClient* client) { double min_score = 0, max_score = 0; - bool left_close = true, right_close = true, with_scores = false; + bool left_close = true; + bool right_close = true; + bool with_scores = false; int64_t offset = 0, count = -1; int32_t ret = DoScoreStrRange(client->argv_[2], client->argv_[3], &left_close, &right_close, &min_score, &max_score); if (ret == -1) { From 9b9dc711e99902b1abd0cab2a20df6c97c2206d3 Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Thu, 20 Jun 2024 14:03:57 +0800 Subject: [PATCH 05/16] make format --- src/cmd_admin.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index d0c3d798b..b5973031a 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -13,10 +13,9 @@ #include #include "db.h" - #include "braft/raft.h" -#include "rocksdb/version.h" #include "pstd_string.h" +#include "rocksdb/version.h" #include "pikiwidb.h" #include "praft/praft.h" @@ -326,7 +325,7 @@ void SortCmd::DoCmd(PClient* client) { return; } } - + DEBUG("finish parser "); std::vector types(1); @@ -414,7 +413,7 @@ void SortCmd::DoCmd(PClient* client) { size_t m_end = offset + count; ret.clear(); - if(get_patterns.empty()){ + if (get_patterns.empty()) { get_patterns.emplace_back("#"); } From f6a753a536ca8ccdb7c52dc17ece578df1d8aacc Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Fri, 21 Jun 2024 21:40:38 +0800 Subject: [PATCH 06/16] add GO test and fix --- pikiwidb_1718975394960.conf | 348 ++++++++++++++++++++++++++++++++++++ pikiwidb_1718975462493.conf | 348 ++++++++++++++++++++++++++++++++++++ pikiwidb_1718975623554.conf | 348 ++++++++++++++++++++++++++++++++++++ pikiwidb_1718975726491.conf | 348 ++++++++++++++++++++++++++++++++++++ pikiwidb_1718975849526.conf | 348 ++++++++++++++++++++++++++++++++++++ pikiwidb_1718975972556.conf | 348 ++++++++++++++++++++++++++++++++++++ pikiwidb_1718976154209.conf | 348 ++++++++++++++++++++++++++++++++++++ 
pikiwidb_1718976487386.conf | 348 ++++++++++++++++++++++++++++++++++++ src/cmd_admin.cc | 26 +-- tests/admin_test.go | 92 ++++++++++ 10 files changed, 2889 insertions(+), 13 deletions(-) create mode 100644 pikiwidb_1718975394960.conf create mode 100644 pikiwidb_1718975462493.conf create mode 100644 pikiwidb_1718975623554.conf create mode 100644 pikiwidb_1718975726491.conf create mode 100644 pikiwidb_1718975849526.conf create mode 100644 pikiwidb_1718975972556.conf create mode 100644 pikiwidb_1718976154209.conf create mode 100644 pikiwidb_1718976487386.conf diff --git a/pikiwidb_1718975394960.conf b/pikiwidb_1718975394960.conf new file mode 100644 index 000000000..0bf2a08b2 --- /dev/null +++ b/pikiwidb_1718975394960.conf @@ -0,0 +1,348 @@ +# PikiwiDB configuration file example + +# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. +daemonize no + +# Accept connections on the specified port, default is 9221. +# port 0 is not permitted. +port 9221 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +ip 127.0.0.1 + + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# Directory to store the data of PikiwiDB. +db-path /data/pikiwidb/db_1718975394960/db/ + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel warning + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. 
The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +#save 900 1 +#save 300 10 +#save 60000 1000000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in an hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# distater will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. 
+rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. 
+# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted +# By default the size is 1073741824. +# max-client-response-size 1073741824 + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# The slave priority is an integer number published by Redis in the INFO output. 
+# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one wtih priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 # not support yet + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. 
+ +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +maxmemory 999999999999 + +# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default PikiwiDB will check 5 keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. 
+# +maxmemory-samples 5 + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). 
+# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. + +# RocksDB instances number per DB +db-instance-num 3 +# default is 86400 * 7 +small-compaction-threshold 604800 +# default is 86400 * 3 +small-compaction-duration-threshold 259200 + +############################### ROCKSDB CONFIG ############################### +rocksdb-max-subcompactions 2 +rocksdb-max-background-jobs 4 +rocksdb-max-write-buffer-number 2 +rocksdb-min-write-buffer-number-to-merge 2 +# default is 64M +rocksdb-write-buffer-size 67108864 +rocksdb-level0-file-num-compaction-trigger 4 +rocksdb-number-levels 7 +rocksdb-enable-pipelined-write no +rocksdb-level0-slowdown-writes-trigger 20 +rocksdb-level0-stop-writes-trigger 36 +# default 86400 * 7 +rocksdb-ttl-second 604800 +# default 86400 * 3 +rocksdb-periodic-second 259200; + +############################### RAFT ############################### +use-raft no +# Braft relies on brpc to communicate via the default port number plus the port offset +raft-port-offset 10 diff --git a/pikiwidb_1718975462493.conf b/pikiwidb_1718975462493.conf new file mode 
100644 index 000000000..17488111e --- /dev/null +++ b/pikiwidb_1718975462493.conf @@ -0,0 +1,348 @@ +# PikiwiDB configuration file example + +# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. +daemonize no + +# Accept connections on the specified port, default is 9221. +# port 0 is not permitted. +port 9221 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +ip 127.0.0.1 + + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# Directory to store the data of PikiwiDB. +db-path /data/pikiwidb/db_1718975462493/db/ + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel warning + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. 
+# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +#save 900 1 +#save 300 10 +#save 60000 1000000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in an hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# distater will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. 
+# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. 
+# pick the one with priority 10, that is the lowest.
+slave-priority 100 # not support yet + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. 
+# accordingly to the eviction policy selected (see maxmemory-policy).
+# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. 
+rocksdb-periodic-second 259200
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. 
+# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# pick the one with priority 10, that is the lowest.
+# accordingly to the eviction policy selected (see maxmemory-policy).
You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default PikiwiDB will check 5 keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +maxmemory-samples 5 + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. 
+#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. 
+rocksdb-periodic-second 259200
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. 
+# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# pick the one with priority 10, that is the lowest.
+# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +maxmemory 999999999999 + +# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory +# is reached. 
You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default PikiwiDB will check 5 keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +maxmemory-samples 5 + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. 
+#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. 
+
+# RocksDB instances number per DB
+db-instance-num 3
+# default is 86400 * 7
+small-compaction-threshold 604800
+# default is 86400 * 3
+small-compaction-duration-threshold 259200
+
+############################### ROCKSDB CONFIG ###############################
+rocksdb-max-subcompactions 2
+rocksdb-max-background-jobs 4
+rocksdb-max-write-buffer-number 2
+rocksdb-min-write-buffer-number-to-merge 2
+# default is 64M
+rocksdb-write-buffer-size 67108864
+rocksdb-level0-file-num-compaction-trigger 4
+rocksdb-number-levels 7
+rocksdb-enable-pipelined-write no
+rocksdb-level0-slowdown-writes-trigger 20
+rocksdb-level0-stop-writes-trigger 36
+# default 86400 * 7
+rocksdb-ttl-second 604800
+# default 86400 * 3
+rocksdb-periodic-second 259200
+
+############################### RAFT ###############################
+use-raft no
+# Braft relies on brpc to communicate via the default port number plus the port offset
+raft-port-offset 10
diff --git a/pikiwidb_1718975849526.conf b/pikiwidb_1718975849526.conf
new file mode 100644
index 000000000..e13c3d9b4
--- /dev/null
+++ b/pikiwidb_1718975849526.conf
@@ -0,0 +1,348 @@
+# PikiwiDB configuration file example
+
+# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it.
+daemonize no
+
+# Accept connections on the specified port, default is 9221.
+# port 0 is not permitted.
+port 9221
+
+# If you want you can bind a single interface, if the bind option is not
+# specified all the interfaces will listen for incoming connections.
+#
+ip 127.0.0.1
+
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# Directory to store the data of PikiwiDB.
+db-path /data/pikiwidb/db_1718975849526/db/
+
+# Specify the server verbosity level.
+# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel warning + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +#save 900 1 +#save 300 10 +#save 60000 1000000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in an hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# distater will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. 
+# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# +# repl-ping-slave-period 10 + +# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted +# By default the size is 1073741824. +# max-client-response-size 1073741824 + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one wtih priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 # not support yet + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. 
+# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +maxmemory 999999999999 + +# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory +# is reached. 
You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default PikiwiDB will check 5 keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +maxmemory-samples 5 + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. 
+#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. 
+
+# RocksDB instances number per DB
+db-instance-num 3
+# default is 86400 * 7
+small-compaction-threshold 604800
+# default is 86400 * 3
+small-compaction-duration-threshold 259200
+
+############################### ROCKSDB CONFIG ###############################
+rocksdb-max-subcompactions 2
+rocksdb-max-background-jobs 4
+rocksdb-max-write-buffer-number 2
+rocksdb-min-write-buffer-number-to-merge 2
+# default is 64M
+rocksdb-write-buffer-size 67108864
+rocksdb-level0-file-num-compaction-trigger 4
+rocksdb-number-levels 7
+rocksdb-enable-pipelined-write no
+rocksdb-level0-slowdown-writes-trigger 20
+rocksdb-level0-stop-writes-trigger 36
+# default 86400 * 7
+rocksdb-ttl-second 604800
+# default 86400 * 3
+rocksdb-periodic-second 259200
+
+############################### RAFT ###############################
+use-raft no
+# Braft relies on brpc to communicate via the default port number plus the port offset
+raft-port-offset 10
diff --git a/pikiwidb_1718975972556.conf b/pikiwidb_1718975972556.conf
new file mode 100644
index 000000000..fa4dfed3d
--- /dev/null
+++ b/pikiwidb_1718975972556.conf
@@ -0,0 +1,348 @@
+# PikiwiDB configuration file example
+
+# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it.
+daemonize no
+
+# Accept connections on the specified port, default is 9221.
+# port 0 is not permitted.
+port 9221
+
+# If you want you can bind a single interface, if the bind option is not
+# specified all the interfaces will listen for incoming connections.
+#
+ip 127.0.0.1
+
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# Directory to store the data of PikiwiDB.
+db-path /data/pikiwidb/db_1718975972556/db/
+
+# Specify the server verbosity level.
+# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel warning + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +#save 900 1 +#save 300 10 +#save 60000 1000000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in an hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# distater will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. 
+# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# +# repl-ping-slave-period 10 + +# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted +# By default the size is 1073741824. +# max-client-response-size 1073741824 + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one wtih priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 # not support yet + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. 
+# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +maxmemory 999999999999 + +# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory +# is reached. 
You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default PikiwiDB will check 5 keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +maxmemory-samples 5 + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. 
+#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. 
+ +# RocksDB instances number per DB +db-instance-num 3 +# default is 86400 * 7 +small-compaction-threshold 604800 +# default is 86400 * 3 +small-compaction-duration-threshold 259200 + +############################### ROCKSDB CONFIG ############################### +rocksdb-max-subcompactions 2 +rocksdb-max-background-jobs 4 +rocksdb-max-write-buffer-number 2 +rocksdb-min-write-buffer-number-to-merge 2 +# default is 64M +rocksdb-write-buffer-size 67108864 +rocksdb-level0-file-num-compaction-trigger 4 +rocksdb-number-levels 7 +rocksdb-enable-pipelined-write no +rocksdb-level0-slowdown-writes-trigger 20 +rocksdb-level0-stop-writes-trigger 36 +# default 86400 * 7 +rocksdb-ttl-second 604800 +# default 86400 * 3 +rocksdb-periodic-second 259200; + +############################### RAFT ############################### +use-raft no +# Braft relies on brpc to communicate via the default port number plus the port offset +raft-port-offset 10 diff --git a/pikiwidb_1718976154209.conf b/pikiwidb_1718976154209.conf new file mode 100644 index 000000000..42d7364da --- /dev/null +++ b/pikiwidb_1718976154209.conf @@ -0,0 +1,348 @@ +# PikiwiDB configuration file example + +# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. +daemonize no + +# Accept connections on the specified port, default is 9221. +# port 0 is not permitted. +port 9221 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +ip 127.0.0.1 + + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# Directory to store the data of PikiwiDB. +db-path /data/pikiwidb/db_1718976154209/db/ + +# Specify the server verbosity level. 
+# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel warning + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +#save 900 1 +#save 300 10 +#save 60000 1000000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. 
+# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# +# repl-ping-slave-period 10 + +# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted +# By default the size is 1073741824. +# max-client-response-size 1073741824 + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 # not support yet + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. 
+# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +maxmemory 999999999999 + +# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory +# is reached. 
You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default PikiwiDB will check 5 keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +maxmemory-samples 5 + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. 
+#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. 
+ +# RocksDB instances number per DB +db-instance-num 3 +# default is 86400 * 7 +small-compaction-threshold 604800 +# default is 86400 * 3 +small-compaction-duration-threshold 259200 + +############################### ROCKSDB CONFIG ############################### +rocksdb-max-subcompactions 2 +rocksdb-max-background-jobs 4 +rocksdb-max-write-buffer-number 2 +rocksdb-min-write-buffer-number-to-merge 2 +# default is 64M +rocksdb-write-buffer-size 67108864 +rocksdb-level0-file-num-compaction-trigger 4 +rocksdb-number-levels 7 +rocksdb-enable-pipelined-write no +rocksdb-level0-slowdown-writes-trigger 20 +rocksdb-level0-stop-writes-trigger 36 +# default 86400 * 7 +rocksdb-ttl-second 604800 +# default 86400 * 3 +rocksdb-periodic-second 259200; + +############################### RAFT ############################### +use-raft yes +# Braft relies on brpc to communicate via the default port number plus the port offset +raft-port-offset 10 diff --git a/pikiwidb_1718976487386.conf b/pikiwidb_1718976487386.conf new file mode 100644 index 000000000..560f720a2 --- /dev/null +++ b/pikiwidb_1718976487386.conf @@ -0,0 +1,348 @@ +# PikiwiDB configuration file example + +# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. +daemonize no + +# Accept connections on the specified port, default is 9221. +# port 0 is not permitted. +port 9221 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +ip 127.0.0.1 + + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# Directory to store the data of PikiwiDB. +db-path /data/pikiwidb/db_1718976487386/db/ + +# Specify the server verbosity level. 
+# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel warning + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +#save 900 1 +#save 300 10 +#save 60000 1000000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in an hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# distater will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. 
+# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# +# repl-ping-slave-period 10 + +# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted +# By default the size is 1073741824. +# max-client-response-size 1073741824 + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one wtih priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 # not support yet + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. 
+# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +maxmemory 999999999999 + +# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory +# is reached. 
You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default PikiwiDB will check 5 keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +maxmemory-samples 5 + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. 
+#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. 
+ +# RocksDB instances number per DB +db-instance-num 3 +# default is 86400 * 7 +small-compaction-threshold 604800 +# default is 86400 * 3 +small-compaction-duration-threshold 259200 + +############################### ROCKSDB CONFIG ############################### +rocksdb-max-subcompactions 2 +rocksdb-max-background-jobs 4 +rocksdb-max-write-buffer-number 2 +rocksdb-min-write-buffer-number-to-merge 2 +# default is 64M +rocksdb-write-buffer-size 67108864 +rocksdb-level0-file-num-compaction-trigger 4 +rocksdb-number-levels 7 +rocksdb-enable-pipelined-write no +rocksdb-level0-slowdown-writes-trigger 20 +rocksdb-level0-stop-writes-trigger 36 +# default 86400 * 7 +rocksdb-ttl-second 604800 +# default 86400 * 3 +rocksdb-periodic-second 259200; + +############################### RAFT ############################### +use-raft no +# Braft relies on brpc to communicate via the default port number plus the port offset +raft-port-offset 10 diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index 77a6ee06f..01e1afe49 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -290,7 +290,7 @@ void SortCmd::DoCmd(PClient* client) { std::vector get_patterns; size_t argc = client->argv_.size(); - DEBUG("argc: {}", argc); + for (int i = 2; i < argc; ++i) { // const auto& arg = pstd::StringToLower(argv[i]); int leftargs = argc - i - 1; @@ -325,8 +325,6 @@ void SortCmd::DoCmd(PClient* client) { } } - DEBUG("finish parser "); - std::vector types(1); rocksdb::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->GetType(client->Key(), true, types); @@ -354,7 +352,6 @@ void SortCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kErrOther, "WRONGTYPE Operation against a key holding the wrong kind of value"); return; } - DEBUG("finish collect ret "); std::vector sort_ret(ret.size()); for (size_t i = 0; i < ret.size(); ++i) { @@ -401,7 +398,6 @@ void SortCmd::DoCmd(PClient* client) { } }); - DEBUG("finish sort ret "); size_t sort_size = sort_ret.size(); count = count >= 0 ? 
count : sort_size; @@ -428,14 +424,18 @@ void SortCmd::DoCmd(PClient* client) { } } - client->AppendStringVector(ret); - - DEBUG("finish print "); - // if(dontsort && types[0] == "set"){ - // dontsort=0; - // alpha=1; - // sortby.clear(); - // } + if (store_key.empty()) { + client->AppendStringVector(ret); + } else { + // std::vector list_values(client->argv_.begin() + 2, client->argv_.end()); + uint64_t reply_num = 0; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->RPush(store_key, ret, &reply_num); + if (s.ok()) { + client->AppendInteger(reply_num); + } else { + client->SetRes(CmdRes::kSyntaxErr, "rpush cmd error"); + } + } } std::optional SortCmd::lookupKeyByPattern(PClient* client, const std::string& pattern, diff --git a/tests/admin_test.go b/tests/admin_test.go index 8d8713c31..05e463b78 100644 --- a/tests/admin_test.go +++ b/tests/admin_test.go @@ -159,4 +159,96 @@ var _ = Describe("Admin", Ordered, func() { // Expect(res.Err()).NotTo(HaveOccurred()) // Expect(res.Val()).To(Equal(map[string]string{"timeout": "0"})) }) + + It("Cmd Sort", func() { + size, err := client.LPush(ctx, "list", "1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(1))) + + size, err = client.LPush(ctx, "list", "3").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(2))) + + size, err = client.LPush(ctx, "list", "2").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(3))) + + els, err := client.Sort(ctx, "list", &redis.Sort{ + Offset: 0, + Count: 2, + Order: "ASC", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(els).To(Equal([]string{"1", "2"})) + + del := client.Del(ctx, "list") + Expect(del.Err()).NotTo(HaveOccurred()) + }) + + It("should Sort and Get", Label("NonRedisEnterprise"), func() { + size, err := client.LPush(ctx, "list", "1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(1))) + + size, err = client.LPush(ctx, "list", 
"3").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(2))) + + size, err = client.LPush(ctx, "list", "2").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(3))) + + err = client.Set(ctx, "object_2", "value2", 0).Err() + Expect(err).NotTo(HaveOccurred()) + + { + els, err := client.Sort(ctx, "list", &redis.Sort{ + Get: []string{"object_*"}, + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(els).To(Equal([]string{"", "value2", ""})) + } + + { + els, err := client.SortInterfaces(ctx, "list", &redis.Sort{ + Get: []string{"object_*"}, + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(els).To(Equal([]interface{}{nil, "value2", nil})) + } + del := client.Del(ctx, "list") + Expect(del.Err()).NotTo(HaveOccurred()) + }) + + It("should Sort and Store", Label("NonRedisEnterprise"), func() { + size, err := client.LPush(ctx, "list", "1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(1))) + + size, err = client.LPush(ctx, "list", "3").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(2))) + + size, err = client.LPush(ctx, "list", "2").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(3))) + + n, err := client.SortStore(ctx, "list", "list2", &redis.Sort{ + Offset: 0, + Count: 2, + Order: "ASC", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(n).To(Equal(int64(2))) + + els, err := client.LRange(ctx, "list2", 0, -1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(els).To(Equal([]string{"1", "2"})) + + del := client.Del(ctx, "list") + Expect(del.Err()).NotTo(HaveOccurred()) + + del2 := client.Del(ctx, "list2") + Expect(del2.Err()).NotTo(HaveOccurred()) + }) }) From f086ab3a04dd3b7bfc1d2ceb3f22dca642e4b8df Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Fri, 21 Jun 2024 21:42:12 +0800 Subject: [PATCH 07/16] add GO test and fix --- pikiwidb_1718975394960.conf | 348 ------------------------------------ 
pikiwidb_1718975462493.conf | 348 ------------------------------------ pikiwidb_1718975623554.conf | 348 ------------------------------------ pikiwidb_1718975726491.conf | 348 ------------------------------------ pikiwidb_1718975849526.conf | 348 ------------------------------------ pikiwidb_1718975972556.conf | 348 ------------------------------------ pikiwidb_1718976154209.conf | 348 ------------------------------------ pikiwidb_1718976487386.conf | 348 ------------------------------------ 8 files changed, 2784 deletions(-) delete mode 100644 pikiwidb_1718975394960.conf delete mode 100644 pikiwidb_1718975462493.conf delete mode 100644 pikiwidb_1718975623554.conf delete mode 100644 pikiwidb_1718975726491.conf delete mode 100644 pikiwidb_1718975849526.conf delete mode 100644 pikiwidb_1718975972556.conf delete mode 100644 pikiwidb_1718976154209.conf delete mode 100644 pikiwidb_1718976487386.conf diff --git a/pikiwidb_1718975394960.conf b/pikiwidb_1718975394960.conf deleted file mode 100644 index 0bf2a08b2..000000000 --- a/pikiwidb_1718975394960.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1718975394960/db/ - -# Specify the server verbosity level. 
-# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. 
-# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. -# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. 
-# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft no -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 diff --git a/pikiwidb_1718975462493.conf b/pikiwidb_1718975462493.conf deleted file mode 100644 index 17488111e..000000000 --- a/pikiwidb_1718975462493.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1718975462493/db/ - -# Specify the server verbosity level. 
-# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. 
-# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. -# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. 
-# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft no -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 diff --git a/pikiwidb_1718975623554.conf b/pikiwidb_1718975623554.conf deleted file mode 100644 index 95f8f889a..000000000 --- a/pikiwidb_1718975623554.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1718975623554/db/ - -# Specify the server verbosity level. 
-# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. 
-# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. -# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. 
-# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft no -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 diff --git a/pikiwidb_1718975726491.conf b/pikiwidb_1718975726491.conf deleted file mode 100644 index 7234f29b2..000000000 --- a/pikiwidb_1718975726491.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1718975726491/db/ - -# Specify the server verbosity level. 
-# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. 
-# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. -# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. 
-# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft no -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 diff --git a/pikiwidb_1718975849526.conf b/pikiwidb_1718975849526.conf deleted file mode 100644 index e13c3d9b4..000000000 --- a/pikiwidb_1718975849526.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1718975849526/db/ - -# Specify the server verbosity level. 
-# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. 
-# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. -# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. 
-# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft no -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 diff --git a/pikiwidb_1718975972556.conf b/pikiwidb_1718975972556.conf deleted file mode 100644 index fa4dfed3d..000000000 --- a/pikiwidb_1718975972556.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1718975972556/db/ - -# Specify the server verbosity level. 
-# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. 
-# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. -# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. 
-# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft no -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 diff --git a/pikiwidb_1718976154209.conf b/pikiwidb_1718976154209.conf deleted file mode 100644 index 42d7364da..000000000 --- a/pikiwidb_1718976154209.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1718976154209/db/ - -# Specify the server verbosity level. 
-# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. 
-# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. -# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. 
-# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft yes -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 diff --git a/pikiwidb_1718976487386.conf b/pikiwidb_1718976487386.conf deleted file mode 100644 index 560f720a2..000000000 --- a/pikiwidb_1718976487386.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1718976487386/db/ - -# Specify the server verbosity level. 
-# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. 
-# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. -# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. 
-# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft no -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 From bbe662441e035375006e011fab5d691393c143d9 Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Sat, 22 Jun 2024 21:09:22 +0800 Subject: [PATCH 08/16] change timeout --- .github/workflows/pikiwidb.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pikiwidb.yml b/.github/workflows/pikiwidb.yml index fb3f5ffe3..282f898e6 100644 --- a/.github/workflows/pikiwidb.yml +++ b/.github/workflows/pikiwidb.yml @@ -43,7 +43,7 @@ jobs: run: | cd ../tests go mod tidy - go test + go test -timeout 15m build_on_ubuntu: runs-on: ubuntu-latest @@ -67,4 +67,4 @@ jobs: run: | cd ../tests go mod tidy - go test \ No newline at end of file + go test -timeout 15m \ No newline at end of file From b064f70876b8a9db0abfca5c15f079c8cf0fb23f Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Sun, 30 Jun 2024 14:09:53 +0800 Subject: [PATCH 09/16] Delete unnecessary comments --- src/cmd_admin.cc | 4 ---- src/cmd_zset.cc | 4 +--- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/src/cmd_admin.cc 
b/src/cmd_admin.cc index 01e1afe49..e9e7ba796 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -273,7 +273,6 @@ bool SortCmd::DoInitial(PClient* client) { } void SortCmd::DoCmd(PClient* client) { - // const auto& argv = client->argv_; int desc = 0; int alpha = 0; @@ -292,7 +291,6 @@ void SortCmd::DoCmd(PClient* client) { size_t argc = client->argv_.size(); for (int i = 2; i < argc; ++i) { - // const auto& arg = pstd::StringToLower(argv[i]); int leftargs = argc - i - 1; if (strcasecmp(client->argv_[i].data(), "asc") == 0) { desc = 0; @@ -375,7 +373,6 @@ void SortCmd::DoCmd(PClient* client) { if (alpha) { sort_ret[i].u = byval; } else { - // auto double_byval = pstd::String2d() double double_byval; if (pstd::String2d(byval, &double_byval)) { sort_ret[i].u = double_byval; @@ -427,7 +424,6 @@ void SortCmd::DoCmd(PClient* client) { if (store_key.empty()) { client->AppendStringVector(ret); } else { - // std::vector list_values(client->argv_.begin() + 2, client->argv_.end()); uint64_t reply_num = 0; storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->RPush(store_key, ret, &reply_num); if (s.ok()) { diff --git a/src/cmd_zset.cc b/src/cmd_zset.cc index 9cf7e065c..a585c5ca7 100644 --- a/src/cmd_zset.cc +++ b/src/cmd_zset.cc @@ -364,9 +364,7 @@ bool ZRangebyscoreCmd::DoInitial(PClient* client) { void ZRangebyscoreCmd::DoCmd(PClient* client) { double min_score = 0, max_score = 0; - bool left_close = true; - bool right_close = true; - bool with_scores = false; + bool left_close = true, right_close = true, with_scores = false; int64_t offset = 0, count = -1; int32_t ret = DoScoreStrRange(client->argv_[2], client->argv_[3], &left_close, &right_close, &min_score, &max_score); if (ret == -1) { From 990d4bdf8b91890ccd776e0aded84af4bf3e0608 Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Sun, 30 Jun 2024 15:23:05 +0800 Subject: [PATCH 10/16] change max open files --- .github/workflows/pikiwidb.yml | 1 + 1 file changed, 1 insertion(+) diff 
--git a/.github/workflows/pikiwidb.yml b/.github/workflows/pikiwidb.yml index 282f898e6..6f85bab7a 100644 --- a/.github/workflows/pikiwidb.yml +++ b/.github/workflows/pikiwidb.yml @@ -43,6 +43,7 @@ jobs: run: | cd ../tests go mod tidy + ulimit -n 4096 go test -timeout 15m build_on_ubuntu: From 789bbac0fb968874b99f14b9eaa7b117a81cbe43 Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Sun, 30 Jun 2024 16:00:06 +0800 Subject: [PATCH 11/16] change max open files --- .github/workflows/pikiwidb.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/pikiwidb.yml b/.github/workflows/pikiwidb.yml index 6f85bab7a..282f898e6 100644 --- a/.github/workflows/pikiwidb.yml +++ b/.github/workflows/pikiwidb.yml @@ -43,7 +43,6 @@ jobs: run: | cd ../tests go mod tidy - ulimit -n 4096 go test -timeout 15m build_on_ubuntu: From ea468664daabd8bae86e60f5e983201cce38bccf Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Thu, 4 Jul 2024 20:34:42 +0800 Subject: [PATCH 12/16] remove unused variables and move parser func to Doinitial --- pikiwidb_1720083379784.conf | 348 ++++++++++++++++++++++++++++++++++++ pikiwidb_1720083904405.conf | 348 ++++++++++++++++++++++++++++++++++++ src/cmd_admin.cc | 91 +++++----- src/cmd_admin.h | 11 ++ 4 files changed, 751 insertions(+), 47 deletions(-) create mode 100644 pikiwidb_1720083379784.conf create mode 100644 pikiwidb_1720083904405.conf diff --git a/pikiwidb_1720083379784.conf b/pikiwidb_1720083379784.conf new file mode 100644 index 000000000..548422fa7 --- /dev/null +++ b/pikiwidb_1720083379784.conf @@ -0,0 +1,348 @@ +# PikiwiDB configuration file example + +# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. +daemonize no + +# Accept connections on the specified port, default is 9221. +# port 0 is not permitted. +port 9221 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. 
+# +ip 127.0.0.1 + + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# Directory to store the data of PikiwiDB. +db-path /data/pikiwidb/db_1720083379784/db/ + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel warning + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +#save 900 1 +#save 300 10 +#save 60000 1000000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. 
+# This will make the user aware (in an hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# distater will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. 
Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. 
+slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted +# By default the size is 1073741824. +# max-client-response-size 1073741824 + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one wtih priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 # not support yet + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). 
+# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. 
+# +maxmemory 999999999999 + +# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default PikiwiDB will check 5 keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +maxmemory-samples 5 + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. 
+#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. 
+ +# RocksDB instances number per DB +db-instance-num 3 +# default is 86400 * 7 +small-compaction-threshold 604800 +# default is 86400 * 3 +small-compaction-duration-threshold 259200 + +############################### ROCKSDB CONFIG ############################### +rocksdb-max-subcompactions 2 +rocksdb-max-background-jobs 4 +rocksdb-max-write-buffer-number 2 +rocksdb-min-write-buffer-number-to-merge 2 +# default is 64M +rocksdb-write-buffer-size 67108864 +rocksdb-level0-file-num-compaction-trigger 4 +rocksdb-number-levels 7 +rocksdb-enable-pipelined-write no +rocksdb-level0-slowdown-writes-trigger 20 +rocksdb-level0-stop-writes-trigger 36 +# default 86400 * 7 +rocksdb-ttl-second 604800 +# default 86400 * 3 +rocksdb-periodic-second 259200; + +############################### RAFT ############################### +use-raft no +# Braft relies on brpc to communicate via the default port number plus the port offset +raft-port-offset 10 diff --git a/pikiwidb_1720083904405.conf b/pikiwidb_1720083904405.conf new file mode 100644 index 000000000..042ff3dce --- /dev/null +++ b/pikiwidb_1720083904405.conf @@ -0,0 +1,348 @@ +# PikiwiDB configuration file example + +# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. +daemonize no + +# Accept connections on the specified port, default is 9221. +# port 0 is not permitted. +port 9221 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +ip 127.0.0.1 + + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# Directory to store the data of PikiwiDB. +db-path /data/pikiwidb/db_1720083904405/db/ + +# Specify the server verbosity level. 
+# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel warning + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +#save 900 1 +#save 300 10 +#save 60000 1000000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in an hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# distater will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. 
+# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# +# repl-ping-slave-period 10 + +# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted +# By default the size is 1073741824. +# max-client-response-size 1073741824 + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one wtih priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 # not support yet + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. 
+# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +maxmemory 999999999999 + +# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory +# is reached. 
You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default PikiwiDB will check 5 keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +maxmemory-samples 5 + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. 
+#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +############################### BACKENDS CONFIG ############################### +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. 
+ +# RocksDB instances number per DB +db-instance-num 3 +# default is 86400 * 7 +small-compaction-threshold 604800 +# default is 86400 * 3 +small-compaction-duration-threshold 259200 + +############################### ROCKSDB CONFIG ############################### +rocksdb-max-subcompactions 2 +rocksdb-max-background-jobs 4 +rocksdb-max-write-buffer-number 2 +rocksdb-min-write-buffer-number-to-merge 2 +# default is 64M +rocksdb-write-buffer-size 67108864 +rocksdb-level0-file-num-compaction-trigger 4 +rocksdb-number-levels 7 +rocksdb-enable-pipelined-write no +rocksdb-level0-slowdown-writes-trigger 20 +rocksdb-level0-stop-writes-trigger 36 +# default 86400 * 7 +rocksdb-ttl-second 604800 +# default 86400 * 3 +rocksdb-periodic-second 259200; + +############################### RAFT ############################### +use-raft no +# Braft relies on brpc to communicate via the default port number plus the port offset +raft-port-offset 10 diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index e9e7ba796..01294353a 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -268,28 +268,9 @@ SortCmd::SortCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsAdmin | kCmdFlagsWrite, kAclCategoryAdmin) {} bool SortCmd::DoInitial(PClient* client) { + InitialArgument(); client->SetKey(client->argv_[1]); - return true; -} - -void SortCmd::DoCmd(PClient* client) { - int desc = 0; - int alpha = 0; - - size_t offset = 0; - size_t count = -1; - - int dontsort = 0; - int vectorlen; - - int getop = 0; - - std::string store_key; - std::string sortby; - - std::vector get_patterns; size_t argc = client->argv_.size(); - for (int i = 2; i < argc; ++i) { int leftargs = argc - i - 1; if (strcasecmp(client->argv_[i].data(), "asc") == 0) { @@ -301,7 +282,7 @@ void SortCmd::DoCmd(PClient* client) { } else if (strcasecmp(client->argv_[i].data(), "limit") == 0 && leftargs >= 2) { if (pstd::String2int(client->argv_[i + 1], &offset) == 0 || pstd::String2int(client->argv_[i + 2], 
&count) == 0) { client->SetRes(CmdRes::kSyntaxErr); - return; + return false; } i += 2; } else if (strcasecmp(client->argv_[i].data(), "store") == 0 && leftargs >= 1) { @@ -315,42 +296,46 @@ void SortCmd::DoCmd(PClient* client) { i++; } else if (strcasecmp(client->argv_[i].data(), "get") == 0 && leftargs >= 1) { get_patterns.push_back(client->argv_[i + 1]); - getop++; i++; } else { client->SetRes(CmdRes::kSyntaxErr); - return; + return false; } } - std::vector types(1); - rocksdb::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->GetType(client->Key(), true, types); - - if (!s.ok()) { + Status s; + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LRange(client->Key(), 0, -1, &ret); + if (s.ok()) { + return true; + } else if (!s.IsNotFound()) { client->SetRes(CmdRes::kErrOther, s.ToString()); - return; + return false; } - std::vector ret; - if (types[0] == "list") { - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LRange(client->Key(), 0, -1, &ret); - } else if (types[0] == "set") { - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SMembers(client->Key(), &ret); - } else if (types[0] == "zset") { - std::vector score_members; - storage::Status s = - PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZRange(client->Key(), 0, -1, &score_members); - char buf[32]; - int64_t score_len = 0; + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SMembers(client->Key(), &ret); + if (s.ok()) { + return true; + } else if (!s.IsNotFound()) { + client->SetRes(CmdRes::kErrOther, s.ToString()); + return false; + } + std::vector score_members; + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZRange(client->Key(), 0, -1, &score_members); + if (s.ok()) { for (auto& c : score_members) { ret.emplace_back(c.member); } - } else { - client->SetRes(CmdRes::kErrOther, "WRONGTYPE Operation against a key holding the wrong kind of value"); - return; + return true; + } else if 
(!s.IsNotFound()) { + client->SetRes(CmdRes::kErrOther, s.ToString()); + return false; } + client->SetRes(CmdRes::kErrOther, "Unknown Type"); + return false; +} +void SortCmd::DoCmd(PClient* client) { std::vector sort_ret(ret.size()); for (size_t i = 0; i < ret.size(); ++i) { sort_ret[i].obj = ret[i]; @@ -383,15 +368,15 @@ void SortCmd::DoCmd(PClient* client) { } } - std::sort(sort_ret.begin(), sort_ret.end(), [&alpha, &desc](const RedisSortObject& a, const RedisSortObject& b) { - if (alpha) { + std::sort(sort_ret.begin(), sort_ret.end(), [this](const RedisSortObject& a, const RedisSortObject& b) { + if (this->alpha) { std::string score_a = std::get(a.u); std::string score_b = std::get(b.u); - return !desc ? score_a < score_b : score_a > score_b; + return !this->desc ? score_a < score_b : score_a > score_b; } else { double score_a = std::get(a.u); double score_b = std::get(b.u); - return !desc ? score_a < score_b : score_a > score_b; + return !this->desc ? score_a < score_b : score_a > score_b; } }); @@ -429,7 +414,7 @@ void SortCmd::DoCmd(PClient* client) { if (s.ok()) { client->AppendInteger(reply_num); } else { - client->SetRes(CmdRes::kSyntaxErr, "rpush cmd error"); + client->SetRes(CmdRes::kErrOther, s.ToString()); } } } @@ -468,4 +453,16 @@ std::optional SortCmd::lookupKeyByPattern(PClient* client, const st return value; } + +void SortCmd::InitialArgument(){ + desc = 0; + alpha = 0; + offset = 0; + count = -1; + dontsort = 0; + store_key.clear(); + sortby.clear(); + get_patterns.clear(); + ret.clear(); +} } // namespace pikiwidb diff --git a/src/cmd_admin.h b/src/cmd_admin.h index b3a7c0ca5..4ed907cc9 100644 --- a/src/cmd_admin.h +++ b/src/cmd_admin.h @@ -184,12 +184,23 @@ class SortCmd : public BaseCmd { private: void DoCmd(PClient* client) override; + void InitialArgument(); std::optional lookupKeyByPattern(PClient* client, const std::string& pattern, const std::string& subst); struct RedisSortObject { std::string obj; std::variant u; }; + + int desc = 0; + 
int alpha = 0; + size_t offset = 0; + size_t count = -1; + int dontsort = 0; + std::string store_key; + std::string sortby; + std::vector get_patterns; + std::vector ret; }; } // namespace pikiwidb From af9a8ce56070ef9d81b604d47561f1ce35e35182 Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Thu, 4 Jul 2024 20:35:00 +0800 Subject: [PATCH 13/16] remove unused variables and move parser func to Doinitial --- pikiwidb_1720083379784.conf | 348 ------------------------------------ pikiwidb_1720083904405.conf | 348 ------------------------------------ 2 files changed, 696 deletions(-) delete mode 100644 pikiwidb_1720083379784.conf delete mode 100644 pikiwidb_1720083904405.conf diff --git a/pikiwidb_1720083379784.conf b/pikiwidb_1720083379784.conf deleted file mode 100644 index 548422fa7..000000000 --- a/pikiwidb_1720083379784.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1720083379784/db/ - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. 
Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. 
-# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. -# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. 
-# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. 
-# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. 
For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft no -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 diff --git a/pikiwidb_1720083904405.conf b/pikiwidb_1720083904405.conf deleted file mode 100644 index 042ff3dce..000000000 --- a/pikiwidb_1720083904405.conf +++ /dev/null @@ -1,348 +0,0 @@ -# PikiwiDB configuration file example - -# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. -daemonize no - -# Accept connections on the specified port, default is 9221. -# port 0 is not permitted. -port 9221 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -ip 127.0.0.1 - - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# Directory to store the data of PikiwiDB. -db-path /data/pikiwidb/db_1720083904405/db/ - -# Specify the server verbosity level. 
-# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile stdout - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60000 1000000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes # not support - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes # PikiwiDB always use compression for rdb file - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes # PikiwiDB always check sum for rdb file - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. 
-# -# slaveof -# slaveof 127.0.0.1 6379 - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth foobar - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -# slave-serve-stale-data yes # not support yet - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes # PikiwiDB always set slave read only - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted -# By default the size is 1073741824. -# max-client-response-size 1073741824 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 # not support yet - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. 
-# -#requirepass foobar - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -maxmemory 999999999999 - -# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory -# is reached. 
You can select among five behaviors: -# - -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# noeviction -> don't expire at all, just return an error on write operations -# The default is: -# -maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default PikiwiDB will check 5 keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -maxmemory-samples 5 - -################################ THREADED I/O ################################# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. -# -worker-threads 2 -slave-threads 2 - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. 
-#lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -############################### BACKENDS CONFIG ############################### -# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging -# to the same DB is distributed among several RocksDB instances. 
- -# RocksDB instances number per DB -db-instance-num 3 -# default is 86400 * 7 -small-compaction-threshold 604800 -# default is 86400 * 3 -small-compaction-duration-threshold 259200 - -############################### ROCKSDB CONFIG ############################### -rocksdb-max-subcompactions 2 -rocksdb-max-background-jobs 4 -rocksdb-max-write-buffer-number 2 -rocksdb-min-write-buffer-number-to-merge 2 -# default is 64M -rocksdb-write-buffer-size 67108864 -rocksdb-level0-file-num-compaction-trigger 4 -rocksdb-number-levels 7 -rocksdb-enable-pipelined-write no -rocksdb-level0-slowdown-writes-trigger 20 -rocksdb-level0-stop-writes-trigger 36 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; - -############################### RAFT ############################### -use-raft no -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 From ccbf94cfb677a68823981ed15e4a485d3499e918 Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Thu, 4 Jul 2024 20:38:08 +0800 Subject: [PATCH 14/16] make format --- src/cmd_admin.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index 01294353a..5f9b788e3 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -454,7 +454,7 @@ std::optional SortCmd::lookupKeyByPattern(PClient* client, const st return value; } -void SortCmd::InitialArgument(){ +void SortCmd::InitialArgument() { desc = 0; alpha = 0; offset = 0; From 648c6bd47db0d6a55f254038e04f743e4843ad4e Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Wed, 10 Jul 2024 22:18:58 +0800 Subject: [PATCH 15/16] fix style privte variable name --- src/cmd_admin.cc | 96 ++++++++++++++++++++++++------------------------ src/cmd_admin.h | 18 ++++----- 2 files changed, 57 insertions(+), 57 deletions(-) diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index 5f9b788e3..9b366ead8 100644 --- a/src/cmd_admin.cc +++ 
b/src/cmd_admin.cc @@ -274,28 +274,28 @@ bool SortCmd::DoInitial(PClient* client) { for (int i = 2; i < argc; ++i) { int leftargs = argc - i - 1; if (strcasecmp(client->argv_[i].data(), "asc") == 0) { - desc = 0; + desc_ = 0; } else if (strcasecmp(client->argv_[i].data(), "desc") == 0) { - desc = 1; + desc_ = 1; } else if (strcasecmp(client->argv_[i].data(), "alpha") == 0) { - alpha = 1; + alpha_ = 1; } else if (strcasecmp(client->argv_[i].data(), "limit") == 0 && leftargs >= 2) { - if (pstd::String2int(client->argv_[i + 1], &offset) == 0 || pstd::String2int(client->argv_[i + 2], &count) == 0) { + if (pstd::String2int(client->argv_[i + 1], &offset_) == 0 || pstd::String2int(client->argv_[i + 2], &count_) == 0) { client->SetRes(CmdRes::kSyntaxErr); return false; } i += 2; } else if (strcasecmp(client->argv_[i].data(), "store") == 0 && leftargs >= 1) { - store_key = client->argv_[i + 1]; + store_key_ = client->argv_[i + 1]; i++; } else if (strcasecmp(client->argv_[i].data(), "by") == 0 && leftargs >= 1) { - sortby = client->argv_[i + 1]; - if (sortby.find('*') == std::string::npos) { - dontsort = 1; + sortby_ = client->argv_[i + 1]; + if (sortby_.find('*') == std::string::npos) { + dontsort_ = 1; } i++; } else if (strcasecmp(client->argv_[i].data(), "get") == 0 && leftargs >= 1) { - get_patterns.push_back(client->argv_[i + 1]); + get_patterns_.push_back(client->argv_[i + 1]); i++; } else { client->SetRes(CmdRes::kSyntaxErr); @@ -304,7 +304,7 @@ bool SortCmd::DoInitial(PClient* client) { } Status s; - s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LRange(client->Key(), 0, -1, &ret); + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LRange(client->Key(), 0, -1, &ret_); if (s.ok()) { return true; } else if (!s.IsNotFound()) { @@ -312,7 +312,7 @@ bool SortCmd::DoInitial(PClient* client) { return false; } - s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SMembers(client->Key(), &ret); + s = 
PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SMembers(client->Key(), &ret_); if (s.ok()) { return true; } else if (!s.IsNotFound()) { @@ -324,7 +324,7 @@ bool SortCmd::DoInitial(PClient* client) { s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZRange(client->Key(), 0, -1, &score_members); if (s.ok()) { for (auto& c : score_members) { - ret.emplace_back(c.member); + ret_.emplace_back(c.member); } return true; } else if (!s.IsNotFound()) { @@ -336,26 +336,26 @@ bool SortCmd::DoInitial(PClient* client) { } void SortCmd::DoCmd(PClient* client) { - std::vector sort_ret(ret.size()); - for (size_t i = 0; i < ret.size(); ++i) { - sort_ret[i].obj = ret[i]; + std::vector sort_ret(ret_.size()); + for (size_t i = 0; i < ret_.size(); ++i) { + sort_ret[i].obj = ret_[i]; } - if (!dontsort) { - for (size_t i = 0; i < ret.size(); ++i) { + if (!dontsort_) { + for (size_t i = 0; i < ret_.size(); ++i) { std::string byval; - if (!sortby.empty()) { - auto lookup = lookupKeyByPattern(client, sortby, ret[i]); + if (!sortby_.empty()) { + auto lookup = lookupKeyByPattern(client, sortby_, ret_[i]); if (!lookup.has_value()) { - byval = ret[i]; + byval = ret_[i]; } else { byval = std::move(lookup.value()); } } else { - byval = ret[i]; + byval = ret_[i]; } - if (alpha) { + if (alpha_) { sort_ret[i].u = byval; } else { double double_byval; @@ -369,48 +369,48 @@ void SortCmd::DoCmd(PClient* client) { } std::sort(sort_ret.begin(), sort_ret.end(), [this](const RedisSortObject& a, const RedisSortObject& b) { - if (this->alpha) { + if (this->alpha_) { std::string score_a = std::get(a.u); std::string score_b = std::get(b.u); - return !this->desc ? score_a < score_b : score_a > score_b; + return !this->desc_ ? score_a < score_b : score_a > score_b; } else { double score_a = std::get(a.u); double score_b = std::get(b.u); - return !this->desc ? score_a < score_b : score_a > score_b; + return !this->desc_ ? 
score_a < score_b : score_a > score_b; } }); size_t sort_size = sort_ret.size(); - count = count >= 0 ? count : sort_size; - offset = (offset >= 0 && offset < sort_size) ? offset : sort_size; - count = (offset + count < sort_size) ? count : sort_size - offset; + count_ = count_ >= 0 ? count_ : sort_size; + offset_ = (offset_ >= 0 && offset_ < sort_size) ? offset_ : sort_size; + count_ = (offset_ + count_ < sort_size) ? count_ : sort_size - offset_; - size_t m_start = offset; - size_t m_end = offset + count; + size_t m_start = offset_; + size_t m_end = offset_ + count_; - ret.clear(); - if (get_patterns.empty()) { - get_patterns.emplace_back("#"); + ret_.clear(); + if (get_patterns_.empty()) { + get_patterns_.emplace_back("#"); } for (; m_start < m_end; m_start++) { - for (const std::string& pattern : get_patterns) { + for (const std::string& pattern : get_patterns_) { std::optional val = lookupKeyByPattern(client, pattern, sort_ret[m_start].obj); if (val.has_value()) { - ret.push_back(val.value()); + ret_.push_back(val.value()); } else { - ret.emplace_back(""); + ret_.emplace_back(""); } } } } - if (store_key.empty()) { - client->AppendStringVector(ret); + if (store_key_.empty()) { + client->AppendStringVector(ret_); } else { uint64_t reply_num = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->RPush(store_key, ret, &reply_num); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->RPush(store_key_, ret_, &reply_num); if (s.ok()) { client->AppendInteger(reply_num); } else { @@ -455,14 +455,14 @@ std::optional SortCmd::lookupKeyByPattern(PClient* client, const st } void SortCmd::InitialArgument() { - desc = 0; - alpha = 0; - offset = 0; - count = -1; - dontsort = 0; - store_key.clear(); - sortby.clear(); - get_patterns.clear(); - ret.clear(); + desc_ = 0; + alpha_ = 0; + offset_ = 0; + count_ = -1; + dontsort_ = 0; + store_key_.clear(); + sortby_.clear(); + get_patterns_.clear(); + ret_.clear(); } } // 
namespace pikiwidb diff --git a/src/cmd_admin.h b/src/cmd_admin.h index 4ed907cc9..7c6eaa610 100644 --- a/src/cmd_admin.h +++ b/src/cmd_admin.h @@ -192,15 +192,15 @@ class SortCmd : public BaseCmd { std::variant u; }; - int desc = 0; - int alpha = 0; - size_t offset = 0; - size_t count = -1; - int dontsort = 0; - std::string store_key; - std::string sortby; - std::vector get_patterns; - std::vector ret; + int desc_ = 0; + int alpha_ = 0; + size_t offset_ = 0; + size_t count_ = -1; + int dontsort_ = 0; + std::string store_key_; + std::string sortby_; + std::vector get_patterns_; + std::vector ret_; }; } // namespace pikiwidb From a01c008bc1adfb6dea76b9a7448710ec026461db Mon Sep 17 00:00:00 2001 From: zhy <1774657235@qq.com> Date: Wed, 10 Jul 2024 23:24:05 +0800 Subject: [PATCH 16/16] make format --- src/cmd_admin.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index 9b366ead8..41604d3d5 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -280,7 +280,8 @@ bool SortCmd::DoInitial(PClient* client) { } else if (strcasecmp(client->argv_[i].data(), "alpha") == 0) { alpha_ = 1; } else if (strcasecmp(client->argv_[i].data(), "limit") == 0 && leftargs >= 2) { - if (pstd::String2int(client->argv_[i + 1], &offset_) == 0 || pstd::String2int(client->argv_[i + 2], &count_) == 0) { + if (pstd::String2int(client->argv_[i + 1], &offset_) == 0 || + pstd::String2int(client->argv_[i + 2], &count_) == 0) { client->SetRes(CmdRes::kSyntaxErr); return false; }