From af9a8ce56070ef9d81b604d47561f1ce35e35182 Mon Sep 17 00:00:00 2001
From: zhy <1774657235@qq.com>
Date: Thu, 4 Jul 2024 20:35:00 +0800
Subject: [PATCH] remove unused variables and move parser func to Doinitial

---
 pikiwidb_1720083379784.conf | 348 ------------------------------------
 pikiwidb_1720083904405.conf | 348 ------------------------------------
 2 files changed, 696 deletions(-)
 delete mode 100644 pikiwidb_1720083379784.conf
 delete mode 100644 pikiwidb_1720083904405.conf

diff --git a/pikiwidb_1720083379784.conf b/pikiwidb_1720083379784.conf
deleted file mode 100644
index 548422fa7..000000000
--- a/pikiwidb_1720083379784.conf
+++ /dev/null
@@ -1,348 +0,0 @@
-# PikiwiDB configuration file example
-
-# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it.
-daemonize no
-
-# Accept connections on the specified port, default is 9221.
-# port 0 is not permitted.
-port 9221
-
-# If you want you can bind a single interface, if the bind option is not
-# specified all the interfaces will listen for incoming connections.
-#
-ip 127.0.0.1
-
-
-# Close the connection after a client is idle for N seconds (0 to disable)
-timeout 0
-
-# Directory to store the data of PikiwiDB.
-db-path /data/pikiwidb/db_1720083379784/db/
-
-# Specify the server verbosity level.
-# This can be one of:
-# debug (a lot of information, useful for development/testing)
-# verbose (many rarely useful info, but not a mess like the debug level)
-# notice (moderately verbose, what you want in production probably)
-# warning (only very important / critical messages are logged)
-loglevel warning
-
-# Specify the log file name. Also 'stdout' can be used to force
-# Redis to log on the standard output. Note that if you use standard
-# output for logging but daemonize, logs will be sent to /dev/null
-logfile stdout
-
-# Set the number of databases. The default database is DB 0, you can select
-# a different one on a per-connection basis using SELECT where
-# dbid is a number between 0 and 'databases'-1
-databases 16
-
-################################ SNAPSHOTTING #################################
-#
-# Save the DB on disk:
-#
-# save
-#
-# Will save the DB if both the given number of seconds and the given
-# number of write operations against the DB occurred.
-#
-# In the example below the behaviour will be to save:
-# after 900 sec (15 min) if at least 1 key changed
-# after 300 sec (5 min) if at least 10 keys changed
-# after 60 sec if at least 10000 keys changed
-#
-# Note: you can disable saving at all commenting all the "save" lines.
-#
-# It is also possible to remove all the previously configured save
-# points by adding a save directive with a single empty string argument
-# like in the following example:
-#
-# save ""
-
-#save 900 1
-#save 300 10
-#save 60000 1000000
-
-# By default Redis will stop accepting writes if RDB snapshots are enabled
-# (at least one save point) and the latest background save failed.
-# This will make the user aware (in an hard way) that data is not persisting
-# on disk properly, otherwise chances are that no one will notice and some
-# distater will happen.
-#
-# If the background saving process will start working again Redis will
-# automatically allow writes again.
-#
-# However if you have setup your proper monitoring of the Redis server
-# and persistence, you may want to disable this feature so that Redis will
-# continue to work as usually even if there are problems with disk,
-# permissions, and so forth.
-stop-writes-on-bgsave-error yes # not support
-
-# Compress string objects using LZF when dump .rdb databases?
-# For default that's set to 'yes' as it's almost always a win.
-# If you want to save some CPU in the saving child set it to 'no' but
-# the dataset will likely be bigger if you have compressible values or keys.
-rdbcompression yes # PikiwiDB always use compression for rdb file
-
-# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
-# This makes the format more resistant to corruption but there is a performance
-# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
-# for maximum performances.
-#
-# RDB files created with checksum disabled have a checksum of zero that will
-# tell the loading code to skip the check.
-rdbchecksum yes # PikiwiDB always check sum for rdb file
-
-# The filename where to dump the DB
-dbfilename dump.rdb
-
-# The working directory.
-#
-# The DB will be written inside this directory, with the filename specified
-# above using the 'dbfilename' configuration directive.
-#
-# The Append Only File will also be created inside this directory.
-#
-# Note that you must specify a directory here, not a file name.
-dir ./
-
-################################# REPLICATION #################################
-
-# Master-Slave replication. Use slaveof to make a Redis instance a copy of
-# another Redis server. Note that the configuration is local to the slave
-# so for example it is possible to configure the slave to save the DB with a
-# different interval, or to listen to another port, and so on.
-#
-# slaveof
-# slaveof 127.0.0.1 6379
-
-# If the master is password protected (using the "requirepass" configuration
-# directive below) it is possible to tell the slave to authenticate before
-# starting the replication synchronization process, otherwise the master will
-# refuse the slave request.
-#
-# masterauth foobar
-
-# When a slave loses its connection with the master, or when the replication
-# is still in progress, the slave can act in two different ways:
-#
-# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
-# still reply to client requests, possibly with out of date data, or the
-# data set may just be empty if this is the first synchronization.
-#
-# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
-# an error "SYNC with master in progress" to all the kind of commands
-# but to INFO and SLAVEOF.
-#
-# slave-serve-stale-data yes # not support yet
-
-# You can configure a slave instance to accept writes or not. Writing against
-# a slave instance may be useful to store some ephemeral data (because data
-# written on a slave will be easily deleted after resync with the master) but
-# may also cause problems if clients are writing to it because of a
-# misconfiguration.
-#
-# Since Redis 2.6 by default slaves are read-only.
-#
-# Note: read only slaves are not designed to be exposed to untrusted clients
-# on the internet. It's just a protection layer against misuse of the instance.
-# Still a read only slave exports by default all the administrative commands
-# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve
-# security of read only slaves using 'rename-command' to shadow all the
-# administrative / dangerous commands.
-slave-read-only yes # PikiwiDB always set slave read only
-
-# Slaves send PINGs to server in a predefined interval. It's possible to change
-# this interval with the repl_ping_slave_period option. The default value is 10
-# seconds.
-#
-# repl-ping-slave-period 10
-
-# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted
-# By default the size is 1073741824.
-# max-client-response-size 1073741824
-
-# The following option sets a timeout for both Bulk transfer I/O timeout and
-# master data or ping response timeout. The default value is 60 seconds.
-#
-# It is important to make sure that this value is greater than the value
-# specified for repl-ping-slave-period otherwise a timeout will be detected
-# every time there is low traffic between the master and the slave.
-#
-# repl-timeout 60
-
-# The slave priority is an integer number published by Redis in the INFO output.
-# It is used by Redis Sentinel in order to select a slave to promote into a
-# master if the master is no longer working correctly.
-#
-# A slave with a low priority number is considered better for promotion, so
-# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
-# pick the one wtih priority 10, that is the lowest.
-#
-# However a special priority of 0 marks the slave as not able to perform the
-# role of master, so a slave with priority of 0 will never be selected by
-# Redis Sentinel for promotion.
-#
-# By default the priority is 100.
-slave-priority 100 # not support yet
-
-################################## SECURITY ###################################
-
-# Require clients to issue AUTH before processing any other
-# commands. This might be useful in environments in which you do not trust
-# others with access to the host running redis-server.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-# Warning: since Redis is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
-#
-#requirepass foobar
-
-# Command renaming.
-#
-# It is possible to change the name of dangerous commands in a shared
-# environment. For instance the CONFIG command may be renamed into something
-# hard to guess so that it will still be available for internal-use tools
-# but not available for general clients.
-#
-# Example:
-#
-# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
-#
-# It is also possible to completely kill a command by renaming it into
-# an empty string:
-#
-# rename-command CONFIG ""
-#
-# Please note that changing the name of commands that are logged into the
-# AOF file or transmitted to slaves may cause problems.
-
-################################### LIMITS ####################################
-
-# Set the max number of connected clients at the same time. By default
-# this limit is set to 10000 clients, however if the Redis server is not
-# able to configure the process file limit to allow for the specified limit
-# the max number of allowed clients is set to the current file limit
-# minus 32 (as Redis reserves a few file descriptors for internal uses).
-#
-# Once the limit is reached Redis will close all the new connections sending
-# an error 'max number of clients reached'.
-#
-# maxclients 10000
-
-# Don't use more memory than the specified amount of bytes.
-# When the memory limit is reached Redis will try to remove keys
-# accordingly to the eviction policy selected (see maxmemmory-policy).
-#
-# If Redis can't remove keys according to the policy, or if the policy is
-# set to 'noeviction', Redis will start to reply with errors to commands
-# that would use more memory, like SET, LPUSH, and so on, and will continue
-# to reply to read-only commands like GET.
-#
-maxmemory 999999999999
-
-# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory
-# is reached. You can select among five behaviors:
-#
-
-# allkeys-lru -> remove any key accordingly to the LRU algorithm
-# noeviction -> don't expire at all, just return an error on write operations
-# The default is:
-#
-maxmemory-policy noeviction
-
-# LRU and minimal TTL algorithms are not precise algorithms but approximated
-# algorithms (in order to save memory), so you can select as well the sample
-# size to check. For instance for default PikiwiDB will check 5 keys and
-# pick the one that was used less recently, you can change the sample size
-# using the following configuration directive.
-#
-maxmemory-samples 5
-
-################################ THREADED I/O #################################
-# So for instance if you have a four cores boxes, try to use 2 or 3 I/O
-# threads, if you have a 8 cores, try to use 6 threads. In order to
-# enable I/O threads use the following configuration directive:
-#
-# NOTE 1: This configuration directive cannot be changed at runtime via
-# CONFIG SET.
-#
-worker-threads 2
-slave-threads 2
-
-################################ LUA SCRIPTING ###############################
-
-# Max execution time of a Lua script in milliseconds.
-#
-# If the maximum execution time is reached Redis will log that a script is
-# still in execution after the maximum allowed time and will start to
-# reply to queries with an error.
-#
-# When a long running script exceed the maximum execution time only the
-# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
-# used to stop a script that did not yet called write commands. The second
-# is the only way to shut down the server in the case a write commands was
-# already issue by the script but the user don't want to wait for the natural
-# termination of the script.
-#
-# Set it to 0 or a negative value for unlimited execution without warnings.
-#lua-time-limit 5000
-
-################################## SLOW LOG ###################################
-
-# The Redis Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
-#
-# You can configure the slow log with two parameters: one tells Redis
-# what is the execution time, in microseconds, to exceed in order for the
-# command to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
-
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 10000
-
-# There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 128
-
-############################### BACKENDS CONFIG ###############################
-# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging
-# to the same DB is distributed among several RocksDB instances.
-
-# RocksDB instances number per DB
-db-instance-num 3
-# default is 86400 * 7
-small-compaction-threshold 604800
-# default is 86400 * 3
-small-compaction-duration-threshold 259200
-
-############################### ROCKSDB CONFIG ###############################
-rocksdb-max-subcompactions 2
-rocksdb-max-background-jobs 4
-rocksdb-max-write-buffer-number 2
-rocksdb-min-write-buffer-number-to-merge 2
-# default is 64M
-rocksdb-write-buffer-size 67108864
-rocksdb-level0-file-num-compaction-trigger 4
-rocksdb-number-levels 7
-rocksdb-enable-pipelined-write no
-rocksdb-level0-slowdown-writes-trigger 20
-rocksdb-level0-stop-writes-trigger 36
-# default 86400 * 7
-rocksdb-ttl-second 604800
-# default 86400 * 3
-rocksdb-periodic-second 259200;
-
-############################### RAFT ###############################
-use-raft no
-# Braft relies on brpc to communicate via the default port number plus the port offset
-raft-port-offset 10
diff --git a/pikiwidb_1720083904405.conf b/pikiwidb_1720083904405.conf
deleted file mode 100644
index 042ff3dce..000000000
--- a/pikiwidb_1720083904405.conf
+++ /dev/null
@@ -1,348 +0,0 @@
-# PikiwiDB configuration file example
-
-# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it.
-daemonize no
-
-# Accept connections on the specified port, default is 9221.
-# port 0 is not permitted.
-port 9221
-
-# If you want you can bind a single interface, if the bind option is not
-# specified all the interfaces will listen for incoming connections.
-#
-ip 127.0.0.1
-
-
-# Close the connection after a client is idle for N seconds (0 to disable)
-timeout 0
-
-# Directory to store the data of PikiwiDB.
-db-path /data/pikiwidb/db_1720083904405/db/
-
-# Specify the server verbosity level.
-# This can be one of:
-# debug (a lot of information, useful for development/testing)
-# verbose (many rarely useful info, but not a mess like the debug level)
-# notice (moderately verbose, what you want in production probably)
-# warning (only very important / critical messages are logged)
-loglevel warning
-
-# Specify the log file name. Also 'stdout' can be used to force
-# Redis to log on the standard output. Note that if you use standard
-# output for logging but daemonize, logs will be sent to /dev/null
-logfile stdout
-
-# Set the number of databases. The default database is DB 0, you can select
-# a different one on a per-connection basis using SELECT where
-# dbid is a number between 0 and 'databases'-1
-databases 16
-
-################################ SNAPSHOTTING #################################
-#
-# Save the DB on disk:
-#
-# save
-#
-# Will save the DB if both the given number of seconds and the given
-# number of write operations against the DB occurred.
-#
-# In the example below the behaviour will be to save:
-# after 900 sec (15 min) if at least 1 key changed
-# after 300 sec (5 min) if at least 10 keys changed
-# after 60 sec if at least 10000 keys changed
-#
-# Note: you can disable saving at all commenting all the "save" lines.
-#
-# It is also possible to remove all the previously configured save
-# points by adding a save directive with a single empty string argument
-# like in the following example:
-#
-# save ""
-
-#save 900 1
-#save 300 10
-#save 60000 1000000
-
-# By default Redis will stop accepting writes if RDB snapshots are enabled
-# (at least one save point) and the latest background save failed.
-# This will make the user aware (in an hard way) that data is not persisting
-# on disk properly, otherwise chances are that no one will notice and some
-# distater will happen.
-#
-# If the background saving process will start working again Redis will
-# automatically allow writes again.
-#
-# However if you have setup your proper monitoring of the Redis server
-# and persistence, you may want to disable this feature so that Redis will
-# continue to work as usually even if there are problems with disk,
-# permissions, and so forth.
-stop-writes-on-bgsave-error yes # not support
-
-# Compress string objects using LZF when dump .rdb databases?
-# For default that's set to 'yes' as it's almost always a win.
-# If you want to save some CPU in the saving child set it to 'no' but
-# the dataset will likely be bigger if you have compressible values or keys.
-rdbcompression yes # PikiwiDB always use compression for rdb file
-
-# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
-# This makes the format more resistant to corruption but there is a performance
-# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
-# for maximum performances.
-#
-# RDB files created with checksum disabled have a checksum of zero that will
-# tell the loading code to skip the check.
-rdbchecksum yes # PikiwiDB always check sum for rdb file
-
-# The filename where to dump the DB
-dbfilename dump.rdb
-
-# The working directory.
-#
-# The DB will be written inside this directory, with the filename specified
-# above using the 'dbfilename' configuration directive.
-#
-# The Append Only File will also be created inside this directory.
-#
-# Note that you must specify a directory here, not a file name.
-dir ./
-
-################################# REPLICATION #################################
-
-# Master-Slave replication. Use slaveof to make a Redis instance a copy of
-# another Redis server. Note that the configuration is local to the slave
-# so for example it is possible to configure the slave to save the DB with a
-# different interval, or to listen to another port, and so on.
-#
-# slaveof
-# slaveof 127.0.0.1 6379
-
-# If the master is password protected (using the "requirepass" configuration
-# directive below) it is possible to tell the slave to authenticate before
-# starting the replication synchronization process, otherwise the master will
-# refuse the slave request.
-#
-# masterauth foobar
-
-# When a slave loses its connection with the master, or when the replication
-# is still in progress, the slave can act in two different ways:
-#
-# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
-# still reply to client requests, possibly with out of date data, or the
-# data set may just be empty if this is the first synchronization.
-#
-# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
-# an error "SYNC with master in progress" to all the kind of commands
-# but to INFO and SLAVEOF.
-#
-# slave-serve-stale-data yes # not support yet
-
-# You can configure a slave instance to accept writes or not. Writing against
-# a slave instance may be useful to store some ephemeral data (because data
-# written on a slave will be easily deleted after resync with the master) but
-# may also cause problems if clients are writing to it because of a
-# misconfiguration.
-#
-# Since Redis 2.6 by default slaves are read-only.
-#
-# Note: read only slaves are not designed to be exposed to untrusted clients
-# on the internet. It's just a protection layer against misuse of the instance.
-# Still a read only slave exports by default all the administrative commands
-# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve
-# security of read only slaves using 'rename-command' to shadow all the
-# administrative / dangerous commands.
-slave-read-only yes # PikiwiDB always set slave read only
-
-# Slaves send PINGs to server in a predefined interval. It's possible to change
-# this interval with the repl_ping_slave_period option. The default value is 10
-# seconds.
-#
-# repl-ping-slave-period 10
-
-# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted
-# By default the size is 1073741824.
-# max-client-response-size 1073741824
-
-# The following option sets a timeout for both Bulk transfer I/O timeout and
-# master data or ping response timeout. The default value is 60 seconds.
-#
-# It is important to make sure that this value is greater than the value
-# specified for repl-ping-slave-period otherwise a timeout will be detected
-# every time there is low traffic between the master and the slave.
-#
-# repl-timeout 60
-
-# The slave priority is an integer number published by Redis in the INFO output.
-# It is used by Redis Sentinel in order to select a slave to promote into a
-# master if the master is no longer working correctly.
-#
-# A slave with a low priority number is considered better for promotion, so
-# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
-# pick the one wtih priority 10, that is the lowest.
-#
-# However a special priority of 0 marks the slave as not able to perform the
-# role of master, so a slave with priority of 0 will never be selected by
-# Redis Sentinel for promotion.
-#
-# By default the priority is 100.
-slave-priority 100 # not support yet
-
-################################## SECURITY ###################################
-
-# Require clients to issue AUTH before processing any other
-# commands. This might be useful in environments in which you do not trust
-# others with access to the host running redis-server.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-# Warning: since Redis is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
-#
-#requirepass foobar
-
-# Command renaming.
-#
-# It is possible to change the name of dangerous commands in a shared
-# environment. For instance the CONFIG command may be renamed into something
-# hard to guess so that it will still be available for internal-use tools
-# but not available for general clients.
-#
-# Example:
-#
-# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
-#
-# It is also possible to completely kill a command by renaming it into
-# an empty string:
-#
-# rename-command CONFIG ""
-#
-# Please note that changing the name of commands that are logged into the
-# AOF file or transmitted to slaves may cause problems.
-
-################################### LIMITS ####################################
-
-# Set the max number of connected clients at the same time. By default
-# this limit is set to 10000 clients, however if the Redis server is not
-# able to configure the process file limit to allow for the specified limit
-# the max number of allowed clients is set to the current file limit
-# minus 32 (as Redis reserves a few file descriptors for internal uses).
-#
-# Once the limit is reached Redis will close all the new connections sending
-# an error 'max number of clients reached'.
-#
-# maxclients 10000
-
-# Don't use more memory than the specified amount of bytes.
-# When the memory limit is reached Redis will try to remove keys
-# accordingly to the eviction policy selected (see maxmemmory-policy).
-#
-# If Redis can't remove keys according to the policy, or if the policy is
-# set to 'noeviction', Redis will start to reply with errors to commands
-# that would use more memory, like SET, LPUSH, and so on, and will continue
-# to reply to read-only commands like GET.
-#
-maxmemory 999999999999
-
-# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory
-# is reached. You can select among five behaviors:
-#
-
-# allkeys-lru -> remove any key accordingly to the LRU algorithm
-# noeviction -> don't expire at all, just return an error on write operations
-# The default is:
-#
-maxmemory-policy noeviction
-
-# LRU and minimal TTL algorithms are not precise algorithms but approximated
-# algorithms (in order to save memory), so you can select as well the sample
-# size to check. For instance for default PikiwiDB will check 5 keys and
-# pick the one that was used less recently, you can change the sample size
-# using the following configuration directive.
-#
-maxmemory-samples 5
-
-################################ THREADED I/O #################################
-# So for instance if you have a four cores boxes, try to use 2 or 3 I/O
-# threads, if you have a 8 cores, try to use 6 threads. In order to
-# enable I/O threads use the following configuration directive:
-#
-# NOTE 1: This configuration directive cannot be changed at runtime via
-# CONFIG SET.
-#
-worker-threads 2
-slave-threads 2
-
-################################ LUA SCRIPTING ###############################
-
-# Max execution time of a Lua script in milliseconds.
-#
-# If the maximum execution time is reached Redis will log that a script is
-# still in execution after the maximum allowed time and will start to
-# reply to queries with an error.
-#
-# When a long running script exceed the maximum execution time only the
-# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
-# used to stop a script that did not yet called write commands. The second
-# is the only way to shut down the server in the case a write commands was
-# already issue by the script but the user don't want to wait for the natural
-# termination of the script.
-#
-# Set it to 0 or a negative value for unlimited execution without warnings.
-#lua-time-limit 5000
-
-################################## SLOW LOG ###################################
-
-# The Redis Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
-#
-# You can configure the slow log with two parameters: one tells Redis
-# what is the execution time, in microseconds, to exceed in order for the
-# command to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
-
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 10000
-
-# There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 128
-
-############################### BACKENDS CONFIG ###############################
-# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging
-# to the same DB is distributed among several RocksDB instances.
-
-# RocksDB instances number per DB
-db-instance-num 3
-# default is 86400 * 7
-small-compaction-threshold 604800
-# default is 86400 * 3
-small-compaction-duration-threshold 259200
-
-############################### ROCKSDB CONFIG ###############################
-rocksdb-max-subcompactions 2
-rocksdb-max-background-jobs 4
-rocksdb-max-write-buffer-number 2
-rocksdb-min-write-buffer-number-to-merge 2
-# default is 64M
-rocksdb-write-buffer-size 67108864
-rocksdb-level0-file-num-compaction-trigger 4
-rocksdb-number-levels 7
-rocksdb-enable-pipelined-write no
-rocksdb-level0-slowdown-writes-trigger 20
-rocksdb-level0-stop-writes-trigger 36
-# default 86400 * 7
-rocksdb-ttl-second 604800
-# default 86400 * 3
-rocksdb-periodic-second 259200;
-
-############################### RAFT ###############################
-use-raft no
-# Braft relies on brpc to communicate via the default port number plus the port offset
-raft-port-offset 10