From 3dd2f5a586f2c4d5f9cbf984a0565f70547efae7 Mon Sep 17 00:00:00 2001 From: Harkrishn Patro Date: Thu, 23 May 2024 22:14:23 -0700 Subject: [PATCH 01/42] Undeprecate cluster slots command (#536) Undeprecate cluster slots command. This command is widely used by clients to form the cluster topology and with the recent change to improve performance of `CLUSTER SLOTS` command via #53 as well as us looking to further improve the usability via #517, it makes sense to undeprecate this command. --------- Signed-off-by: Harkrishn Patro --- src/commands.def | 2 +- src/commands/cluster-slots.json | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/src/commands.def b/src/commands.def index bc5a1261f2..f76e21f2f3 100644 --- a/src/commands.def +++ b/src/commands.def @@ -975,7 +975,7 @@ struct COMMAND_STRUCT CLUSTER_Subcommands[] = { {MAKE_CMD("setslot","Binds a hash slot to a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SETSLOT_History,1,CLUSTER_SETSLOT_Tips,0,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE|CMD_MAY_REPLICATE,0,CLUSTER_SETSLOT_Keyspecs,0,NULL,3),.args=CLUSTER_SETSLOT_Args}, {MAKE_CMD("shards","Returns the mapping of cluster slots to shards.","O(N) where N is the total number of cluster nodes","7.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SHARDS_History,0,CLUSTER_SHARDS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SHARDS_Keyspecs,0,NULL,0)}, {MAKE_CMD("slaves","Lists the replica nodes of a master node.","O(N) where N is the number of replicas.","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER REPLICAS`","5.0.0","cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,0,CLUSTER_SLAVES_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_SLAVES_Keyspecs,0,NULL,1),.args=CLUSTER_SLAVES_Args}, -{MAKE_CMD("slots","Returns the mapping of cluster slots to nodes.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER 
SHARDS`","7.0.0","cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,2,CLUSTER_SLOTS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SLOTS_Keyspecs,0,NULL,0)}, +{MAKE_CMD("slots","Returns the mapping of cluster slots to nodes.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,2,CLUSTER_SLOTS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SLOTS_Keyspecs,0,NULL,0)}, {0} }; diff --git a/src/commands/cluster-slots.json b/src/commands/cluster-slots.json index 13f8c26612..ca48f371ea 100644 --- a/src/commands/cluster-slots.json +++ b/src/commands/cluster-slots.json @@ -7,11 +7,6 @@ "arity": 2, "container": "CLUSTER", "function": "clusterCommand", - "deprecated_since": "7.0.0", - "replaced_by": "`CLUSTER SHARDS`", - "doc_flags": [ - "DEPRECATED" - ], "history": [ [ "4.0.0", From d72ba06dd0519fd0bf578cca2a2f5c457629dc6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20S=C3=B6derqvist?= Date: Fri, 24 May 2024 17:58:03 +0200 Subject: [PATCH 02/42] Make cluster replicas return ASK and TRYAGAIN (#495) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After READONLY, make a cluster replica behave as its primary regarding returning ASK redirects and TRYAGAIN. Without this patch, a client reading from a replica cannot tell if a key doesn't exist or if it has already been migrated to another shard as part of an ongoing slot migration. Therefore, without an ASK redirect in this situation, offloading reads to cluster replicas wasn't reliable. Note: The target of a redirect is always a primary. If a client wants to continue reading from a replica after following a redirect, it needs to figure out the replicas of that new primary using CLUSTER SHARDS or similar. This is related to #21 and has been made possible by the introduction of Replication of Slot Migration States in #445. 
---- Release notes: During cluster slot migration, replicas are able to return -ASK redirects and -TRYAGAIN. --------- Signed-off-by: Viktor Söderqvist --- src/cluster.c | 14 +++++--- tests/unit/cluster/slot-migration.tcl | 48 +++++++++++++++++++++++++-- 2 files changed, 55 insertions(+), 7 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index d30d7e19b5..71d1cc9124 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1048,10 +1048,12 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int * can safely serve the request, otherwise we return a TRYAGAIN * error). To do so we set the importing/migrating state and * increment a counter for every missing key. */ - if (n == myself && getMigratingSlotDest(slot) != NULL) { - migrating_slot = 1; - } else if (getImportingSlotSource(slot) != NULL) { - importing_slot = 1; + if (clusterNodeIsMaster(myself) || c->flags & CLIENT_READONLY) { + if (n == clusterNodeGetMaster(myself) && getMigratingSlotDest(slot) != NULL) { + migrating_slot = 1; + } else if (getImportingSlotSource(slot) != NULL) { + importing_slot = 1; + } } } else { /* If it is not the first key/channel, make sure it is exactly @@ -1120,7 +1122,9 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int /* MIGRATE always works in the context of the local node if the slot * is open (migrating or importing state). We need to be able to freely * move keys among instances in this case. */ - if ((migrating_slot || importing_slot) && cmd->proc == migrateCommand) return myself; + if ((migrating_slot || importing_slot) && cmd->proc == migrateCommand && clusterNodeIsMaster(myself)) { + return myself; + } /* If we don't have all the keys and we are migrating the slot, send * an ASK redirection or TRYAGAIN. 
*/ diff --git a/tests/unit/cluster/slot-migration.tcl b/tests/unit/cluster/slot-migration.tcl index d2cfa8e2cc..d141ccc5e0 100644 --- a/tests/unit/cluster/slot-migration.tcl +++ b/tests/unit/cluster/slot-migration.tcl @@ -71,6 +71,7 @@ start_cluster 3 3 {tags {external:skip cluster} overrides {cluster-allow-replica set R3_id [R 3 CLUSTER MYID] set R4_id [R 4 CLUSTER MYID] set R5_id [R 5 CLUSTER MYID] + R 0 SET "{aga}2" banana test "Slot migration states are replicated" { # Validate initial states @@ -139,8 +140,51 @@ start_cluster 3 3 {tags {external:skip cluster} overrides {cluster-allow-replica assert_equal [get_open_slots 3] "\[609->-$R1_id\]" assert_equal [get_open_slots 4] "\[609-<-$R0_id\]" catch {[R 3 get aga]} e - assert_equal {MOVED} [lindex [split $e] 0] - assert_equal {609} [lindex [split $e] 1] + set port0 [srv 0 port] + assert_equal "MOVED 609 127.0.0.1:$port0" $e + } + + test "Replica of migrating node returns ASK redirect after READONLY" { + # Validate initial states + assert_equal [get_open_slots 0] "\[609->-$R1_id\]" + assert_equal [get_open_slots 1] "\[609-<-$R0_id\]" + assert_equal [get_open_slots 3] "\[609->-$R1_id\]" + assert_equal [get_open_slots 4] "\[609-<-$R0_id\]" + # Read missing key in readonly replica in migrating state. + assert_equal OK [R 3 READONLY] + set port1 [srv -1 port] + catch {[R 3 get aga]} e + assert_equal "ASK 609 127.0.0.1:$port1" $e + assert_equal OK [R 3 READWRITE] + } + + test "Replica of migrating node returns TRYAGAIN after READONLY" { + # Validate initial states + assert_equal [get_open_slots 0] "\[609->-$R1_id\]" + assert_equal [get_open_slots 1] "\[609-<-$R0_id\]" + assert_equal [get_open_slots 3] "\[609->-$R1_id\]" + assert_equal [get_open_slots 4] "\[609-<-$R0_id\]" + # Read some existing and some missing keys in readonly replica in + # migrating state results in TRYAGAIN, just like its primary would do. 
+ assert_equal OK [R 3 READONLY] + catch {[R 3 mget "{aga}1" "{aga}2"]} e + assert_match "TRYAGAIN *" $e + assert_equal OK [R 3 READWRITE] + } + + test "Replica of importing node returns TRYAGAIN after READONLY and ASKING" { + # Validate initial states + assert_equal [get_open_slots 0] "\[609->-$R1_id\]" + assert_equal [get_open_slots 1] "\[609-<-$R0_id\]" + assert_equal [get_open_slots 3] "\[609->-$R1_id\]" + assert_equal [get_open_slots 4] "\[609-<-$R0_id\]" + # A client follows an ASK redirect to a primary, but wants to read from a replica. + # The replica returns TRYAGAIN just like a primary would do for two missing keys. + assert_equal OK [R 4 READONLY] + assert_equal OK [R 4 ASKING] + catch {R 4 MGET "{aga}1" "{aga}2"} e + assert_match "TRYAGAIN *" $e + assert_equal OK [R 4 READWRITE] } test "New replica inherits migrating slot" { From fbbabe3543a6f9bca670a0875dc5eeb5ba6733a3 Mon Sep 17 00:00:00 2001 From: Madelyn Olson Date: Fri, 24 May 2024 15:53:44 -0700 Subject: [PATCH 03/42] Revert format updates on config.c file for config block (#552) Although I think this improves the readability of individual configs, the fact there are now 1k more lines of configs makes this overall much harder to parse. So reverting it back to the way it was before. `,\n [ ]+` replace with `, `. 
--------- Signed-off-by: Madelyn Olson --- src/config.c | 1558 ++++++-------------------------------------------- 1 file changed, 166 insertions(+), 1392 deletions(-) diff --git a/src/config.c b/src/config.c index 646a5ea639..539d8fdf20 100644 --- a/src/config.c +++ b/src/config.c @@ -3005,1454 +3005,228 @@ static int applyClientMaxMemoryUsage(const char **err) { } standardConfig static_configs[] = { + /* clang-format off */ /* Bool configs */ createBoolConfig("rdbchecksum", NULL, IMMUTABLE_CONFIG, server.rdb_checksum, 1, NULL, NULL), createBoolConfig("daemonize", NULL, IMMUTABLE_CONFIG, server.daemonize, 0, NULL, NULL), - createBoolConfig("io-threads-do-reads", - NULL, - DEBUG_CONFIG | IMMUTABLE_CONFIG, - server.io_threads_do_reads, - 0, - NULL, - NULL), /* Read + parse from threads? */ + createBoolConfig("io-threads-do-reads", NULL, DEBUG_CONFIG | IMMUTABLE_CONFIG, server.io_threads_do_reads, 0, NULL, NULL), /* Read + parse from threads? */ createBoolConfig("always-show-logo", NULL, IMMUTABLE_CONFIG, server.always_show_logo, 0, NULL, NULL), createBoolConfig("protected-mode", NULL, MODIFIABLE_CONFIG, server.protected_mode, 1, NULL, NULL), createBoolConfig("rdbcompression", NULL, MODIFIABLE_CONFIG, server.rdb_compression, 1, NULL, NULL), createBoolConfig("rdb-del-sync-files", NULL, MODIFIABLE_CONFIG, server.rdb_del_sync_files, 0, NULL, NULL), createBoolConfig("activerehashing", NULL, MODIFIABLE_CONFIG, server.activerehashing, 1, NULL, NULL), - createBoolConfig("stop-writes-on-bgsave-error", - NULL, - MODIFIABLE_CONFIG, - server.stop_writes_on_bgsave_err, - 1, - NULL, - NULL), - createBoolConfig("set-proc-title", - NULL, - IMMUTABLE_CONFIG, - server.set_proc_title, - 1, - NULL, - NULL), /* Should setproctitle be used? 
*/ - createBoolConfig("dynamic-hz", NULL, MODIFIABLE_CONFIG, server.dynamic_hz, 1, NULL, NULL), /* Adapt hz to # of - clients.*/ - createBoolConfig("lazyfree-lazy-eviction", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_eviction, - 0, - NULL, - NULL), - createBoolConfig("lazyfree-lazy-expire", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_expire, - 0, - NULL, - NULL), - createBoolConfig("lazyfree-lazy-server-del", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_server_del, - 0, - NULL, - NULL), - createBoolConfig("lazyfree-lazy-user-del", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_user_del, - 0, - NULL, - NULL), - createBoolConfig("lazyfree-lazy-user-flush", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_user_flush, - 0, - NULL, - NULL), + createBoolConfig("stop-writes-on-bgsave-error", NULL, MODIFIABLE_CONFIG, server.stop_writes_on_bgsave_err, 1, NULL, NULL), + createBoolConfig("set-proc-title", NULL, IMMUTABLE_CONFIG, server.set_proc_title, 1, NULL, NULL), /* Should setproctitle be used? 
*/ + createBoolConfig("dynamic-hz", NULL, MODIFIABLE_CONFIG, server.dynamic_hz, 1, NULL, NULL), /* Adapt hz to # of clients.*/ + createBoolConfig("lazyfree-lazy-eviction", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_eviction, 0, NULL, NULL), + createBoolConfig("lazyfree-lazy-expire", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_expire, 0, NULL, NULL), + createBoolConfig("lazyfree-lazy-server-del", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_server_del, 0, NULL, NULL), + createBoolConfig("lazyfree-lazy-user-del", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_user_del, 0, NULL, NULL), + createBoolConfig("lazyfree-lazy-user-flush", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_user_flush, 0, NULL, NULL), createBoolConfig("repl-disable-tcp-nodelay", NULL, MODIFIABLE_CONFIG, server.repl_disable_tcp_nodelay, 0, NULL, NULL), - createBoolConfig("repl-diskless-sync", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.repl_diskless_sync, - 1, - NULL, - NULL), - createBoolConfig("aof-rewrite-incremental-fsync", - NULL, - MODIFIABLE_CONFIG, - server.aof_rewrite_incremental_fsync, - 1, - NULL, - NULL), + createBoolConfig("repl-diskless-sync", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.repl_diskless_sync, 1, NULL, NULL), + createBoolConfig("aof-rewrite-incremental-fsync", NULL, MODIFIABLE_CONFIG, server.aof_rewrite_incremental_fsync, 1, NULL, NULL), createBoolConfig("no-appendfsync-on-rewrite", NULL, MODIFIABLE_CONFIG, server.aof_no_fsync_on_rewrite, 0, NULL, NULL), - createBoolConfig("cluster-require-full-coverage", - NULL, - MODIFIABLE_CONFIG, - server.cluster_require_full_coverage, - 1, - NULL, - NULL), - createBoolConfig("rdb-save-incremental-fsync", - NULL, - MODIFIABLE_CONFIG, - server.rdb_save_incremental_fsync, - 1, - NULL, - NULL), + createBoolConfig("cluster-require-full-coverage", NULL, MODIFIABLE_CONFIG, server.cluster_require_full_coverage, 1, NULL, NULL), + 
createBoolConfig("rdb-save-incremental-fsync", NULL, MODIFIABLE_CONFIG, server.rdb_save_incremental_fsync, 1, NULL, NULL), createBoolConfig("aof-load-truncated", NULL, MODIFIABLE_CONFIG, server.aof_load_truncated, 1, NULL, NULL), createBoolConfig("aof-use-rdb-preamble", NULL, MODIFIABLE_CONFIG, server.aof_use_rdb_preamble, 1, NULL, NULL), createBoolConfig("aof-timestamp-enabled", NULL, MODIFIABLE_CONFIG, server.aof_timestamp_enabled, 0, NULL, NULL), - createBoolConfig("cluster-replica-no-failover", - "cluster-slave-no-failover", - MODIFIABLE_CONFIG, - server.cluster_slave_no_failover, - 0, - NULL, - updateClusterFlags), /* Failover by default. */ - createBoolConfig("replica-lazy-flush", - "slave-lazy-flush", - MODIFIABLE_CONFIG, - server.repl_slave_lazy_flush, - 0, - NULL, - NULL), - createBoolConfig("replica-serve-stale-data", - "slave-serve-stale-data", - MODIFIABLE_CONFIG, - server.repl_serve_stale_data, - 1, - NULL, - NULL), - createBoolConfig("replica-read-only", - "slave-read-only", - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.repl_slave_ro, - 1, - NULL, - NULL), - createBoolConfig("replica-ignore-maxmemory", - "slave-ignore-maxmemory", - MODIFIABLE_CONFIG, - server.repl_slave_ignore_maxmemory, - 1, - NULL, - NULL), - createBoolConfig("jemalloc-bg-thread", - NULL, - MODIFIABLE_CONFIG, - server.jemalloc_bg_thread, - 1, - NULL, - updateJemallocBgThread), - createBoolConfig("activedefrag", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.active_defrag_enabled, - 0, - isValidActiveDefrag, - NULL), + createBoolConfig("cluster-replica-no-failover", "cluster-slave-no-failover", MODIFIABLE_CONFIG, server.cluster_slave_no_failover, 0, NULL, updateClusterFlags), /* Failover by default. 
*/ + createBoolConfig("replica-lazy-flush", "slave-lazy-flush", MODIFIABLE_CONFIG, server.repl_slave_lazy_flush, 0, NULL, NULL), + createBoolConfig("replica-serve-stale-data", "slave-serve-stale-data", MODIFIABLE_CONFIG, server.repl_serve_stale_data, 1, NULL, NULL), + createBoolConfig("replica-read-only", "slave-read-only", DEBUG_CONFIG | MODIFIABLE_CONFIG, server.repl_slave_ro, 1, NULL, NULL), + createBoolConfig("replica-ignore-maxmemory", "slave-ignore-maxmemory", MODIFIABLE_CONFIG, server.repl_slave_ignore_maxmemory, 1, NULL, NULL), + createBoolConfig("jemalloc-bg-thread", NULL, MODIFIABLE_CONFIG, server.jemalloc_bg_thread, 1, NULL, updateJemallocBgThread), + createBoolConfig("activedefrag", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.active_defrag_enabled, 0, isValidActiveDefrag, NULL), createBoolConfig("syslog-enabled", NULL, IMMUTABLE_CONFIG, server.syslog_enabled, 0, NULL, NULL), createBoolConfig("cluster-enabled", NULL, IMMUTABLE_CONFIG, server.cluster_enabled, 0, NULL, NULL), - createBoolConfig("appendonly", - NULL, - MODIFIABLE_CONFIG | DENY_LOADING_CONFIG, - server.aof_enabled, - 0, - NULL, - updateAppendonly), - createBoolConfig("cluster-allow-reads-when-down", - NULL, - MODIFIABLE_CONFIG, - server.cluster_allow_reads_when_down, - 0, - NULL, - NULL), - createBoolConfig("cluster-allow-pubsubshard-when-down", - NULL, - MODIFIABLE_CONFIG, - server.cluster_allow_pubsubshard_when_down, - 1, - NULL, - NULL), - createBoolConfig("crash-log-enabled", - NULL, - MODIFIABLE_CONFIG, - server.crashlog_enabled, - 1, - NULL, - updateSighandlerEnabled), + createBoolConfig("appendonly", NULL, MODIFIABLE_CONFIG | DENY_LOADING_CONFIG, server.aof_enabled, 0, NULL, updateAppendonly), + createBoolConfig("cluster-allow-reads-when-down", NULL, MODIFIABLE_CONFIG, server.cluster_allow_reads_when_down, 0, NULL, NULL), + createBoolConfig("cluster-allow-pubsubshard-when-down", NULL, MODIFIABLE_CONFIG, server.cluster_allow_pubsubshard_when_down, 1, NULL, NULL), + 
createBoolConfig("crash-log-enabled", NULL, MODIFIABLE_CONFIG, server.crashlog_enabled, 1, NULL, updateSighandlerEnabled), createBoolConfig("crash-memcheck-enabled", NULL, MODIFIABLE_CONFIG, server.memcheck_enabled, 1, NULL, NULL), - createBoolConfig("use-exit-on-panic", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - server.use_exit_on_panic, - 0, - NULL, - NULL), + createBoolConfig("use-exit-on-panic", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, server.use_exit_on_panic, 0, NULL, NULL), createBoolConfig("disable-thp", NULL, IMMUTABLE_CONFIG, server.disable_thp, 1, NULL, NULL), - createBoolConfig("cluster-allow-replica-migration", - NULL, - MODIFIABLE_CONFIG, - server.cluster_allow_replica_migration, - 1, - NULL, - NULL), + createBoolConfig("cluster-allow-replica-migration", NULL, MODIFIABLE_CONFIG, server.cluster_allow_replica_migration, 1, NULL, NULL), createBoolConfig("replica-announced", NULL, MODIFIABLE_CONFIG, server.replica_announced, 1, NULL, NULL), createBoolConfig("latency-tracking", NULL, MODIFIABLE_CONFIG, server.latency_tracking_enabled, 1, NULL, NULL), - createBoolConfig("aof-disable-auto-gc", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - server.aof_disable_auto_gc, - 0, - NULL, - updateAofAutoGCEnabled), - createBoolConfig("replica-ignore-disk-write-errors", - NULL, - MODIFIABLE_CONFIG, - server.repl_ignore_disk_write_error, - 0, - NULL, - NULL), - createBoolConfig("extended-redis-compatibility", - NULL, - MODIFIABLE_CONFIG, - server.extended_redis_compat, - 0, - NULL, - updateExtendedRedisCompat), + createBoolConfig("aof-disable-auto-gc", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, server.aof_disable_auto_gc, 0, NULL, updateAofAutoGCEnabled), + createBoolConfig("replica-ignore-disk-write-errors", NULL, MODIFIABLE_CONFIG, server.repl_ignore_disk_write_error, 0, NULL, NULL), + createBoolConfig("extended-redis-compatibility", NULL, MODIFIABLE_CONFIG, server.extended_redis_compat, 0, NULL, updateExtendedRedisCompat), /* String Configs */ 
createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL), createStringConfig("unixsocket", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.unixsocket, NULL, NULL, NULL), createStringConfig("pidfile", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.pidfile, NULL, NULL, NULL), - createStringConfig("replica-announce-ip", - "slave-announce-ip", - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.slave_announce_ip, - NULL, - NULL, - NULL), - createStringConfig("masteruser", - NULL, - MODIFIABLE_CONFIG | SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.masteruser, - NULL, - NULL, - NULL), - createStringConfig("cluster-announce-ip", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.cluster_announce_ip, - NULL, - NULL, - updateClusterIp), - createStringConfig("cluster-config-file", - NULL, - IMMUTABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.cluster_configfile, - "nodes.conf", - NULL, - NULL), - createStringConfig("cluster-announce-hostname", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.cluster_announce_hostname, - NULL, - isValidAnnouncedHostname, - updateClusterHostname), - createStringConfig("cluster-announce-human-nodename", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.cluster_announce_human_nodename, - NULL, - isValidAnnouncedNodename, - updateClusterHumanNodename), - createStringConfig("syslog-ident", - NULL, - IMMUTABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.syslog_ident, - SERVER_NAME, - NULL, - NULL), - createStringConfig("dbfilename", - NULL, - MODIFIABLE_CONFIG | PROTECTED_CONFIG, - ALLOW_EMPTY_STRING, - server.rdb_filename, - "dump.rdb", - isValidDBfilename, - NULL), - createStringConfig("appendfilename", - NULL, - IMMUTABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.aof_filename, - "appendonly.aof", - isValidAOFfilename, - NULL), - createStringConfig("appenddirname", - NULL, - IMMUTABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.aof_dirname, - 
"appendonlydir", - isValidAOFdirname, - NULL), - createStringConfig("server-cpulist", - "server_cpulist", - IMMUTABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.server_cpulist, - NULL, - NULL, - NULL), - createStringConfig("bio-cpulist", - "bio_cpulist", - IMMUTABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.bio_cpulist, - NULL, - NULL, - NULL), - createStringConfig("aof-rewrite-cpulist", - "aof_rewrite_cpulist", - IMMUTABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.aof_rewrite_cpulist, - NULL, - NULL, - NULL), - createStringConfig("bgsave-cpulist", - "bgsave_cpulist", - IMMUTABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.bgsave_cpulist, - NULL, - NULL, - NULL), - createStringConfig("ignore-warnings", - NULL, - MODIFIABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.ignore_warnings, - "", - NULL, - NULL), - createStringConfig("proc-title-template", - NULL, - MODIFIABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.proc_title_template, - CONFIG_DEFAULT_PROC_TITLE_TEMPLATE, - isValidProcTitleTemplate, - updateProcTitleTemplate), - createStringConfig("bind-source-addr", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.bind_source_addr, - NULL, - NULL, - NULL), + createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.slave_announce_ip, NULL, NULL, NULL), + createStringConfig("masteruser", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masteruser, NULL, NULL, NULL), + createStringConfig("cluster-announce-ip", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_ip, NULL, NULL, updateClusterIp), + createStringConfig("cluster-config-file", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.cluster_configfile, "nodes.conf", NULL, NULL), + createStringConfig("cluster-announce-hostname", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_hostname, NULL, isValidAnnouncedHostname, updateClusterHostname), + createStringConfig("cluster-announce-human-nodename", NULL, 
MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_human_nodename, NULL, isValidAnnouncedNodename, updateClusterHumanNodename), + createStringConfig("syslog-ident", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.syslog_ident, SERVER_NAME, NULL, NULL), + createStringConfig("dbfilename", NULL, MODIFIABLE_CONFIG | PROTECTED_CONFIG, ALLOW_EMPTY_STRING, server.rdb_filename, "dump.rdb", isValidDBfilename, NULL), + createStringConfig("appendfilename", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.aof_filename, "appendonly.aof", isValidAOFfilename, NULL), + createStringConfig("appenddirname", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.aof_dirname, "appendonlydir", isValidAOFdirname, NULL), + createStringConfig("server-cpulist", "server_cpulist", IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.server_cpulist, NULL, NULL, NULL), + createStringConfig("bio-cpulist", "bio_cpulist", IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bio_cpulist, NULL, NULL, NULL), + createStringConfig("aof-rewrite-cpulist", "aof_rewrite_cpulist", IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.aof_rewrite_cpulist, NULL, NULL, NULL), + createStringConfig("bgsave-cpulist", "bgsave_cpulist", IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bgsave_cpulist, NULL, NULL, NULL), + createStringConfig("ignore-warnings", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.ignore_warnings, "", NULL, NULL), + createStringConfig("proc-title-template", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.proc_title_template, CONFIG_DEFAULT_PROC_TITLE_TEMPLATE, isValidProcTitleTemplate, updateProcTitleTemplate), + createStringConfig("bind-source-addr", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bind_source_addr, NULL, NULL, NULL), createStringConfig("logfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.logfile, "", NULL, NULL), #ifdef LOG_REQ_RES - createStringConfig("req-res-logfile", - NULL, - IMMUTABLE_CONFIG | HIDDEN_CONFIG, - EMPTY_STRING_IS_NULL, - 
server.req_res_logfile, - NULL, - NULL, - NULL), + createStringConfig("req-res-logfile", NULL, IMMUTABLE_CONFIG | HIDDEN_CONFIG, EMPTY_STRING_IS_NULL, server.req_res_logfile, NULL, NULL, NULL), #endif - createStringConfig("locale-collate", - NULL, - MODIFIABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.locale_collate, - "", - NULL, - updateLocaleCollate), + createStringConfig("locale-collate", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.locale_collate, "", NULL, updateLocaleCollate), /* SDS Configs */ - createSDSConfig("masterauth", - NULL, - MODIFIABLE_CONFIG | SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.masterauth, - NULL, - NULL, - NULL), - createSDSConfig("requirepass", - NULL, - MODIFIABLE_CONFIG | SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.requirepass, - NULL, - NULL, - updateRequirePass), + createSDSConfig("masterauth", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masterauth, NULL, NULL, NULL), + createSDSConfig("requirepass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.requirepass, NULL, NULL, updateRequirePass), /* Enum Configs */ - createEnumConfig("supervised", - NULL, - IMMUTABLE_CONFIG, - supervised_mode_enum, - server.supervised_mode, - SUPERVISED_NONE, - NULL, - NULL), - createEnumConfig("syslog-facility", - NULL, - IMMUTABLE_CONFIG, - syslog_facility_enum, - server.syslog_facility, - LOG_LOCAL0, - NULL, - NULL), - createEnumConfig("repl-diskless-load", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG | DENY_LOADING_CONFIG, - repl_diskless_load_enum, - server.repl_diskless_load, - REPL_DISKLESS_LOAD_DISABLED, - NULL, - NULL), + createEnumConfig("supervised", NULL, IMMUTABLE_CONFIG, supervised_mode_enum, server.supervised_mode, SUPERVISED_NONE, NULL, NULL), + createEnumConfig("syslog-facility", NULL, IMMUTABLE_CONFIG, syslog_facility_enum, server.syslog_facility, LOG_LOCAL0, NULL, NULL), + createEnumConfig("repl-diskless-load", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG | 
DENY_LOADING_CONFIG, repl_diskless_load_enum, server.repl_diskless_load, REPL_DISKLESS_LOAD_DISABLED, NULL, NULL), createEnumConfig("loglevel", NULL, MODIFIABLE_CONFIG, loglevel_enum, server.verbosity, LL_NOTICE, NULL, NULL), - createEnumConfig("maxmemory-policy", - NULL, - MODIFIABLE_CONFIG, - maxmemory_policy_enum, - server.maxmemory_policy, - MAXMEMORY_NO_EVICTION, - NULL, - NULL), - createEnumConfig("appendfsync", - NULL, - MODIFIABLE_CONFIG, - aof_fsync_enum, - server.aof_fsync, - AOF_FSYNC_EVERYSEC, - NULL, - updateAppendFsync), - createEnumConfig("oom-score-adj", - NULL, - MODIFIABLE_CONFIG, - oom_score_adj_enum, - server.oom_score_adj, - OOM_SCORE_ADJ_NO, - NULL, - updateOOMScoreAdj), - createEnumConfig("acl-pubsub-default", - NULL, - MODIFIABLE_CONFIG, - acl_pubsub_default_enum, - server.acl_pubsub_default, - 0, - NULL, - NULL), - createEnumConfig("sanitize-dump-payload", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - sanitize_dump_payload_enum, - server.sanitize_dump_payload, - SANITIZE_DUMP_NO, - NULL, - NULL), - createEnumConfig("enable-protected-configs", - NULL, - IMMUTABLE_CONFIG, - protected_action_enum, - server.enable_protected_configs, - PROTECTED_ACTION_ALLOWED_NO, - NULL, - NULL), - createEnumConfig("enable-debug-command", - NULL, - IMMUTABLE_CONFIG, - protected_action_enum, - server.enable_debug_cmd, - PROTECTED_ACTION_ALLOWED_NO, - NULL, - NULL), - createEnumConfig("enable-module-command", - NULL, - IMMUTABLE_CONFIG, - protected_action_enum, - server.enable_module_cmd, - PROTECTED_ACTION_ALLOWED_NO, - NULL, - NULL), - createEnumConfig("cluster-preferred-endpoint-type", - NULL, - MODIFIABLE_CONFIG, - cluster_preferred_endpoint_type_enum, - server.cluster_preferred_endpoint_type, - CLUSTER_ENDPOINT_TYPE_IP, - NULL, - invalidateClusterSlotsResp), - createEnumConfig("propagation-error-behavior", - NULL, - MODIFIABLE_CONFIG, - propagation_error_behavior_enum, - server.propagation_error_behavior, - PROPAGATION_ERR_BEHAVIOR_IGNORE, - NULL, - NULL), - 
createEnumConfig("shutdown-on-sigint", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - shutdown_on_sig_enum, - server.shutdown_on_sigint, - 0, - isValidShutdownOnSigFlags, - NULL), - createEnumConfig("shutdown-on-sigterm", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - shutdown_on_sig_enum, - server.shutdown_on_sigterm, - 0, - isValidShutdownOnSigFlags, - NULL), + createEnumConfig("maxmemory-policy", NULL, MODIFIABLE_CONFIG, maxmemory_policy_enum, server.maxmemory_policy, MAXMEMORY_NO_EVICTION, NULL, NULL), + createEnumConfig("appendfsync", NULL, MODIFIABLE_CONFIG, aof_fsync_enum, server.aof_fsync, AOF_FSYNC_EVERYSEC, NULL, updateAppendFsync), + createEnumConfig("oom-score-adj", NULL, MODIFIABLE_CONFIG, oom_score_adj_enum, server.oom_score_adj, OOM_SCORE_ADJ_NO, NULL, updateOOMScoreAdj), + createEnumConfig("acl-pubsub-default", NULL, MODIFIABLE_CONFIG, acl_pubsub_default_enum, server.acl_pubsub_default, 0, NULL, NULL), + createEnumConfig("sanitize-dump-payload", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, sanitize_dump_payload_enum, server.sanitize_dump_payload, SANITIZE_DUMP_NO, NULL, NULL), + createEnumConfig("enable-protected-configs", NULL, IMMUTABLE_CONFIG, protected_action_enum, server.enable_protected_configs, PROTECTED_ACTION_ALLOWED_NO, NULL, NULL), + createEnumConfig("enable-debug-command", NULL, IMMUTABLE_CONFIG, protected_action_enum, server.enable_debug_cmd, PROTECTED_ACTION_ALLOWED_NO, NULL, NULL), + createEnumConfig("enable-module-command", NULL, IMMUTABLE_CONFIG, protected_action_enum, server.enable_module_cmd, PROTECTED_ACTION_ALLOWED_NO, NULL, NULL), + createEnumConfig("cluster-preferred-endpoint-type", NULL, MODIFIABLE_CONFIG, cluster_preferred_endpoint_type_enum, server.cluster_preferred_endpoint_type, CLUSTER_ENDPOINT_TYPE_IP, NULL, invalidateClusterSlotsResp), + createEnumConfig("propagation-error-behavior", NULL, MODIFIABLE_CONFIG, propagation_error_behavior_enum, server.propagation_error_behavior, PROPAGATION_ERR_BEHAVIOR_IGNORE, NULL, 
NULL), + createEnumConfig("shutdown-on-sigint", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, shutdown_on_sig_enum, server.shutdown_on_sigint, 0, isValidShutdownOnSigFlags, NULL), + createEnumConfig("shutdown-on-sigterm", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, shutdown_on_sig_enum, server.shutdown_on_sigterm, 0, isValidShutdownOnSigFlags, NULL), /* Integer configs */ createIntConfig("databases", NULL, IMMUTABLE_CONFIG, 1, INT_MAX, server.dbnum, 16, INTEGER_CONFIG, NULL, NULL), - createIntConfig("port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.port, - 6379, - INTEGER_CONFIG, - NULL, - updatePort), /* TCP port. */ - createIntConfig("io-threads", - NULL, - DEBUG_CONFIG | IMMUTABLE_CONFIG, - 1, - 128, - server.io_threads_num, - 1, - INTEGER_CONFIG, - NULL, - NULL), /* Single threaded by default */ - createIntConfig("auto-aof-rewrite-percentage", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.aof_rewrite_perc, - 100, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("cluster-replica-validity-factor", - "cluster-slave-validity-factor", - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.cluster_slave_validity_factor, - 10, - INTEGER_CONFIG, - NULL, - NULL), /* Slave max data age factor. 
*/ - createIntConfig("list-max-listpack-size", - "list-max-ziplist-size", - MODIFIABLE_CONFIG, - INT_MIN, - INT_MAX, - server.list_max_listpack_size, - -2, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("tcp-keepalive", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.tcpkeepalive, - 300, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("cluster-migration-barrier", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.cluster_migration_barrier, - 1, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("active-defrag-cycle-min", - NULL, - MODIFIABLE_CONFIG, - 1, - 99, - server.active_defrag_cycle_min, - 1, - INTEGER_CONFIG, - NULL, - updateDefragConfiguration), /* Default: 1% CPU min (at lower threshold) */ - createIntConfig("active-defrag-cycle-max", - NULL, - MODIFIABLE_CONFIG, - 1, - 99, - server.active_defrag_cycle_max, - 25, - INTEGER_CONFIG, - NULL, - updateDefragConfiguration), /* Default: 25% CPU max (at upper threshold) */ - createIntConfig("active-defrag-threshold-lower", - NULL, - MODIFIABLE_CONFIG, - 0, - 1000, - server.active_defrag_threshold_lower, - 10, - INTEGER_CONFIG, - NULL, - NULL), /* Default: don't defrag when fragmentation is below 10% */ - createIntConfig("active-defrag-threshold-upper", - NULL, - MODIFIABLE_CONFIG, - 0, - 1000, - server.active_defrag_threshold_upper, - 100, - INTEGER_CONFIG, - NULL, - updateDefragConfiguration), /* Default: maximum defrag force at 100% fragmentation */ - createIntConfig("lfu-log-factor", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.lfu_log_factor, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("lfu-decay-time", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.lfu_decay_time, - 1, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("replica-priority", - "slave-priority", - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.slave_priority, - 100, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("repl-diskless-sync-delay", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - 
server.repl_diskless_sync_delay, - 5, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("maxmemory-samples", - NULL, - MODIFIABLE_CONFIG, - 1, - 64, - server.maxmemory_samples, - 5, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("maxmemory-eviction-tenacity", - NULL, - MODIFIABLE_CONFIG, - 0, - 100, - server.maxmemory_eviction_tenacity, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("timeout", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.maxidletime, - 0, - INTEGER_CONFIG, - NULL, - NULL), /* Default client timeout: infinite */ - createIntConfig("replica-announce-port", - "slave-announce-port", - MODIFIABLE_CONFIG, - 0, - 65535, - server.slave_announce_port, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("tcp-backlog", - NULL, - IMMUTABLE_CONFIG, - 0, - INT_MAX, - server.tcp_backlog, - 511, - INTEGER_CONFIG, - NULL, - NULL), /* TCP listen backlog. */ + createIntConfig("port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.port, 6379, INTEGER_CONFIG, NULL, updatePort), /* TCP port. */ + createIntConfig("io-threads", NULL, DEBUG_CONFIG | IMMUTABLE_CONFIG, 1, 128, server.io_threads_num, 1, INTEGER_CONFIG, NULL, NULL), /* Single threaded by default */ + createIntConfig("auto-aof-rewrite-percentage", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.aof_rewrite_perc, 100, INTEGER_CONFIG, NULL, NULL), + createIntConfig("cluster-replica-validity-factor", "cluster-slave-validity-factor", MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_slave_validity_factor, 10, INTEGER_CONFIG, NULL, NULL), /* Slave max data age factor. 
*/ + createIntConfig("list-max-listpack-size", "list-max-ziplist-size", MODIFIABLE_CONFIG, INT_MIN, INT_MAX, server.list_max_listpack_size, -2, INTEGER_CONFIG, NULL, NULL), + createIntConfig("tcp-keepalive", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tcpkeepalive, 300, INTEGER_CONFIG, NULL, NULL), + createIntConfig("cluster-migration-barrier", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_migration_barrier, 1, INTEGER_CONFIG, NULL, NULL), + createIntConfig("active-defrag-cycle-min", NULL, MODIFIABLE_CONFIG, 1, 99, server.active_defrag_cycle_min, 1, INTEGER_CONFIG, NULL, updateDefragConfiguration), /* Default: 1% CPU min (at lower threshold) */ + createIntConfig("active-defrag-cycle-max", NULL, MODIFIABLE_CONFIG, 1, 99, server.active_defrag_cycle_max, 25, INTEGER_CONFIG, NULL, updateDefragConfiguration), /* Default: 25% CPU max (at upper threshold) */ + createIntConfig("active-defrag-threshold-lower", NULL, MODIFIABLE_CONFIG, 0, 1000, server.active_defrag_threshold_lower, 10, INTEGER_CONFIG, NULL, NULL), /* Default: don't defrag when fragmentation is below 10% */ + createIntConfig("active-defrag-threshold-upper", NULL, MODIFIABLE_CONFIG, 0, 1000, server.active_defrag_threshold_upper, 100, INTEGER_CONFIG, NULL, updateDefragConfiguration), /* Default: maximum defrag force at 100% fragmentation */ + createIntConfig("lfu-log-factor", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_log_factor, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("lfu-decay-time", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_decay_time, 1, INTEGER_CONFIG, NULL, NULL), + createIntConfig("replica-priority", "slave-priority", MODIFIABLE_CONFIG, 0, INT_MAX, server.slave_priority, 100, INTEGER_CONFIG, NULL, NULL), + createIntConfig("repl-diskless-sync-delay", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_delay, 5, INTEGER_CONFIG, NULL, NULL), + createIntConfig("maxmemory-samples", NULL, MODIFIABLE_CONFIG, 1, 64, server.maxmemory_samples, 5, INTEGER_CONFIG, NULL, NULL), 
+ createIntConfig("maxmemory-eviction-tenacity", NULL, MODIFIABLE_CONFIG, 0, 100, server.maxmemory_eviction_tenacity, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.maxidletime, 0, INTEGER_CONFIG, NULL, NULL), /* Default client timeout: infinite */ + createIntConfig("replica-announce-port", "slave-announce-port", MODIFIABLE_CONFIG, 0, 65535, server.slave_announce_port, 0, INTEGER_CONFIG, NULL, NULL), + createIntConfig("tcp-backlog", NULL, IMMUTABLE_CONFIG, 0, INT_MAX, server.tcp_backlog, 511, INTEGER_CONFIG, NULL, NULL), /* TCP listen backlog. */ createIntConfig("cluster-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.cluster_port, 0, INTEGER_CONFIG, NULL, NULL), - createIntConfig("cluster-announce-bus-port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.cluster_announce_bus_port, - 0, - INTEGER_CONFIG, - NULL, - updateClusterAnnouncedPort), /* Default: Use +10000 offset. */ - createIntConfig("cluster-announce-port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.cluster_announce_port, - 0, - INTEGER_CONFIG, - NULL, - updateClusterAnnouncedPort), /* Use server.port */ - createIntConfig("cluster-announce-tls-port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.cluster_announce_tls_port, - 0, - INTEGER_CONFIG, - NULL, - updateClusterAnnouncedPort), /* Use server.tls_port */ - createIntConfig("repl-timeout", - NULL, - MODIFIABLE_CONFIG, - 1, - INT_MAX, - server.repl_timeout, - 60, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("repl-ping-replica-period", - "repl-ping-slave-period", - MODIFIABLE_CONFIG, - 1, - INT_MAX, - server.repl_ping_slave_period, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("list-compress-depth", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.list_compress_depth, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("rdb-key-save-delay", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - INT_MIN, - INT_MAX, - 
server.rdb_key_save_delay, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("key-load-delay", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - INT_MIN, - INT_MAX, - server.key_load_delay, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("active-expire-effort", - NULL, - MODIFIABLE_CONFIG, - 1, - 10, - server.active_expire_effort, - 1, - INTEGER_CONFIG, - NULL, - NULL), /* From 1 to 10. */ - createIntConfig("hz", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.config_hz, - CONFIG_DEFAULT_HZ, - INTEGER_CONFIG, - NULL, - updateHZ), - createIntConfig("min-replicas-to-write", - "min-slaves-to-write", - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.repl_min_slaves_to_write, - 0, - INTEGER_CONFIG, - NULL, - updateGoodSlaves), - createIntConfig("min-replicas-max-lag", - "min-slaves-max-lag", - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.repl_min_slaves_max_lag, - 10, - INTEGER_CONFIG, - NULL, - updateGoodSlaves), - createIntConfig("watchdog-period", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - 0, - INT_MAX, - server.watchdog_period, - 0, - INTEGER_CONFIG, - NULL, - updateWatchdogPeriod), - createIntConfig("shutdown-timeout", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.shutdown_timeout, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("repl-diskless-sync-max-replicas", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.repl_diskless_sync_max_replicas, - 0, - INTEGER_CONFIG, - NULL, - NULL), + createIntConfig("cluster-announce-bus-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_bus_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Default: Use +10000 offset. 
*/ + createIntConfig("cluster-announce-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.port */ + createIntConfig("cluster-announce-tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_tls_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.tls_port */ + createIntConfig("repl-timeout", NULL, MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_timeout, 60, INTEGER_CONFIG, NULL, NULL), + createIntConfig("repl-ping-replica-period", "repl-ping-slave-period", MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_ping_slave_period, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("list-compress-depth", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 0, INT_MAX, server.list_compress_depth, 0, INTEGER_CONFIG, NULL, NULL), + createIntConfig("rdb-key-save-delay", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, INT_MIN, INT_MAX, server.rdb_key_save_delay, 0, INTEGER_CONFIG, NULL, NULL), + createIntConfig("key-load-delay", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, INT_MIN, INT_MAX, server.key_load_delay, 0, INTEGER_CONFIG, NULL, NULL), + createIntConfig("active-expire-effort", NULL, MODIFIABLE_CONFIG, 1, 10, server.active_expire_effort, 1, INTEGER_CONFIG, NULL, NULL), /* From 1 to 10. 
*/ + createIntConfig("hz", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.config_hz, CONFIG_DEFAULT_HZ, INTEGER_CONFIG, NULL, updateHZ), + createIntConfig("min-replicas-to-write", "min-slaves-to-write", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_slaves_to_write, 0, INTEGER_CONFIG, NULL, updateGoodSlaves), + createIntConfig("min-replicas-max-lag", "min-slaves-max-lag", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_slaves_max_lag, 10, INTEGER_CONFIG, NULL, updateGoodSlaves), + createIntConfig("watchdog-period", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 0, INT_MAX, server.watchdog_period, 0, INTEGER_CONFIG, NULL, updateWatchdogPeriod), + createIntConfig("shutdown-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.shutdown_timeout, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("repl-diskless-sync-max-replicas", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_max_replicas, 0, INTEGER_CONFIG, NULL, NULL), /* Unsigned int configs */ - createUIntConfig("maxclients", - NULL, - MODIFIABLE_CONFIG, - 1, - UINT_MAX, - server.maxclients, - 10000, - INTEGER_CONFIG, - NULL, - updateMaxclients), - createUIntConfig("unixsocketperm", - NULL, - IMMUTABLE_CONFIG, - 0, - 0777, - server.unixsocketperm, - 0, - OCTAL_CONFIG, - NULL, - NULL), - createUIntConfig("socket-mark-id", - NULL, - IMMUTABLE_CONFIG, - 0, - UINT_MAX, - server.socket_mark_id, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createUIntConfig("max-new-connections-per-cycle", - NULL, - MODIFIABLE_CONFIG, - 1, - 1000, - server.max_new_conns_per_cycle, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createUIntConfig("max-new-tls-connections-per-cycle", - NULL, - MODIFIABLE_CONFIG, - 1, - 1000, - server.max_new_tls_conns_per_cycle, - 1, - INTEGER_CONFIG, - NULL, - NULL), + createUIntConfig("maxclients", NULL, MODIFIABLE_CONFIG, 1, UINT_MAX, server.maxclients, 10000, INTEGER_CONFIG, NULL, updateMaxclients), + createUIntConfig("unixsocketperm", NULL, IMMUTABLE_CONFIG, 0, 0777, server.unixsocketperm, 0, 
OCTAL_CONFIG, NULL, NULL), + createUIntConfig("socket-mark-id", NULL, IMMUTABLE_CONFIG, 0, UINT_MAX, server.socket_mark_id, 0, INTEGER_CONFIG, NULL, NULL), + createUIntConfig("max-new-connections-per-cycle", NULL, MODIFIABLE_CONFIG, 1, 1000, server.max_new_conns_per_cycle, 10, INTEGER_CONFIG, NULL, NULL), + createUIntConfig("max-new-tls-connections-per-cycle", NULL, MODIFIABLE_CONFIG, 1, 1000, server.max_new_tls_conns_per_cycle, 1, INTEGER_CONFIG, NULL, NULL), #ifdef LOG_REQ_RES - createUIntConfig("client-default-resp", - NULL, - IMMUTABLE_CONFIG | HIDDEN_CONFIG, - 2, - 3, - server.client_default_resp, - 2, - INTEGER_CONFIG, - NULL, - NULL), + createUIntConfig("client-default-resp", NULL, IMMUTABLE_CONFIG | HIDDEN_CONFIG, 2, 3, server.client_default_resp, 2, INTEGER_CONFIG, NULL, NULL), #endif /* Unsigned Long configs */ - createULongConfig("active-defrag-max-scan-fields", - NULL, - MODIFIABLE_CONFIG, - 1, - LONG_MAX, - server.active_defrag_max_scan_fields, - 1000, - INTEGER_CONFIG, - NULL, - NULL), /* Default: keys with more than 1000 fields will be processed separately */ - createULongConfig("slowlog-max-len", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.slowlog_max_len, - 128, - INTEGER_CONFIG, - NULL, - NULL), - createULongConfig("acllog-max-len", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.acllog_max_len, - 128, - INTEGER_CONFIG, - NULL, - NULL), + createULongConfig("active-defrag-max-scan-fields", NULL, MODIFIABLE_CONFIG, 1, LONG_MAX, server.active_defrag_max_scan_fields, 1000, INTEGER_CONFIG, NULL, NULL), /* Default: keys with more than 1000 fields will be processed separately */ + createULongConfig("slowlog-max-len", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.slowlog_max_len, 128, INTEGER_CONFIG, NULL, NULL), + createULongConfig("acllog-max-len", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.acllog_max_len, 128, INTEGER_CONFIG, NULL, NULL), /* Long Long configs */ - createLongLongConfig("busy-reply-threshold", - "lua-time-limit", - 
MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.busy_reply_threshold, - 5000, - INTEGER_CONFIG, - NULL, - NULL), /* milliseconds */ - createLongLongConfig("cluster-node-timeout", - NULL, - MODIFIABLE_CONFIG, - 0, - LLONG_MAX, - server.cluster_node_timeout, - 15000, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("cluster-ping-interval", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - 0, - LLONG_MAX, - server.cluster_ping_interval, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("slowlog-log-slower-than", - NULL, - MODIFIABLE_CONFIG, - -1, - LLONG_MAX, - server.slowlog_log_slower_than, - 10000, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("latency-monitor-threshold", - NULL, - MODIFIABLE_CONFIG, - 0, - LLONG_MAX, - server.latency_monitor_threshold, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("proto-max-bulk-len", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - 1024 * 1024, - LONG_MAX, - server.proto_max_bulk_len, - 512ll * 1024 * 1024, - MEMORY_CONFIG, - NULL, - NULL), /* Bulk request max size */ - createLongLongConfig("stream-node-max-entries", - NULL, - MODIFIABLE_CONFIG, - 0, - LLONG_MAX, - server.stream_node_max_entries, - 100, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("repl-backlog-size", - NULL, - MODIFIABLE_CONFIG, - 1, - LLONG_MAX, - server.repl_backlog_size, - 1024 * 1024, - MEMORY_CONFIG, - NULL, - updateReplBacklogSize), /* Default: 1mb */ + createLongLongConfig("busy-reply-threshold", "lua-time-limit", MODIFIABLE_CONFIG, 0, LONG_MAX, server.busy_reply_threshold, 5000, INTEGER_CONFIG, NULL, NULL), /* milliseconds */ + createLongLongConfig("cluster-node-timeout", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.cluster_node_timeout, 15000, INTEGER_CONFIG, NULL, NULL), + createLongLongConfig("cluster-ping-interval", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 0, LLONG_MAX, server.cluster_ping_interval, 0, INTEGER_CONFIG, NULL, NULL), + createLongLongConfig("slowlog-log-slower-than", NULL, 
MODIFIABLE_CONFIG, -1, LLONG_MAX, server.slowlog_log_slower_than, 10000, INTEGER_CONFIG, NULL, NULL), + createLongLongConfig("latency-monitor-threshold", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.latency_monitor_threshold, 0, INTEGER_CONFIG, NULL, NULL), + createLongLongConfig("proto-max-bulk-len", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 1024 * 1024, LONG_MAX, server.proto_max_bulk_len, 512ll * 1024 * 1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */ + createLongLongConfig("stream-node-max-entries", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.stream_node_max_entries, 100, INTEGER_CONFIG, NULL, NULL), + createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.repl_backlog_size, 1024 * 1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 1mb */ /* Unsigned Long Long configs */ - createULongLongConfig("maxmemory", - NULL, - MODIFIABLE_CONFIG, - 0, - ULLONG_MAX, - server.maxmemory, - 0, - MEMORY_CONFIG, - NULL, - updateMaxmemory), - createULongLongConfig("cluster-link-sendbuf-limit", - NULL, - MODIFIABLE_CONFIG, - 0, - ULLONG_MAX, - server.cluster_link_msg_queue_limit_bytes, - 0, - MEMORY_CONFIG, - NULL, - NULL), + createULongLongConfig("maxmemory", NULL, MODIFIABLE_CONFIG, 0, ULLONG_MAX, server.maxmemory, 0, MEMORY_CONFIG, NULL, updateMaxmemory), + createULongLongConfig("cluster-link-sendbuf-limit", NULL, MODIFIABLE_CONFIG, 0, ULLONG_MAX, server.cluster_link_msg_queue_limit_bytes, 0, MEMORY_CONFIG, NULL, NULL), /* Size_t configs */ - createSizeTConfig("hash-max-listpack-entries", - "hash-max-ziplist-entries", - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.hash_max_listpack_entries, - 512, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("set-max-intset-entries", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.set_max_intset_entries, - 512, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("set-max-listpack-entries", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - 
server.set_max_listpack_entries, - 128, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("set-max-listpack-value", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.set_max_listpack_value, - 64, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("zset-max-listpack-entries", - "zset-max-ziplist-entries", - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.zset_max_listpack_entries, - 128, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("active-defrag-ignore-bytes", - NULL, - MODIFIABLE_CONFIG, - 1, - LLONG_MAX, - server.active_defrag_ignore_bytes, - 100 << 20, - MEMORY_CONFIG, - NULL, - NULL), /* Default: don't defrag if frag overhead is below 100mb */ - createSizeTConfig("hash-max-listpack-value", - "hash-max-ziplist-value", - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.hash_max_listpack_value, - 64, - MEMORY_CONFIG, - NULL, - NULL), - createSizeTConfig("stream-node-max-bytes", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.stream_node_max_bytes, - 4096, - MEMORY_CONFIG, - NULL, - NULL), - createSizeTConfig("zset-max-listpack-value", - "zset-max-ziplist-value", - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.zset_max_listpack_value, - 64, - MEMORY_CONFIG, - NULL, - NULL), - createSizeTConfig("hll-sparse-max-bytes", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.hll_sparse_max_bytes, - 3000, - MEMORY_CONFIG, - NULL, - NULL), - createSizeTConfig("tracking-table-max-keys", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.tracking_table_max_keys, - 1000000, - INTEGER_CONFIG, - NULL, - NULL), /* Default: 1 million keys max. */ - createSizeTConfig("client-query-buffer-limit", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - 1024 * 1024, - LONG_MAX, - server.client_max_querybuf_len, - 1024 * 1024 * 1024, - MEMORY_CONFIG, - NULL, - NULL), /* Default: 1GB max query buffer. 
*/ - createSSizeTConfig("maxmemory-clients", - NULL, - MODIFIABLE_CONFIG, - -100, - SSIZE_MAX, - server.maxmemory_clients, - 0, - MEMORY_CONFIG | PERCENT_CONFIG, - NULL, - applyClientMaxMemoryUsage), + createSizeTConfig("hash-max-listpack-entries", "hash-max-ziplist-entries", MODIFIABLE_CONFIG, 0, LONG_MAX, server.hash_max_listpack_entries, 512, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("set-max-intset-entries", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.set_max_intset_entries, 512, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("set-max-listpack-entries", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.set_max_listpack_entries, 128, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("set-max-listpack-value", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.set_max_listpack_value, 64, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("zset-max-listpack-entries", "zset-max-ziplist-entries", MODIFIABLE_CONFIG, 0, LONG_MAX, server.zset_max_listpack_entries, 128, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("active-defrag-ignore-bytes", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.active_defrag_ignore_bytes, 100 << 20, MEMORY_CONFIG, NULL, NULL), /* Default: don't defrag if frag overhead is below 100mb */ + createSizeTConfig("hash-max-listpack-value", "hash-max-ziplist-value", MODIFIABLE_CONFIG, 0, LONG_MAX, server.hash_max_listpack_value, 64, MEMORY_CONFIG, NULL, NULL), + createSizeTConfig("stream-node-max-bytes", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.stream_node_max_bytes, 4096, MEMORY_CONFIG, NULL, NULL), + createSizeTConfig("zset-max-listpack-value", "zset-max-ziplist-value", MODIFIABLE_CONFIG, 0, LONG_MAX, server.zset_max_listpack_value, 64, MEMORY_CONFIG, NULL, NULL), + createSizeTConfig("hll-sparse-max-bytes", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.hll_sparse_max_bytes, 3000, MEMORY_CONFIG, NULL, NULL), + createSizeTConfig("tracking-table-max-keys", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.tracking_table_max_keys, 1000000, INTEGER_CONFIG, 
NULL, NULL), /* Default: 1 million keys max. */ + createSizeTConfig("client-query-buffer-limit", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 1024 * 1024, LONG_MAX, server.client_max_querybuf_len, 1024 * 1024 * 1024, MEMORY_CONFIG, NULL, NULL), /* Default: 1GB max query buffer. */ + createSSizeTConfig("maxmemory-clients", NULL, MODIFIABLE_CONFIG, -100, SSIZE_MAX, server.maxmemory_clients, 0, MEMORY_CONFIG | PERCENT_CONFIG, NULL, applyClientMaxMemoryUsage), /* Other configs */ - createTimeTConfig("repl-backlog-ttl", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.repl_backlog_time_limit, - 60 * 60, - INTEGER_CONFIG, - NULL, - NULL), /* Default: 1 hour */ - createOffTConfig("auto-aof-rewrite-min-size", - NULL, - MODIFIABLE_CONFIG, - 0, - LLONG_MAX, - server.aof_rewrite_min_size, - 64 * 1024 * 1024, - MEMORY_CONFIG, - NULL, - NULL), - createOffTConfig("loading-process-events-interval-bytes", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - 1024, - INT_MAX, - server.loading_process_events_interval_bytes, - 1024 * 1024 * 2, - INTEGER_CONFIG, - NULL, - NULL), - - createIntConfig("tls-port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.tls_port, - 0, - INTEGER_CONFIG, - NULL, - applyTLSPort), /* TCP port. 
*/ - createIntConfig("tls-session-cache-size", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.tls_ctx_config.session_cache_size, - 20 * 1024, - INTEGER_CONFIG, - NULL, - applyTlsCfg), - createIntConfig("tls-session-cache-timeout", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.tls_ctx_config.session_cache_timeout, - 300, - INTEGER_CONFIG, - NULL, - applyTlsCfg), + createTimeTConfig("repl-backlog-ttl", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.repl_backlog_time_limit, 60 * 60, INTEGER_CONFIG, NULL, NULL), /* Default: 1 hour */ + createOffTConfig("auto-aof-rewrite-min-size", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.aof_rewrite_min_size, 64 * 1024 * 1024, MEMORY_CONFIG, NULL, NULL), + createOffTConfig("loading-process-events-interval-bytes", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 1024, INT_MAX, server.loading_process_events_interval_bytes, 1024 * 1024 * 2, INTEGER_CONFIG, NULL, NULL), + + createIntConfig("tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, applyTLSPort), /* TCP port. 
*/ + createIntConfig("tls-session-cache-size", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_size, 20 * 1024, INTEGER_CONFIG, NULL, applyTlsCfg), + createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, applyTlsCfg), createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, applyTlsCfg), createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, applyTlsCfg), - createEnumConfig("tls-auth-clients", - NULL, - MODIFIABLE_CONFIG, - tls_auth_clients_enum, - server.tls_auth_clients, - TLS_CLIENT_AUTH_YES, - NULL, - NULL), - createBoolConfig("tls-prefer-server-ciphers", - NULL, - MODIFIABLE_CONFIG, - server.tls_ctx_config.prefer_server_ciphers, - 0, - NULL, - applyTlsCfg), - createBoolConfig("tls-session-caching", - NULL, - MODIFIABLE_CONFIG, - server.tls_ctx_config.session_caching, - 1, - NULL, - applyTlsCfg), - createStringConfig("tls-cert-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.cert_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-key-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.key_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-key-file-pass", - NULL, - MODIFIABLE_CONFIG | SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.key_file_pass, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-client-cert-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.client_cert_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-client-key-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.client_key_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-client-key-file-pass", - NULL, - MODIFIABLE_CONFIG | 
SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.client_key_file_pass, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-dh-params-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.dh_params_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-ca-cert-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.ca_cert_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-ca-cert-dir", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.ca_cert_dir, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-protocols", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.protocols, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-ciphers", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.ciphers, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-ciphersuites", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.ciphersuites, - NULL, - NULL, - applyTlsCfg), + createEnumConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, tls_auth_clients_enum, server.tls_auth_clients, TLS_CLIENT_AUTH_YES, NULL, NULL), + createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, applyTlsCfg), + createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, applyTlsCfg), + createStringConfig("tls-cert-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.cert_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-key-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.key_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-key-file-pass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, 
server.tls_ctx_config.key_file_pass, NULL, NULL, applyTlsCfg), + createStringConfig("tls-client-cert-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.client_cert_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-client-key-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.client_key_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-client-key-file-pass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.client_key_file_pass, NULL, NULL, applyTlsCfg), + createStringConfig("tls-dh-params-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.dh_params_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-ca-cert-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ca_cert_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-ca-cert-dir", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ca_cert_dir, NULL, NULL, applyTlsCfg), + createStringConfig("tls-protocols", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.protocols, NULL, NULL, applyTlsCfg), + createStringConfig("tls-ciphers", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ciphers, NULL, NULL, applyTlsCfg), + createStringConfig("tls-ciphersuites", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ciphersuites, NULL, NULL, applyTlsCfg), /* Special configs */ - createSpecialConfig("dir", - NULL, - MODIFIABLE_CONFIG | PROTECTED_CONFIG | DENY_LOADING_CONFIG, - setConfigDirOption, - getConfigDirOption, - rewriteConfigDirOption, - NULL), - createSpecialConfig("save", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigSaveOption, - getConfigSaveOption, - rewriteConfigSaveOption, - NULL), - createSpecialConfig("client-output-buffer-limit", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - 
setConfigClientOutputBufferLimitOption, - getConfigClientOutputBufferLimitOption, - rewriteConfigClientOutputBufferLimitOption, - NULL), - createSpecialConfig("oom-score-adj-values", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigOOMScoreAdjValuesOption, - getConfigOOMScoreAdjValuesOption, - rewriteConfigOOMScoreAdjValuesOption, - updateOOMScoreAdj), - createSpecialConfig("notify-keyspace-events", - NULL, - MODIFIABLE_CONFIG, - setConfigNotifyKeyspaceEventsOption, - getConfigNotifyKeyspaceEventsOption, - rewriteConfigNotifyKeyspaceEventsOption, - NULL), - createSpecialConfig("bind", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigBindOption, - getConfigBindOption, - rewriteConfigBindOption, - applyBind), - createSpecialConfig("replicaof", - "slaveof", - IMMUTABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigReplicaOfOption, - getConfigReplicaOfOption, - rewriteConfigReplicaOfOption, - NULL), - createSpecialConfig("latency-tracking-info-percentiles", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigLatencyTrackingInfoPercentilesOutputOption, - getConfigLatencyTrackingInfoPercentilesOutputOption, - rewriteConfigLatencyTrackingInfoPercentilesOutputOption, - NULL), + createSpecialConfig("dir", NULL, MODIFIABLE_CONFIG | PROTECTED_CONFIG | DENY_LOADING_CONFIG, setConfigDirOption, getConfigDirOption, rewriteConfigDirOption, NULL), + createSpecialConfig("save", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigSaveOption, getConfigSaveOption, rewriteConfigSaveOption, NULL), + createSpecialConfig("client-output-buffer-limit", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigClientOutputBufferLimitOption, getConfigClientOutputBufferLimitOption, rewriteConfigClientOutputBufferLimitOption, NULL), + createSpecialConfig("oom-score-adj-values", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigOOMScoreAdjValuesOption, getConfigOOMScoreAdjValuesOption, rewriteConfigOOMScoreAdjValuesOption, updateOOMScoreAdj), + 
createSpecialConfig("notify-keyspace-events", NULL, MODIFIABLE_CONFIG, setConfigNotifyKeyspaceEventsOption, getConfigNotifyKeyspaceEventsOption, rewriteConfigNotifyKeyspaceEventsOption, NULL), + createSpecialConfig("bind", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigBindOption, getConfigBindOption, rewriteConfigBindOption, applyBind), + createSpecialConfig("replicaof", "slaveof", IMMUTABLE_CONFIG | MULTI_ARG_CONFIG, setConfigReplicaOfOption, getConfigReplicaOfOption, rewriteConfigReplicaOfOption, NULL), + createSpecialConfig("latency-tracking-info-percentiles", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigLatencyTrackingInfoPercentilesOutputOption, getConfigLatencyTrackingInfoPercentilesOutputOption, rewriteConfigLatencyTrackingInfoPercentilesOutputOption, NULL), /* NULL Terminator, this is dropped when we convert to the runtime array. */ - {NULL}}; + {NULL} + /* clang-format on */ +}; /* Create a new config by copying the passed in config. Returns 1 on success * or 0 when their was already a config with the same name.. 
*/ From 1c55f3ca5a19e57a5f918a900c7579009f6b60a2 Mon Sep 17 00:00:00 2001 From: Jonathan Wright Date: Fri, 24 May 2024 18:08:51 -0500 Subject: [PATCH 04/42] Replace centos 7 with alternative versions (#543) replace centos 7 with almalinux 8, add almalinux 9, centos stream 9, fedora stable, rawhide Fixes #527 --------- Signed-off-by: Jonathan Wright Signed-off-by: Madelyn Olson Co-authored-by: Madelyn Olson --- .github/workflows/ci.yml | 12 ++-- .github/workflows/daily.yml | 138 ++++++++++++++++++++++++++---------- 2 files changed, 106 insertions(+), 44 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b64798a6f9..40376f1628 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -78,16 +78,14 @@ jobs: - name: make run: make SERVER_CFLAGS='-Werror' MALLOC=libc - build-centos7-jemalloc: + build-almalinux8-jemalloc: runs-on: ubuntu-latest - container: centos:7 + container: almalinux:8 steps: - # on centos7, actions/checkout@v4 does not work, so we use v3 - # ref. 
https://github.com/actions/checkout/issues/1487 - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make run: | - yum -y install gcc make - make SERVER_CFLAGS='-Werror' + dnf -y install epel-release gcc make procps-ng which + make -j SERVER_CFLAGS='-Werror' diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 356178f097..658e58b235 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -11,7 +11,7 @@ on: inputs: skipjobs: description: 'jobs to skip (delete the ones you wanna keep, do not leave empty)' - default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,centos,malloc,specific,fortify,reply-schema' + default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,rpm-distros,malloc,specific,fortify,reply-schema' skiptests: description: 'tests to skip (delete the ones you wanna keep, do not leave empty)' default: 'valkey,modules,sentinel,cluster,unittest' @@ -672,13 +672,34 @@ jobs: if: true && !contains(github.event.inputs.skiptests, 'unittest') run: ./src/valkey-unit-tests --accurate - test-centos7-jemalloc: - runs-on: ubuntu-latest + test-rpm-distros-jemalloc: if: | - (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && - !contains(github.event.inputs.skipjobs, 'centos') - container: centos:7 + (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && + !contains(github.event.inputs.skipjobs, 'rpm-distros') + strategy: + fail-fast: false + matrix: + include: + - name: test-almalinux8-jemalloc + container: almalinux:8 + install_epel: true + - name: test-almalinux9-jemalloc + container: almalinux:9 + install_epel: true + - name: test-centosstream9-jemalloc + container: quay.io/centos/centos:stream9 + install_epel:
true + - name: test-fedoralatest-jemalloc + container: fedora:latest + - name: test-fedorarawhide-jemalloc + container: fedora:rawhide + + name: ${{ matrix.name }} + runs-on: ubuntu-latest + + container: ${{ matrix.container }} timeout-minutes: 14400 + steps: - name: prep if: github.event_name == 'workflow_dispatch' @@ -689,18 +710,19 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - # On centos7 actions/checkout@v4 does not work, so we use v3 - # ref. https://github.com/actions/checkout/issues/1487 - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release - name: make run: | - yum -y install gcc make - make SERVER_CFLAGS='-Werror' + dnf -y install gcc make procps-ng which /usr/bin/kill + make -j SERVER_CFLAGS='-Werror' - name: testprep - run: yum -y install which tcl tclx + run: dnf -y install tcl tcltls - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} @@ -714,13 +736,34 @@ jobs: if: true && !contains(github.event.inputs.skiptests, 'cluster') run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - test-centos7-tls-module: - runs-on: ubuntu-latest + test-rpm-distros-tls-module: if: | - (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && - !contains(github.event.inputs.skipjobs, 'tls') - container: centos:7 + (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && + 
!contains(github.event.inputs.skipjobs, 'tls') + strategy: + fail-fast: false + matrix: + include: + - name: test-almalinux8-tls-module + container: almalinux:8 + install_epel: true + - name: test-almalinux9-tls-module + container: almalinux:9 + install_epel: true + - name: test-centosstream9-tls-module + container: quay.io/centos/centos:stream9 + install_epel: true + - name: test-fedoralatest-tls-module + container: fedora:latest + - name: test-fedorarawhide-tls-module + container: fedora:rawhide + + name: ${{ matrix.name }} + runs-on: ubuntu-latest + + container: ${{ matrix.container }} timeout-minutes: 14400 + steps: - name: prep if: github.event_name == 'workflow_dispatch' @@ -731,20 +774,20 @@ jobs: echo "skipjobs: ${{github.event.inputs.skipjobs}}" echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - # On centos7 actions/checkout@v4 does not work, so we use v3 - # ref. https://github.com/actions/checkout/issues/1487 - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release - name: make run: | - yum -y install centos-release-scl epel-release - yum -y install devtoolset-7 openssl-devel openssl - scl enable devtoolset-7 "make BUILD_TLS=module SERVER_CFLAGS='-Werror'" + dnf -y install make gcc openssl-devel openssl procps-ng which /usr/bin/kill + make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' - name: testprep run: | - yum -y install tcl tcltls tclx + dnf -y install tcl tcltls ./utils/gen-test-certs.sh - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') @@ -763,13 +806,34 @@ jobs: run: | ./runtest-cluster --tls-module ${{github.event.inputs.cluster_test_args}} - test-centos7-tls-module-no-tls: - runs-on:
ubuntu-latest + test-rpm-distros-tls-module-no-tls: if: | - (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && - !contains(github.event.inputs.skipjobs, 'tls') - container: centos:7 + (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && + !contains(github.event.inputs.skipjobs, 'tls') + strategy: + fail-fast: false + matrix: + include: + - name: test-almalinux8-tls-module-no-tls + container: almalinux:8 + install_epel: true + - name: test-almalinux9-tls-module-no-tls + container: almalinux:9 + install_epel: true + - name: test-centosstream9-tls-module-no-tls + container: quay.io/centos/centos:stream9 + install_epel: true + - name: test-fedoralatest-tls-module-no-tls + container: fedora:latest + - name: test-fedorarawhide-tls-module-no-tls + container: fedora:rawhide + + name: ${{ matrix.name }} + runs-on: ubuntu-latest + + container: ${{ matrix.container }} timeout-minutes: 14400 + steps: - name: prep if: github.event_name == 'workflow_dispatch' @@ -780,20 +844,20 @@ jobs: echo "skipjobs: ${{github.event.inputs.skipjobs}}" echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - # On centos7 actions/checkout@v4 does not work, so we use v3 - # ref.
https://github.com/actions/checkout/issues/1487 - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release - name: make run: | - yum -y install centos-release-scl epel-release - yum -y install devtoolset-7 openssl-devel openssl - scl enable devtoolset-7 "make BUILD_TLS=module SERVER_CFLAGS='-Werror'" + dnf -y install make gcc openssl-devel openssl procps-ng which /usr/bin/kill + make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' - name: testprep run: | - yum -y install tcl tcltls tclx + dnf -y install tcl tcltls ./utils/gen-test-certs.sh - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') @@ -1074,7 +1138,7 @@ jobs: notify-about-job-results: runs-on: ubuntu-latest if: always() && github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey' - needs: [test-ubuntu-jemalloc, test-ubuntu-jemalloc-fortify, test-ubuntu-libc-malloc, test-ubuntu-no-malloc-usable-size, test-ubuntu-32bit, test-ubuntu-tls, test-ubuntu-tls-no-tls, test-ubuntu-io-threads, test-ubuntu-reclaim-cache, test-valgrind-test, test-valgrind-misc, test-valgrind-no-malloc-usable-size-test, test-valgrind-no-malloc-usable-size-misc, test-sanitizer-address, test-sanitizer-undefined, test-centos7-jemalloc, test-centos7-tls-module, test-centos7-tls-module-no-tls, test-macos-latest, test-macos-latest-sentinel, test-macos-latest-cluster, build-macos, test-freebsd, test-alpine-jemalloc, test-alpine-libc-malloc, reply-schemas-validator] + needs: [test-ubuntu-jemalloc, test-ubuntu-jemalloc-fortify, test-ubuntu-libc-malloc, test-ubuntu-no-malloc-usable-size, test-ubuntu-32bit, test-ubuntu-tls, test-ubuntu-tls-no-tls, test-ubuntu-io-threads, test-ubuntu-reclaim-cache, test-valgrind-test, test-valgrind-misc, 
test-valgrind-no-malloc-usable-size-test, test-valgrind-no-malloc-usable-size-misc, test-sanitizer-address, test-sanitizer-undefined, test-rpm-distros-jemalloc, test-rpm-distros-tls-module, test-rpm-distros-tls-module-no-tls, test-macos-latest, test-macos-latest-sentinel, test-macos-latest-cluster, build-macos, test-freebsd, test-alpine-jemalloc, test-alpine-libc-malloc, reply-schemas-validator] steps: - name: Collect job status run: | From 5d0f4bc9f0089f83cc54e1978f2dd549f7392c9f Mon Sep 17 00:00:00 2001 From: Samuel Adetunji <45036507+adetunjii@users.noreply.github.com> Date: Sun, 26 May 2024 17:41:11 +0100 Subject: [PATCH 05/42] Require C11 atomics (#490) - Replaces custom atomics logic with C11 default atomics logic. - Drops "atomicvar_api" field from server info Closes #485 --------- Signed-off-by: adetunjii Signed-off-by: Samuel Adetunji Co-authored-by: teej4y --- src/aof.c | 22 ++-- src/bio.c | 17 +-- src/db.c | 1 - src/evict.c | 1 - src/functions.c | 1 - src/lazyfree.c | 73 +++++------ src/module.c | 5 +- src/networking.c | 28 ++--- src/rdb.c | 3 +- src/replication.c | 15 +-- src/server.c | 65 +++++----- src/server.h | 278 ++++++++++++++++++++--------------------- src/threads_mngr.c | 17 ++- src/valkey-benchmark.c | 113 ++++++++--------- src/zmalloc.c | 11 +- 15 files changed, 318 insertions(+), 332 deletions(-) diff --git a/src/aof.c b/src/aof.c index a2ed139f8b..d15ff379e5 100644 --- a/src/aof.c +++ b/src/aof.c @@ -946,7 +946,7 @@ void stopAppendOnly(void) { server.aof_last_incr_size = 0; server.aof_last_incr_fsync_offset = 0; server.fsynced_reploff = -1; - atomicSet(server.fsynced_reploff_pending, 0); + atomic_store_explicit(&server.fsynced_reploff_pending, 0, memory_order_relaxed); killAppendOnlyChild(); sdsfree(server.aof_buf); server.aof_buf = sdsempty(); @@ -985,11 +985,11 @@ int startAppendOnly(void) { } server.aof_last_fsync = server.mstime; /* If AOF fsync error in bio job, we just ignore it and log the event. 
*/ - int aof_bio_fsync_status; - atomicGet(server.aof_bio_fsync_status, aof_bio_fsync_status); + int aof_bio_fsync_status = atomic_load_explicit(&server.aof_bio_fsync_status, memory_order_relaxed); if (aof_bio_fsync_status == C_ERR) { - serverLog(LL_WARNING, "AOF reopen, just ignore the AOF fsync error in bio job"); - atomicSet(server.aof_bio_fsync_status, C_OK); + serverLog(LL_WARNING, + "AOF reopen, just ignore the AOF fsync error in bio job"); + atomic_store_explicit(&server.aof_bio_fsync_status, C_OK, memory_order_relaxed); } /* If AOF was in error state, we just ignore it and log the event. */ @@ -1074,7 +1074,7 @@ void flushAppendOnlyFile(int force) { * (because there's no reason, from the AOF POV, to call fsync) and then WAITAOF may wait on * the higher offset (which contains data that was only propagated to replicas, and not to AOF) */ if (!sync_in_progress && server.aof_fsync != AOF_FSYNC_NO) - atomicSet(server.fsynced_reploff_pending, server.master_repl_offset); + atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); return; } } @@ -1244,8 +1244,9 @@ void flushAppendOnlyFile(int force) { latencyAddSampleIfNeeded("aof-fsync-always", latency); server.aof_last_incr_fsync_offset = server.aof_last_incr_size; server.aof_last_fsync = server.mstime; - atomicSet(server.fsynced_reploff_pending, server.master_repl_offset); - } else if (server.aof_fsync == AOF_FSYNC_EVERYSEC && server.mstime - server.aof_last_fsync >= 1000) { + atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); + } else if (server.aof_fsync == AOF_FSYNC_EVERYSEC && + server.mstime - server.aof_last_fsync >= 1000) { if (!sync_in_progress) { aof_background_fsync(server.aof_fd); server.aof_last_incr_fsync_offset = server.aof_last_incr_size; @@ -2409,7 +2410,7 @@ int rewriteAppendOnlyFileBackground(void) { /* Set the initial repl_offset, which will be applied to fsynced_reploff * when AOFRW finishes 
(after possibly being updated by a bio thread) */ - atomicSet(server.fsynced_reploff_pending, server.master_repl_offset); + atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); server.fsynced_reploff = 0; } @@ -2647,8 +2648,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { /* Update the fsynced replication offset that just now become valid. * This could either be the one we took in startAppendOnly, or a * newer one set by the bio thread. */ - long long fsynced_reploff_pending; - atomicGet(server.fsynced_reploff_pending, fsynced_reploff_pending); + long long fsynced_reploff_pending = atomic_load_explicit(&server.fsynced_reploff_pending, memory_order_relaxed); server.fsynced_reploff = fsynced_reploff_pending; } diff --git a/src/bio.c b/src/bio.c index 4d1d268e62..31d946e566 100644 --- a/src/bio.c +++ b/src/bio.c @@ -62,6 +62,7 @@ #include "server.h" #include "bio.h" +#include static char *bio_worker_title[] = { "bio_close_file", @@ -256,17 +257,19 @@ void *bioProcessBackgroundJobs(void *arg) { /* The fd may be closed by main thread and reused for another * socket, pipe, or file. We just ignore these errno because * aof fsync did not really fail. 
*/ - if (valkey_fsync(job->fd_args.fd) == -1 && errno != EBADF && errno != EINVAL) { - int last_status; - atomicGet(server.aof_bio_fsync_status, last_status); - atomicSet(server.aof_bio_fsync_status, C_ERR); - atomicSet(server.aof_bio_fsync_errno, errno); + if (valkey_fsync(job->fd_args.fd) == -1 && + errno != EBADF && errno != EINVAL) + { + int last_status = atomic_load_explicit(&server.aof_bio_fsync_status, memory_order_relaxed); + + atomic_store_explicit(&server.aof_bio_fsync_errno, errno, memory_order_relaxed); + atomic_store_explicit(&server.aof_bio_fsync_status, C_ERR, memory_order_release); if (last_status == C_OK) { serverLog(LL_WARNING, "Fail to fsync the AOF file: %s", strerror(errno)); } } else { - atomicSet(server.aof_bio_fsync_status, C_OK); - atomicSet(server.fsynced_reploff_pending, job->fd_args.offset); + atomic_store_explicit(&server.aof_bio_fsync_status, C_OK, memory_order_relaxed); + atomic_store_explicit(&server.fsynced_reploff_pending, job->fd_args.offset, memory_order_relaxed); } if (job->fd_args.need_reclaim_cache) { diff --git a/src/db.c b/src/db.c index fa07deeb4b..a78c8bad2b 100644 --- a/src/db.c +++ b/src/db.c @@ -29,7 +29,6 @@ #include "server.h" #include "cluster.h" -#include "atomicvar.h" #include "latency.h" #include "script.h" #include "functions.h" diff --git a/src/evict.c b/src/evict.c index 4a51974ac6..cf209ff065 100644 --- a/src/evict.c +++ b/src/evict.c @@ -32,7 +32,6 @@ #include "server.h" #include "bio.h" -#include "atomicvar.h" #include "script.h" #include diff --git a/src/functions.c b/src/functions.c index 76e40c5231..3076f3b90a 100644 --- a/src/functions.c +++ b/src/functions.c @@ -31,7 +31,6 @@ #include "sds.h" #include "dict.h" #include "adlist.h" -#include "atomicvar.h" #define LOAD_TIMEOUT_MS 500 diff --git a/src/lazyfree.c b/src/lazyfree.c index f9811f0e64..82e468322e 100644 --- a/src/lazyfree.c +++ b/src/lazyfree.c @@ -1,19 +1,20 @@ #include "server.h" #include "bio.h" -#include "atomicvar.h" #include "functions.h" 
#include "cluster.h" -static serverAtomic size_t lazyfree_objects = 0; -static serverAtomic size_t lazyfreed_objects = 0; +#include + +static _Atomic size_t lazyfree_objects = 0; +static _Atomic size_t lazyfreed_objects = 0; /* Release objects from the lazyfree thread. It's just decrRefCount() * updating the count of objects to release. */ void lazyfreeFreeObject(void *args[]) { robj *o = (robj *)args[0]; decrRefCount(o); - atomicDecr(lazyfree_objects, 1); - atomicIncr(lazyfreed_objects, 1); + atomic_fetch_sub_explicit(&lazyfree_objects,1,memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects,1,memory_order_relaxed); } /* Release a database from the lazyfree thread. The 'db' pointer is the @@ -26,8 +27,8 @@ void lazyfreeFreeDatabase(void *args[]) { size_t numkeys = kvstoreSize(da1); kvstoreRelease(da1); kvstoreRelease(da2); - atomicDecr(lazyfree_objects, numkeys); - atomicIncr(lazyfreed_objects, numkeys); + atomic_fetch_sub_explicit(&lazyfree_objects,numkeys,memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects,numkeys,memory_order_relaxed); } /* Release the key tracking table. */ @@ -35,8 +36,8 @@ void lazyFreeTrackingTable(void *args[]) { rax *rt = args[0]; size_t len = rt->numele; freeTrackingRadixTree(rt); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); } /* Release the error stats rax tree. */ @@ -44,8 +45,8 @@ void lazyFreeErrors(void *args[]) { rax *errors = args[0]; size_t len = errors->numele; raxFreeWithCallback(errors, zfree); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); } /* Release the lua_scripts dict. 
*/ @@ -55,8 +56,8 @@ void lazyFreeLuaScripts(void *args[]) { lua_State *lua = args[2]; long long len = dictSize(lua_scripts); freeLuaScriptsSync(lua_scripts, lua_scripts_lru_list, lua); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); } /* Release the functions ctx. */ @@ -64,8 +65,8 @@ void lazyFreeFunctionsCtx(void *args[]) { functionsLibCtx *functions_lib_ctx = args[0]; size_t len = functionsLibCtxFunctionsLen(functions_lib_ctx); functionsLibCtxFree(functions_lib_ctx); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); } /* Release replication backlog referencing memory. */ @@ -76,26 +77,24 @@ void lazyFreeReplicationBacklogRefMem(void *args[]) { len += raxSize(index); listRelease(blocks); raxFree(index); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); } /* Return the number of currently pending objects to free. */ size_t lazyfreeGetPendingObjectsCount(void) { - size_t aux; - atomicGet(lazyfree_objects, aux); + size_t aux = atomic_load_explicit(&lazyfree_objects,memory_order_relaxed); return aux; } /* Return the number of objects that have been freed. */ size_t lazyfreeGetFreedObjectsCount(void) { - size_t aux; - atomicGet(lazyfreed_objects, aux); + size_t aux = atomic_load_explicit(&lazyfreed_objects,memory_order_relaxed); return aux; } void lazyfreeResetStats(void) { - atomicSet(lazyfreed_objects, 0); + atomic_store_explicit(&lazyfreed_objects,0,memory_order_relaxed); } /* Return the amount of work needed in order to free an object. 
@@ -175,8 +174,8 @@ void freeObjAsync(robj *key, robj *obj, int dbid) { * of parts of the server core may call incrRefCount() to protect * objects, and then call dbDelete(). */ if (free_effort > LAZYFREE_THRESHOLD && obj->refcount == 1) { - atomicIncr(lazyfree_objects, 1); - bioCreateLazyFreeJob(lazyfreeFreeObject, 1, obj); + atomic_fetch_add_explicit(&lazyfree_objects,1,memory_order_relaxed); + bioCreateLazyFreeJob(lazyfreeFreeObject,1,obj); } else { decrRefCount(obj); } @@ -195,7 +194,7 @@ void emptyDbAsync(serverDb *db) { kvstore *oldkeys = db->keys, *oldexpires = db->expires; db->keys = kvstoreCreate(&dbDictType, slot_count_bits, flags); db->expires = kvstoreCreate(&dbExpiresDictType, slot_count_bits, flags); - atomicIncr(lazyfree_objects, kvstoreSize(oldkeys)); + atomic_fetch_add_explicit(&lazyfree_objects, kvstoreSize(oldkeys), memory_order_relaxed); bioCreateLazyFreeJob(lazyfreeFreeDatabase, 2, oldkeys, oldexpires); } @@ -204,8 +203,8 @@ void emptyDbAsync(serverDb *db) { void freeTrackingRadixTreeAsync(rax *tracking) { /* Because this rax has only keys and no values so we use numnodes. */ if (tracking->numnodes > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, tracking->numele); - bioCreateLazyFreeJob(lazyFreeTrackingTable, 1, tracking); + atomic_fetch_add_explicit(&lazyfree_objects,tracking->numele,memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeTrackingTable,1,tracking); } else { freeTrackingRadixTree(tracking); } @@ -216,8 +215,8 @@ void freeTrackingRadixTreeAsync(rax *tracking) { void freeErrorsRadixTreeAsync(rax *errors) { /* Because this rax has only keys and no values so we use numnodes. 
*/ if (errors->numnodes > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, errors->numele); - bioCreateLazyFreeJob(lazyFreeErrors, 1, errors); + atomic_fetch_add_explicit(&lazyfree_objects,errors->numele,memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeErrors,1,errors); } else { raxFreeWithCallback(errors, zfree); } @@ -227,8 +226,8 @@ void freeErrorsRadixTreeAsync(rax *errors) { * Close lua interpreter, if there are a lot of lua scripts, close it in async way. */ void freeLuaScriptsAsync(dict *lua_scripts, list *lua_scripts_lru_list, lua_State *lua) { if (dictSize(lua_scripts) > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, dictSize(lua_scripts)); - bioCreateLazyFreeJob(lazyFreeLuaScripts, 3, lua_scripts, lua_scripts_lru_list, lua); + atomic_fetch_add_explicit(&lazyfree_objects,dictSize(lua_scripts),memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeLuaScripts,3,lua_scripts,lua_scripts_lru_list,lua); } else { freeLuaScriptsSync(lua_scripts, lua_scripts_lru_list, lua); } @@ -237,8 +236,8 @@ void freeLuaScriptsAsync(dict *lua_scripts, list *lua_scripts_lru_list, lua_Stat /* Free functions ctx, if the functions ctx contains enough functions, free it in async way. */ void freeFunctionsAsync(functionsLibCtx *functions_lib_ctx) { if (functionsLibCtxFunctionsLen(functions_lib_ctx) > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, functionsLibCtxFunctionsLen(functions_lib_ctx)); - bioCreateLazyFreeJob(lazyFreeFunctionsCtx, 1, functions_lib_ctx); + atomic_fetch_add_explicit(&lazyfree_objects,functionsLibCtxFunctionsLen(functions_lib_ctx),memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeFunctionsCtx,1,functions_lib_ctx); } else { functionsLibCtxFree(functions_lib_ctx); } @@ -246,9 +245,11 @@ void freeFunctionsAsync(functionsLibCtx *functions_lib_ctx) { /* Free replication backlog referencing buffer blocks and rax index. 
*/ void freeReplicationBacklogRefMemAsync(list *blocks, rax *index) { - if (listLength(blocks) > LAZYFREE_THRESHOLD || raxSize(index) > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, listLength(blocks) + raxSize(index)); - bioCreateLazyFreeJob(lazyFreeReplicationBacklogRefMem, 2, blocks, index); + if (listLength(blocks) > LAZYFREE_THRESHOLD || + raxSize(index) > LAZYFREE_THRESHOLD) + { + atomic_fetch_add_explicit(&lazyfree_objects,listLength(blocks)+raxSize(index),memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeReplicationBacklogRefMem,2,blocks,index); } else { listRelease(blocks); raxFree(index); diff --git a/src/module.c b/src/module.c index f37d3a8302..f82f30326d 100644 --- a/src/module.c +++ b/src/module.c @@ -2413,8 +2413,7 @@ void VM_Yield(ValkeyModuleCtx *ctx, int flags, const char *busy_reply) { * after the main thread enters acquiring GIL state in order to protect the event * loop (ae.c) and avoid potential race conditions. */ - int acquiring; - atomicGet(server.module_gil_acquring, acquiring); + int acquiring = atomic_load_explicit(&server.module_gil_acquiring, memory_order_relaxed); if (!acquiring) { /* If the main thread has not yet entered the acquiring GIL state, * we attempt to wake it up and exit without waiting for it to @@ -11823,7 +11822,7 @@ void moduleInitModulesSystem(void) { moduleUnblockedClients = listCreate(); server.loadmodule_queue = listCreate(); server.module_configs_queue = dictCreate(&sdsKeyValueHashDictType); - server.module_gil_acquring = 0; + server.module_gil_acquiring = 0; modules = dictCreate(&modulesDictType); moduleAuthCallbacks = listCreate(); diff --git a/src/networking.c b/src/networking.c index 39eaf8b17e..121931a111 100644 --- a/src/networking.c +++ b/src/networking.c @@ -28,7 +28,6 @@ */ #include "server.h" -#include "atomicvar.h" #include "cluster.h" #include "script.h" #include "fpconv_dtoa.h" @@ -37,6 +36,7 @@ #include #include #include +#include static void setProtocolError(const char *errstr, client 
*c); static void pauseClientsByClient(mstime_t end, int isPauseClientAll); @@ -128,9 +128,8 @@ client *createClient(connection *conn) { connSetPrivateData(conn, c); } c->buf = zmalloc_usable(PROTO_REPLY_CHUNK_BYTES, &c->buf_usable_size); - selectDb(c, 0); - uint64_t client_id; - atomicGetIncr(server.next_client_id, client_id, 1); + selectDb(c,0); + uint64_t client_id = atomic_fetch_add_explicit(&server.next_client_id,1,memory_order_relaxed); c->id = client_id; #ifdef LOG_REQ_RES reqresReset(c, 0); @@ -1943,7 +1942,7 @@ int _writeToClient(client *c, ssize_t *nwritten) { * thread safe. */ int writeToClient(client *c, int handler_installed) { /* Update total number of writes on server */ - atomicIncr(server.stat_total_writes_processed, 1); + atomic_fetch_add_explicit(&server.stat_total_writes_processed,1, memory_order_relaxed); ssize_t nwritten = 0, totwritten = 0; @@ -1969,9 +1968,9 @@ int writeToClient(client *c, int handler_installed) { } if (getClientType(c) == CLIENT_TYPE_SLAVE) { - atomicIncr(server.stat_net_repl_output_bytes, totwritten); + atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes, totwritten, memory_order_relaxed); } else { - atomicIncr(server.stat_net_output_bytes, totwritten); + atomic_fetch_add_explicit(&server.stat_net_output_bytes, totwritten, memory_order_relaxed); } c->net_output_bytes += totwritten; @@ -2611,7 +2610,7 @@ void readQueryFromClient(connection *conn) { if (postponeClientRead(c)) return; /* Update total number of reads on server */ - atomicIncr(server.stat_total_reads_processed, 1); + atomic_fetch_add_explicit(&server.stat_total_reads_processed,1,memory_order_relaxed); readlen = PROTO_IOBUF_LEN; /* If this is a multi bulk request, and we are processing a bulk reply @@ -2677,9 +2676,9 @@ void readQueryFromClient(connection *conn) { c->lastinteraction = server.unixtime; if (c->flags & CLIENT_MASTER) { c->read_reploff += nread; - atomicIncr(server.stat_net_repl_input_bytes, nread); + 
atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes,nread,memory_order_relaxed); } else { - atomicIncr(server.stat_net_input_bytes, nread); + atomic_fetch_add_explicit(&server.stat_net_input_bytes,nread,memory_order_relaxed); } c->net_input_bytes += nread; @@ -2698,7 +2697,7 @@ void readQueryFromClient(connection *conn) { sdsfree(ci); sdsfree(bytes); freeClientAsync(c); - atomicIncr(server.stat_client_qbuf_limit_disconnections, 1); + atomic_fetch_add_explicit(&server.stat_client_qbuf_limit_disconnections,1,memory_order_relaxed); goto done; } @@ -4135,7 +4134,7 @@ void processEventsWhileBlocked(void) { #endif typedef struct __attribute__((aligned(CACHE_LINE_SIZE))) threads_pending { - serverAtomic unsigned long value; + _Atomic unsigned long value; } threads_pending; pthread_t io_threads[IO_THREADS_MAX_NUM]; @@ -4150,13 +4149,12 @@ int io_threads_op; list *io_threads_list[IO_THREADS_MAX_NUM]; static inline unsigned long getIOPendingCount(int i) { - unsigned long count = 0; - atomicGetWithSync(io_threads_pending[i].value, count); + unsigned long count = atomic_load(&io_threads_pending[i].value); return count; } static inline void setIOPendingCount(int i, unsigned long count) { - atomicSetWithSync(io_threads_pending[i].value, count); + atomic_store(&io_threads_pending[i].value, count); } void *IOThreadMain(void *myid) { diff --git a/src/rdb.c b/src/rdb.c index 5e398dee74..abc86566d0 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -39,6 +39,7 @@ #include #include +#include #include #include #include @@ -2888,7 +2889,7 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) { processModuleLoadingProgressEvent(0); } if (server.repl_state == REPL_STATE_TRANSFER && rioCheckType(r) == RIO_TYPE_CONN) { - atomicIncr(server.stat_net_repl_input_bytes, len); + atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes,len,memory_order_relaxed); } } diff --git a/src/replication.c b/src/replication.c index a3268f41db..e2612e75b5 100644 --- a/src/replication.c +++ 
b/src/replication.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -1363,8 +1364,8 @@ void sendBulkToSlave(connection *conn) { freeClient(slave); return; } - atomicIncr(server.stat_net_repl_output_bytes, nwritten); - sdsrange(slave->replpreamble, nwritten, -1); + atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes,nwritten,memory_order_relaxed); + sdsrange(slave->replpreamble,nwritten,-1); if (sdslen(slave->replpreamble) == 0) { sdsfree(slave->replpreamble); slave->replpreamble = NULL; @@ -1391,7 +1392,7 @@ void sendBulkToSlave(connection *conn) { return; } slave->repldboff += nwritten; - atomicIncr(server.stat_net_repl_output_bytes, nwritten); + atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes,nwritten,memory_order_relaxed); if (slave->repldboff == slave->repldbsize) { closeRepldbfd(slave); connSetWriteHandler(slave->conn, NULL); @@ -1433,7 +1434,7 @@ void rdbPipeWriteHandler(struct connection *conn) { return; } else { slave->repldboff += nwritten; - atomicIncr(server.stat_net_repl_output_bytes, nwritten); + atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes,nwritten,memory_order_relaxed); if (slave->repldboff < server.rdb_pipe_bufflen) { slave->repl_last_partial_write = server.unixtime; return; /* more data to write.. */ @@ -1506,7 +1507,7 @@ void rdbPipeReadHandler(struct aeEventLoop *eventLoop, int fd, void *clientData, /* Note: when use diskless replication, 'repldboff' is the offset * of 'rdb_pipe_buff' sent rather than the offset of entire RDB. 
*/ slave->repldboff = nwritten; - atomicIncr(server.stat_net_repl_output_bytes, nwritten); + atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes,nwritten,memory_order_relaxed); } /* If we were unable to write all the data to one of the replicas, * setup write handler (and disable pipe read handler, below) */ @@ -1814,7 +1815,7 @@ void readSyncBulkPayload(connection *conn) { } else { /* nread here is returned by connSyncReadLine(), which calls syncReadLine() and * convert "\r\n" to '\0' so 1 byte is lost. */ - atomicIncr(server.stat_net_repl_input_bytes, nread + 1); + atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes,nread+1,memory_order_relaxed); } if (buf[0] == '-') { @@ -1883,7 +1884,7 @@ void readSyncBulkPayload(connection *conn) { cancelReplicationHandshake(1); return; } - atomicIncr(server.stat_net_repl_input_bytes, nread); + atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes,nread,memory_order_relaxed); /* When a mark is used, we want to detect EOF asap in order to avoid * writing the EOF mark into the file... */ diff --git a/src/server.c b/src/server.c index 3e6dc56d6d..edf215eac2 100644 --- a/src/server.c +++ b/src/server.c @@ -33,7 +33,6 @@ #include "slowlog.h" #include "bio.h" #include "latency.h" -#include "atomicvar.h" #include "mt19937-64.h" #include "functions.h" #include "hdr_histogram.h" @@ -1075,7 +1074,7 @@ static inline void updateCachedTimeWithUs(int update_daylight_info, const long l server.ustime = ustime; server.mstime = server.ustime / 1000; time_t unixtime = server.mstime / 1000; - atomicSet(server.unixtime, unixtime); + atomic_store_explicit(&server.unixtime, unixtime, memory_order_relaxed); /* To get information about daylight saving time, we need to call * localtime_r and cache the result. 
However calling localtime_r in this @@ -1258,10 +1257,12 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { run_with_period(100) { long long stat_net_input_bytes, stat_net_output_bytes; long long stat_net_repl_input_bytes, stat_net_repl_output_bytes; - atomicGet(server.stat_net_input_bytes, stat_net_input_bytes); - atomicGet(server.stat_net_output_bytes, stat_net_output_bytes); - atomicGet(server.stat_net_repl_input_bytes, stat_net_repl_input_bytes); - atomicGet(server.stat_net_repl_output_bytes, stat_net_repl_output_bytes); + + stat_net_input_bytes = atomic_load_explicit(&server.stat_net_input_bytes,memory_order_relaxed); + stat_net_output_bytes = atomic_load_explicit(&server.stat_net_output_bytes,memory_order_relaxed); + stat_net_repl_input_bytes = atomic_load_explicit(&server.stat_net_repl_input_bytes,memory_order_relaxed); + stat_net_repl_output_bytes = atomic_load_explicit(&server.stat_net_repl_output_bytes,memory_order_relaxed); + monotime current_time = getMonotonicUs(); long long factor = 1000000; // us trackInstantaneousMetric(STATS_METRIC_COMMAND, server.stat_numcommands, current_time, factor); @@ -1667,8 +1668,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { * If an initial rewrite is in progress then not all data is guaranteed to have actually been * persisted to disk yet, so we cannot update the field. We will wait for the rewrite to complete. 
*/ if (server.aof_state == AOF_ON && server.fsynced_reploff != -1) { - long long fsynced_reploff_pending; - atomicGet(server.fsynced_reploff_pending, fsynced_reploff_pending); + long long fsynced_reploff_pending = atomic_load_explicit(&server.fsynced_reploff_pending, memory_order_relaxed); server.fsynced_reploff = fsynced_reploff_pending; /* If we have blocked [WAIT]AOF clients, and fsynced_reploff changed, we want to try to @@ -1737,11 +1737,12 @@ void afterSleep(struct aeEventLoop *eventLoop) { if (moduleCount()) { mstime_t latency; latencyStartMonitor(latency); - - atomicSet(server.module_gil_acquring, 1); + atomic_store_explicit(&server.module_gil_acquiring,1,memory_order_relaxed); moduleAcquireGIL(); - atomicSet(server.module_gil_acquring, 0); - moduleFireServerEvent(VALKEYMODULE_EVENT_EVENTLOOP, VALKEYMODULE_SUBEVENT_EVENTLOOP_AFTER_SLEEP, NULL); + atomic_store_explicit(&server.module_gil_acquiring,0,memory_order_relaxed); + moduleFireServerEvent(VALKEYMODULE_EVENT_EVENTLOOP, + VALKEYMODULE_SUBEVENT_EVENTLOOP_AFTER_SLEEP, + NULL); latencyEndMonitor(latency); latencyAddSampleIfNeeded("module-acquire-GIL", latency); } @@ -1990,7 +1991,7 @@ void initServerConfig(void) { server.aof_flush_sleep = 0; server.aof_last_fsync = time(NULL) * 1000; server.aof_cur_timestamp = 0; - atomicSet(server.aof_bio_fsync_status, C_OK); + atomic_store_explicit(&server.aof_bio_fsync_status,C_OK,memory_order_relaxed); server.aof_rewrite_time_last = -1; server.aof_rewrite_time_start = -1; server.aof_lastbgrewrite_status = C_OK; @@ -2480,10 +2481,10 @@ void resetServerStats(void) { server.stat_sync_partial_ok = 0; server.stat_sync_partial_err = 0; server.stat_io_reads_processed = 0; - atomicSet(server.stat_total_reads_processed, 0); + atomic_store_explicit(&server.stat_total_reads_processed,0,memory_order_relaxed); server.stat_io_writes_processed = 0; - atomicSet(server.stat_total_writes_processed, 0); - atomicSet(server.stat_client_qbuf_limit_disconnections, 0); + 
atomic_store_explicit(&server.stat_total_writes_processed,0,memory_order_relaxed); + atomic_store_explicit(&server.stat_client_qbuf_limit_disconnections,0,memory_order_relaxed); server.stat_client_outbuf_limit_disconnections = 0; for (j = 0; j < STATS_METRIC_COUNT; j++) { server.inst_metric[j].idx = 0; @@ -2494,10 +2495,10 @@ void resetServerStats(void) { server.stat_aof_rewrites = 0; server.stat_rdb_saves = 0; server.stat_aofrw_consecutive_failures = 0; - atomicSet(server.stat_net_input_bytes, 0); - atomicSet(server.stat_net_output_bytes, 0); - atomicSet(server.stat_net_repl_input_bytes, 0); - atomicSet(server.stat_net_repl_output_bytes, 0); + atomic_store_explicit(&server.stat_net_input_bytes,0,memory_order_relaxed); + atomic_store_explicit(&server.stat_net_output_bytes,0,memory_order_relaxed); + atomic_store_explicit(&server.stat_net_repl_input_bytes,0,memory_order_relaxed); + atomic_store_explicit(&server.stat_net_repl_output_bytes,0,memory_order_relaxed); server.stat_unexpected_error_replies = 0; server.stat_total_error_replies = 0; server.stat_dump_payload_sanitizations = 0; @@ -4380,10 +4381,9 @@ int writeCommandsDeniedByDiskError(void) { return DISK_ERROR_TYPE_AOF; } /* AOF fsync error. 
*/ - int aof_bio_fsync_status; - atomicGet(server.aof_bio_fsync_status, aof_bio_fsync_status); + int aof_bio_fsync_status = atomic_load_explicit(&server.aof_bio_fsync_status, memory_order_acquire); if (aof_bio_fsync_status == C_ERR) { - atomicGet(server.aof_bio_fsync_errno, server.aof_last_write_errno); + server.aof_last_write_errno = atomic_load_explicit(&server.aof_bio_fsync_errno, memory_order_relaxed); return DISK_ERROR_TYPE_AOF; } } @@ -5374,7 +5374,6 @@ sds genValkeyInfoString(dict *section_dict, int all_sections, int everything) { "arch_bits:%i\r\n", server.arch_bits, "monotonic_clock:%s\r\n", monotonicInfoString(), "multiplexing_api:%s\r\n", aeGetApiName(), - "atomicvar_api:%s\r\n", REDIS_ATOMIC_API, "gcc_version:%s\r\n", GNUC_VERSION_STR, "process_id:%I\r\n", (int64_t) getpid(), "process_supervised:%s\r\n", supervised, @@ -5531,8 +5530,7 @@ sds genValkeyInfoString(dict *section_dict, int all_sections, int everything) { } else if (server.stat_current_save_keys_total) { fork_perc = ((double)server.stat_current_save_keys_processed / server.stat_current_save_keys_total) * 100; } - int aof_bio_fsync_status; - atomicGet(server.aof_bio_fsync_status, aof_bio_fsync_status); + int aof_bio_fsync_status = atomic_load_explicit(&server.aof_bio_fsync_status,memory_order_relaxed); /* clang-format off */ info = sdscatprintf(info, "# Persistence\r\n" FMTARGS( @@ -5631,13 +5629,14 @@ sds genValkeyInfoString(dict *section_dict, int all_sections, int everything) { long long current_active_defrag_time = server.stat_last_active_defrag_time ? 
(long long)elapsedUs(server.stat_last_active_defrag_time) : 0; long long stat_client_qbuf_limit_disconnections; - atomicGet(server.stat_total_reads_processed, stat_total_reads_processed); - atomicGet(server.stat_total_writes_processed, stat_total_writes_processed); - atomicGet(server.stat_net_input_bytes, stat_net_input_bytes); - atomicGet(server.stat_net_output_bytes, stat_net_output_bytes); - atomicGet(server.stat_net_repl_input_bytes, stat_net_repl_input_bytes); - atomicGet(server.stat_net_repl_output_bytes, stat_net_repl_output_bytes); - atomicGet(server.stat_client_qbuf_limit_disconnections, stat_client_qbuf_limit_disconnections); + + stat_total_reads_processed = atomic_load_explicit(&server.stat_total_reads_processed, memory_order_relaxed); + stat_total_writes_processed = atomic_load_explicit(&server.stat_total_writes_processed, memory_order_relaxed); + stat_net_input_bytes = atomic_load_explicit(&server.stat_net_input_bytes, memory_order_relaxed); + stat_net_output_bytes = atomic_load_explicit(&server.stat_net_output_bytes, memory_order_relaxed); + stat_net_repl_input_bytes = atomic_load_explicit(&server.stat_net_repl_input_bytes, memory_order_relaxed); + stat_net_repl_output_bytes = atomic_load_explicit(&server.stat_net_repl_output_bytes, memory_order_relaxed); + stat_client_qbuf_limit_disconnections = atomic_load_explicit(&server.stat_client_qbuf_limit_disconnections, memory_order_relaxed); if (sections++) info = sdscat(info, "\r\n"); /* clang-format off */ diff --git a/src/server.h b/src/server.h index abf66fbf5a..70beb54f43 100644 --- a/src/server.h +++ b/src/server.h @@ -34,12 +34,12 @@ #include "config.h" #include "solarisfixes.h" #include "rio.h" -#include "atomicvar.h" #include "commands.h" #include #include #include +#include #include #include #include @@ -1631,7 +1631,7 @@ struct valkeyServer { int module_pipe[2]; /* Pipe used to awake the event loop by module threads. 
*/ pid_t child_pid; /* PID of current child */ int child_type; /* Type of current child */ - serverAtomic int module_gil_acquring; /* Indicates whether the GIL is being acquiring by the main thread. */ + _Atomic int module_gil_acquiring; /* Indicates whether the GIL is being acquiring by the main thread. */ /* Networking */ int port; /* TCP listening port */ int tls_port; /* TLS listening port */ @@ -1669,13 +1669,13 @@ struct valkeyServer { uint32_t paused_actions; /* Bitmask of actions that are currently paused */ list *postponed_clients; /* List of postponed clients */ pause_event client_pause_per_purpose[NUM_PAUSE_PURPOSES]; - char neterr[ANET_ERR_LEN]; /* Error buffer for anet.c */ - dict *migrate_cached_sockets; /* MIGRATE cached sockets */ - serverAtomic uint64_t next_client_id; /* Next client unique ID. Incremental. */ - int protected_mode; /* Don't accept external connections. */ - int io_threads_num; /* Number of IO threads to use. */ - int io_threads_do_reads; /* Read and parse from IO threads? */ - int io_threads_active; /* Is IO threads currently active? */ + char neterr[ANET_ERR_LEN]; /* Error buffer for anet.c */ + dict *migrate_cached_sockets;/* MIGRATE cached sockets */ + _Atomic uint64_t next_client_id; /* Next client unique ID. Incremental. */ + int protected_mode; /* Don't accept external connections. */ + int io_threads_num; /* Number of IO threads to use. */ + int io_threads_do_reads; /* Read and parse from IO threads? */ + int io_threads_active; /* Is IO threads currently active? 
*/ long long events_processed_while_blocked; /* processEventsWhileBlocked() */ int enable_protected_configs; /* Enable the modification of protected configs, see PROTECTED_ACTION_ALLOWED_* */ int enable_debug_cmd; /* Enable DEBUG commands, see PROTECTED_ACTION_ALLOWED_* */ @@ -1696,65 +1696,61 @@ struct valkeyServer { long long stat_expiredkeys; /* Number of expired keys */ double stat_expired_stale_perc; /* Percentage of keys probably expired */ long long stat_expired_time_cap_reached_count; /* Early expire cycle stops.*/ - long long stat_expire_cycle_time_used; /* Cumulative microseconds used. */ - long long stat_evictedkeys; /* Number of evicted keys (maxmemory) */ - long long stat_evictedclients; /* Number of evicted clients */ - long long stat_evictedscripts; /* Number of evicted lua scripts. */ - long long stat_total_eviction_exceeded_time; /* Total time over the memory limit, unit us */ - monotime stat_last_eviction_exceeded_time; /* Timestamp of current eviction start, unit us */ - long long stat_keyspace_hits; /* Number of successful lookups of keys */ - long long stat_keyspace_misses; /* Number of failed lookups of keys */ - long long stat_active_defrag_hits; /* number of allocations moved */ - long long stat_active_defrag_misses; /* number of allocations scanned but not moved */ - long long stat_active_defrag_key_hits; /* number of keys with moved allocations */ - long long stat_active_defrag_key_misses; /* number of keys scanned and not moved */ - long long stat_active_defrag_scanned; /* number of dictEntries scanned */ - long long stat_total_active_defrag_time; /* Total time memory fragmentation over the limit, unit us */ - monotime stat_last_active_defrag_time; /* Timestamp of current active defrag start */ - size_t stat_peak_memory; /* Max used memory record */ - long long stat_aof_rewrites; /* number of aof file rewrites performed */ - long long stat_aofrw_consecutive_failures; /* The number of consecutive failures of aofrw */ - long long 
stat_rdb_saves; /* number of rdb saves performed */ - long long stat_fork_time; /* Time needed to perform latest fork() */ - double stat_fork_rate; /* Fork rate in GB/sec. */ - long long stat_total_forks; /* Total count of fork. */ - long long stat_rejected_conn; /* Clients rejected because of maxclients */ - long long stat_sync_full; /* Number of full resyncs with slaves. */ - long long stat_sync_partial_ok; /* Number of accepted PSYNC requests. */ - long long stat_sync_partial_err; /* Number of unaccepted PSYNC requests. */ - list *slowlog; /* SLOWLOG list of commands */ - long long slowlog_entry_id; /* SLOWLOG current entry ID */ - long long slowlog_log_slower_than; /* SLOWLOG time limit (to get logged) */ - unsigned long slowlog_max_len; /* SLOWLOG max number of items logged */ - struct malloc_stats cron_malloc_stats; /* sampled in serverCron(). */ - serverAtomic long long stat_net_input_bytes; /* Bytes read from network. */ - serverAtomic long long stat_net_output_bytes; /* Bytes written to network. */ - serverAtomic long long - stat_net_repl_input_bytes; /* Bytes read during replication, added to stat_net_input_bytes in 'info'. */ - serverAtomic long long - stat_net_repl_output_bytes; /* Bytes written during replication, added to stat_net_output_bytes in 'info'. */ - size_t stat_current_cow_peak; /* Peak size of copy on write bytes. */ - size_t stat_current_cow_bytes; /* Copy on write bytes while child is active. */ - monotime stat_current_cow_updated; /* Last update time of stat_current_cow_bytes */ - size_t stat_current_save_keys_processed; /* Processed keys while child is active. */ - size_t stat_current_save_keys_total; /* Number of keys when child started. */ - size_t stat_rdb_cow_bytes; /* Copy on write bytes during RDB saving. */ - size_t stat_aof_cow_bytes; /* Copy on write bytes during AOF rewrite. */ - size_t stat_module_cow_bytes; /* Copy on write bytes during module fork. */ - double stat_module_progress; /* Module save progress. 
*/ - size_t stat_clients_type_memory[CLIENT_TYPE_COUNT]; /* Mem usage by type */ - size_t stat_cluster_links_memory; /* Mem usage by cluster links */ - long long - stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) error replies */ + long long stat_expire_cycle_time_used; /* Cumulative microseconds used. */ + long long stat_evictedkeys; /* Number of evicted keys (maxmemory) */ + long long stat_evictedclients; /* Number of evicted clients */ + long long stat_evictedscripts; /* Number of evicted lua scripts. */ + long long stat_total_eviction_exceeded_time; /* Total time over the memory limit, unit us */ + monotime stat_last_eviction_exceeded_time; /* Timestamp of current eviction start, unit us */ + long long stat_keyspace_hits; /* Number of successful lookups of keys */ + long long stat_keyspace_misses; /* Number of failed lookups of keys */ + long long stat_active_defrag_hits; /* number of allocations moved */ + long long stat_active_defrag_misses; /* number of allocations scanned but not moved */ + long long stat_active_defrag_key_hits; /* number of keys with moved allocations */ + long long stat_active_defrag_key_misses;/* number of keys scanned and not moved */ + long long stat_active_defrag_scanned; /* number of dictEntries scanned */ + long long stat_total_active_defrag_time; /* Total time memory fragmentation over the limit, unit us */ + monotime stat_last_active_defrag_time; /* Timestamp of current active defrag start */ + size_t stat_peak_memory; /* Max used memory record */ + long long stat_aof_rewrites; /* number of aof file rewrites performed */ + long long stat_aofrw_consecutive_failures; /* The number of consecutive failures of aofrw */ + long long stat_rdb_saves; /* number of rdb saves performed */ + long long stat_fork_time; /* Time needed to perform latest fork() */ + double stat_fork_rate; /* Fork rate in GB/sec. */ + long long stat_total_forks; /* Total count of fork. 
*/ + long long stat_rejected_conn; /* Clients rejected because of maxclients */ + long long stat_sync_full; /* Number of full resyncs with slaves. */ + long long stat_sync_partial_ok; /* Number of accepted PSYNC requests. */ + long long stat_sync_partial_err;/* Number of unaccepted PSYNC requests. */ + list *slowlog; /* SLOWLOG list of commands */ + long long slowlog_entry_id; /* SLOWLOG current entry ID */ + long long slowlog_log_slower_than; /* SLOWLOG time limit (to get logged) */ + unsigned long slowlog_max_len; /* SLOWLOG max number of items logged */ + struct malloc_stats cron_malloc_stats; /* sampled in serverCron(). */ + _Atomic long long stat_net_input_bytes; /* Bytes read from network. */ + _Atomic long long stat_net_output_bytes; /* Bytes written to network. */ + _Atomic long long stat_net_repl_input_bytes; /* Bytes read during replication, added to stat_net_input_bytes in 'info'. */ + _Atomic long long stat_net_repl_output_bytes; /* Bytes written during replication, added to stat_net_output_bytes in 'info'. */ + size_t stat_current_cow_peak; /* Peak size of copy on write bytes. */ + size_t stat_current_cow_bytes; /* Copy on write bytes while child is active. */ + monotime stat_current_cow_updated; /* Last update time of stat_current_cow_bytes */ + size_t stat_current_save_keys_processed; /* Processed keys while child is active. */ + size_t stat_current_save_keys_total; /* Number of keys when child started. */ + size_t stat_rdb_cow_bytes; /* Copy on write bytes during RDB saving. */ + size_t stat_aof_cow_bytes; /* Copy on write bytes during AOF rewrite. */ + size_t stat_module_cow_bytes; /* Copy on write bytes during module fork. */ + double stat_module_progress; /* Module save progress. */ + size_t stat_clients_type_memory[CLIENT_TYPE_COUNT];/* Mem usage by type */ + size_t stat_cluster_links_memory; /* Mem usage by cluster links */ + long long stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) 
error replies */ long long stat_total_error_replies; /* Total number of issued error replies ( command + rejected errors ) */ - long long stat_dump_payload_sanitizations; /* Number deep dump payloads integrity validations. */ - long long stat_io_reads_processed; /* Number of read events processed by IO / Main threads */ - long long stat_io_writes_processed; /* Number of write events processed by IO / Main threads */ - serverAtomic long long stat_total_reads_processed; /* Total number of read events processed */ - serverAtomic long long stat_total_writes_processed; /* Total number of write events processed */ - serverAtomic long long - stat_client_qbuf_limit_disconnections; /* Total number of clients reached query buf length limit */ - long long stat_client_outbuf_limit_disconnections; /* Total number of clients reached output buf length limit */ + long long stat_dump_payload_sanitizations; /* Number deep dump payloads integrity validations. */ + long long stat_io_reads_processed; /* Number of read events processed by IO / Main threads */ + long long stat_io_writes_processed; /* Number of write events processed by IO / Main threads */ + _Atomic long long stat_total_reads_processed; /* Total number of read events processed */ + _Atomic long long stat_total_writes_processed; /* Total number of write events processed */ + _Atomic long long stat_client_qbuf_limit_disconnections; /* Total number of clients reached query buf length limit */ + long long stat_client_outbuf_limit_disconnections; /* Total number of clients reached output buf length limit */ /* The following two are used to track instantaneous metrics, like * number of operations per second, network traffic. */ struct { @@ -1814,43 +1810,43 @@ struct valkeyServer { unsigned int max_new_conns_per_cycle; /* The maximum number of tcp connections that will be accepted during each invocation of the event loop. 
*/ /* AOF persistence */ - int aof_enabled; /* AOF configuration */ - int aof_state; /* AOF_(ON|OFF|WAIT_REWRITE) */ - int aof_fsync; /* Kind of fsync() policy */ - char *aof_filename; /* Basename of the AOF file and manifest file */ - char *aof_dirname; /* Name of the AOF directory */ - int aof_no_fsync_on_rewrite; /* Don't fsync if a rewrite is in prog. */ - int aof_rewrite_perc; /* Rewrite AOF if % growth is > M and... */ - off_t aof_rewrite_min_size; /* the AOF file is at least N bytes. */ - off_t aof_rewrite_base_size; /* AOF size on latest startup or rewrite. */ - off_t aof_current_size; /* AOF current size (Including BASE + INCRs). */ - off_t aof_last_incr_size; /* The size of the latest incr AOF. */ - off_t aof_last_incr_fsync_offset; /* AOF offset which is already requested to be synced to disk. - * Compare with the aof_last_incr_size. */ - int aof_flush_sleep; /* Micros to sleep before flush. (used by tests) */ - int aof_rewrite_scheduled; /* Rewrite once BGSAVE terminates. */ - sds aof_buf; /* AOF buffer, written before entering the event loop */ - int aof_fd; /* File descriptor of currently selected AOF file */ - int aof_selected_db; /* Currently selected DB in AOF */ - mstime_t aof_flush_postponed_start; /* mstime of postponed AOF flush */ - mstime_t aof_last_fsync; /* mstime of last fsync() */ - time_t aof_rewrite_time_last; /* Time used by last AOF rewrite run. */ - time_t aof_rewrite_time_start; /* Current AOF rewrite start time. */ - time_t aof_cur_timestamp; /* Current record timestamp in AOF */ - int aof_timestamp_enabled; /* Enable record timestamp in AOF */ - int aof_lastbgrewrite_status; /* C_OK or C_ERR */ - unsigned long aof_delayed_fsync; /* delayed AOF fsync() counter */ - int aof_rewrite_incremental_fsync; /* fsync incrementally while aof rewriting? */ - int rdb_save_incremental_fsync; /* fsync incrementally while rdb saving? 
*/ - int aof_last_write_status; /* C_OK or C_ERR */ - int aof_last_write_errno; /* Valid if aof write/fsync status is ERR */ - int aof_load_truncated; /* Don't stop on unexpected AOF EOF. */ - int aof_use_rdb_preamble; /* Specify base AOF to use RDB encoding on AOF rewrites. */ - serverAtomic int aof_bio_fsync_status; /* Status of AOF fsync in bio job. */ - serverAtomic int aof_bio_fsync_errno; /* Errno of AOF fsync in bio job. */ - aofManifest *aof_manifest; /* Used to track AOFs. */ - int aof_disable_auto_gc; /* If disable automatically deleting HISTORY type AOFs? - default no. (for testings). */ + int aof_enabled; /* AOF configuration */ + int aof_state; /* AOF_(ON|OFF|WAIT_REWRITE) */ + int aof_fsync; /* Kind of fsync() policy */ + char *aof_filename; /* Basename of the AOF file and manifest file */ + char *aof_dirname; /* Name of the AOF directory */ + int aof_no_fsync_on_rewrite; /* Don't fsync if a rewrite is in prog. */ + int aof_rewrite_perc; /* Rewrite AOF if % growth is > M and... */ + off_t aof_rewrite_min_size; /* the AOF file is at least N bytes. */ + off_t aof_rewrite_base_size; /* AOF size on latest startup or rewrite. */ + off_t aof_current_size; /* AOF current size (Including BASE + INCRs). */ + off_t aof_last_incr_size; /* The size of the latest incr AOF. */ + off_t aof_last_incr_fsync_offset; /* AOF offset which is already requested to be synced to disk. + * Compare with the aof_last_incr_size. */ + int aof_flush_sleep; /* Micros to sleep before flush. (used by tests) */ + int aof_rewrite_scheduled; /* Rewrite once BGSAVE terminates. */ + sds aof_buf; /* AOF buffer, written before entering the event loop */ + int aof_fd; /* File descriptor of currently selected AOF file */ + int aof_selected_db; /* Currently selected DB in AOF */ + mstime_t aof_flush_postponed_start; /* mstime of postponed AOF flush */ + mstime_t aof_last_fsync; /* mstime of last fsync() */ + time_t aof_rewrite_time_last; /* Time used by last AOF rewrite run. 
*/ + time_t aof_rewrite_time_start; /* Current AOF rewrite start time. */ + time_t aof_cur_timestamp; /* Current record timestamp in AOF */ + int aof_timestamp_enabled; /* Enable record timestamp in AOF */ + int aof_lastbgrewrite_status; /* C_OK or C_ERR */ + unsigned long aof_delayed_fsync; /* delayed AOF fsync() counter */ + int aof_rewrite_incremental_fsync;/* fsync incrementally while aof rewriting? */ + int rdb_save_incremental_fsync; /* fsync incrementally while rdb saving? */ + int aof_last_write_status; /* C_OK or C_ERR */ + int aof_last_write_errno; /* Valid if aof write/fsync status is ERR */ + int aof_load_truncated; /* Don't stop on unexpected AOF EOF. */ + int aof_use_rdb_preamble; /* Specify base AOF to use RDB encoding on AOF rewrites. */ + _Atomic int aof_bio_fsync_status; /* Status of AOF fsync in bio job. */ + _Atomic int aof_bio_fsync_errno; /* Errno of AOF fsync in bio job. */ + aofManifest *aof_manifest; /* Used to track AOFs. */ + int aof_disable_auto_gc; /* If disable automatically deleting HISTORY type AOFs? + default no. (for testings). */ /* RDB persistence */ long long dirty; /* Changes to DB from the last save */ @@ -1908,35 +1904,35 @@ struct valkeyServer { int shutdown_on_sigterm; /* Shutdown flags configured for SIGTERM. */ /* Replication (master) */ - char replid[CONFIG_RUN_ID_SIZE + 1]; /* My current replication ID. */ - char replid2[CONFIG_RUN_ID_SIZE + 1]; /* replid inherited from master*/ - long long master_repl_offset; /* My current replication offset */ - long long second_replid_offset; /* Accept offsets up to this for replid2. 
*/ - serverAtomic long long fsynced_reploff_pending; /* Largest replication offset to - * potentially have been fsynced, applied to - fsynced_reploff only when AOF state is AOF_ON - (not during the initial rewrite) */ - long long fsynced_reploff; /* Largest replication offset that has been confirmed to be fsynced */ - int slaveseldb; /* Last SELECTed DB in replication output */ - int repl_ping_slave_period; /* Master pings the slave every N seconds */ - replBacklog *repl_backlog; /* Replication backlog for partial syncs */ - long long repl_backlog_size; /* Backlog circular buffer size */ - time_t repl_backlog_time_limit; /* Time without slaves after the backlog - gets released. */ - time_t repl_no_slaves_since; /* We have no slaves since that time. - Only valid if server.slaves len is 0. */ - int repl_min_slaves_to_write; /* Min number of slaves to write. */ - int repl_min_slaves_max_lag; /* Max lag of slaves to write. */ - int repl_good_slaves_count; /* Number of slaves with lag <= max_lag. */ - int repl_diskless_sync; /* Master send RDB to slaves sockets directly. */ - int repl_diskless_load; /* Slave parse RDB directly from the socket. - * see REPL_DISKLESS_LOAD_* enum */ - int repl_diskless_sync_delay; /* Delay to start a diskless repl BGSAVE. */ - int repl_diskless_sync_max_replicas; /* Max replicas for diskless repl BGSAVE - * delay (start sooner if they all connect). */ - size_t repl_buffer_mem; /* The memory of replication buffer. */ - list *repl_buffer_blocks; /* Replication buffers blocks list - * (serving replica clients and repl backlog) */ + char replid[CONFIG_RUN_ID_SIZE+1]; /* My current replication ID. */ + char replid2[CONFIG_RUN_ID_SIZE+1]; /* replid inherited from master*/ + long long master_repl_offset; /* My current replication offset */ + long long second_replid_offset; /* Accept offsets up to this for replid2. 
*/ + _Atomic long long fsynced_reploff_pending;/* Largest replication offset to + * potentially have been fsynced, applied to + fsynced_reploff only when AOF state is AOF_ON + (not during the initial rewrite) */ + long long fsynced_reploff; /* Largest replication offset that has been confirmed to be fsynced */ + int slaveseldb; /* Last SELECTed DB in replication output */ + int repl_ping_slave_period; /* Master pings the slave every N seconds */ + replBacklog *repl_backlog; /* Replication backlog for partial syncs */ + long long repl_backlog_size; /* Backlog circular buffer size */ + time_t repl_backlog_time_limit; /* Time without slaves after the backlog + gets released. */ + time_t repl_no_slaves_since; /* We have no slaves since that time. + Only valid if server.slaves len is 0. */ + int repl_min_slaves_to_write; /* Min number of slaves to write. */ + int repl_min_slaves_max_lag; /* Max lag of slaves to write. */ + int repl_good_slaves_count; /* Number of slaves with lag <= max_lag. */ + int repl_diskless_sync; /* Master send RDB to slaves sockets directly. */ + int repl_diskless_load; /* Slave parse RDB directly from the socket. + * see REPL_DISKLESS_LOAD_* enum */ + int repl_diskless_sync_delay; /* Delay to start a diskless repl BGSAVE. */ + int repl_diskless_sync_max_replicas;/* Max replicas for diskless repl BGSAVE + * delay (start sooner if they all connect). */ + size_t repl_buffer_mem; /* The memory of replication buffer. */ + list *repl_buffer_blocks; /* Replication buffers blocks list + * (serving replica clients and repl backlog) */ /* Replication (slave) */ char *masteruser; /* AUTH with this user and masterauth with master */ sds masterauth; /* AUTH with this password with master */ @@ -2020,14 +2016,14 @@ struct valkeyServer { int list_max_listpack_size; int list_compress_depth; /* time cache */ - serverAtomic time_t unixtime; /* Unix time sampled every cron cycle. */ - time_t timezone; /* Cached timezone. As set by tzset(). 
*/ - int daylight_active; /* Currently in daylight saving time. */ - mstime_t mstime; /* 'unixtime' in milliseconds. */ - ustime_t ustime; /* 'unixtime' in microseconds. */ - mstime_t cmd_time_snapshot; /* Time snapshot of the root execution nesting. */ - size_t blocking_op_nesting; /* Nesting level of blocking operation, used to reset blocked_last_cron. */ - long long blocked_last_cron; /* Indicate the mstime of the last time we did cron jobs from a blocking operation */ + _Atomic time_t unixtime; /* Unix time sampled every cron cycle. */ + time_t timezone; /* Cached timezone. As set by tzset(). */ + int daylight_active; /* Currently in daylight saving time. */ + mstime_t mstime; /* 'unixtime' in milliseconds. */ + ustime_t ustime; /* 'unixtime' in microseconds. */ + mstime_t cmd_time_snapshot; /* Time snapshot of the root execution nesting. */ + size_t blocking_op_nesting; /* Nesting level of blocking operation, used to reset blocked_last_cron. */ + long long blocked_last_cron; /* Indicate the mstime of the last time we did cron jobs from a blocking operation */ /* Pubsub */ kvstore *pubsub_channels; /* Map channels to list of subscribed clients */ dict *pubsub_patterns; /* A dict of pubsub_patterns */ diff --git a/src/threads_mngr.c b/src/threads_mngr.c index 4dbf1ea427..e1f1d7e7b4 100644 --- a/src/threads_mngr.c +++ b/src/threads_mngr.c @@ -32,12 +32,12 @@ #define UNUSED(V) ((void)V) #ifdef __linux__ -#include "atomicvar.h" #include "server.h" #include #include #include +#include #define IN_PROGRESS 1 static const clock_t RUN_ON_THREADS_TIMEOUT = 2; @@ -46,10 +46,10 @@ static const clock_t RUN_ON_THREADS_TIMEOUT = 2; static run_on_thread_cb g_callback = NULL; static volatile size_t g_tids_len = 0; -static serverAtomic size_t g_num_threads_done = 0; +static _Atomic size_t g_num_threads_done = 0; /* This flag is set while ThreadsManager_runOnThreads is running */ -static serverAtomic int g_in_progress = 0; +static _Atomic int g_in_progress = 0; 
/*============================ Internal prototypes ========================== */ @@ -111,9 +111,8 @@ __attribute__((noinline)) int ThreadsManager_runOnThreads(pid_t *tids, size_t ti static int test_and_start(void) { - /* atomicFlagGetSet sets the variable to 1 and returns the previous value */ - int prev_state; - atomicFlagGetSet(g_in_progress, prev_state); + /* atomic_exchange_explicit sets the variable to 1 and returns the previous value */ + int prev_state = atomic_exchange_explicit(&g_in_progress,1,memory_order_relaxed); /* If prev_state is 1, g_in_progress was on. */ return prev_state; @@ -124,7 +123,7 @@ __attribute__((noinline)) static void invoke_callback(int sig) { run_on_thread_cb callback = g_callback; if (callback) { callback(); - atomicIncr(g_num_threads_done, 1); + atomic_fetch_add_explicit(&g_num_threads_done,1,memory_order_relaxed); } else { serverLogFromHandler(LL_WARNING, "tid %ld: ThreadsManager g_callback is NULL", syscall(SYS_gettid)); } @@ -146,7 +145,7 @@ static void wait_threads(void) { /* Sleep a bit to yield to other threads. */ /* usleep isn't listed as signal safe, so we use select instead */ select(0, NULL, NULL, NULL, &tv); - atomicGet(g_num_threads_done, curr_done_count); + curr_done_count = atomic_load_explicit(&g_num_threads_done,memory_order_relaxed); clock_gettime(CLOCK_REALTIME, &curr_time); } while (curr_done_count < g_tids_len && curr_time.tv_sec <= timeout_time.tv_sec); @@ -161,7 +160,7 @@ static void ThreadsManager_cleanups(void) { g_num_threads_done = 0; /* Lastly, turn off g_in_progress */ - atomicSet(g_in_progress, 0); + atomic_store_explicit(&g_in_progress,0,memory_order_relaxed); } #else diff --git a/src/valkey-benchmark.c b/src/valkey-benchmark.c index 0232d631b4..2eec75a10a 100644 --- a/src/valkey-benchmark.c +++ b/src/valkey-benchmark.c @@ -41,6 +41,7 @@ #include #include #include +#include #include /* Use hiredis' sds compat header that maps sds calls to their hi_ variants */ #include /* Use hiredis sds. 
*/ @@ -54,7 +55,6 @@ #include "adlist.h" #include "dict.h" #include "zmalloc.h" -#include "atomicvar.h" #include "crc16_slottable.h" #include "hdr_histogram.h" #include "cli_common.h" @@ -84,11 +84,11 @@ static struct config { int tls; struct cliSSLconfig sslconfig; int numclients; - serverAtomic int liveclients; + _Atomic int liveclients; int requests; - serverAtomic int requests_issued; - serverAtomic int requests_finished; - serverAtomic int previous_requests_finished; + _Atomic int requests_issued; + _Atomic int requests_finished; + _Atomic int previous_requests_finished; int last_printed_bytes; long long previous_tick; int keysize; @@ -115,11 +115,11 @@ static struct config { int cluster_node_count; struct clusterNode **cluster_nodes; struct serverConfig *redis_config; - struct hdr_histogram *latency_histogram; - struct hdr_histogram *current_sec_latency_histogram; - serverAtomic int is_fetching_slots; - serverAtomic int is_updating_slots; - serverAtomic int slots_last_update; + struct hdr_histogram* latency_histogram; + struct hdr_histogram* current_sec_latency_histogram; + _Atomic int is_fetching_slots; + _Atomic int is_updating_slots; + _Atomic int slots_last_update; int enable_tracking; pthread_mutex_t liveclients_mutex; pthread_mutex_t is_updating_slots_mutex; @@ -344,8 +344,7 @@ static void freeClient(client c) { aeDeleteFileEvent(el, c->context->fd, AE_WRITABLE); aeDeleteFileEvent(el, c->context->fd, AE_READABLE); if (c->thread_id >= 0) { - int requests_finished = 0; - atomicGet(config.requests_finished, requests_finished); + int requests_finished = atomic_load_explicit(&config.requests_finished,memory_order_relaxed); if (requests_finished >= config.requests) { aeStop(el); } @@ -403,8 +402,7 @@ static void setClusterKeyHashTag(client c) { assert(c->thread_id >= 0); clusterNode *node = c->cluster_node; assert(node); - int is_updating_slots = 0; - atomicGet(config.is_updating_slots, is_updating_slots); + int is_updating_slots = 
atomic_load_explicit(&config.is_updating_slots,memory_order_relaxed); /* If updateClusterSlotsConfiguration is updating the slots array, * call updateClusterSlotsConfiguration is order to block the thread * since the mutex is locked. When the slots will be updated by the @@ -425,8 +423,7 @@ static void setClusterKeyHashTag(client c) { } static void clientDone(client c) { - int requests_finished = 0; - atomicGet(config.requests_finished, requests_finished); + int requests_finished = atomic_load_explicit(&config.requests_finished,memory_order_relaxed); if (requests_finished >= config.requests) { freeClient(c); if (!config.num_threads && config.el) aeStop(config.el); @@ -520,28 +517,23 @@ static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) { } continue; } - int requests_finished = 0; - atomicGetIncr(config.requests_finished, requests_finished, 1); - if (requests_finished < config.requests) { - if (config.num_threads == 0) { - hdr_record_value(config.latency_histogram, // Histogram to record to - (long)c->latency <= CONFIG_LATENCY_HISTOGRAM_MAX_VALUE - ? (long)c->latency - : CONFIG_LATENCY_HISTOGRAM_MAX_VALUE); // Value to record - hdr_record_value(config.current_sec_latency_histogram, // Histogram to record to - (long)c->latency <= CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE - ? (long)c->latency - : CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE); // Value to record - } else { - hdr_record_value_atomic(config.latency_histogram, // Histogram to record to - (long)c->latency <= CONFIG_LATENCY_HISTOGRAM_MAX_VALUE - ? (long)c->latency - : CONFIG_LATENCY_HISTOGRAM_MAX_VALUE); // Value to record - hdr_record_value_atomic(config.current_sec_latency_histogram, // Histogram to record to - (long)c->latency <= CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE - ? 
(long)c->latency - : CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE); // Value to record - } + int requests_finished = atomic_fetch_add_explicit(&config.requests_finished,1,memory_order_relaxed); + if (requests_finished < config.requests){ + if (config.num_threads == 0) { + hdr_record_value( + config.latency_histogram, // Histogram to record to + (long)c->latency<=CONFIG_LATENCY_HISTOGRAM_MAX_VALUE ? (long)c->latency : CONFIG_LATENCY_HISTOGRAM_MAX_VALUE); // Value to record + hdr_record_value( + config.current_sec_latency_histogram, // Histogram to record to + (long)c->latency<=CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE ? (long)c->latency : CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE); // Value to record + } else { + hdr_record_value_atomic( + config.latency_histogram, // Histogram to record to + (long)c->latency<=CONFIG_LATENCY_HISTOGRAM_MAX_VALUE ? (long)c->latency : CONFIG_LATENCY_HISTOGRAM_MAX_VALUE); // Value to record + hdr_record_value_atomic( + config.current_sec_latency_histogram, // Histogram to record to + (long)c->latency<=CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE ? (long)c->latency : CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE); // Value to record + } } c->pending--; if (c->pending == 0) { @@ -564,8 +556,7 @@ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* Initialize request when nothing was written. */ if (c->written == 0) { /* Enforce upper bound to number of requests. */ - int requests_issued = 0; - atomicGetIncr(config.requests_issued, requests_issued, config.pipeline); + int requests_issued = atomic_fetch_add_explicit(&config.requests_issued,config.pipeline,memory_order_relaxed); if (requests_issued >= config.requests) { return; } @@ -573,7 +564,7 @@ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* Really initialize: randomize keys and set start time. 
*/ if (config.randomkeys) randomizeClientKey(c); if (config.cluster_mode && c->staglen > 0) setClusterKeyHashTag(c); - atomicGet(config.slots_last_update, c->slots_last_update); + c->slots_last_update = atomic_load_explicit(&config.slots_last_update, memory_order_relaxed); c->start = ustime(); c->latency = -1; } @@ -803,9 +794,10 @@ static client createClient(char *cmd, size_t len, client from, int thread_id) { /* In idle mode, clients still need to register readHandler for catching errors */ aeCreateFileEvent(el, c->context->fd, AE_READABLE, readHandler, c); - listAddNodeTail(config.clients, c); - atomicIncr(config.liveclients, 1); - atomicGet(config.slots_last_update, c->slots_last_update); + listAddNodeTail(config.clients,c); + atomic_fetch_add_explicit(&config.liveclients, 1, memory_order_relaxed); + + c->slots_last_update = atomic_load_explicit(&config.slots_last_update, memory_order_relaxed); return c; } @@ -1231,16 +1223,19 @@ static int fetchClusterSlotsConfiguration(client c) { UNUSED(c); int success = 1, is_fetching_slots = 0, last_update = 0; size_t i; - atomicGet(config.slots_last_update, last_update); + + last_update = atomic_load_explicit(&config.slots_last_update, memory_order_relaxed); if (c->slots_last_update < last_update) { c->slots_last_update = last_update; return -1; } redisReply *reply = NULL; - atomicGetIncr(config.is_fetching_slots, is_fetching_slots, 1); - if (is_fetching_slots) return -1; // TODO: use other codes || errno ? - atomicSet(config.is_fetching_slots, 1); - fprintf(stderr, "WARNING: Cluster slots configuration changed, fetching new one...\n"); + + is_fetching_slots = atomic_fetch_add_explicit(&config.is_fetching_slots, 1, memory_order_relaxed); + if (is_fetching_slots) return -1; //TODO: use other codes || errno ? 
+ atomic_store_explicit(&config.is_fetching_slots, 1, memory_order_relaxed); + fprintf(stderr, + "WARNING: Cluster slots configuration changed, fetching new one...\n"); const char *errmsg = "Failed to update cluster slots configuration"; static dictType dtype = { dictSdsHash, /* hash function */ @@ -1310,14 +1305,15 @@ static int fetchClusterSlotsConfiguration(client c) { freeReplyObject(reply); redisFree(ctx); dictRelease(masters); - atomicSet(config.is_fetching_slots, 0); + atomic_store_explicit(&config.is_fetching_slots, 0, memory_order_relaxed); return success; } /* Atomically update the new slots configuration. */ static void updateClusterSlotsConfiguration(void) { pthread_mutex_lock(&config.is_updating_slots_mutex); - atomicSet(config.is_updating_slots, 1); + atomic_store_explicit(&config.is_updating_slots, 1, memory_order_relaxed); + int i; for (i = 0; i < config.cluster_node_count; i++) { clusterNode *node = config.cluster_nodes[i]; @@ -1330,8 +1326,8 @@ static void updateClusterSlotsConfiguration(void) { zfree(oldslots); } } - atomicSet(config.is_updating_slots, 0); - atomicIncr(config.slots_last_update, 1); + atomic_store_explicit(&config.is_updating_slots, 0, memory_order_relaxed); + atomic_fetch_add_explicit(&config.slots_last_update, 1, memory_order_relaxed); pthread_mutex_unlock(&config.is_updating_slots_mutex); } @@ -1615,13 +1611,10 @@ int showThroughput(struct aeEventLoop *eventLoop, long long id, void *clientData UNUSED(eventLoop); UNUSED(id); benchmarkThread *thread = (benchmarkThread *)clientData; - int liveclients = 0; - int requests_finished = 0; - int previous_requests_finished = 0; + int liveclients = atomic_load_explicit(&config.liveclients, memory_order_relaxed); + int requests_finished = atomic_load_explicit(&config.requests_finished, memory_order_relaxed); + int previous_requests_finished = atomic_load_explicit(&config.previous_requests_finished, memory_order_relaxed); long long current_tick = mstime(); - atomicGet(config.liveclients, 
liveclients); - atomicGet(config.requests_finished, requests_finished); - atomicGet(config.previous_requests_finished, previous_requests_finished); if (liveclients == 0 && requests_finished != config.requests) { fprintf(stderr, "All clients disconnected... aborting.\n"); @@ -1646,7 +1639,7 @@ int showThroughput(struct aeEventLoop *eventLoop, long long id, void *clientData const float instantaneous_dt = (float)(current_tick - config.previous_tick) / 1000.0; const float instantaneous_rps = (float)(requests_finished - previous_requests_finished) / instantaneous_dt; config.previous_tick = current_tick; - atomicSet(config.previous_requests_finished, requests_finished); + atomic_store_explicit(&config.previous_requests_finished, requests_finished, memory_order_relaxed); printf("%*s\r", config.last_printed_bytes, " "); /* ensure there is a clean line */ int printed_bytes = printf("%s: rps=%.1f (overall: %.1f) avg_msec=%.3f (overall: %.3f)\r", config.title, instantaneous_rps, rps, diff --git a/src/zmalloc.c b/src/zmalloc.c index 9819ab23a4..8819f0c518 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -51,7 +51,7 @@ void zlibc_free(void *ptr) { #include #include "zmalloc.h" -#include "atomicvar.h" +#include #define UNUSED(x) ((void)(x)) @@ -87,10 +87,10 @@ void zlibc_free(void *ptr) { #define dallocx(ptr, flags) je_dallocx(ptr, flags) #endif -#define update_zmalloc_stat_alloc(__n) atomicIncr(used_memory, (__n)) -#define update_zmalloc_stat_free(__n) atomicDecr(used_memory, (__n)) +#define update_zmalloc_stat_alloc(__n) atomic_fetch_add_explicit(&used_memory, (__n), memory_order_relaxed) +#define update_zmalloc_stat_free(__n) atomic_fetch_sub_explicit(&used_memory, (__n), memory_order_relaxed) -static serverAtomic size_t used_memory = 0; +static _Atomic size_t used_memory = 0; static void zmalloc_default_oom(size_t size) { fprintf(stderr, "zmalloc: Out of memory trying to allocate %zu bytes\n", size); @@ -388,8 +388,7 @@ char *zstrdup(const char *s) { } size_t 
zmalloc_used_memory(void) { - size_t um; - atomicGet(used_memory, um); + size_t um = atomic_load_explicit(&used_memory,memory_order_relaxed); return um; } From e4ead9442b8e970defc26d25c62580bedfd42a80 Mon Sep 17 00:00:00 2001 From: Ping Xie Date: Mon, 27 May 2024 07:11:24 -0700 Subject: [PATCH 06/42] Make CLUSTER SETSLOT with TIMEOUT 0 block indefinitely (#556) This aligns the behaviour with established Valkey commands with a TIMEOUT argument, such as BLPOP. Fix #422 Signed-off-by: Ping Xie --- src/cluster_legacy.c | 56 ++++++++++++++------------- tests/unit/cluster/slot-migration.tcl | 21 ++++++++++ 2 files changed, 51 insertions(+), 26 deletions(-) diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index 0822429934..63364d3596 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -5878,10 +5878,18 @@ char *clusterNodeGetShardId(clusterNode *node) { return node->shard_id; } -int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, int *timeout_out) { +/* clusterParseSetSlotCommand validates the arguments of the CLUSTER SETSLOT command, + * extracts the target slot number (slot_out), and determines the target node (node_out) + * if applicable. It also calculates a timeout value (timeout_out) based on an optional + * timeout argument. If provided, the timeout is added to the current time to obtain an + * absolute timestamp; if omitted, the default timeout CLUSTER_OPERATION_TIMEOUT is used; + * if set to 0, it indicates no timeout. The function returns 1 if successful, and 0 + * otherwise, after sending an error message to the client. 
*/ +int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, mstime_t *timeout_out) { int slot = -1; clusterNode *n = NULL; - int timeout = 0; + mstime_t timeout = commandTimeSnapshot() + CLUSTER_OPERATION_TIMEOUT; + int optarg_pos = 0; /* Allow primaries to replicate "CLUSTER SETSLOT" */ if (!(c->flags & CLIENT_MASTER) && nodeIsSlave(myself)) { @@ -5889,27 +5897,10 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, return 0; } - /* Process optional arguments */ - for (int i = 0; i < c->argc;) { - if (!strcasecmp(c->argv[i]->ptr, "timeout")) { - if (i + 1 < c->argc) { - timeout = (int)strtol(c->argv[i + 1]->ptr, NULL, 10); - decrRefCount(c->argv[i]); - decrRefCount(c->argv[i + 1]); - memmove(&c->argv[i], &c->argv[i + 2], c->argc - i - 2); - c->argc -= 2; - continue; - } - addReplyError(c, "Missing timeout value."); - return 0; - } - i++; - } - if ((slot = getSlotOrReply(c, c->argv[2])) == -1) return 0; if (!strcasecmp(c->argv[3]->ptr, "migrating") && c->argc >= 5) { - /* Scope the check to primaries only */ + /* CLUSTER SETSLOT MIGRATING */ if (nodeIsMaster(myself) && server.cluster->slots[slot] != myself) { addReplyErrorFormat(c, "I'm not the owner of hash slot %u", slot); return 0; @@ -5923,7 +5914,9 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyError(c, "Target node is not a master"); return 0; } + if (c->argc > 5) optarg_pos = 5; } else if (!strcasecmp(c->argv[3]->ptr, "importing") && c->argc >= 5) { + /* CLUSTER SETSLOT IMPORTING */ if (server.cluster->slots[slot] == myself) { addReplyErrorFormat(c, "I'm already the owner of hash slot %u", slot); return 0; @@ -5937,8 +5930,10 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyError(c, "Target node is not a master"); return 0; } + if (c->argc > 5) optarg_pos = 5; } else if (!strcasecmp(c->argv[3]->ptr, "stable") && c->argc >= 4) { - /* Do nothing */ + /* CLUSTER SETSLOT 
STABLE */ + if (c->argc > 4) optarg_pos = 4; } else if (!strcasecmp(c->argv[3]->ptr, "node") && c->argc >= 5) { /* CLUSTER SETSLOT NODE */ n = clusterLookupNode(c->argv[4]->ptr, sdslen(c->argv[4]->ptr)); @@ -5961,11 +5956,23 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, return 0; } } + if (c->argc > 5) optarg_pos = 5; } else { addReplyError(c, "Invalid CLUSTER SETSLOT action or number of arguments. Try CLUSTER HELP"); return 0; } + /* Process optional arguments */ + for (int i = optarg_pos; i < c->argc; i++) { + if (!strcasecmp(c->argv[i]->ptr, "timeout")) { + if (i + 1 >= c->argc) { + addReplyError(c, "Missing timeout value"); + return 0; + } + if (getTimeoutFromObjectOrReply(c, c->argv[i + 1], &timeout, UNIT_MILLISECONDS) != C_OK) return 0; + } + } + *slot_out = slot; *node_out = n; *timeout_out = timeout; @@ -5974,7 +5981,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, void clusterCommandSetSlot(client *c) { int slot; - int timeout_ms; + mstime_t timeout_ms; clusterNode *n; if (!clusterParseSetSlotCommand(c, &slot, &n, &timeout_ms)) return; @@ -6019,10 +6026,7 @@ void clusterCommandSetSlot(client *c) { * 2. The repl offset target is set to the master's current repl offset + 1. * There is no concern of partial replication because replicas always * ack the repl offset at the command boundary. 
*/ - if (timeout_ms == 0) { - timeout_ms = CLUSTER_OPERATION_TIMEOUT; - } - blockForPreReplication(c, mstime() + timeout_ms, server.master_repl_offset + 1, myself->numslaves); + blockForPreReplication(c, timeout_ms, server.master_repl_offset + 1, myself->numslaves); replicationRequestAckFromSlaves(); return; } diff --git a/tests/unit/cluster/slot-migration.tcl b/tests/unit/cluster/slot-migration.tcl index d141ccc5e0..008e97e037 100644 --- a/tests/unit/cluster/slot-migration.tcl +++ b/tests/unit/cluster/slot-migration.tcl @@ -379,6 +379,27 @@ start_cluster 3 3 {tags {external:skip cluster} overrides {cluster-allow-replica } } +start_cluster 3 3 {tags {external:skip cluster} overrides {cluster-allow-replica-migration no cluster-node-timeout 1000} } { + set R1_id [R 1 CLUSTER MYID] + + test "CLUSTER SETSLOT with invalid timeouts" { + catch {R 0 CLUSTER SETSLOT 609 MIGRATING $R1_id TIMEOUT} e + assert_equal $e "ERR Missing timeout value" + + catch {R 0 CLUSTER SETSLOT 609 MIGRATING $R1_id TIMEOUT -1} e + assert_equal $e "ERR timeout is negative" + + catch {R 0 CLUSTER SETSLOT 609 MIGRATING $R1_id TIMEOUT 99999999999999999999} e + assert_equal $e "ERR timeout is not an integer or out of range" + + catch {R 0 CLUSTER SETSLOT 609 MIGRATING $R1_id TIMEOUT abc} e + assert_equal $e "ERR timeout is not an integer or out of range" + + catch {R 0 CLUSTER SETSLOT 609 TIMEOUT 100 MIGRATING $R1_id} e + assert_equal $e "ERR Invalid CLUSTER SETSLOT action or number of arguments. 
Try CLUSTER HELP" + } +} + start_cluster 3 3 {tags {external:skip cluster} overrides {cluster-allow-replica-migration no cluster-node-timeout 1000} } { set R1_id [R 1 CLUSTER MYID] From 045d475a94f1dceae46170426195d3b9dd4fdf81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20S=C3=B6derqvist?= Date: Mon, 27 May 2024 23:03:34 +0200 Subject: [PATCH 07/42] Implement REPLCONF VERSION (#554) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The replica sends its version when initiating replication, in pipeline with other REPLCONF commands. The primary stores it in the client struct. Other fields are made smaller to avoid making the client struct consume more memory. Fixes #414. --------- Signed-off-by: Viktor Söderqvist --- src/networking.c | 1 + src/replication.c | 34 +++++++++++++++++++++++++++++++++- src/server.h | 22 ++++++++++++---------- src/unit/test_files.h | 3 ++- src/unit/test_util.c | 15 +++++++++++++++ src/util.c | 23 +++++++++++++++++++++++ src/util.h | 1 + 7 files changed, 87 insertions(+), 12 deletions(-) diff --git a/src/networking.c b/src/networking.c index 121931a111..7054ffc126 100644 --- a/src/networking.c +++ b/src/networking.c @@ -177,6 +177,7 @@ client *createClient(connection *conn) { c->repl_last_partial_write = 0; c->slave_listening_port = 0; c->slave_addr = NULL; + c->replica_version = 0; c->slave_capa = SLAVE_CAPA_NONE; c->slave_req = SLAVE_REQ_NONE; c->reply = listCreate(); diff --git a/src/replication.c b/src/replication.c index e2612e75b5..069d60a678 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1122,7 +1122,10 @@ void syncCommand(client *c) { * - rdb-filter-only * Define "include" filters for the RDB snapshot. Currently we only support * a single include filter: "functions". Passing an empty string "" will - * result in an empty RDB. */ + * result in an empty RDB. + * + * - version + * The replica reports its version. 
*/ void replconfCommand(client *c) { int j; @@ -1225,6 +1228,15 @@ void replconfCommand(client *c) { } } sdsfreesplitres(filters, filter_count); + } else if (!strcasecmp(c->argv[j]->ptr, "version")) { + /* REPLCONF VERSION x.y.z */ + int version = version2num(c->argv[j + 1]->ptr); + if (version >= 0) { + c->replica_version = version; + } else { + addReplyErrorFormat(c, "Unrecognized version format: %s", (char *)c->argv[j + 1]->ptr); + return; + } } else { addReplyErrorFormat(c, "Unrecognized REPLCONF option: %s", (char *)c->argv[j]->ptr); return; @@ -2623,6 +2635,10 @@ void syncWithMaster(connection *conn) { err = sendCommand(conn, "REPLCONF", "capa", "eof", "capa", "psync2", NULL); if (err) goto write_error; + /* Inform the primary of our (replica) version. */ + err = sendCommand(conn, "REPLCONF", "version", VALKEY_VERSION, NULL); + if (err) goto write_error; + server.repl_state = REPL_STATE_RECEIVE_AUTH_REPLY; return; } @@ -2696,6 +2712,22 @@ void syncWithMaster(connection *conn) { } sdsfree(err); err = NULL; + server.repl_state = REPL_STATE_RECEIVE_VERSION_REPLY; + } + + /* Receive VERSION reply. */ + if (server.repl_state == REPL_STATE_RECEIVE_VERSION_REPLY) { + err = receiveSynchronousResponse(conn); + if (err == NULL) goto no_response_error; + /* Ignore the error if any. Valkey >= 8 supports REPLCONF VERSION. 
*/ + if (err[0] == '-') { + serverLog(LL_NOTICE, + "(Non critical) Primary does not understand " + "REPLCONF VERSION: %s", + err); + } + sdsfree(err); + err = NULL; server.repl_state = REPL_STATE_SEND_PSYNC; } diff --git a/src/server.h b/src/server.h index 70beb54f43..7011be3033 100644 --- a/src/server.h +++ b/src/server.h @@ -468,14 +468,15 @@ typedef enum { REPL_STATE_CONNECT, /* Must connect to master */ REPL_STATE_CONNECTING, /* Connecting to master */ /* --- Handshake states, must be ordered --- */ - REPL_STATE_RECEIVE_PING_REPLY, /* Wait for PING reply */ - REPL_STATE_SEND_HANDSHAKE, /* Send handshake sequence to master */ - REPL_STATE_RECEIVE_AUTH_REPLY, /* Wait for AUTH reply */ - REPL_STATE_RECEIVE_PORT_REPLY, /* Wait for REPLCONF reply */ - REPL_STATE_RECEIVE_IP_REPLY, /* Wait for REPLCONF reply */ - REPL_STATE_RECEIVE_CAPA_REPLY, /* Wait for REPLCONF reply */ - REPL_STATE_SEND_PSYNC, /* Send PSYNC */ - REPL_STATE_RECEIVE_PSYNC_REPLY, /* Wait for PSYNC reply */ + REPL_STATE_RECEIVE_PING_REPLY, /* Wait for PING reply */ + REPL_STATE_SEND_HANDSHAKE, /* Send handshake sequence to master */ + REPL_STATE_RECEIVE_AUTH_REPLY, /* Wait for AUTH reply */ + REPL_STATE_RECEIVE_PORT_REPLY, /* Wait for REPLCONF reply */ + REPL_STATE_RECEIVE_IP_REPLY, /* Wait for REPLCONF reply */ + REPL_STATE_RECEIVE_CAPA_REPLY, /* Wait for REPLCONF reply */ + REPL_STATE_RECEIVE_VERSION_REPLY, /* Wait for REPLCONF reply */ + REPL_STATE_SEND_PSYNC, /* Send PSYNC */ + REPL_STATE_RECEIVE_PSYNC_REPLY, /* Wait for PSYNC reply */ /* --- End of handshake states --- */ REPL_STATE_TRANSFER, /* Receiving .rdb from master */ REPL_STATE_CONNECTED, /* Connected to master */ @@ -1258,8 +1259,9 @@ typedef struct client { char replid[CONFIG_RUN_ID_SIZE + 1]; /* Master replication ID (if master). */ int slave_listening_port; /* As configured with: REPLCONF listening-port */ char *slave_addr; /* Optionally given by REPLCONF ip-address */ - int slave_capa; /* Slave capabilities: SLAVE_CAPA_* bitwise OR. 
*/ - int slave_req; /* Slave requirements: SLAVE_REQ_* */ + int replica_version; /* Version on the form 0xMMmmpp. */ + short slave_capa; /* Slave capabilities: SLAVE_CAPA_* bitwise OR. */ + short slave_req; /* Slave requirements: SLAVE_REQ_* */ multiState mstate; /* MULTI/EXEC state */ blockingState bstate; /* blocking state */ long long woff; /* Last write global replication offset. */ diff --git a/src/unit/test_files.h b/src/unit/test_files.h index 6af8f4c380..7da3d26473 100644 --- a/src/unit/test_files.h +++ b/src/unit/test_files.h @@ -29,6 +29,7 @@ int test_string2l(int argc, char **argv, int flags); int test_ll2string(int argc, char **argv, int flags); int test_ld2string(int argc, char **argv, int flags); int test_fixedpoint_d2string(int argc, char **argv, int flags); +int test_version2num(int argc, char **argv, int flags); int test_reclaimFilePageCache(int argc, char **argv, int flags); int test_ziplistCreateIntList(int argc, char **argv, int flags); int test_ziplistPop(int argc, char **argv, int flags); @@ -77,7 +78,7 @@ unitTest __test_intset_c[] = {{"test_intsetValueEncodings", test_intsetValueEnco unitTest __test_kvstore_c[] = {{"test_kvstoreAdd16Keys", test_kvstoreAdd16Keys}, {"test_kvstoreIteratorRemoveAllKeysNoDeleteEmptyDict", test_kvstoreIteratorRemoveAllKeysNoDeleteEmptyDict}, {"test_kvstoreIteratorRemoveAllKeysDeleteEmptyDict", test_kvstoreIteratorRemoveAllKeysDeleteEmptyDict}, {"test_kvstoreDictIteratorRemoveAllKeysNoDeleteEmptyDict", test_kvstoreDictIteratorRemoveAllKeysNoDeleteEmptyDict}, {"test_kvstoreDictIteratorRemoveAllKeysDeleteEmptyDict", test_kvstoreDictIteratorRemoveAllKeysDeleteEmptyDict}, {NULL, NULL}}; unitTest __test_sds_c[] = {{"test_sds", test_sds}, {NULL, NULL}}; unitTest __test_sha1_c[] = {{"test_sha1", test_sha1}, {NULL, NULL}}; -unitTest __test_util_c[] = {{"test_string2ll", test_string2ll}, {"test_string2l", test_string2l}, {"test_ll2string", test_ll2string}, {"test_ld2string", test_ld2string}, {"test_fixedpoint_d2string", 
test_fixedpoint_d2string}, {"test_reclaimFilePageCache", test_reclaimFilePageCache}, {NULL, NULL}}; +unitTest __test_util_c[] = {{"test_string2ll", test_string2ll}, {"test_string2l", test_string2l}, {"test_ll2string", test_ll2string}, {"test_ld2string", test_ld2string}, {"test_fixedpoint_d2string", test_fixedpoint_d2string}, {"test_version2num", test_version2num}, {"test_reclaimFilePageCache", test_reclaimFilePageCache}, {NULL, NULL}}; unitTest __test_ziplist_c[] = {{"test_ziplistCreateIntList", test_ziplistCreateIntList}, {"test_ziplistPop", test_ziplistPop}, {"test_ziplistGetElementAtIndex3", test_ziplistGetElementAtIndex3}, {"test_ziplistGetElementOutOfRange", test_ziplistGetElementOutOfRange}, {"test_ziplistGetLastElement", test_ziplistGetLastElement}, {"test_ziplistGetFirstElement", test_ziplistGetFirstElement}, {"test_ziplistGetElementOutOfRangeReverse", test_ziplistGetElementOutOfRangeReverse}, {"test_ziplistIterateThroughFullList", test_ziplistIterateThroughFullList}, {"test_ziplistIterateThroughListFrom1ToEnd", test_ziplistIterateThroughListFrom1ToEnd}, {"test_ziplistIterateThroughListFrom2ToEnd", test_ziplistIterateThroughListFrom2ToEnd}, {"test_ziplistIterateThroughStartOutOfRange", test_ziplistIterateThroughStartOutOfRange}, {"test_ziplistIterateBackToFront", test_ziplistIterateBackToFront}, {"test_ziplistIterateBackToFrontDeletingAllItems", test_ziplistIterateBackToFrontDeletingAllItems}, {"test_ziplistDeleteInclusiveRange0To0", test_ziplistDeleteInclusiveRange0To0}, {"test_ziplistDeleteInclusiveRange0To1", test_ziplistDeleteInclusiveRange0To1}, {"test_ziplistDeleteInclusiveRange1To2", test_ziplistDeleteInclusiveRange1To2}, {"test_ziplistDeleteWithStartIndexOutOfRange", test_ziplistDeleteWithStartIndexOutOfRange}, {"test_ziplistDeleteWithNumOverflow", test_ziplistDeleteWithNumOverflow}, {"test_ziplistDeleteFooWhileIterating", test_ziplistDeleteFooWhileIterating}, {"test_ziplistReplaceWithSameSize", test_ziplistReplaceWithSameSize}, 
{"test_ziplistReplaceWithDifferentSize", test_ziplistReplaceWithDifferentSize}, {"test_ziplistRegressionTestForOver255ByteStrings", test_ziplistRegressionTestForOver255ByteStrings}, {"test_ziplistRegressionTestDeleteNextToLastEntries", test_ziplistRegressionTestDeleteNextToLastEntries}, {"test_ziplistCreateLongListAndCheckIndices", test_ziplistCreateLongListAndCheckIndices}, {"test_ziplistCompareStringWithZiplistEntries", test_ziplistCompareStringWithZiplistEntries}, {"test_ziplistMergeTest", test_ziplistMergeTest}, {"test_ziplistStressWithRandomPayloadsOfDifferentEncoding", test_ziplistStressWithRandomPayloadsOfDifferentEncoding}, {"test_ziplistCascadeUpdateEdgeCases", test_ziplistCascadeUpdateEdgeCases}, {"test_ziplistInsertEdgeCase", test_ziplistInsertEdgeCase}, {"test_ziplistStressWithVariableSize", test_ziplistStressWithVariableSize}, {"test_BenchmarkziplistFind", test_BenchmarkziplistFind}, {"test_BenchmarkziplistIndex", test_BenchmarkziplistIndex}, {"test_BenchmarkziplistValidateIntegrity", test_BenchmarkziplistValidateIntegrity}, {"test_BenchmarkziplistCompareWithString", test_BenchmarkziplistCompareWithString}, {"test_BenchmarkziplistCompareWithNumber", test_BenchmarkziplistCompareWithNumber}, {"test_ziplistStress__ziplistCascadeUpdate", test_ziplistStress__ziplistCascadeUpdate}, {NULL, NULL}}; unitTest __test_zmalloc_c[] = {{"test_zmallocInitialUsedMemory", test_zmallocInitialUsedMemory}, {"test_zmallocAllocReallocCallocAndFree", test_zmallocAllocReallocCallocAndFree}, {"test_zmallocAllocZeroByteAndFree", test_zmallocAllocZeroByteAndFree}, {NULL, NULL}}; diff --git a/src/unit/test_util.c b/src/unit/test_util.c index 30f70c8350..ade3d60901 100644 --- a/src/unit/test_util.c +++ b/src/unit/test_util.c @@ -253,6 +253,21 @@ int test_fixedpoint_d2string(int argc, char **argv, int flags) { return 0; } +int test_version2num(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + TEST_ASSERT(version2num("7.2.5") == 0x070205); + 
TEST_ASSERT(version2num("255.255.255") == 0xffffff); + TEST_ASSERT(version2num("7.2.256") == -1); + TEST_ASSERT(version2num("7.2") == -1); + TEST_ASSERT(version2num("7.2.1.0") == -1); + TEST_ASSERT(version2num("1.-2.-3") == -1); + TEST_ASSERT(version2num("1.2.3-rc4") == -1); + TEST_ASSERT(version2num("") == -1); + return 0; +} + #if defined(__linux__) /* Since fadvise and mincore is only supported in specific platforms like * Linux, we only verify the fadvise mechanism works in Linux */ diff --git a/src/util.c b/src/util.c index c3e494f526..8f5e03614c 100644 --- a/src/util.c +++ b/src/util.c @@ -872,6 +872,29 @@ int ld2string(char *buf, size_t len, long double value, ld2string_mode mode) { return 0; } +/* Parses a version string on the form "major.minor.patch" and returns an + * integer on the form 0xMMmmpp. Returns -1 on parse error. */ +int version2num(const char *version) { + int v = 0, part = 0, numdots = 0; + const char *p = version; + do { + if (*p >= '0' && *p <= '9') { + part = part * 10 + (unsigned)(*p - '0'); + if (part > 255) return -1; + } else if (*p == '.') { + if (++numdots > 2) return -1; + v = (v << 8) | part; + part = 0; + } else { + return -1; + } + p++; + } while (*p); + if (numdots != 2) return -1; + v = (v << 8) | part; + return v; +} + /* Get random bytes, attempts to get an initial seed from /dev/urandom and * the uses a one way hash function in counter mode to generate a random * stream. However if /dev/urandom is not available, a weaker seed is used. 
diff --git a/src/util.h b/src/util.h index 3d19c9c04c..d675f4c6cd 100644 --- a/src/util.h +++ b/src/util.h @@ -78,6 +78,7 @@ int d2string(char *buf, size_t len, double value); int fixedpoint_d2string(char *dst, size_t dstlen, double dvalue, int fractional_digits); int ld2string(char *buf, size_t len, long double value, ld2string_mode mode); int double2ll(double d, long long *out); +int version2num(const char *version); int yesnotoi(char *s); sds getAbsolutePath(char *filename); long getTimeZone(void); From 4e44f5aae934b2d459d0c6d3957ec01d6d2b014a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20S=C3=B6derqvist?= Date: Tue, 28 May 2024 17:13:16 +0200 Subject: [PATCH 08/42] Fix races in test for tot-net-in, tot-net-out, tot-cmds (#559) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The races are between the '$rd' client and the 'r' client in the test case. Test case "client input output and command process statistics" in unit/introspection. --------- Signed-off-by: Viktor Söderqvist --- tests/unit/introspection.tcl | 44 ++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 92bacaa071..9a0f3d7b31 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -41,6 +41,14 @@ start_server {tags {"introspection"}} { return "" } + proc get_client_tot_in_out_cmds {id} { + set info_list [r client list] + set in [get_field_in_client_list $id $info_list "tot-net-in"] + set out [get_field_in_client_list $id $info_list "tot-net-out"] + set cmds [get_field_in_client_list $id $info_list "tot-cmds"] + return [list $in $out $cmds] + } + test {client input output and command process statistics} { set info1 [r client info] set input1 [get_field_in_client_info $info1 "tot-net-in"] @@ -63,21 +71,29 @@ start_server {tags {"introspection"}} { set output3 [get_field_in_client_list $rd_id $info_list "tot-net-out"] set 
cmd3 [get_field_in_client_list $rd_id $info_list "tot-cmds"] $rd blpop mylist 0 - set info_list [r client list] - set input4 [get_field_in_client_list $rd_id $info_list "tot-net-in"] - set output4 [get_field_in_client_list $rd_id $info_list "tot-net-out"] - set cmd4 [get_field_in_client_list $rd_id $info_list "tot-cmds"] - assert_equal [expr $input3+34] $input4 - assert_equal $output3 $output4 - assert_equal $cmd3 $cmd4 + set input4 [expr $input3 + 34] + set output4 $output3 + set cmd4 $cmd3 + wait_for_condition 5 100 { + [list $input4 $output4 $cmd4] eq [get_client_tot_in_out_cmds $rd_id] + } else { + puts "--------- tot-net-in tot-net-out tot-cmds (4)" + puts "Expected: [list $input4 $output4 $cmd4]" + puts "Actual: [get_client_tot_in_out_cmds $rd_id]" + fail "Blocked BLPOP didn't increment expected client fields" + } r lpush mylist a - set info_list [r client list] - set input5 [get_field_in_client_list $rd_id $info_list "tot-net-in"] - set output5 [get_field_in_client_list $rd_id $info_list "tot-net-out"] - set cmd5 [get_field_in_client_list $rd_id $info_list "tot-cmds"] - assert_equal $input4 $input5 - assert_equal [expr $output4+23] $output5 - assert_equal [expr $cmd4+1] $cmd5 + set input5 $input4 + set output5 [expr $output4 + 23] + set cmd5 [expr $cmd4 + 1] + wait_for_condition 5 100 { + [list $input5 $output5 $cmd5] eq [get_client_tot_in_out_cmds $rd_id] + } else { + puts "--------- tot-net-in tot-net-out tot-cmds (5)" + puts "Expected: [list $input5 $output5 $cmd5]" + puts "Actual: [get_client_tot_in_out_cmds $rd_id]" + fail "Unblocked BLPOP didn't increment expected client fields" + } $rd close # test recursive command set info [r client info] From 84157890fd710c6117032c20ba4602cbe19970d1 Mon Sep 17 00:00:00 2001 From: Ping Xie Date: Tue, 28 May 2024 09:27:51 -0700 Subject: [PATCH 09/42] Set up clang-format github action (#538) Setup clang-format GitHub action to ensure coding style consistency --------- Signed-off-by: Ping Xie --- 
.github/workflows/clang-format.yml | 48 ++++ src/acl.c | 15 +- src/aof.c | 9 +- src/bio.c | 4 +- src/lazyfree.c | 63 +++-- src/listpack.c | 14 +- src/modules/helloacl.c | 46 ++-- src/modules/helloblock.c | 69 +++--- src/modules/hellocluster.c | 55 +++-- src/modules/hellodict.c | 36 ++- src/modules/hellohook.c | 27 +- src/modules/hellotimer.c | 11 +- src/modules/hellotype.c | 148 +++++------ src/modules/helloworld.c | 302 ++++++++++------------ src/networking.c | 16 +- src/rdb.c | 2 +- src/replication.c | 14 +- src/server.c | 39 ++- src/server.h | 269 ++++++++++---------- src/threads_mngr.c | 8 +- src/unit/test_crc64.c | 14 +- src/unit/test_crc64combine.c | 65 +++-- src/unit/test_endianconv.c | 6 +- src/unit/test_files.h | 1 + src/unit/test_help.h | 42 ++-- src/unit/test_intset.c | 107 ++++---- src/unit/test_kvstore.c | 27 +- src/unit/test_main.c | 15 +- src/unit/test_sds.c | 109 ++++---- src/unit/test_sha1.c | 10 +- src/unit/test_util.c | 106 ++++---- src/unit/test_ziplist.c | 385 +++++++++++++---------------- src/util.c | 8 +- src/valkey-benchmark.c | 57 +++-- src/zmalloc.c | 2 +- utils/generate-unit-test-header.py | 3 +- 36 files changed, 1031 insertions(+), 1121 deletions(-) create mode 100644 .github/workflows/clang-format.yml diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml new file mode 100644 index 0000000000..b851ffe926 --- /dev/null +++ b/.github/workflows/clang-format.yml @@ -0,0 +1,48 @@ +name: Clang Format Check + +on: + pull_request: + paths: + - 'src/**' + +jobs: + clang-format-check: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Set up Clang + run: | + sudo apt-get update -y + sudo apt-get upgrade -y + sudo apt-get install software-properties-common -y + wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | gpg --dearmor | sudo tee /usr/share/keyrings/llvm-toolchain.gpg > /dev/null + echo "deb 
[signed-by=/usr/share/keyrings/llvm-toolchain.gpg] http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-18 main" | sudo tee /etc/apt/sources.list.d/llvm.list + sudo apt-get update -y + sudo apt-get install clang-format-18 -y + - name: Run clang-format + id: clang-format + run: | + # Run clang-format and capture the diff + cd src + shopt -s globstar + clang-format-18 -i **/*.c **/*.h + # Capture the diff output + DIFF=$(git diff) + if [ ! -z "$DIFF" ]; then + # Encode the diff in Base64 to ensure it's handled as a single line + ENCODED_DIFF=$(echo "$DIFF" | base64 -w 0) + echo "diff=$ENCODED_DIFF" >> $GITHUB_OUTPUT + fi + shell: bash + + - name: Check for formatting changes + if: ${{ steps.clang-format.outputs.diff }} + run: | + echo "Code is not formatted correctly. Here is the diff:" + # Decode the Base64 diff to display it + echo "${{ steps.clang-format.outputs.diff }}" | base64 --decode + exit 1 + shell: bash diff --git a/src/acl.c b/src/acl.c index 46ed85baf2..0c3ccb7f6d 100644 --- a/src/acl.c +++ b/src/acl.c @@ -2428,20 +2428,21 @@ sds ACLLoadFromFile(const char *filename) { client *c = listNodeValue(ln); user *original = c->user; list *channels = NULL; - user *new = ACLGetUserByName(c->user->name, sdslen(c->user->name)); - if (new && user_channels) { - if (!raxFind(user_channels, (unsigned char *)(new->name), sdslen(new->name), (void **)&channels)) { - channels = getUpcomingChannelList(new, original); - raxInsert(user_channels, (unsigned char *)(new->name), sdslen(new->name), channels, NULL); + user *new_user = ACLGetUserByName(c->user->name, sdslen(c->user->name)); + if (new_user && user_channels) { + if (!raxFind(user_channels, (unsigned char *)(new_user->name), sdslen(new_user->name), + (void **)&channels)) { + channels = getUpcomingChannelList(new_user, original); + raxInsert(user_channels, (unsigned char *)(new_user->name), sdslen(new_user->name), channels, NULL); } } /* When the new channel list is NULL, it means the new user's 
channel list is a superset of the old user's * list. */ - if (!new || (channels && ACLShouldKillPubsubClient(c, channels))) { + if (!new_user || (channels && ACLShouldKillPubsubClient(c, channels))) { freeClient(c); continue; } - c->user = new; + c->user = new_user; } if (user_channels) raxFreeWithCallback(user_channels, (void (*)(void *))listRelease); diff --git a/src/aof.c b/src/aof.c index d15ff379e5..f3538f64fa 100644 --- a/src/aof.c +++ b/src/aof.c @@ -987,8 +987,7 @@ int startAppendOnly(void) { /* If AOF fsync error in bio job, we just ignore it and log the event. */ int aof_bio_fsync_status = atomic_load_explicit(&server.aof_bio_fsync_status, memory_order_relaxed); if (aof_bio_fsync_status == C_ERR) { - serverLog(LL_WARNING, - "AOF reopen, just ignore the AOF fsync error in bio job"); + serverLog(LL_WARNING, "AOF reopen, just ignore the AOF fsync error in bio job"); atomic_store_explicit(&server.aof_bio_fsync_status, C_OK, memory_order_relaxed); } @@ -1245,8 +1244,7 @@ void flushAppendOnlyFile(int force) { server.aof_last_incr_fsync_offset = server.aof_last_incr_size; server.aof_last_fsync = server.mstime; atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); - } else if (server.aof_fsync == AOF_FSYNC_EVERYSEC && - server.mstime - server.aof_last_fsync >= 1000) { + } else if (server.aof_fsync == AOF_FSYNC_EVERYSEC && server.mstime - server.aof_last_fsync >= 1000) { if (!sync_in_progress) { aof_background_fsync(server.aof_fd); server.aof_last_incr_fsync_offset = server.aof_last_incr_size; @@ -2648,7 +2646,8 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { /* Update the fsynced replication offset that just now become valid. * This could either be the one we took in startAppendOnly, or a * newer one set by the bio thread. 
*/ - long long fsynced_reploff_pending = atomic_load_explicit(&server.fsynced_reploff_pending, memory_order_relaxed); + long long fsynced_reploff_pending = + atomic_load_explicit(&server.fsynced_reploff_pending, memory_order_relaxed); server.fsynced_reploff = fsynced_reploff_pending; } diff --git a/src/bio.c b/src/bio.c index 31d946e566..11692e77ef 100644 --- a/src/bio.c +++ b/src/bio.c @@ -257,9 +257,7 @@ void *bioProcessBackgroundJobs(void *arg) { /* The fd may be closed by main thread and reused for another * socket, pipe, or file. We just ignore these errno because * aof fsync did not really fail. */ - if (valkey_fsync(job->fd_args.fd) == -1 && - errno != EBADF && errno != EINVAL) - { + if (valkey_fsync(job->fd_args.fd) == -1 && errno != EBADF && errno != EINVAL) { int last_status = atomic_load_explicit(&server.aof_bio_fsync_status, memory_order_relaxed); atomic_store_explicit(&server.aof_bio_fsync_errno, errno, memory_order_relaxed); diff --git a/src/lazyfree.c b/src/lazyfree.c index 82e468322e..38ccd913bd 100644 --- a/src/lazyfree.c +++ b/src/lazyfree.c @@ -13,8 +13,8 @@ static _Atomic size_t lazyfreed_objects = 0; void lazyfreeFreeObject(void *args[]) { robj *o = (robj *)args[0]; decrRefCount(o); - atomic_fetch_sub_explicit(&lazyfree_objects,1,memory_order_relaxed); - atomic_fetch_add_explicit(&lazyfreed_objects,1,memory_order_relaxed); + atomic_fetch_sub_explicit(&lazyfree_objects, 1, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, 1, memory_order_relaxed); } /* Release a database from the lazyfree thread. 
The 'db' pointer is the @@ -27,8 +27,8 @@ void lazyfreeFreeDatabase(void *args[]) { size_t numkeys = kvstoreSize(da1); kvstoreRelease(da1); kvstoreRelease(da2); - atomic_fetch_sub_explicit(&lazyfree_objects,numkeys,memory_order_relaxed); - atomic_fetch_add_explicit(&lazyfreed_objects,numkeys,memory_order_relaxed); + atomic_fetch_sub_explicit(&lazyfree_objects, numkeys, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, numkeys, memory_order_relaxed); } /* Release the key tracking table. */ @@ -36,8 +36,8 @@ void lazyFreeTrackingTable(void *args[]) { rax *rt = args[0]; size_t len = rt->numele; freeTrackingRadixTree(rt); - atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); - atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Release the error stats rax tree. */ @@ -45,8 +45,8 @@ void lazyFreeErrors(void *args[]) { rax *errors = args[0]; size_t len = errors->numele; raxFreeWithCallback(errors, zfree); - atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); - atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Release the lua_scripts dict. */ @@ -56,8 +56,8 @@ void lazyFreeLuaScripts(void *args[]) { lua_State *lua = args[2]; long long len = dictSize(lua_scripts); freeLuaScriptsSync(lua_scripts, lua_scripts_lru_list, lua); - atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); - atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Release the functions ctx. 
*/ @@ -65,8 +65,8 @@ void lazyFreeFunctionsCtx(void *args[]) { functionsLibCtx *functions_lib_ctx = args[0]; size_t len = functionsLibCtxFunctionsLen(functions_lib_ctx); functionsLibCtxFree(functions_lib_ctx); - atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); - atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Release replication backlog referencing memory. */ @@ -77,24 +77,24 @@ void lazyFreeReplicationBacklogRefMem(void *args[]) { len += raxSize(index); listRelease(blocks); raxFree(index); - atomic_fetch_sub_explicit(&lazyfree_objects,len,memory_order_relaxed); - atomic_fetch_add_explicit(&lazyfreed_objects,len,memory_order_relaxed); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Return the number of currently pending objects to free. */ size_t lazyfreeGetPendingObjectsCount(void) { - size_t aux = atomic_load_explicit(&lazyfree_objects,memory_order_relaxed); + size_t aux = atomic_load_explicit(&lazyfree_objects, memory_order_relaxed); return aux; } /* Return the number of objects that have been freed. */ size_t lazyfreeGetFreedObjectsCount(void) { - size_t aux = atomic_load_explicit(&lazyfreed_objects,memory_order_relaxed); + size_t aux = atomic_load_explicit(&lazyfreed_objects, memory_order_relaxed); return aux; } void lazyfreeResetStats(void) { - atomic_store_explicit(&lazyfreed_objects,0,memory_order_relaxed); + atomic_store_explicit(&lazyfreed_objects, 0, memory_order_relaxed); } /* Return the amount of work needed in order to free an object. @@ -174,8 +174,8 @@ void freeObjAsync(robj *key, robj *obj, int dbid) { * of parts of the server core may call incrRefCount() to protect * objects, and then call dbDelete(). 
*/ if (free_effort > LAZYFREE_THRESHOLD && obj->refcount == 1) { - atomic_fetch_add_explicit(&lazyfree_objects,1,memory_order_relaxed); - bioCreateLazyFreeJob(lazyfreeFreeObject,1,obj); + atomic_fetch_add_explicit(&lazyfree_objects, 1, memory_order_relaxed); + bioCreateLazyFreeJob(lazyfreeFreeObject, 1, obj); } else { decrRefCount(obj); } @@ -203,8 +203,8 @@ void emptyDbAsync(serverDb *db) { void freeTrackingRadixTreeAsync(rax *tracking) { /* Because this rax has only keys and no values so we use numnodes. */ if (tracking->numnodes > LAZYFREE_THRESHOLD) { - atomic_fetch_add_explicit(&lazyfree_objects,tracking->numele,memory_order_relaxed); - bioCreateLazyFreeJob(lazyFreeTrackingTable,1,tracking); + atomic_fetch_add_explicit(&lazyfree_objects, tracking->numele, memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeTrackingTable, 1, tracking); } else { freeTrackingRadixTree(tracking); } @@ -215,8 +215,8 @@ void freeTrackingRadixTreeAsync(rax *tracking) { void freeErrorsRadixTreeAsync(rax *errors) { /* Because this rax has only keys and no values so we use numnodes. */ if (errors->numnodes > LAZYFREE_THRESHOLD) { - atomic_fetch_add_explicit(&lazyfree_objects,errors->numele,memory_order_relaxed); - bioCreateLazyFreeJob(lazyFreeErrors,1,errors); + atomic_fetch_add_explicit(&lazyfree_objects, errors->numele, memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeErrors, 1, errors); } else { raxFreeWithCallback(errors, zfree); } @@ -226,8 +226,8 @@ void freeErrorsRadixTreeAsync(rax *errors) { * Close lua interpreter, if there are a lot of lua scripts, close it in async way. 
*/ void freeLuaScriptsAsync(dict *lua_scripts, list *lua_scripts_lru_list, lua_State *lua) { if (dictSize(lua_scripts) > LAZYFREE_THRESHOLD) { - atomic_fetch_add_explicit(&lazyfree_objects,dictSize(lua_scripts),memory_order_relaxed); - bioCreateLazyFreeJob(lazyFreeLuaScripts,3,lua_scripts,lua_scripts_lru_list,lua); + atomic_fetch_add_explicit(&lazyfree_objects, dictSize(lua_scripts), memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeLuaScripts, 3, lua_scripts, lua_scripts_lru_list, lua); } else { freeLuaScriptsSync(lua_scripts, lua_scripts_lru_list, lua); } @@ -236,8 +236,9 @@ void freeLuaScriptsAsync(dict *lua_scripts, list *lua_scripts_lru_list, lua_Stat /* Free functions ctx, if the functions ctx contains enough functions, free it in async way. */ void freeFunctionsAsync(functionsLibCtx *functions_lib_ctx) { if (functionsLibCtxFunctionsLen(functions_lib_ctx) > LAZYFREE_THRESHOLD) { - atomic_fetch_add_explicit(&lazyfree_objects,functionsLibCtxFunctionsLen(functions_lib_ctx),memory_order_relaxed); - bioCreateLazyFreeJob(lazyFreeFunctionsCtx,1,functions_lib_ctx); + atomic_fetch_add_explicit(&lazyfree_objects, functionsLibCtxFunctionsLen(functions_lib_ctx), + memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeFunctionsCtx, 1, functions_lib_ctx); } else { functionsLibCtxFree(functions_lib_ctx); } @@ -245,11 +246,9 @@ void freeFunctionsAsync(functionsLibCtx *functions_lib_ctx) { /* Free replication backlog referencing buffer blocks and rax index. 
*/ void freeReplicationBacklogRefMemAsync(list *blocks, rax *index) { - if (listLength(blocks) > LAZYFREE_THRESHOLD || - raxSize(index) > LAZYFREE_THRESHOLD) - { - atomic_fetch_add_explicit(&lazyfree_objects,listLength(blocks)+raxSize(index),memory_order_relaxed); - bioCreateLazyFreeJob(lazyFreeReplicationBacklogRefMem,2,blocks,index); + if (listLength(blocks) > LAZYFREE_THRESHOLD || raxSize(index) > LAZYFREE_THRESHOLD) { + atomic_fetch_add_explicit(&lazyfree_objects, listLength(blocks) + raxSize(index), memory_order_relaxed); + bioCreateLazyFreeJob(lazyFreeReplicationBacklogRefMem, 2, blocks, index); } else { listRelease(blocks); raxFree(index); diff --git a/src/listpack.c b/src/listpack.c index baa6f98be3..640f10142d 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -781,12 +781,12 @@ unsigned char *lpInsert(unsigned char *lp, unsigned char backlen[LP_MAX_BACKLEN_SIZE]; uint64_t enclen; /* The length of the encoded element. */ - int delete = (elestr == NULL && eleint == NULL); + int del_ele = (elestr == NULL && eleint == NULL); /* when deletion, it is conceptually replacing the element with a * zero-length element. So whatever we get passed as 'where', set * it to LP_REPLACE. */ - if (delete) where = LP_REPLACE; + if (del_ele) where = LP_REPLACE; /* If we need to insert after the current element, we just jump to the * next element (that could be the EOF one) and handle the case of @@ -825,7 +825,7 @@ unsigned char *lpInsert(unsigned char *lp, /* We need to also encode the backward-parsable length of the element * and append it to the end: this allows to traverse the listpack from * the end to the start. */ - unsigned long backlen_size = (!delete) ? lpEncodeBacklen(backlen, enclen) : 0; + unsigned long backlen_size = (!del_ele) ? 
lpEncodeBacklen(backlen, enclen) : 0; uint64_t old_listpack_bytes = lpGetTotalBytes(lp); uint32_t replaced_len = 0; if (where == LP_REPLACE) { @@ -870,9 +870,9 @@ unsigned char *lpInsert(unsigned char *lp, *newp = dst; /* In case of deletion, set 'newp' to NULL if the next element is * the EOF element. */ - if (delete && dst[0] == LP_EOF) *newp = NULL; + if (del_ele && dst[0] == LP_EOF) *newp = NULL; } - if (!delete) { + if (!del_ele) { if (enctype == LP_ENCODING_INT) { memcpy(dst, eleint, enclen); } else if (elestr) { @@ -886,10 +886,10 @@ unsigned char *lpInsert(unsigned char *lp, } /* Update header. */ - if (where != LP_REPLACE || delete) { + if (where != LP_REPLACE || del_ele) { uint32_t num_elements = lpGetNumElements(lp); if (num_elements != LP_HDR_NUMELE_UNKNOWN) { - if (!delete) + if (!del_ele) lpSetNumElements(lp, num_elements + 1); else lpSetNumElements(lp, num_elements - 1); diff --git a/src/modules/helloacl.c b/src/modules/helloacl.c index ed7298e696..6659b98f8c 100644 --- a/src/modules/helloacl.c +++ b/src/modules/helloacl.c @@ -39,7 +39,7 @@ static ValkeyModuleUser *global; static uint64_t global_auth_client_id = 0; -/* HELLOACL.REVOKE +/* HELLOACL.REVOKE * Synchronously revoke access from a user. */ int RevokeCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); @@ -49,11 +49,11 @@ int RevokeCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, ValkeyModule_DeauthenticateAndCloseClient(ctx, global_auth_client_id); return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } else { - return ValkeyModule_ReplyWithError(ctx, "Global user currently not used"); + return ValkeyModule_ReplyWithError(ctx, "Global user currently not used"); } } -/* HELLOACL.RESET +/* HELLOACL.RESET * Synchronously delete and re-create a module user. 
*/ int ResetCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); @@ -68,7 +68,7 @@ int ResetCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } -/* Callback handler for user changes, use this to notify a module of +/* Callback handler for user changes, use this to notify a module of * changes to users authenticated by the module */ void HelloACL_UserChanged(uint64_t client_id, void *privdata) { VALKEYMODULE_NOT_USED(privdata); @@ -76,14 +76,14 @@ void HelloACL_UserChanged(uint64_t client_id, void *privdata) { global_auth_client_id = 0; } -/* HELLOACL.AUTHGLOBAL +/* HELLOACL.AUTHGLOBAL * Synchronously assigns a module user to the current context. */ int AuthGlobalCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); if (global_auth_client_id) { - return ValkeyModule_ReplyWithError(ctx, "Global user currently used"); + return ValkeyModule_ReplyWithError(ctx, "Global user currently used"); } ValkeyModule_AuthenticateClientWithUser(ctx, global, HelloACL_UserChanged, NULL, &global_auth_client_id); @@ -102,9 +102,8 @@ int HelloACL_Reply(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { ValkeyModuleString *user_string = ValkeyModule_GetBlockedClientPrivateData(ctx); const char *name = ValkeyModule_StringPtrLen(user_string, &length); - if (ValkeyModule_AuthenticateClientWithACLUser(ctx, name, length, NULL, NULL, NULL) == - VALKEYMODULE_ERR) { - return ValkeyModule_ReplyWithError(ctx, "Invalid Username or password"); + if (ValkeyModule_AuthenticateClientWithACLUser(ctx, name, length, NULL, NULL, NULL) == VALKEYMODULE_ERR) { + return ValkeyModule_ReplyWithError(ctx, "Invalid Username or password"); } return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } @@ -129,20 +128,21 @@ void *HelloACL_ThreadMain(void *args) { ValkeyModuleString *user = 
targs[1]; ValkeyModule_Free(targs); - ValkeyModule_UnblockClient(bc,user); + ValkeyModule_UnblockClient(bc, user); return NULL; } -/* HELLOACL.AUTHASYNC +/* HELLOACL.AUTHASYNC * Asynchronously assigns an ACL user to the current context. */ int AuthAsyncCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); pthread_t tid; - ValkeyModuleBlockedClient *bc = ValkeyModule_BlockClient(ctx, HelloACL_Reply, HelloACL_Timeout, HelloACL_FreeData, TIMEOUT_TIME); - + ValkeyModuleBlockedClient *bc = + ValkeyModule_BlockClient(ctx, HelloACL_Reply, HelloACL_Timeout, HelloACL_FreeData, TIMEOUT_TIME); - void **targs = ValkeyModule_Alloc(sizeof(void*)*2); + + void **targs = ValkeyModule_Alloc(sizeof(void *) * 2); targs[0] = bc; targs[1] = ValkeyModule_CreateStringFromString(NULL, argv[1]); @@ -160,23 +160,21 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"helloacl",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "helloacl", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"helloacl.reset", - ResetCommand_ValkeyCommand,"",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "helloacl.reset", ResetCommand_ValkeyCommand, "", 0, 0, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"helloacl.revoke", - RevokeCommand_ValkeyCommand,"",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "helloacl.revoke", RevokeCommand_ValkeyCommand, "", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"helloacl.authglobal", - AuthGlobalCommand_ValkeyCommand,"no-auth",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "helloacl.authglobal", AuthGlobalCommand_ValkeyCommand, 
"no-auth", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"helloacl.authasync", - AuthAsyncCommand_ValkeyCommand,"no-auth",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "helloacl.authasync", AuthAsyncCommand_ValkeyCommand, "no-auth", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; global = ValkeyModule_CreateModuleUser("global"); diff --git a/src/modules/helloblock.c b/src/modules/helloblock.c index 40f01f191c..65e9bb71a2 100644 --- a/src/modules/helloblock.c +++ b/src/modules/helloblock.c @@ -42,14 +42,14 @@ int HelloBlock_Reply(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); int *myint = ValkeyModule_GetBlockedClientPrivateData(ctx); - return ValkeyModule_ReplyWithLongLong(ctx,*myint); + return ValkeyModule_ReplyWithLongLong(ctx, *myint); } /* Timeout callback for blocking command HELLO.BLOCK */ int HelloBlock_Timeout(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - return ValkeyModule_ReplyWithSimpleString(ctx,"Request timedout"); + return ValkeyModule_ReplyWithSimpleString(ctx, "Request timedout"); } /* Private data freeing callback for HELLO.BLOCK command. */ @@ -69,7 +69,7 @@ void *HelloBlock_ThreadMain(void *arg) { sleep(delay); int *r = ValkeyModule_Alloc(sizeof(int)); *r = rand(); - ValkeyModule_UnblockClient(bc,r); + ValkeyModule_UnblockClient(bc, r); return NULL; } @@ -82,8 +82,7 @@ void *HelloBlock_ThreadMain(void *arg) { * amount of seconds with a while loop calling sleep(1), so that once we * detect the client disconnection, we can terminate the thread ASAP. 
*/ void HelloBlock_Disconnected(ValkeyModuleCtx *ctx, ValkeyModuleBlockedClient *bc) { - ValkeyModule_Log(ctx,"warning","Blocked client %p disconnected!", - (void*)bc); + ValkeyModule_Log(ctx, "warning", "Blocked client %p disconnected!", (void *)bc); /* Here you should cleanup your state / threads, and if possible * call ValkeyModule_UnblockClient(), or notify the thread that will @@ -98,32 +97,33 @@ int HelloBlock_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in long long delay; long long timeout; - if (ValkeyModule_StringToLongLong(argv[1],&delay) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if (ValkeyModule_StringToLongLong(argv[1], &delay) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } - if (ValkeyModule_StringToLongLong(argv[2],&timeout) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if (ValkeyModule_StringToLongLong(argv[2], &timeout) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } pthread_t tid; - ValkeyModuleBlockedClient *bc = ValkeyModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout); + ValkeyModuleBlockedClient *bc = + ValkeyModule_BlockClient(ctx, HelloBlock_Reply, HelloBlock_Timeout, HelloBlock_FreeData, timeout); /* Here we set a disconnection handler, however since this module will * block in sleep() in a thread, there is not much we can do in the * callback, so this is just to show you the API. */ - ValkeyModule_SetDisconnectCallback(bc,HelloBlock_Disconnected); + ValkeyModule_SetDisconnectCallback(bc, HelloBlock_Disconnected); /* Now that we setup a blocking client, we need to pass the control * to the thread. However we need to pass arguments to the thread: * the delay and a reference to the blocked client handle. 
*/ - void **targ = ValkeyModule_Alloc(sizeof(void*)*2); + void **targ = ValkeyModule_Alloc(sizeof(void *) * 2); targ[0] = bc; - targ[1] = (void*)(unsigned long) delay; + targ[1] = (void *)(unsigned long)delay; - if (pthread_create(&tid,NULL,HelloBlock_ThreadMain,targ) != 0) { + if (pthread_create(&tid, NULL, HelloBlock_ThreadMain, targ) != 0) { ValkeyModule_AbortBlock(bc); - return ValkeyModule_ReplyWithError(ctx,"-ERR Can't start thread"); + return ValkeyModule_ReplyWithError(ctx, "-ERR Can't start thread"); } return VALKEYMODULE_OK; } @@ -141,35 +141,31 @@ void *HelloKeys_ThreadMain(void *arg) { long long cursor = 0; size_t replylen = 0; - ValkeyModule_ReplyWithArray(ctx,VALKEYMODULE_POSTPONED_LEN); + ValkeyModule_ReplyWithArray(ctx, VALKEYMODULE_POSTPONED_LEN); do { ValkeyModule_ThreadSafeContextLock(ctx); - ValkeyModuleCallReply *reply = ValkeyModule_Call(ctx, - "SCAN","l",(long long)cursor); + ValkeyModuleCallReply *reply = ValkeyModule_Call(ctx, "SCAN", "l", (long long)cursor); ValkeyModule_ThreadSafeContextUnlock(ctx); - ValkeyModuleCallReply *cr_cursor = - ValkeyModule_CallReplyArrayElement(reply,0); - ValkeyModuleCallReply *cr_keys = - ValkeyModule_CallReplyArrayElement(reply,1); + ValkeyModuleCallReply *cr_cursor = ValkeyModule_CallReplyArrayElement(reply, 0); + ValkeyModuleCallReply *cr_keys = ValkeyModule_CallReplyArrayElement(reply, 1); ValkeyModuleString *s = ValkeyModule_CreateStringFromCallReply(cr_cursor); - ValkeyModule_StringToLongLong(s,&cursor); - ValkeyModule_FreeString(ctx,s); + ValkeyModule_StringToLongLong(s, &cursor); + ValkeyModule_FreeString(ctx, s); size_t items = ValkeyModule_CallReplyLength(cr_keys); for (size_t j = 0; j < items; j++) { - ValkeyModuleCallReply *ele = - ValkeyModule_CallReplyArrayElement(cr_keys,j); - ValkeyModule_ReplyWithCallReply(ctx,ele); + ValkeyModuleCallReply *ele = ValkeyModule_CallReplyArrayElement(cr_keys, j); + ValkeyModule_ReplyWithCallReply(ctx, ele); replylen++; } ValkeyModule_FreeCallReply(reply); } 
while (cursor != 0); - ValkeyModule_ReplySetArrayLength(ctx,replylen); + ValkeyModule_ReplySetArrayLength(ctx, replylen); ValkeyModule_FreeThreadSafeContext(ctx); - ValkeyModule_UnblockClient(bc,NULL); + ValkeyModule_UnblockClient(bc, NULL); return NULL; } @@ -186,14 +182,14 @@ int HelloKeys_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int /* Note that when blocking the client we do not set any callback: no * timeout is possible since we passed '0', nor we need a reply callback * because we'll use the thread safe context to accumulate a reply. */ - ValkeyModuleBlockedClient *bc = ValkeyModule_BlockClient(ctx,NULL,NULL,NULL,0); + ValkeyModuleBlockedClient *bc = ValkeyModule_BlockClient(ctx, NULL, NULL, NULL, 0); /* Now that we setup a blocking client, we need to pass the control * to the thread. However we need to pass arguments to the thread: * the reference to the blocked client handle. */ - if (pthread_create(&tid,NULL,HelloKeys_ThreadMain,bc) != 0) { + if (pthread_create(&tid, NULL, HelloKeys_ThreadMain, bc) != 0) { ValkeyModule_AbortBlock(bc); - return ValkeyModule_ReplyWithError(ctx,"-ERR Can't start thread"); + return ValkeyModule_ReplyWithError(ctx, "-ERR Can't start thread"); } return VALKEYMODULE_OK; } @@ -204,14 +200,11 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"helloblock",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "helloblock", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.block", - HelloBlock_ValkeyCommand,"",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.block", HelloBlock_ValkeyCommand, "", 0, 0, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.keys", - HelloKeys_ValkeyCommand,"",0,0,0) == VALKEYMODULE_ERR) + if 
(ValkeyModule_CreateCommand(ctx, "hello.keys", HelloKeys_ValkeyCommand, "", 0, 0, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; return VALKEYMODULE_OK; diff --git a/src/modules/hellocluster.c b/src/modules/hellocluster.c index 996b506535..cfc0d4f0f4 100644 --- a/src/modules/hellocluster.c +++ b/src/modules/hellocluster.c @@ -44,7 +44,7 @@ int PingallCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - ValkeyModule_SendClusterMessage(ctx,NULL,MSGTYPE_PING,"Hey",3); + ValkeyModule_SendClusterMessage(ctx, NULL, MSGTYPE_PING, "Hey", 3); return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } @@ -54,36 +54,44 @@ int ListCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, i VALKEYMODULE_NOT_USED(argc); size_t numnodes; - char **ids = ValkeyModule_GetClusterNodesList(ctx,&numnodes); + char **ids = ValkeyModule_GetClusterNodesList(ctx, &numnodes); if (ids == NULL) { - return ValkeyModule_ReplyWithError(ctx,"Cluster not enabled"); + return ValkeyModule_ReplyWithError(ctx, "Cluster not enabled"); } - ValkeyModule_ReplyWithArray(ctx,numnodes); + ValkeyModule_ReplyWithArray(ctx, numnodes); for (size_t j = 0; j < numnodes; j++) { int port; - ValkeyModule_GetClusterNodeInfo(ctx,ids[j],NULL,NULL,&port,NULL); - ValkeyModule_ReplyWithArray(ctx,2); - ValkeyModule_ReplyWithStringBuffer(ctx,ids[j],VALKEYMODULE_NODE_ID_LEN); - ValkeyModule_ReplyWithLongLong(ctx,port); + ValkeyModule_GetClusterNodeInfo(ctx, ids[j], NULL, NULL, &port, NULL); + ValkeyModule_ReplyWithArray(ctx, 2); + ValkeyModule_ReplyWithStringBuffer(ctx, ids[j], VALKEYMODULE_NODE_ID_LEN); + ValkeyModule_ReplyWithLongLong(ctx, port); } ValkeyModule_FreeClusterNodesList(ids); return VALKEYMODULE_OK; } /* Callback for message MSGTYPE_PING */ -void PingReceiver(ValkeyModuleCtx *ctx, const char *sender_id, uint8_t type, const unsigned char *payload, uint32_t len) { - ValkeyModule_Log(ctx,"notice","PING (type %d) RECEIVED from 
%.*s: '%.*s'", - type,VALKEYMODULE_NODE_ID_LEN,sender_id,(int)len, payload); - ValkeyModule_SendClusterMessage(ctx,NULL,MSGTYPE_PONG,"Ohi!",4); +void PingReceiver(ValkeyModuleCtx *ctx, + const char *sender_id, + uint8_t type, + const unsigned char *payload, + uint32_t len) { + ValkeyModule_Log(ctx, "notice", "PING (type %d) RECEIVED from %.*s: '%.*s'", type, VALKEYMODULE_NODE_ID_LEN, + sender_id, (int)len, payload); + ValkeyModule_SendClusterMessage(ctx, NULL, MSGTYPE_PONG, "Ohi!", 4); ValkeyModuleCallReply *reply = ValkeyModule_Call(ctx, "INCR", "c", "pings_received"); ValkeyModule_FreeCallReply(reply); } /* Callback for message MSGTYPE_PONG. */ -void PongReceiver(ValkeyModuleCtx *ctx, const char *sender_id, uint8_t type, const unsigned char *payload, uint32_t len) { - ValkeyModule_Log(ctx,"notice","PONG (type %d) RECEIVED from %.*s: '%.*s'", - type,VALKEYMODULE_NODE_ID_LEN,sender_id,(int)len, payload); +void PongReceiver(ValkeyModuleCtx *ctx, + const char *sender_id, + uint8_t type, + const unsigned char *payload, + uint32_t len) { + ValkeyModule_Log(ctx, "notice", "PONG (type %d) RECEIVED from %.*s: '%.*s'", type, VALKEYMODULE_NODE_ID_LEN, + sender_id, (int)len, payload); } /* This function must be present on each module. 
It is used in order to @@ -92,15 +100,14 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellocluster",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellocluster", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellocluster.pingall", - PingallCommand_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellocluster.pingall", PingallCommand_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellocluster.list", - ListCommand_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellocluster.list", ListCommand_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; /* Disable Cluster sharding and redirections. This way every node @@ -109,10 +116,10 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg * variable. Normally you do that in order for the distributed system * you create as a module to have total freedom in the keyspace * manipulation. */ - ValkeyModule_SetClusterFlags(ctx,VALKEYMODULE_CLUSTER_FLAG_NO_REDIRECTION); + ValkeyModule_SetClusterFlags(ctx, VALKEYMODULE_CLUSTER_FLAG_NO_REDIRECTION); /* Register our handlers for different message types. 
*/ - ValkeyModule_RegisterClusterMessageReceiver(ctx,MSGTYPE_PING,PingReceiver); - ValkeyModule_RegisterClusterMessageReceiver(ctx,MSGTYPE_PONG,PongReceiver); + ValkeyModule_RegisterClusterMessageReceiver(ctx, MSGTYPE_PING, PingReceiver); + ValkeyModule_RegisterClusterMessageReceiver(ctx, MSGTYPE_PONG, PongReceiver); return VALKEYMODULE_OK; } diff --git a/src/modules/hellodict.c b/src/modules/hellodict.c index e699e38f1c..38081919f3 100644 --- a/src/modules/hellodict.c +++ b/src/modules/hellodict.c @@ -46,10 +46,10 @@ static ValkeyModuleDict *Keyspace; * Set the specified key to the specified value. */ int cmd_SET(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 3) return ValkeyModule_WrongArity(ctx); - ValkeyModule_DictSet(Keyspace,argv[1],argv[2]); + ValkeyModule_DictSet(Keyspace, argv[1], argv[2]); /* We need to keep a reference to the value stored at the key, otherwise * it would be freed when this callback returns. */ - ValkeyModule_RetainString(NULL,argv[2]); + ValkeyModule_RetainString(NULL, argv[2]); return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } @@ -59,7 +59,7 @@ int cmd_SET(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { * is not defined. */ int cmd_GET(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); - ValkeyModuleString *val = ValkeyModule_DictGet(Keyspace,argv[1],NULL); + ValkeyModuleString *val = ValkeyModule_DictGet(Keyspace, argv[1], NULL); if (val == NULL) { return ValkeyModule_ReplyWithNull(ctx); } else { @@ -76,27 +76,25 @@ int cmd_KEYRANGE(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { /* Parse the count argument. 
*/ long long count; - if (ValkeyModule_StringToLongLong(argv[3],&count) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if (ValkeyModule_StringToLongLong(argv[3], &count) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } /* Seek the iterator. */ - ValkeyModuleDictIter *iter = ValkeyModule_DictIteratorStart( - Keyspace, ">=", argv[1]); + ValkeyModuleDictIter *iter = ValkeyModule_DictIteratorStart(Keyspace, ">=", argv[1]); /* Reply with the matching items. */ char *key; size_t keylen; long long replylen = 0; /* Keep track of the emitted array len. */ - ValkeyModule_ReplyWithArray(ctx,VALKEYMODULE_POSTPONED_LEN); - while((key = ValkeyModule_DictNextC(iter,&keylen,NULL)) != NULL) { + ValkeyModule_ReplyWithArray(ctx, VALKEYMODULE_POSTPONED_LEN); + while ((key = ValkeyModule_DictNextC(iter, &keylen, NULL)) != NULL) { if (replylen >= count) break; - if (ValkeyModule_DictCompare(iter,"<=",argv[2]) == VALKEYMODULE_ERR) - break; - ValkeyModule_ReplyWithStringBuffer(ctx,key,keylen); + if (ValkeyModule_DictCompare(iter, "<=", argv[2]) == VALKEYMODULE_ERR) break; + ValkeyModule_ReplyWithStringBuffer(ctx, key, keylen); replylen++; } - ValkeyModule_ReplySetArrayLength(ctx,replylen); + ValkeyModule_ReplySetArrayLength(ctx, replylen); /* Cleanup. 
*/ ValkeyModule_DictIteratorStop(iter); @@ -109,19 +107,15 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellodict",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellodict", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellodict.set", - cmd_SET,"write deny-oom",1,1,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellodict.set", cmd_SET, "write deny-oom", 1, 1, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellodict.get", - cmd_GET,"readonly",1,1,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellodict.get", cmd_GET, "readonly", 1, 1, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellodict.keyrange", - cmd_KEYRANGE,"readonly",1,1,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellodict.keyrange", cmd_KEYRANGE, "readonly", 1, 1, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; /* Create our global dictionary. Here we'll set our keys and values. */ diff --git a/src/modules/hellohook.c b/src/modules/hellohook.c index d6eead5b80..35a1ed0a1a 100644 --- a/src/modules/hellohook.c +++ b/src/modules/hellohook.c @@ -37,20 +37,17 @@ #include /* Client state change callback. */ -void clientChangeCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) -{ +void clientChangeCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) { VALKEYMODULE_NOT_USED(ctx); VALKEYMODULE_NOT_USED(e); ValkeyModuleClientInfo *ci = data; printf("Client %s event for client #%llu %s:%d\n", - (sub == VALKEYMODULE_SUBEVENT_CLIENT_CHANGE_CONNECTED) ? - "connection" : "disconnection", - (unsigned long long)ci->id,ci->addr,ci->port); + (sub == VALKEYMODULE_SUBEVENT_CLIENT_CHANGE_CONNECTED) ? 
"connection" : "disconnection", + (unsigned long long)ci->id, ci->addr, ci->port); } -void flushdbCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) -{ +void flushdbCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) { VALKEYMODULE_NOT_USED(ctx); VALKEYMODULE_NOT_USED(e); @@ -58,17 +55,16 @@ void flushdbCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, vo if (sub == VALKEYMODULE_SUBEVENT_FLUSHDB_START) { if (fi->dbnum != -1) { ValkeyModuleCallReply *reply; - reply = ValkeyModule_Call(ctx,"DBSIZE",""); + reply = ValkeyModule_Call(ctx, "DBSIZE", ""); long long numkeys = ValkeyModule_CallReplyInteger(reply); - printf("FLUSHDB event of database %d started (%lld keys in DB)\n", - fi->dbnum, numkeys); + printf("FLUSHDB event of database %d started (%lld keys in DB)\n", fi->dbnum, numkeys); ValkeyModule_FreeCallReply(reply); } else { printf("FLUSHALL event started\n"); } } else { if (fi->dbnum != -1) { - printf("FLUSHDB event of database %d ended\n",fi->dbnum); + printf("FLUSHDB event of database %d ended\n", fi->dbnum); } else { printf("FLUSHALL event ended\n"); } @@ -81,12 +77,9 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellohook",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellohook", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - ValkeyModule_SubscribeToServerEvent(ctx, - ValkeyModuleEvent_ClientChange, clientChangeCallback); - ValkeyModule_SubscribeToServerEvent(ctx, - ValkeyModuleEvent_FlushDB, flushdbCallback); + ValkeyModule_SubscribeToServerEvent(ctx, ValkeyModuleEvent_ClientChange, clientChangeCallback); + ValkeyModule_SubscribeToServerEvent(ctx, ValkeyModuleEvent_FlushDB, flushdbCallback); return VALKEYMODULE_OK; } diff --git a/src/modules/hellotimer.c b/src/modules/hellotimer.c 
index b0ed3d0b08..40ba323e58 100644 --- a/src/modules/hellotimer.c +++ b/src/modules/hellotimer.c @@ -51,8 +51,8 @@ int TimerCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, for (int j = 0; j < 10; j++) { int delay = rand() % 5000; char *buf = ValkeyModule_Alloc(256); - snprintf(buf,256,"After %d", delay); - ValkeyModuleTimerID tid = ValkeyModule_CreateTimer(ctx,delay,timerHandler,buf); + snprintf(buf, 256, "After %d", delay); + ValkeyModuleTimerID tid = ValkeyModule_CreateTimer(ctx, delay, timerHandler, buf); VALKEYMODULE_NOT_USED(tid); } return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); @@ -64,11 +64,10 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellotimer",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellotimer", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellotimer.timer", - TimerCommand_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellotimer.timer", TimerCommand_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; return VALKEYMODULE_OK; diff --git a/src/modules/hellotype.c b/src/modules/hellotype.c index 531f7465ce..7e2dc60c68 100644 --- a/src/modules/hellotype.c +++ b/src/modules/hellotype.c @@ -71,7 +71,7 @@ struct HelloTypeObject *createHelloTypeObject(void) { void HelloTypeInsert(struct HelloTypeObject *o, int64_t ele) { struct HelloTypeNode *next = o->head, *newnode, *prev = NULL; - while(next && next->value < ele) { + while (next && next->value < ele) { prev = next; next = next->next; } @@ -89,7 +89,7 @@ void HelloTypeInsert(struct HelloTypeObject *o, int64_t ele) { void HelloTypeReleaseObject(struct HelloTypeObject *o) { struct HelloTypeNode *cur, *next; cur = o->head; - while(cur) { + while (cur) { 
next = cur->next; ValkeyModule_Free(cur); cur = next; @@ -104,34 +104,31 @@ int HelloTypeInsert_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. */ if (argc != 3) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_EMPTY && - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_EMPTY && ValkeyModule_ModuleTypeGetType(key) != HelloType) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } long long value; - if ((ValkeyModule_StringToLongLong(argv[2],&value) != VALKEYMODULE_OK)) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid value: must be a signed 64 bit integer"); + if ((ValkeyModule_StringToLongLong(argv[2], &value) != VALKEYMODULE_OK)) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid value: must be a signed 64 bit integer"); } /* Create an empty value object if the key is currently empty. */ struct HelloTypeObject *hto; if (type == VALKEYMODULE_KEYTYPE_EMPTY) { hto = createHelloTypeObject(); - ValkeyModule_ModuleTypeSetValue(key,HelloType,hto); + ValkeyModule_ModuleTypeSetValue(key, HelloType, hto); } else { hto = ValkeyModule_ModuleTypeGetValue(key); } /* Insert the new element. 
*/ - HelloTypeInsert(hto,value); - ValkeyModule_SignalKeyAsReady(ctx,argv[1]); + HelloTypeInsert(hto, value); + ValkeyModule_SignalKeyAsReady(ctx, argv[1]); - ValkeyModule_ReplyWithLongLong(ctx,hto->len); + ValkeyModule_ReplyWithLongLong(ctx, hto->len); ValkeyModule_ReplicateVerbatim(ctx); return VALKEYMODULE_OK; } @@ -141,34 +138,28 @@ int HelloTypeRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. */ if (argc != 4) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_EMPTY && - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_EMPTY && ValkeyModule_ModuleTypeGetType(key) != HelloType) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } long long first, count; - if (ValkeyModule_StringToLongLong(argv[2],&first) != VALKEYMODULE_OK || - ValkeyModule_StringToLongLong(argv[3],&count) != VALKEYMODULE_OK || - first < 0 || count < 0) - { - return ValkeyModule_ReplyWithError(ctx, - "ERR invalid first or count parameters"); + if (ValkeyModule_StringToLongLong(argv[2], &first) != VALKEYMODULE_OK || + ValkeyModule_StringToLongLong(argv[3], &count) != VALKEYMODULE_OK || first < 0 || count < 0) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid first or count parameters"); } struct HelloTypeObject *hto = ValkeyModule_ModuleTypeGetValue(key); struct HelloTypeNode *node = hto ? 
hto->head : NULL; - ValkeyModule_ReplyWithArray(ctx,VALKEYMODULE_POSTPONED_LEN); + ValkeyModule_ReplyWithArray(ctx, VALKEYMODULE_POSTPONED_LEN); long long arraylen = 0; - while(node && count--) { - ValkeyModule_ReplyWithLongLong(ctx,node->value); + while (node && count--) { + ValkeyModule_ReplyWithLongLong(ctx, node->value); arraylen++; node = node->next; } - ValkeyModule_ReplySetArrayLength(ctx,arraylen); + ValkeyModule_ReplySetArrayLength(ctx, arraylen); return VALKEYMODULE_OK; } @@ -177,17 +168,14 @@ int HelloTypeLen_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. */ if (argc != 2) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_EMPTY && - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_EMPTY && ValkeyModule_ModuleTypeGetType(key) != HelloType) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } struct HelloTypeObject *hto = ValkeyModule_ModuleTypeGetValue(key); - ValkeyModule_ReplyWithLongLong(ctx,hto ? hto->len : 0); + ValkeyModule_ReplyWithLongLong(ctx, hto ? 
hto->len : 0); return VALKEYMODULE_OK; } @@ -201,11 +189,9 @@ int HelloBlock_Reply(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) VALKEYMODULE_NOT_USED(argc); ValkeyModuleString *keyname = ValkeyModule_GetBlockedClientReadyKey(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,keyname,VALKEYMODULE_READ); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, keyname, VALKEYMODULE_READ); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_MODULE || - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { + if (type != VALKEYMODULE_KEYTYPE_MODULE || ValkeyModule_ModuleTypeGetType(key) != HelloType) { ValkeyModule_CloseKey(key); return VALKEYMODULE_ERR; } @@ -213,14 +199,14 @@ int HelloBlock_Reply(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) /* In case the key is able to serve our blocked client, let's directly * use our original command implementation to make this example simpler. */ ValkeyModule_CloseKey(key); - return HelloTypeRange_ValkeyCommand(ctx,argv,argc-1); + return HelloTypeRange_ValkeyCommand(ctx, argv, argc - 1); } /* Timeout callback for blocking command HELLOTYPE.BRANGE */ int HelloBlock_Timeout(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - return ValkeyModule_ReplyWithSimpleString(ctx,"Request timedout"); + return ValkeyModule_ReplyWithSimpleString(ctx, "Request timedout"); } /* Private data freeing callback for HELLOTYPE.BRANGE command. */ @@ -235,31 +221,28 @@ void HelloBlock_FreeData(ValkeyModuleCtx *ctx, void *privdata) { int HelloTypeBRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 5) return ValkeyModule_WrongArity(ctx); ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. 
*/ - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_EMPTY && - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_EMPTY && ValkeyModule_ModuleTypeGetType(key) != HelloType) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } /* Parse the timeout before even trying to serve the client synchronously, * so that we always fail ASAP on syntax errors. */ long long timeout; - if (ValkeyModule_StringToLongLong(argv[4],&timeout) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx, - "ERR invalid timeout parameter"); + if (ValkeyModule_StringToLongLong(argv[4], &timeout) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid timeout parameter"); } /* Can we serve the reply synchronously? */ if (type != VALKEYMODULE_KEYTYPE_EMPTY) { - return HelloTypeRange_ValkeyCommand(ctx,argv,argc-1); + return HelloTypeRange_ValkeyCommand(ctx, argv, argc - 1); } /* Otherwise let's block on the key. 
*/ void *privdata = ValkeyModule_Alloc(100); - ValkeyModule_BlockClientOnKeys(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout,argv+1,1,privdata); + ValkeyModule_BlockClientOnKeys(ctx, HelloBlock_Reply, HelloBlock_Timeout, HelloBlock_FreeData, timeout, argv + 1, 1, + privdata); return VALKEYMODULE_OK; } @@ -272,9 +255,9 @@ void *HelloTypeRdbLoad(ValkeyModuleIO *rdb, int encver) { } uint64_t elements = ValkeyModule_LoadUnsigned(rdb); struct HelloTypeObject *hto = createHelloTypeObject(); - while(elements--) { + while (elements--) { int64_t ele = ValkeyModule_LoadSigned(rdb); - HelloTypeInsert(hto,ele); + HelloTypeInsert(hto, ele); } return hto; } @@ -282,9 +265,9 @@ void *HelloTypeRdbLoad(ValkeyModuleIO *rdb, int encver) { void HelloTypeRdbSave(ValkeyModuleIO *rdb, void *value) { struct HelloTypeObject *hto = value; struct HelloTypeNode *node = hto->head; - ValkeyModule_SaveUnsigned(rdb,hto->len); - while(node) { - ValkeyModule_SaveSigned(rdb,node->value); + ValkeyModule_SaveUnsigned(rdb, hto->len); + while (node) { + ValkeyModule_SaveSigned(rdb, node->value); node = node->next; } } @@ -292,8 +275,8 @@ void HelloTypeRdbSave(ValkeyModuleIO *rdb, void *value) { void HelloTypeAofRewrite(ValkeyModuleIO *aof, ValkeyModuleString *key, void *value) { struct HelloTypeObject *hto = value; struct HelloTypeNode *node = hto->head; - while(node) { - ValkeyModule_EmitAOF(aof,"HELLOTYPE.INSERT","sl",key,node->value); + while (node) { + ValkeyModule_EmitAOF(aof, "HELLOTYPE.INSERT", "sl", key, node->value); node = node->next; } } @@ -303,7 +286,7 @@ void HelloTypeAofRewrite(ValkeyModuleIO *aof, ValkeyModuleString *key, void *val size_t HelloTypeMemUsage(const void *value) { const struct HelloTypeObject *hto = value; struct HelloTypeNode *node = hto->head; - return sizeof(*hto) + sizeof(*node)*hto->len; + return sizeof(*hto) + sizeof(*node) * hto->len; } void HelloTypeFree(void *value) { @@ -313,8 +296,8 @@ void HelloTypeFree(void *value) { void 
HelloTypeDigest(ValkeyModuleDigest *md, void *value) { struct HelloTypeObject *hto = value; struct HelloTypeNode *node = hto->head; - while(node) { - ValkeyModule_DigestAddLongLong(md,node->value); + while (node) { + ValkeyModule_DigestAddLongLong(md, node->value); node = node->next; } ValkeyModule_DigestEndSequence(md); @@ -326,36 +309,33 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellotype",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellotype", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - ValkeyModuleTypeMethods tm = { - .version = VALKEYMODULE_TYPE_METHOD_VERSION, - .rdb_load = HelloTypeRdbLoad, - .rdb_save = HelloTypeRdbSave, - .aof_rewrite = HelloTypeAofRewrite, - .mem_usage = HelloTypeMemUsage, - .free = HelloTypeFree, - .digest = HelloTypeDigest - }; + ValkeyModuleTypeMethods tm = {.version = VALKEYMODULE_TYPE_METHOD_VERSION, + .rdb_load = HelloTypeRdbLoad, + .rdb_save = HelloTypeRdbSave, + .aof_rewrite = HelloTypeAofRewrite, + .mem_usage = HelloTypeMemUsage, + .free = HelloTypeFree, + .digest = HelloTypeDigest}; - HelloType = ValkeyModule_CreateDataType(ctx,"hellotype",0,&tm); + HelloType = ValkeyModule_CreateDataType(ctx, "hellotype", 0, &tm); if (HelloType == NULL) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellotype.insert", - HelloTypeInsert_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellotype.insert", HelloTypeInsert_ValkeyCommand, "write deny-oom", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellotype.range", - HelloTypeRange_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellotype.range", HelloTypeRange_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return 
VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellotype.len", - HelloTypeLen_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellotype.len", HelloTypeLen_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellotype.brange", - HelloTypeBRange_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellotype.brange", HelloTypeBRange_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; return VALKEYMODULE_OK; diff --git a/src/modules/helloworld.c b/src/modules/helloworld.c index af42ec3a33..43f28a14d4 100644 --- a/src/modules/helloworld.c +++ b/src/modules/helloworld.c @@ -48,7 +48,7 @@ int HelloSimple_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - ValkeyModule_ReplyWithLongLong(ctx,ValkeyModule_GetSelectedDb(ctx)); + ValkeyModule_ReplyWithLongLong(ctx, ValkeyModule_GetSelectedDb(ctx)); return VALKEYMODULE_OK; } @@ -58,17 +58,15 @@ int HelloSimple_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, i * * You'll find this command to be roughly as fast as the actual RPUSH * command. 
*/ -int HelloPushNative_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloPushNative_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 3) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); - ValkeyModule_ListPush(key,VALKEYMODULE_LIST_TAIL,argv[2]); + ValkeyModule_ListPush(key, VALKEYMODULE_LIST_TAIL, argv[2]); size_t newlen = ValkeyModule_ValueLength(key); ValkeyModule_CloseKey(key); - ValkeyModule_ReplyWithLongLong(ctx,newlen); + ValkeyModule_ReplyWithLongLong(ctx, newlen); return VALKEYMODULE_OK; } @@ -77,30 +75,28 @@ int HelloPushNative_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg * approach is useful when you need to call commands that are not * available as low level APIs, or when you don't need the maximum speed * possible but instead prefer implementation simplicity. */ -int HelloPushCall_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloPushCall_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 3) return ValkeyModule_WrongArity(ctx); ValkeyModuleCallReply *reply; - reply = ValkeyModule_Call(ctx,"RPUSH","ss",argv[1],argv[2]); + reply = ValkeyModule_Call(ctx, "RPUSH", "ss", argv[1], argv[2]); long long len = ValkeyModule_CallReplyInteger(reply); ValkeyModule_FreeCallReply(reply); - ValkeyModule_ReplyWithLongLong(ctx,len); + ValkeyModule_ReplyWithLongLong(ctx, len); return VALKEYMODULE_OK; } /* HELLO.PUSH.CALL2 * This is exactly as HELLO.PUSH.CALL, but shows how we can reply to the * client using directly a reply object that Call() returned. 
*/ -int HelloPushCall2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloPushCall2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 3) return ValkeyModule_WrongArity(ctx); ValkeyModuleCallReply *reply; - reply = ValkeyModule_Call(ctx,"RPUSH","ss",argv[1],argv[2]); - ValkeyModule_ReplyWithCallReply(ctx,reply); + reply = ValkeyModule_Call(ctx, "RPUSH", "ss", argv[1], argv[2]); + ValkeyModule_ReplyWithCallReply(ctx, reply); ValkeyModule_FreeCallReply(reply); return VALKEYMODULE_OK; } @@ -108,22 +104,21 @@ int HelloPushCall2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv /* HELLO.LIST.SUM.LEN returns the total length of all the items inside * a list, by using the high level Call() API. * This command is an example of the array reply access. */ -int HelloListSumLen_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloListSumLen_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); ValkeyModuleCallReply *reply; - reply = ValkeyModule_Call(ctx,"LRANGE","sll",argv[1],(long long)0,(long long)-1); + reply = ValkeyModule_Call(ctx, "LRANGE", "sll", argv[1], (long long)0, (long long)-1); size_t strlen = 0; size_t items = ValkeyModule_CallReplyLength(reply); size_t j; for (j = 0; j < items; j++) { - ValkeyModuleCallReply *ele = ValkeyModule_CallReplyArrayElement(reply,j); + ValkeyModuleCallReply *ele = ValkeyModule_CallReplyArrayElement(reply, j); strlen += ValkeyModule_CallReplyLength(ele); } ValkeyModule_FreeCallReply(reply); - ValkeyModule_ReplyWithLongLong(ctx,strlen); + ValkeyModule_ReplyWithLongLong(ctx, strlen); return VALKEYMODULE_OK; } @@ -134,43 +129,39 @@ int HelloListSumLen_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg int HelloListSplice_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 4) return 
ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *srckey = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); - ValkeyModuleKey *dstkey = ValkeyModule_OpenKey(ctx,argv[2], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *srckey = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); + ValkeyModuleKey *dstkey = ValkeyModule_OpenKey(ctx, argv[2], VALKEYMODULE_READ | VALKEYMODULE_WRITE); /* Src and dst key must be empty or lists. */ if ((ValkeyModule_KeyType(srckey) != VALKEYMODULE_KEYTYPE_LIST && ValkeyModule_KeyType(srckey) != VALKEYMODULE_KEYTYPE_EMPTY) || (ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_LIST && - ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_EMPTY)) - { + ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_EMPTY)) { ValkeyModule_CloseKey(srckey); ValkeyModule_CloseKey(dstkey); - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } long long count; - if ((ValkeyModule_StringToLongLong(argv[3],&count) != VALKEYMODULE_OK) || - (count < 0)) { + if ((ValkeyModule_StringToLongLong(argv[3], &count) != VALKEYMODULE_OK) || (count < 0)) { ValkeyModule_CloseKey(srckey); ValkeyModule_CloseKey(dstkey); - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } - while(count-- > 0) { + while (count-- > 0) { ValkeyModuleString *ele; - ele = ValkeyModule_ListPop(srckey,VALKEYMODULE_LIST_TAIL); + ele = ValkeyModule_ListPop(srckey, VALKEYMODULE_LIST_TAIL); if (ele == NULL) break; - ValkeyModule_ListPush(dstkey,VALKEYMODULE_LIST_HEAD,ele); - ValkeyModule_FreeString(ctx,ele); + ValkeyModule_ListPush(dstkey, VALKEYMODULE_LIST_HEAD, ele); + ValkeyModule_FreeString(ctx, ele); } size_t len = ValkeyModule_ValueLength(srckey); ValkeyModule_CloseKey(srckey); ValkeyModule_CloseKey(dstkey); - ValkeyModule_ReplyWithLongLong(ctx,len); + 
ValkeyModule_ReplyWithLongLong(ctx, len); return VALKEYMODULE_OK; } @@ -181,37 +172,32 @@ int HelloListSpliceAuto_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString * ValkeyModule_AutoMemory(ctx); - ValkeyModuleKey *srckey = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); - ValkeyModuleKey *dstkey = ValkeyModule_OpenKey(ctx,argv[2], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *srckey = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); + ValkeyModuleKey *dstkey = ValkeyModule_OpenKey(ctx, argv[2], VALKEYMODULE_READ | VALKEYMODULE_WRITE); /* Src and dst key must be empty or lists. */ if ((ValkeyModule_KeyType(srckey) != VALKEYMODULE_KEYTYPE_LIST && ValkeyModule_KeyType(srckey) != VALKEYMODULE_KEYTYPE_EMPTY) || (ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_LIST && - ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_EMPTY)) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_EMPTY)) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } long long count; - if ((ValkeyModule_StringToLongLong(argv[3],&count) != VALKEYMODULE_OK) || - (count < 0)) - { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if ((ValkeyModule_StringToLongLong(argv[3], &count) != VALKEYMODULE_OK) || (count < 0)) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } - while(count-- > 0) { + while (count-- > 0) { ValkeyModuleString *ele; - ele = ValkeyModule_ListPop(srckey,VALKEYMODULE_LIST_TAIL); + ele = ValkeyModule_ListPop(srckey, VALKEYMODULE_LIST_TAIL); if (ele == NULL) break; - ValkeyModule_ListPush(dstkey,VALKEYMODULE_LIST_HEAD,ele); + ValkeyModule_ListPush(dstkey, VALKEYMODULE_LIST_HEAD, ele); } size_t len = ValkeyModule_ValueLength(srckey); - ValkeyModule_ReplyWithLongLong(ctx,len); + ValkeyModule_ReplyWithLongLong(ctx, len); return VALKEYMODULE_OK; } @@ -221,15 +207,14 @@ int 
HelloListSpliceAuto_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString * int HelloRandArray_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); long long count; - if (ValkeyModule_StringToLongLong(argv[1],&count) != VALKEYMODULE_OK || - count < 0) - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if (ValkeyModule_StringToLongLong(argv[1], &count) != VALKEYMODULE_OK || count < 0) + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); /* To reply with an array, we call ValkeyModule_ReplyWithArray() followed * by other "count" calls to other reply functions in order to generate * the elements of the array. */ - ValkeyModule_ReplyWithArray(ctx,count); - while(count--) ValkeyModule_ReplyWithLongLong(ctx,rand()); + ValkeyModule_ReplyWithArray(ctx, count); + while (count--) ValkeyModule_ReplyWithLongLong(ctx, rand()); return VALKEYMODULE_OK; } @@ -237,8 +222,7 @@ int HelloRandArray_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv * in the ValkeyModule_Call() call, the two INCRs get replicated. * Also note how the ECHO is replicated in an unexpected position (check * comments the function implementation). */ -int HelloRepl1_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloRepl1_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); ValkeyModule_AutoMemory(ctx); @@ -253,14 +237,14 @@ int HelloRepl1_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in * ECHO c foo * EXEC */ - ValkeyModule_Replicate(ctx,"ECHO","c","foo"); + ValkeyModule_Replicate(ctx, "ECHO", "c", "foo"); /* Using the "!" modifier we replicate the command if it * modified the dataset in some way. 
*/ - ValkeyModule_Call(ctx,"INCR","c!","foo"); - ValkeyModule_Call(ctx,"INCR","c!","bar"); + ValkeyModule_Call(ctx, "INCR", "c!", "foo"); + ValkeyModule_Call(ctx, "INCR", "c!", "bar"); - ValkeyModule_ReplyWithLongLong(ctx,0); + ValkeyModule_ReplyWithLongLong(ctx, 0); return VALKEYMODULE_OK; } @@ -279,26 +263,25 @@ int HelloRepl2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in if (argc != 2) return ValkeyModule_WrongArity(ctx); ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. */ - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); if (ValkeyModule_KeyType(key) != VALKEYMODULE_KEYTYPE_LIST) - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); size_t listlen = ValkeyModule_ValueLength(key); long long sum = 0; /* Rotate and increment. 
*/ - while(listlen--) { - ValkeyModuleString *ele = ValkeyModule_ListPop(key,VALKEYMODULE_LIST_TAIL); + while (listlen--) { + ValkeyModuleString *ele = ValkeyModule_ListPop(key, VALKEYMODULE_LIST_TAIL); long long val; - if (ValkeyModule_StringToLongLong(ele,&val) != VALKEYMODULE_OK) val = 0; + if (ValkeyModule_StringToLongLong(ele, &val) != VALKEYMODULE_OK) val = 0; val++; sum += val; - ValkeyModuleString *newele = ValkeyModule_CreateStringFromLongLong(ctx,val); - ValkeyModule_ListPush(key,VALKEYMODULE_LIST_HEAD,newele); + ValkeyModuleString *newele = ValkeyModule_CreateStringFromLongLong(ctx, val); + ValkeyModule_ListPush(key, VALKEYMODULE_LIST_HEAD, newele); } - ValkeyModule_ReplyWithLongLong(ctx,sum); + ValkeyModule_ReplyWithLongLong(ctx, sum); ValkeyModule_ReplicateVerbatim(ctx); return VALKEYMODULE_OK; } @@ -314,20 +297,17 @@ int HelloRepl2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in int HelloToggleCase_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int keytype = ValkeyModule_KeyType(key); - if (keytype != VALKEYMODULE_KEYTYPE_STRING && - keytype != VALKEYMODULE_KEYTYPE_EMPTY) - { + if (keytype != VALKEYMODULE_KEYTYPE_STRING && keytype != VALKEYMODULE_KEYTYPE_EMPTY) { ValkeyModule_CloseKey(key); - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } if (keytype == VALKEYMODULE_KEYTYPE_STRING) { size_t len, j; - char *s = ValkeyModule_StringDMA(key,&len,VALKEYMODULE_WRITE); + char *s = ValkeyModule_StringDMA(key, &len, VALKEYMODULE_WRITE); for (j = 0; j < len; j++) { if (isupper(s[j])) { s[j] = tolower(s[j]); @@ -338,7 +318,7 @@ int 
HelloToggleCase_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg } ValkeyModule_CloseKey(key); - ValkeyModule_ReplyWithSimpleString(ctx,"OK"); + ValkeyModule_ReplyWithSimpleString(ctx, "OK"); ValkeyModule_ReplicateVerbatim(ctx); return VALKEYMODULE_OK; } @@ -353,17 +333,16 @@ int HelloMoreExpire_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg mstime_t addms, expire; - if (ValkeyModule_StringToLongLong(argv[2],&addms) != VALKEYMODULE_OK) - return ValkeyModule_ReplyWithError(ctx,"ERR invalid expire time"); + if (ValkeyModule_StringToLongLong(argv[2], &addms) != VALKEYMODULE_OK) + return ValkeyModule_ReplyWithError(ctx, "ERR invalid expire time"); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); expire = ValkeyModule_GetExpire(key); if (expire != VALKEYMODULE_NO_EXPIRE) { expire += addms; - ValkeyModule_SetExpire(key,expire); + ValkeyModule_SetExpire(key, expire); } - return ValkeyModule_ReplyWithSimpleString(ctx,"OK"); + return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } /* HELLO.ZSUMRANGE key startscore endscore @@ -376,36 +355,34 @@ int HelloZsumRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv double score_start, score_end; if (argc != 4) return ValkeyModule_WrongArity(ctx); - if (ValkeyModule_StringToDouble(argv[2],&score_start) != VALKEYMODULE_OK || - ValkeyModule_StringToDouble(argv[3],&score_end) != VALKEYMODULE_OK) - { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid range"); + if (ValkeyModule_StringToDouble(argv[2], &score_start) != VALKEYMODULE_OK || + ValkeyModule_StringToDouble(argv[3], &score_end) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid range"); } - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], 
VALKEYMODULE_READ | VALKEYMODULE_WRITE); if (ValkeyModule_KeyType(key) != VALKEYMODULE_KEYTYPE_ZSET) { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } double scoresum_a = 0; double scoresum_b = 0; - ValkeyModule_ZsetFirstInScoreRange(key,score_start,score_end,0,0); - while(!ValkeyModule_ZsetRangeEndReached(key)) { + ValkeyModule_ZsetFirstInScoreRange(key, score_start, score_end, 0, 0); + while (!ValkeyModule_ZsetRangeEndReached(key)) { double score; - ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key,&score); - ValkeyModule_FreeString(ctx,ele); + ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key, &score); + ValkeyModule_FreeString(ctx, ele); scoresum_a += score; ValkeyModule_ZsetRangeNext(key); } ValkeyModule_ZsetRangeStop(key); - ValkeyModule_ZsetLastInScoreRange(key,score_start,score_end,0,0); - while(!ValkeyModule_ZsetRangeEndReached(key)) { + ValkeyModule_ZsetLastInScoreRange(key, score_start, score_end, 0, 0); + while (!ValkeyModule_ZsetRangeEndReached(key)) { double score; - ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key,&score); - ValkeyModule_FreeString(ctx,ele); + ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key, &score); + ValkeyModule_FreeString(ctx, ele); scoresum_b += score; ValkeyModule_ZsetRangePrev(key); } @@ -414,9 +391,9 @@ int HelloZsumRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv ValkeyModule_CloseKey(key); - ValkeyModule_ReplyWithArray(ctx,2); - ValkeyModule_ReplyWithDouble(ctx,scoresum_a); - ValkeyModule_ReplyWithDouble(ctx,scoresum_b); + ValkeyModule_ReplyWithArray(ctx, 2); + ValkeyModule_ReplyWithDouble(ctx, scoresum_a); + ValkeyModule_ReplyWithDouble(ctx, scoresum_b); return VALKEYMODULE_OK; } @@ -432,28 +409,27 @@ int HelloLexRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, if (argc != 6) return ValkeyModule_WrongArity(ctx); 
- ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); if (ValkeyModule_KeyType(key) != VALKEYMODULE_KEYTYPE_ZSET) { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } - if (ValkeyModule_ZsetFirstInLexRange(key,argv[2],argv[3]) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx,"invalid range"); + if (ValkeyModule_ZsetFirstInLexRange(key, argv[2], argv[3]) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "invalid range"); } int arraylen = 0; - ValkeyModule_ReplyWithArray(ctx,VALKEYMODULE_POSTPONED_LEN); - while(!ValkeyModule_ZsetRangeEndReached(key)) { + ValkeyModule_ReplyWithArray(ctx, VALKEYMODULE_POSTPONED_LEN); + while (!ValkeyModule_ZsetRangeEndReached(key)) { double score; - ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key,&score); - ValkeyModule_ReplyWithString(ctx,ele); - ValkeyModule_FreeString(ctx,ele); + ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key, &score); + ValkeyModule_ReplyWithString(ctx, ele); + ValkeyModule_FreeString(ctx, ele); ValkeyModule_ZsetRangeNext(key); arraylen++; } ValkeyModule_ZsetRangeStop(key); - ValkeyModule_ReplySetArrayLength(ctx,arraylen); + ValkeyModule_ReplySetArrayLength(ctx, arraylen); ValkeyModule_CloseKey(key); return VALKEYMODULE_OK; } @@ -469,22 +445,19 @@ int HelloHCopy_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. 
*/ if (argc != 4) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_HASH && - type != VALKEYMODULE_KEYTYPE_EMPTY) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_HASH && type != VALKEYMODULE_KEYTYPE_EMPTY) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } /* Get the old field value. */ ValkeyModuleString *oldval; - ValkeyModule_HashGet(key,VALKEYMODULE_HASH_NONE,argv[2],&oldval,NULL); + ValkeyModule_HashGet(key, VALKEYMODULE_HASH_NONE, argv[2], &oldval, NULL); if (oldval) { - ValkeyModule_HashSet(key,VALKEYMODULE_HASH_NONE,argv[3],oldval,NULL); + ValkeyModule_HashSet(key, VALKEYMODULE_HASH_NONE, argv[3], oldval, NULL); } - ValkeyModule_ReplyWithLongLong(ctx,oldval != NULL); + ValkeyModule_ReplyWithLongLong(ctx, oldval != NULL); return VALKEYMODULE_OK; } @@ -512,9 +485,8 @@ int HelloLeftPad_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, if (argc != 4) return ValkeyModule_WrongArity(ctx); - if ((ValkeyModule_StringToLongLong(argv[2],&padlen) != VALKEYMODULE_OK) || - (padlen< 0)) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid padding length"); + if ((ValkeyModule_StringToLongLong(argv[2], &padlen) != VALKEYMODULE_OK) || (padlen < 0)) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid padding length"); } size_t strlen, chlen; const char *str = ValkeyModule_StringPtrLen(argv[1], &strlen); @@ -522,99 +494,91 @@ int HelloLeftPad_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, /* If the string is already larger than the target len, just return * the string itself. 
*/ - if (strlen >= (size_t)padlen) - return ValkeyModule_ReplyWithString(ctx,argv[1]); + if (strlen >= (size_t)padlen) return ValkeyModule_ReplyWithString(ctx, argv[1]); /* Padding must be a single character in this simple implementation. */ - if (chlen != 1) - return ValkeyModule_ReplyWithError(ctx, - "ERR padding must be a single char"); + if (chlen != 1) return ValkeyModule_ReplyWithError(ctx, "ERR padding must be a single char"); /* Here we use our pool allocator, for our throw-away allocation. */ padlen -= strlen; - char *buf = ValkeyModule_PoolAlloc(ctx,padlen+strlen); + char *buf = ValkeyModule_PoolAlloc(ctx, padlen + strlen); for (long long j = 0; j < padlen; j++) buf[j] = *ch; - memcpy(buf+padlen,str,strlen); + memcpy(buf + padlen, str, strlen); - ValkeyModule_ReplyWithStringBuffer(ctx,buf,padlen+strlen); + ValkeyModule_ReplyWithStringBuffer(ctx, buf, padlen + strlen); return VALKEYMODULE_OK; } /* This function must be present on each module. It is used in order to * register the commands into the server. */ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { - if (ValkeyModule_Init(ctx,"helloworld",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "helloworld", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; /* Log the list of parameters passing loading the module. 
*/ for (int j = 0; j < argc; j++) { - const char *s = ValkeyModule_StringPtrLen(argv[j],NULL); + const char *s = ValkeyModule_StringPtrLen(argv[j], NULL); printf("Module loaded with ARGV[%d] = %s\n", j, s); } - if (ValkeyModule_CreateCommand(ctx,"hello.simple", - HelloSimple_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.simple", HelloSimple_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.push.native", - HelloPushNative_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.push.native", HelloPushNative_ValkeyCommand, "write deny-oom", 1, 1, + 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.push.call", - HelloPushCall_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.push.call", HelloPushCall_ValkeyCommand, "write deny-oom", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.push.call2", - HelloPushCall2_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.push.call2", HelloPushCall2_ValkeyCommand, "write deny-oom", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.list.sum.len", - HelloListSumLen_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.list.sum.len", HelloListSumLen_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.list.splice", - HelloListSplice_ValkeyCommand,"write deny-oom",1,2,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.list.splice", HelloListSplice_ValkeyCommand, "write deny-oom", 1, 2, + 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if 
(ValkeyModule_CreateCommand(ctx,"hello.list.splice.auto", - HelloListSpliceAuto_ValkeyCommand, - "write deny-oom",1,2,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.list.splice.auto", HelloListSpliceAuto_ValkeyCommand, "write deny-oom", + 1, 2, 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.rand.array", - HelloRandArray_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.rand.array", HelloRandArray_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.repl1", - HelloRepl1_ValkeyCommand,"write",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.repl1", HelloRepl1_ValkeyCommand, "write", 0, 0, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.repl2", - HelloRepl2_ValkeyCommand,"write",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.repl2", HelloRepl2_ValkeyCommand, "write", 1, 1, 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.toggle.case", - HelloToggleCase_ValkeyCommand,"write",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.toggle.case", HelloToggleCase_ValkeyCommand, "write", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.more.expire", - HelloMoreExpire_ValkeyCommand,"write",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.more.expire", HelloMoreExpire_ValkeyCommand, "write", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.zsumrange", - HelloZsumRange_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.zsumrange", HelloZsumRange_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if 
(ValkeyModule_CreateCommand(ctx,"hello.lexrange", - HelloLexRange_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.lexrange", HelloLexRange_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.hcopy", - HelloHCopy_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.hcopy", HelloHCopy_ValkeyCommand, "write deny-oom", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.leftpad", - HelloLeftPad_ValkeyCommand,"",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.leftpad", HelloLeftPad_ValkeyCommand, "", 1, 1, 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; return VALKEYMODULE_OK; diff --git a/src/networking.c b/src/networking.c index 7054ffc126..2ff9a7e366 100644 --- a/src/networking.c +++ b/src/networking.c @@ -128,8 +128,8 @@ client *createClient(connection *conn) { connSetPrivateData(conn, c); } c->buf = zmalloc_usable(PROTO_REPLY_CHUNK_BYTES, &c->buf_usable_size); - selectDb(c,0); - uint64_t client_id = atomic_fetch_add_explicit(&server.next_client_id,1,memory_order_relaxed); + selectDb(c, 0); + uint64_t client_id = atomic_fetch_add_explicit(&server.next_client_id, 1, memory_order_relaxed); c->id = client_id; #ifdef LOG_REQ_RES reqresReset(c, 0); @@ -1943,7 +1943,7 @@ int _writeToClient(client *c, ssize_t *nwritten) { * thread safe. 
*/ int writeToClient(client *c, int handler_installed) { /* Update total number of writes on server */ - atomic_fetch_add_explicit(&server.stat_total_writes_processed,1, memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_total_writes_processed, 1, memory_order_relaxed); ssize_t nwritten = 0, totwritten = 0; @@ -2611,7 +2611,7 @@ void readQueryFromClient(connection *conn) { if (postponeClientRead(c)) return; /* Update total number of reads on server */ - atomic_fetch_add_explicit(&server.stat_total_reads_processed,1,memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_total_reads_processed, 1, memory_order_relaxed); readlen = PROTO_IOBUF_LEN; /* If this is a multi bulk request, and we are processing a bulk reply @@ -2677,9 +2677,9 @@ void readQueryFromClient(connection *conn) { c->lastinteraction = server.unixtime; if (c->flags & CLIENT_MASTER) { c->read_reploff += nread; - atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes,nread,memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes, nread, memory_order_relaxed); } else { - atomic_fetch_add_explicit(&server.stat_net_input_bytes,nread,memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_net_input_bytes, nread, memory_order_relaxed); } c->net_input_bytes += nread; @@ -2698,7 +2698,7 @@ void readQueryFromClient(connection *conn) { sdsfree(ci); sdsfree(bytes); freeClientAsync(c); - atomic_fetch_add_explicit(&server.stat_client_qbuf_limit_disconnections,1,memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_client_qbuf_limit_disconnections, 1, memory_order_relaxed); goto done; } @@ -4142,7 +4142,7 @@ pthread_t io_threads[IO_THREADS_MAX_NUM]; pthread_mutex_t io_threads_mutex[IO_THREADS_MAX_NUM]; threads_pending io_threads_pending[IO_THREADS_MAX_NUM]; int io_threads_op; - /* IO_THREADS_OP_IDLE, IO_THREADS_OP_READ or IO_THREADS_OP_WRITE. */ // TODO: should access to this be atomic??! 
+/* IO_THREADS_OP_IDLE, IO_THREADS_OP_READ or IO_THREADS_OP_WRITE. */ // TODO: should access to this be atomic??! /* This is the list of clients each thread will serve when threaded I/O is * used. We spawn io_threads_num-1 threads, since one is the main thread diff --git a/src/rdb.c b/src/rdb.c index abc86566d0..fe297cb7a9 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -2889,7 +2889,7 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) { processModuleLoadingProgressEvent(0); } if (server.repl_state == REPL_STATE_TRANSFER && rioCheckType(r) == RIO_TYPE_CONN) { - atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes,len,memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes, len, memory_order_relaxed); } } diff --git a/src/replication.c b/src/replication.c index 069d60a678..0c561f1204 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1376,8 +1376,8 @@ void sendBulkToSlave(connection *conn) { freeClient(slave); return; } - atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes,nwritten,memory_order_relaxed); - sdsrange(slave->replpreamble,nwritten,-1); + atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes, nwritten, memory_order_relaxed); + sdsrange(slave->replpreamble, nwritten, -1); if (sdslen(slave->replpreamble) == 0) { sdsfree(slave->replpreamble); slave->replpreamble = NULL; @@ -1404,7 +1404,7 @@ void sendBulkToSlave(connection *conn) { return; } slave->repldboff += nwritten; - atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes,nwritten,memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes, nwritten, memory_order_relaxed); if (slave->repldboff == slave->repldbsize) { closeRepldbfd(slave); connSetWriteHandler(slave->conn, NULL); @@ -1446,7 +1446,7 @@ void rdbPipeWriteHandler(struct connection *conn) { return; } else { slave->repldboff += nwritten; - atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes,nwritten,memory_order_relaxed); + 
atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes, nwritten, memory_order_relaxed); if (slave->repldboff < server.rdb_pipe_bufflen) { slave->repl_last_partial_write = server.unixtime; return; /* more data to write.. */ @@ -1519,7 +1519,7 @@ void rdbPipeReadHandler(struct aeEventLoop *eventLoop, int fd, void *clientData, /* Note: when use diskless replication, 'repldboff' is the offset * of 'rdb_pipe_buff' sent rather than the offset of entire RDB. */ slave->repldboff = nwritten; - atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes,nwritten,memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes, nwritten, memory_order_relaxed); } /* If we were unable to write all the data to one of the replicas, * setup write handler (and disable pipe read handler, below) */ @@ -1827,7 +1827,7 @@ void readSyncBulkPayload(connection *conn) { } else { /* nread here is returned by connSyncReadLine(), which calls syncReadLine() and * convert "\r\n" to '\0' so 1 byte is lost. */ - atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes,nread+1,memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes, nread + 1, memory_order_relaxed); } if (buf[0] == '-') { @@ -1896,7 +1896,7 @@ void readSyncBulkPayload(connection *conn) { cancelReplicationHandshake(1); return; } - atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes,nread,memory_order_relaxed); + atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes, nread, memory_order_relaxed); /* When a mark is used, we want to detect EOF asap in order to avoid * writing the EOF mark into the file... 
*/ diff --git a/src/server.c b/src/server.c index edf215eac2..e0590706d3 100644 --- a/src/server.c +++ b/src/server.c @@ -1258,10 +1258,10 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { long long stat_net_input_bytes, stat_net_output_bytes; long long stat_net_repl_input_bytes, stat_net_repl_output_bytes; - stat_net_input_bytes = atomic_load_explicit(&server.stat_net_input_bytes,memory_order_relaxed); - stat_net_output_bytes = atomic_load_explicit(&server.stat_net_output_bytes,memory_order_relaxed); - stat_net_repl_input_bytes = atomic_load_explicit(&server.stat_net_repl_input_bytes,memory_order_relaxed); - stat_net_repl_output_bytes = atomic_load_explicit(&server.stat_net_repl_output_bytes,memory_order_relaxed); + stat_net_input_bytes = atomic_load_explicit(&server.stat_net_input_bytes, memory_order_relaxed); + stat_net_output_bytes = atomic_load_explicit(&server.stat_net_output_bytes, memory_order_relaxed); + stat_net_repl_input_bytes = atomic_load_explicit(&server.stat_net_repl_input_bytes, memory_order_relaxed); + stat_net_repl_output_bytes = atomic_load_explicit(&server.stat_net_repl_output_bytes, memory_order_relaxed); monotime current_time = getMonotonicUs(); long long factor = 1000000; // us @@ -1737,12 +1737,10 @@ void afterSleep(struct aeEventLoop *eventLoop) { if (moduleCount()) { mstime_t latency; latencyStartMonitor(latency); - atomic_store_explicit(&server.module_gil_acquiring,1,memory_order_relaxed); + atomic_store_explicit(&server.module_gil_acquiring, 1, memory_order_relaxed); moduleAcquireGIL(); - atomic_store_explicit(&server.module_gil_acquiring,0,memory_order_relaxed); - moduleFireServerEvent(VALKEYMODULE_EVENT_EVENTLOOP, - VALKEYMODULE_SUBEVENT_EVENTLOOP_AFTER_SLEEP, - NULL); + atomic_store_explicit(&server.module_gil_acquiring, 0, memory_order_relaxed); + moduleFireServerEvent(VALKEYMODULE_EVENT_EVENTLOOP, VALKEYMODULE_SUBEVENT_EVENTLOOP_AFTER_SLEEP, NULL); latencyEndMonitor(latency); 
latencyAddSampleIfNeeded("module-acquire-GIL", latency); } @@ -1991,7 +1989,7 @@ void initServerConfig(void) { server.aof_flush_sleep = 0; server.aof_last_fsync = time(NULL) * 1000; server.aof_cur_timestamp = 0; - atomic_store_explicit(&server.aof_bio_fsync_status,C_OK,memory_order_relaxed); + atomic_store_explicit(&server.aof_bio_fsync_status, C_OK, memory_order_relaxed); server.aof_rewrite_time_last = -1; server.aof_rewrite_time_start = -1; server.aof_lastbgrewrite_status = C_OK; @@ -2481,10 +2479,10 @@ void resetServerStats(void) { server.stat_sync_partial_ok = 0; server.stat_sync_partial_err = 0; server.stat_io_reads_processed = 0; - atomic_store_explicit(&server.stat_total_reads_processed,0,memory_order_relaxed); + atomic_store_explicit(&server.stat_total_reads_processed, 0, memory_order_relaxed); server.stat_io_writes_processed = 0; - atomic_store_explicit(&server.stat_total_writes_processed,0,memory_order_relaxed); - atomic_store_explicit(&server.stat_client_qbuf_limit_disconnections,0,memory_order_relaxed); + atomic_store_explicit(&server.stat_total_writes_processed, 0, memory_order_relaxed); + atomic_store_explicit(&server.stat_client_qbuf_limit_disconnections, 0, memory_order_relaxed); server.stat_client_outbuf_limit_disconnections = 0; for (j = 0; j < STATS_METRIC_COUNT; j++) { server.inst_metric[j].idx = 0; @@ -2495,10 +2493,10 @@ void resetServerStats(void) { server.stat_aof_rewrites = 0; server.stat_rdb_saves = 0; server.stat_aofrw_consecutive_failures = 0; - atomic_store_explicit(&server.stat_net_input_bytes,0,memory_order_relaxed); - atomic_store_explicit(&server.stat_net_output_bytes,0,memory_order_relaxed); - atomic_store_explicit(&server.stat_net_repl_input_bytes,0,memory_order_relaxed); - atomic_store_explicit(&server.stat_net_repl_output_bytes,0,memory_order_relaxed); + atomic_store_explicit(&server.stat_net_input_bytes, 0, memory_order_relaxed); + atomic_store_explicit(&server.stat_net_output_bytes, 0, memory_order_relaxed); + 
atomic_store_explicit(&server.stat_net_repl_input_bytes, 0, memory_order_relaxed); + atomic_store_explicit(&server.stat_net_repl_output_bytes, 0, memory_order_relaxed); server.stat_unexpected_error_replies = 0; server.stat_total_error_replies = 0; server.stat_dump_payload_sanitizations = 0; @@ -5530,7 +5528,7 @@ sds genValkeyInfoString(dict *section_dict, int all_sections, int everything) { } else if (server.stat_current_save_keys_total) { fork_perc = ((double)server.stat_current_save_keys_processed / server.stat_current_save_keys_total) * 100; } - int aof_bio_fsync_status = atomic_load_explicit(&server.aof_bio_fsync_status,memory_order_relaxed); + int aof_bio_fsync_status = atomic_load_explicit(&server.aof_bio_fsync_status, memory_order_relaxed); /* clang-format off */ info = sdscatprintf(info, "# Persistence\r\n" FMTARGS( @@ -5629,14 +5627,15 @@ sds genValkeyInfoString(dict *section_dict, int all_sections, int everything) { long long current_active_defrag_time = server.stat_last_active_defrag_time ? 
(long long)elapsedUs(server.stat_last_active_defrag_time) : 0; long long stat_client_qbuf_limit_disconnections; - + stat_total_reads_processed = atomic_load_explicit(&server.stat_total_reads_processed, memory_order_relaxed); stat_total_writes_processed = atomic_load_explicit(&server.stat_total_writes_processed, memory_order_relaxed); stat_net_input_bytes = atomic_load_explicit(&server.stat_net_input_bytes, memory_order_relaxed); stat_net_output_bytes = atomic_load_explicit(&server.stat_net_output_bytes, memory_order_relaxed); stat_net_repl_input_bytes = atomic_load_explicit(&server.stat_net_repl_input_bytes, memory_order_relaxed); stat_net_repl_output_bytes = atomic_load_explicit(&server.stat_net_repl_output_bytes, memory_order_relaxed); - stat_client_qbuf_limit_disconnections = atomic_load_explicit(&server.stat_client_qbuf_limit_disconnections, memory_order_relaxed); + stat_client_qbuf_limit_disconnections = + atomic_load_explicit(&server.stat_client_qbuf_limit_disconnections, memory_order_relaxed); if (sections++) info = sdscat(info, "\r\n"); /* clang-format off */ diff --git a/src/server.h b/src/server.h index 7011be3033..bdf8b12574 100644 --- a/src/server.h +++ b/src/server.h @@ -716,7 +716,7 @@ typedef enum { #define LATENCY_HISTOGRAM_MAX_VALUE 1000000000L /* <= 1 secs */ #define LATENCY_HISTOGRAM_PRECISION \ 2 /* Maintain a value precision of 2 significant digits across LATENCY_HISTOGRAM_MIN_VALUE and \ - * LATENCY_HISTOGRAM_MAX_VALUE range. Value quantization within the range will thus be no larger than 1/100th \ + * LATENCY_HISTOGRAM_MAX_VALUE range. Value quantization within the range will thus be no larger than 1/100th \ * (or 1%) of any value. The total size per histogram should sit around 40 KiB Bytes. 
*/ /* Busy module flags, see busy_module_yield_flags */ @@ -1671,13 +1671,13 @@ struct valkeyServer { uint32_t paused_actions; /* Bitmask of actions that are currently paused */ list *postponed_clients; /* List of postponed clients */ pause_event client_pause_per_purpose[NUM_PAUSE_PURPOSES]; - char neterr[ANET_ERR_LEN]; /* Error buffer for anet.c */ - dict *migrate_cached_sockets;/* MIGRATE cached sockets */ - _Atomic uint64_t next_client_id; /* Next client unique ID. Incremental. */ - int protected_mode; /* Don't accept external connections. */ - int io_threads_num; /* Number of IO threads to use. */ - int io_threads_do_reads; /* Read and parse from IO threads? */ - int io_threads_active; /* Is IO threads currently active? */ + char neterr[ANET_ERR_LEN]; /* Error buffer for anet.c */ + dict *migrate_cached_sockets; /* MIGRATE cached sockets */ + _Atomic uint64_t next_client_id; /* Next client unique ID. Incremental. */ + int protected_mode; /* Don't accept external connections. */ + int io_threads_num; /* Number of IO threads to use. */ + int io_threads_do_reads; /* Read and parse from IO threads? */ + int io_threads_active; /* Is IO threads currently active? */ long long events_processed_while_blocked; /* processEventsWhileBlocked() */ int enable_protected_configs; /* Enable the modification of protected configs, see PROTECTED_ACTION_ALLOWED_* */ int enable_debug_cmd; /* Enable DEBUG commands, see PROTECTED_ACTION_ALLOWED_* */ @@ -1698,61 +1698,64 @@ struct valkeyServer { long long stat_expiredkeys; /* Number of expired keys */ double stat_expired_stale_perc; /* Percentage of keys probably expired */ long long stat_expired_time_cap_reached_count; /* Early expire cycle stops.*/ - long long stat_expire_cycle_time_used; /* Cumulative microseconds used. */ - long long stat_evictedkeys; /* Number of evicted keys (maxmemory) */ - long long stat_evictedclients; /* Number of evicted clients */ - long long stat_evictedscripts; /* Number of evicted lua scripts. 
*/ - long long stat_total_eviction_exceeded_time; /* Total time over the memory limit, unit us */ - monotime stat_last_eviction_exceeded_time; /* Timestamp of current eviction start, unit us */ - long long stat_keyspace_hits; /* Number of successful lookups of keys */ - long long stat_keyspace_misses; /* Number of failed lookups of keys */ - long long stat_active_defrag_hits; /* number of allocations moved */ - long long stat_active_defrag_misses; /* number of allocations scanned but not moved */ - long long stat_active_defrag_key_hits; /* number of keys with moved allocations */ - long long stat_active_defrag_key_misses;/* number of keys scanned and not moved */ - long long stat_active_defrag_scanned; /* number of dictEntries scanned */ - long long stat_total_active_defrag_time; /* Total time memory fragmentation over the limit, unit us */ - monotime stat_last_active_defrag_time; /* Timestamp of current active defrag start */ - size_t stat_peak_memory; /* Max used memory record */ - long long stat_aof_rewrites; /* number of aof file rewrites performed */ - long long stat_aofrw_consecutive_failures; /* The number of consecutive failures of aofrw */ - long long stat_rdb_saves; /* number of rdb saves performed */ - long long stat_fork_time; /* Time needed to perform latest fork() */ - double stat_fork_rate; /* Fork rate in GB/sec. */ - long long stat_total_forks; /* Total count of fork. */ - long long stat_rejected_conn; /* Clients rejected because of maxclients */ - long long stat_sync_full; /* Number of full resyncs with slaves. */ - long long stat_sync_partial_ok; /* Number of accepted PSYNC requests. */ - long long stat_sync_partial_err;/* Number of unaccepted PSYNC requests. 
*/ - list *slowlog; /* SLOWLOG list of commands */ - long long slowlog_entry_id; /* SLOWLOG current entry ID */ - long long slowlog_log_slower_than; /* SLOWLOG time limit (to get logged) */ - unsigned long slowlog_max_len; /* SLOWLOG max number of items logged */ - struct malloc_stats cron_malloc_stats; /* sampled in serverCron(). */ - _Atomic long long stat_net_input_bytes; /* Bytes read from network. */ - _Atomic long long stat_net_output_bytes; /* Bytes written to network. */ - _Atomic long long stat_net_repl_input_bytes; /* Bytes read during replication, added to stat_net_input_bytes in 'info'. */ - _Atomic long long stat_net_repl_output_bytes; /* Bytes written during replication, added to stat_net_output_bytes in 'info'. */ - size_t stat_current_cow_peak; /* Peak size of copy on write bytes. */ - size_t stat_current_cow_bytes; /* Copy on write bytes while child is active. */ - monotime stat_current_cow_updated; /* Last update time of stat_current_cow_bytes */ - size_t stat_current_save_keys_processed; /* Processed keys while child is active. */ - size_t stat_current_save_keys_total; /* Number of keys when child started. */ - size_t stat_rdb_cow_bytes; /* Copy on write bytes during RDB saving. */ - size_t stat_aof_cow_bytes; /* Copy on write bytes during AOF rewrite. */ - size_t stat_module_cow_bytes; /* Copy on write bytes during module fork. */ - double stat_module_progress; /* Module save progress. */ - size_t stat_clients_type_memory[CLIENT_TYPE_COUNT];/* Mem usage by type */ - size_t stat_cluster_links_memory; /* Mem usage by cluster links */ - long long stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) error replies */ + long long stat_expire_cycle_time_used; /* Cumulative microseconds used. */ + long long stat_evictedkeys; /* Number of evicted keys (maxmemory) */ + long long stat_evictedclients; /* Number of evicted clients */ + long long stat_evictedscripts; /* Number of evicted lua scripts. 
*/ + long long stat_total_eviction_exceeded_time; /* Total time over the memory limit, unit us */ + monotime stat_last_eviction_exceeded_time; /* Timestamp of current eviction start, unit us */ + long long stat_keyspace_hits; /* Number of successful lookups of keys */ + long long stat_keyspace_misses; /* Number of failed lookups of keys */ + long long stat_active_defrag_hits; /* number of allocations moved */ + long long stat_active_defrag_misses; /* number of allocations scanned but not moved */ + long long stat_active_defrag_key_hits; /* number of keys with moved allocations */ + long long stat_active_defrag_key_misses; /* number of keys scanned and not moved */ + long long stat_active_defrag_scanned; /* number of dictEntries scanned */ + long long stat_total_active_defrag_time; /* Total time memory fragmentation over the limit, unit us */ + monotime stat_last_active_defrag_time; /* Timestamp of current active defrag start */ + size_t stat_peak_memory; /* Max used memory record */ + long long stat_aof_rewrites; /* number of aof file rewrites performed */ + long long stat_aofrw_consecutive_failures; /* The number of consecutive failures of aofrw */ + long long stat_rdb_saves; /* number of rdb saves performed */ + long long stat_fork_time; /* Time needed to perform latest fork() */ + double stat_fork_rate; /* Fork rate in GB/sec. */ + long long stat_total_forks; /* Total count of fork. */ + long long stat_rejected_conn; /* Clients rejected because of maxclients */ + long long stat_sync_full; /* Number of full resyncs with slaves. */ + long long stat_sync_partial_ok; /* Number of accepted PSYNC requests. */ + long long stat_sync_partial_err; /* Number of unaccepted PSYNC requests. 
*/ + list *slowlog; /* SLOWLOG list of commands */ + long long slowlog_entry_id; /* SLOWLOG current entry ID */ + long long slowlog_log_slower_than; /* SLOWLOG time limit (to get logged) */ + unsigned long slowlog_max_len; /* SLOWLOG max number of items logged */ + struct malloc_stats cron_malloc_stats; /* sampled in serverCron(). */ + _Atomic long long stat_net_input_bytes; /* Bytes read from network. */ + _Atomic long long stat_net_output_bytes; /* Bytes written to network. */ + _Atomic long long + stat_net_repl_input_bytes; /* Bytes read during replication, added to stat_net_input_bytes in 'info'. */ + _Atomic long long + stat_net_repl_output_bytes; /* Bytes written during replication, added to stat_net_output_bytes in 'info'. */ + size_t stat_current_cow_peak; /* Peak size of copy on write bytes. */ + size_t stat_current_cow_bytes; /* Copy on write bytes while child is active. */ + monotime stat_current_cow_updated; /* Last update time of stat_current_cow_bytes */ + size_t stat_current_save_keys_processed; /* Processed keys while child is active. */ + size_t stat_current_save_keys_total; /* Number of keys when child started. */ + size_t stat_rdb_cow_bytes; /* Copy on write bytes during RDB saving. */ + size_t stat_aof_cow_bytes; /* Copy on write bytes during AOF rewrite. */ + size_t stat_module_cow_bytes; /* Copy on write bytes during module fork. */ + double stat_module_progress; /* Module save progress. */ + size_t stat_clients_type_memory[CLIENT_TYPE_COUNT]; /* Mem usage by type */ + size_t stat_cluster_links_memory; /* Mem usage by cluster links */ + long long + stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) error replies */ long long stat_total_error_replies; /* Total number of issued error replies ( command + rejected errors ) */ - long long stat_dump_payload_sanitizations; /* Number deep dump payloads integrity validations. 
*/ - long long stat_io_reads_processed; /* Number of read events processed by IO / Main threads */ - long long stat_io_writes_processed; /* Number of write events processed by IO / Main threads */ - _Atomic long long stat_total_reads_processed; /* Total number of read events processed */ - _Atomic long long stat_total_writes_processed; /* Total number of write events processed */ - _Atomic long long stat_client_qbuf_limit_disconnections; /* Total number of clients reached query buf length limit */ - long long stat_client_outbuf_limit_disconnections; /* Total number of clients reached output buf length limit */ + long long stat_dump_payload_sanitizations; /* Number deep dump payloads integrity validations. */ + long long stat_io_reads_processed; /* Number of read events processed by IO / Main threads */ + long long stat_io_writes_processed; /* Number of write events processed by IO / Main threads */ + _Atomic long long stat_total_reads_processed; /* Total number of read events processed */ + _Atomic long long stat_total_writes_processed; /* Total number of write events processed */ + _Atomic long long stat_client_qbuf_limit_disconnections; /* Total number of clients reached query buf length limit */ + long long stat_client_outbuf_limit_disconnections; /* Total number of clients reached output buf length limit */ /* The following two are used to track instantaneous metrics, like * number of operations per second, network traffic. */ struct { @@ -1812,43 +1815,43 @@ struct valkeyServer { unsigned int max_new_conns_per_cycle; /* The maximum number of tcp connections that will be accepted during each invocation of the event loop. 
*/ /* AOF persistence */ - int aof_enabled; /* AOF configuration */ - int aof_state; /* AOF_(ON|OFF|WAIT_REWRITE) */ - int aof_fsync; /* Kind of fsync() policy */ - char *aof_filename; /* Basename of the AOF file and manifest file */ - char *aof_dirname; /* Name of the AOF directory */ - int aof_no_fsync_on_rewrite; /* Don't fsync if a rewrite is in prog. */ - int aof_rewrite_perc; /* Rewrite AOF if % growth is > M and... */ - off_t aof_rewrite_min_size; /* the AOF file is at least N bytes. */ - off_t aof_rewrite_base_size; /* AOF size on latest startup or rewrite. */ - off_t aof_current_size; /* AOF current size (Including BASE + INCRs). */ - off_t aof_last_incr_size; /* The size of the latest incr AOF. */ - off_t aof_last_incr_fsync_offset; /* AOF offset which is already requested to be synced to disk. - * Compare with the aof_last_incr_size. */ - int aof_flush_sleep; /* Micros to sleep before flush. (used by tests) */ - int aof_rewrite_scheduled; /* Rewrite once BGSAVE terminates. */ - sds aof_buf; /* AOF buffer, written before entering the event loop */ - int aof_fd; /* File descriptor of currently selected AOF file */ - int aof_selected_db; /* Currently selected DB in AOF */ + int aof_enabled; /* AOF configuration */ + int aof_state; /* AOF_(ON|OFF|WAIT_REWRITE) */ + int aof_fsync; /* Kind of fsync() policy */ + char *aof_filename; /* Basename of the AOF file and manifest file */ + char *aof_dirname; /* Name of the AOF directory */ + int aof_no_fsync_on_rewrite; /* Don't fsync if a rewrite is in prog. */ + int aof_rewrite_perc; /* Rewrite AOF if % growth is > M and... */ + off_t aof_rewrite_min_size; /* the AOF file is at least N bytes. */ + off_t aof_rewrite_base_size; /* AOF size on latest startup or rewrite. */ + off_t aof_current_size; /* AOF current size (Including BASE + INCRs). */ + off_t aof_last_incr_size; /* The size of the latest incr AOF. */ + off_t aof_last_incr_fsync_offset; /* AOF offset which is already requested to be synced to disk. 
+ * Compare with the aof_last_incr_size. */ + int aof_flush_sleep; /* Micros to sleep before flush. (used by tests) */ + int aof_rewrite_scheduled; /* Rewrite once BGSAVE terminates. */ + sds aof_buf; /* AOF buffer, written before entering the event loop */ + int aof_fd; /* File descriptor of currently selected AOF file */ + int aof_selected_db; /* Currently selected DB in AOF */ mstime_t aof_flush_postponed_start; /* mstime of postponed AOF flush */ mstime_t aof_last_fsync; /* mstime of last fsync() */ - time_t aof_rewrite_time_last; /* Time used by last AOF rewrite run. */ - time_t aof_rewrite_time_start; /* Current AOF rewrite start time. */ - time_t aof_cur_timestamp; /* Current record timestamp in AOF */ - int aof_timestamp_enabled; /* Enable record timestamp in AOF */ - int aof_lastbgrewrite_status; /* C_OK or C_ERR */ - unsigned long aof_delayed_fsync; /* delayed AOF fsync() counter */ - int aof_rewrite_incremental_fsync;/* fsync incrementally while aof rewriting? */ - int rdb_save_incremental_fsync; /* fsync incrementally while rdb saving? */ - int aof_last_write_status; /* C_OK or C_ERR */ - int aof_last_write_errno; /* Valid if aof write/fsync status is ERR */ - int aof_load_truncated; /* Don't stop on unexpected AOF EOF. */ - int aof_use_rdb_preamble; /* Specify base AOF to use RDB encoding on AOF rewrites. */ - _Atomic int aof_bio_fsync_status; /* Status of AOF fsync in bio job. */ - _Atomic int aof_bio_fsync_errno; /* Errno of AOF fsync in bio job. */ - aofManifest *aof_manifest; /* Used to track AOFs. */ - int aof_disable_auto_gc; /* If disable automatically deleting HISTORY type AOFs? - default no. (for testings). */ + time_t aof_rewrite_time_last; /* Time used by last AOF rewrite run. */ + time_t aof_rewrite_time_start; /* Current AOF rewrite start time. 
*/ + time_t aof_cur_timestamp; /* Current record timestamp in AOF */ + int aof_timestamp_enabled; /* Enable record timestamp in AOF */ + int aof_lastbgrewrite_status; /* C_OK or C_ERR */ + unsigned long aof_delayed_fsync; /* delayed AOF fsync() counter */ + int aof_rewrite_incremental_fsync; /* fsync incrementally while aof rewriting? */ + int rdb_save_incremental_fsync; /* fsync incrementally while rdb saving? */ + int aof_last_write_status; /* C_OK or C_ERR */ + int aof_last_write_errno; /* Valid if aof write/fsync status is ERR */ + int aof_load_truncated; /* Don't stop on unexpected AOF EOF. */ + int aof_use_rdb_preamble; /* Specify base AOF to use RDB encoding on AOF rewrites. */ + _Atomic int aof_bio_fsync_status; /* Status of AOF fsync in bio job. */ + _Atomic int aof_bio_fsync_errno; /* Errno of AOF fsync in bio job. */ + aofManifest *aof_manifest; /* Used to track AOFs. */ + int aof_disable_auto_gc; /* If disable automatically deleting HISTORY type AOFs? + default no. (for testings). */ /* RDB persistence */ long long dirty; /* Changes to DB from the last save */ @@ -1906,35 +1909,35 @@ struct valkeyServer { int shutdown_on_sigterm; /* Shutdown flags configured for SIGTERM. */ /* Replication (master) */ - char replid[CONFIG_RUN_ID_SIZE+1]; /* My current replication ID. */ - char replid2[CONFIG_RUN_ID_SIZE+1]; /* replid inherited from master*/ - long long master_repl_offset; /* My current replication offset */ - long long second_replid_offset; /* Accept offsets up to this for replid2. 
*/ - _Atomic long long fsynced_reploff_pending;/* Largest replication offset to - * potentially have been fsynced, applied to - fsynced_reploff only when AOF state is AOF_ON - (not during the initial rewrite) */ - long long fsynced_reploff; /* Largest replication offset that has been confirmed to be fsynced */ - int slaveseldb; /* Last SELECTed DB in replication output */ - int repl_ping_slave_period; /* Master pings the slave every N seconds */ - replBacklog *repl_backlog; /* Replication backlog for partial syncs */ - long long repl_backlog_size; /* Backlog circular buffer size */ - time_t repl_backlog_time_limit; /* Time without slaves after the backlog - gets released. */ - time_t repl_no_slaves_since; /* We have no slaves since that time. - Only valid if server.slaves len is 0. */ - int repl_min_slaves_to_write; /* Min number of slaves to write. */ - int repl_min_slaves_max_lag; /* Max lag of slaves to write. */ - int repl_good_slaves_count; /* Number of slaves with lag <= max_lag. */ - int repl_diskless_sync; /* Master send RDB to slaves sockets directly. */ - int repl_diskless_load; /* Slave parse RDB directly from the socket. - * see REPL_DISKLESS_LOAD_* enum */ - int repl_diskless_sync_delay; /* Delay to start a diskless repl BGSAVE. */ - int repl_diskless_sync_max_replicas;/* Max replicas for diskless repl BGSAVE - * delay (start sooner if they all connect). */ - size_t repl_buffer_mem; /* The memory of replication buffer. */ - list *repl_buffer_blocks; /* Replication buffers blocks list - * (serving replica clients and repl backlog) */ + char replid[CONFIG_RUN_ID_SIZE + 1]; /* My current replication ID. */ + char replid2[CONFIG_RUN_ID_SIZE + 1]; /* replid inherited from master*/ + long long master_repl_offset; /* My current replication offset */ + long long second_replid_offset; /* Accept offsets up to this for replid2. 
*/ + _Atomic long long fsynced_reploff_pending; /* Largest replication offset to + * potentially have been fsynced, applied to + fsynced_reploff only when AOF state is AOF_ON + (not during the initial rewrite) */ + long long fsynced_reploff; /* Largest replication offset that has been confirmed to be fsynced */ + int slaveseldb; /* Last SELECTed DB in replication output */ + int repl_ping_slave_period; /* Master pings the slave every N seconds */ + replBacklog *repl_backlog; /* Replication backlog for partial syncs */ + long long repl_backlog_size; /* Backlog circular buffer size */ + time_t repl_backlog_time_limit; /* Time without slaves after the backlog + gets released. */ + time_t repl_no_slaves_since; /* We have no slaves since that time. + Only valid if server.slaves len is 0. */ + int repl_min_slaves_to_write; /* Min number of slaves to write. */ + int repl_min_slaves_max_lag; /* Max lag of slaves to write. */ + int repl_good_slaves_count; /* Number of slaves with lag <= max_lag. */ + int repl_diskless_sync; /* Master send RDB to slaves sockets directly. */ + int repl_diskless_load; /* Slave parse RDB directly from the socket. + * see REPL_DISKLESS_LOAD_* enum */ + int repl_diskless_sync_delay; /* Delay to start a diskless repl BGSAVE. */ + int repl_diskless_sync_max_replicas; /* Max replicas for diskless repl BGSAVE + * delay (start sooner if they all connect). */ + size_t repl_buffer_mem; /* The memory of replication buffer. */ + list *repl_buffer_blocks; /* Replication buffers blocks list + * (serving replica clients and repl backlog) */ /* Replication (slave) */ char *masteruser; /* AUTH with this user and masterauth with master */ sds masterauth; /* AUTH with this password with master */ @@ -2018,13 +2021,13 @@ struct valkeyServer { int list_max_listpack_size; int list_compress_depth; /* time cache */ - _Atomic time_t unixtime; /* Unix time sampled every cron cycle. */ - time_t timezone; /* Cached timezone. As set by tzset(). 
*/ - int daylight_active; /* Currently in daylight saving time. */ - mstime_t mstime; /* 'unixtime' in milliseconds. */ - ustime_t ustime; /* 'unixtime' in microseconds. */ - mstime_t cmd_time_snapshot; /* Time snapshot of the root execution nesting. */ - size_t blocking_op_nesting; /* Nesting level of blocking operation, used to reset blocked_last_cron. */ + _Atomic time_t unixtime; /* Unix time sampled every cron cycle. */ + time_t timezone; /* Cached timezone. As set by tzset(). */ + int daylight_active; /* Currently in daylight saving time. */ + mstime_t mstime; /* 'unixtime' in milliseconds. */ + ustime_t ustime; /* 'unixtime' in microseconds. */ + mstime_t cmd_time_snapshot; /* Time snapshot of the root execution nesting. */ + size_t blocking_op_nesting; /* Nesting level of blocking operation, used to reset blocked_last_cron. */ long long blocked_last_cron; /* Indicate the mstime of the last time we did cron jobs from a blocking operation */ /* Pubsub */ kvstore *pubsub_channels; /* Map channels to list of subscribed clients */ diff --git a/src/threads_mngr.c b/src/threads_mngr.c index e1f1d7e7b4..f18bc79b92 100644 --- a/src/threads_mngr.c +++ b/src/threads_mngr.c @@ -112,7 +112,7 @@ __attribute__((noinline)) int ThreadsManager_runOnThreads(pid_t *tids, size_t ti static int test_and_start(void) { /* atomic_exchange_explicit sets the variable to 1 and returns the previous value */ - int prev_state = atomic_exchange_explicit(&g_in_progress,1,memory_order_relaxed); + int prev_state = atomic_exchange_explicit(&g_in_progress, 1, memory_order_relaxed); /* If prev_state is 1, g_in_progress was on. 
*/ return prev_state; @@ -123,7 +123,7 @@ __attribute__((noinline)) static void invoke_callback(int sig) { run_on_thread_cb callback = g_callback; if (callback) { callback(); - atomic_fetch_add_explicit(&g_num_threads_done,1,memory_order_relaxed); + atomic_fetch_add_explicit(&g_num_threads_done, 1, memory_order_relaxed); } else { serverLogFromHandler(LL_WARNING, "tid %ld: ThreadsManager g_callback is NULL", syscall(SYS_gettid)); } @@ -145,7 +145,7 @@ static void wait_threads(void) { /* Sleep a bit to yield to other threads. */ /* usleep isn't listed as signal safe, so we use select instead */ select(0, NULL, NULL, NULL, &tv); - curr_done_count = atomic_load_explicit(&g_num_threads_done,memory_order_relaxed); + curr_done_count = atomic_load_explicit(&g_num_threads_done, memory_order_relaxed); clock_gettime(CLOCK_REALTIME, &curr_time); } while (curr_done_count < g_tids_len && curr_time.tv_sec <= timeout_time.tv_sec); @@ -160,7 +160,7 @@ static void ThreadsManager_cleanups(void) { g_num_threads_done = 0; /* Lastly, turn off g_in_progress */ - atomic_store_explicit(&g_in_progress,0,memory_order_relaxed); + atomic_store_explicit(&g_in_progress, 0, memory_order_relaxed); } #else diff --git a/src/unit/test_crc64.c b/src/unit/test_crc64.c index 9489a24625..87aeed9e6c 100644 --- a/src/unit/test_crc64.c +++ b/src/unit/test_crc64.c @@ -16,13 +16,13 @@ int test_crc64(int argc, char **argv, int flags) { TEST_ASSERT_MESSAGE("[calcula]: CRC64 '123456789'", (uint64_t)crc64(0, numbers, 9) == 16845390139448941002ull); unsigned char li[] = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed " - "do eiusmod tempor incididunt ut labore et dolore magna " - "aliqua. Ut enim ad minim veniam, quis nostrud exercitation " - "ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis " - "aute irure dolor in reprehenderit in voluptate velit esse " - "cillum dolore eu fugiat nulla pariatur. 
Excepteur sint " - "occaecat cupidatat non proident, sunt in culpa qui officia " - "deserunt mollit anim id est laborum."; + "do eiusmod tempor incididunt ut labore et dolore magna " + "aliqua. Ut enim ad minim veniam, quis nostrud exercitation " + "ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis " + "aute irure dolor in reprehenderit in voluptate velit esse " + "cillum dolore eu fugiat nulla pariatur. Excepteur sint " + "occaecat cupidatat non proident, sunt in culpa qui officia " + "deserunt mollit anim id est laborum."; TEST_ASSERT_MESSAGE("[calcula]: CRC64 TEXT'", (uint64_t)_crc64(0, li, sizeof(li)) == 14373597793578550195ull); TEST_ASSERT_MESSAGE("[calcula]: CRC64 TEXT", (uint64_t)crc64(0, li, sizeof(li)) == 14373597793578550195ull); diff --git a/src/unit/test_crc64combine.c b/src/unit/test_crc64combine.c index cb1d96f1eb..67ef4ade43 100644 --- a/src/unit/test_crc64combine.c +++ b/src/unit/test_crc64combine.c @@ -20,7 +20,7 @@ long long _ustime(void) { long long ust; gettimeofday(&tv, NULL); - ust = ((long long)tv.tv_sec)*1000000; + ust = ((long long)tv.tv_sec) * 1000000; ust += tv.tv_usec; return ust; } @@ -28,18 +28,17 @@ long long _ustime(void) { static int bench_crc64(unsigned char *data, uint64_t size, long long passes, uint64_t check, char *name, int csv) { uint64_t min = size, hash = 0; long long original_start = _ustime(), original_end; - for (long long i=passes; i > 0; i--) { + for (long long i = passes; i > 0; i--) { hash = crc64(0, data, size); } original_end = _ustime(); min = (original_end - original_start) * 1000 / passes; /* approximate nanoseconds without nstime */ if (csv) { - printf("%s,%" PRIu64 ",%" PRIu64 ",%d\n", - name, size, (1000 * size) / min, hash == check); + printf("%s,%" PRIu64 ",%" PRIu64 ",%d\n", name, size, (1000 * size) / min, hash == check); } else { - TEST_PRINT_INFO("test size=%" PRIu64 " algorithm=%s %" PRIu64 " M/sec matches=%d", - size, name, (1000 * size) / min, hash == check); + TEST_PRINT_INFO("test 
size=%" PRIu64 " algorithm=%s %" PRIu64 " M/sec matches=%d", size, name, + (1000 * size) / min, hash == check); } return hash != check; } @@ -49,7 +48,7 @@ const uint64_t BENCH_RPOLY = UINT64_C(0x95ac9329ac4bc9b5); static void bench_combine(char *label, uint64_t size, uint64_t expect, int csv) { uint64_t min = size, start = expect, thash = expect ^ (expect >> 17); long long original_start = _ustime(), original_end; - for (int i=0; i < 1000; i++) { + for (int i = 0; i < 1000; i++) { crc64_combine(thash, start, size, BENCH_RPOLY, 64); } original_end = _ustime(); @@ -67,8 +66,8 @@ static void genBenchmarkRandomData(char *data, int count) { int i = 0; while (count--) { - state = (state*1103515245+12345); - data[i++] = '0'+((state>>16)&63); + state = (state * 1103515245 + 12345); + data[i++] = '0' + ((state >> 16) & 63); } } @@ -84,29 +83,27 @@ int test_crc64combine(int argc, char **argv, int flags) { int i, lastarg, csv = 0, loop = 0, combine = 0; again: for (i = 3; i < argc; i++) { - lastarg = (i == (argc-1)); - if (!strcmp(argv[i],"--help")) { + lastarg = (i == (argc - 1)); + if (!strcmp(argv[i], "--help")) { goto usage; - } else if (!strcmp(argv[i],"--csv")) { + } else if (!strcmp(argv[i], "--csv")) { csv = 1; - } else if (!strcmp(argv[i],"-l")) { + } else if (!strcmp(argv[i], "-l")) { loop = 1; - } else if (!strcmp(argv[i],"--crc")) { + } else if (!strcmp(argv[i], "--crc")) { if (lastarg) goto invalid; crc64_test_size = atoll(argv[++i]); - } else if (!strcmp(argv[i],"--combine")) { + } else if (!strcmp(argv[i], "--combine")) { combine = 1; } else { -invalid: - printf("Invalid option \"%s\" or option argument missing\n\n",argv[i]); -usage: - printf( -"Usage: --single test_crc64combine.c [OPTIONS]\n\n" -" --csv Output in CSV format\n" -" -l Loop. 
Run the tests forever\n" -" --crc Benchmark crc64 faster options, using a buffer this big, and quit when done.\n" -" --combine Benchmark crc64 combine value ranges and timings.\n" - ); + invalid: + printf("Invalid option \"%s\" or option argument missing\n\n", argv[i]); + usage: + printf("Usage: --single test_crc64combine.c [OPTIONS]\n\n" + " --csv Output in CSV format\n" + " -l Loop. Run the tests forever\n" + " --crc Benchmark crc64 faster options, using a buffer this big, and quit when done.\n" + " --combine Benchmark crc64 combine value ranges and timings.\n"); return 1; } } @@ -115,11 +112,11 @@ int test_crc64combine(int argc, char **argv, int flags) { long long init_start, init_end; do { - unsigned char* data = NULL; + unsigned char *data = NULL; uint64_t passes = 0; if (crc64_test_size) { data = zmalloc(crc64_test_size); - genBenchmarkRandomData((char*)data, crc64_test_size); + genBenchmarkRandomData((char *)data, crc64_test_size); /* We want to hash about 1 gig of data in total, looped, to get a good * idea of our performance. 
*/ @@ -130,22 +127,22 @@ int test_crc64combine(int argc, char **argv, int flags) { crc64_init(); /* warm up the cache */ - set_crc64_cutoffs(crc64_test_size+1, crc64_test_size+1); + set_crc64_cutoffs(crc64_test_size + 1, crc64_test_size + 1); uint64_t expect = crc64(0, data, crc64_test_size); if (!combine && crc64_test_size) { if (csv && init_this_loop) printf("algorithm,buffer,performance,crc64_matches\n"); /* get the single-character version for single-byte Redis behavior */ - set_crc64_cutoffs(0, crc64_test_size+1); + set_crc64_cutoffs(0, crc64_test_size + 1); if (bench_crc64(data, crc64_test_size, passes, expect, "crc_1byte", csv)) return 1; - set_crc64_cutoffs(crc64_test_size+1, crc64_test_size+1); + set_crc64_cutoffs(crc64_test_size + 1, crc64_test_size + 1); /* run with 8-byte "single" path, crcfaster */ if (bench_crc64(data, crc64_test_size, passes, expect, "crcspeed", csv)) return 1; /* run with dual 8-byte paths */ - set_crc64_cutoffs(1, crc64_test_size+1); + set_crc64_cutoffs(1, crc64_test_size + 1); if (bench_crc64(data, crc64_test_size, passes, expect, "crcdual", csv)) return 1; /* run with tri 8-byte paths */ @@ -161,11 +158,7 @@ int test_crc64combine(int argc, char **argv, int flags) { if (combine) { if (init_this_loop) { init_start = _ustime(); - crc64_combine( - UINT64_C(0xdeadbeefdeadbeef), - UINT64_C(0xfeebdaedfeebdaed), - INIT_SIZE, - BENCH_RPOLY, 64); + crc64_combine(UINT64_C(0xdeadbeefdeadbeef), UINT64_C(0xfeebdaedfeebdaed), INIT_SIZE, BENCH_RPOLY, 64); init_end = _ustime(); init_end -= init_start; diff --git a/src/unit/test_endianconv.c b/src/unit/test_endianconv.c index c7b683f5de..7470fcf06d 100644 --- a/src/unit/test_endianconv.c +++ b/src/unit/test_endianconv.c @@ -10,15 +10,15 @@ int test_endianconv(int argc, char *argv[], int flags) { char buf[32]; - snprintf(buf,sizeof(buf),"ciaoroma"); + snprintf(buf, sizeof(buf), "ciaoroma"); memrev16(buf); TEST_ASSERT(!strcmp(buf, "icaoroma")); - snprintf(buf,sizeof(buf),"ciaoroma"); + snprintf(buf, 
sizeof(buf), "ciaoroma"); memrev32(buf); TEST_ASSERT(!strcmp(buf, "oaicroma")); - snprintf(buf,sizeof(buf),"ciaoroma"); + snprintf(buf, sizeof(buf), "ciaoroma"); memrev64(buf); TEST_ASSERT(!strcmp(buf, "amoroaic")); diff --git a/src/unit/test_files.h b/src/unit/test_files.h index 7da3d26473..4a67f67052 100644 --- a/src/unit/test_files.h +++ b/src/unit/test_files.h @@ -1,4 +1,5 @@ /* Do not modify this file, it's automatically generated from utils/generate-unit-test-header.py */ +/* clang-format off */ typedef int unitTestProc(int argc, char **argv, int flags); typedef struct unitTest { diff --git a/src/unit/test_help.h b/src/unit/test_help.h index ec09295a94..9be2f2b40e 100644 --- a/src/unit/test_help.h +++ b/src/unit/test_help.h @@ -1,5 +1,5 @@ /* A very simple test framework for valkey. See unit/README.me for more information on usage. - * + * * Example: * * int test_example(int argc, char *argv[], int flags) { @@ -16,30 +16,30 @@ #include /* The flags are the following: -* --accurate: Runs tests with more iterations. -* --large-memory: Enables tests that consume more than 100mb. -* --single: A flag to indicate a specific test file was executed. */ -#define UNIT_TEST_ACCURATE (1<<0) -#define UNIT_TEST_LARGE_MEMORY (1<<1) -#define UNIT_TEST_SINGLE (1<<2) - -#define KRED "\33[31m" -#define KGRN "\33[32m" -#define KBLUE "\33[34m" + * --accurate: Runs tests with more iterations. + * --large-memory: Enables tests that consume more than 100mb. + * --single: A flag to indicate a specific test file was executed. 
*/ +#define UNIT_TEST_ACCURATE (1 << 0) +#define UNIT_TEST_LARGE_MEMORY (1 << 1) +#define UNIT_TEST_SINGLE (1 << 2) + +#define KRED "\33[31m" +#define KGRN "\33[32m" +#define KBLUE "\33[34m" #define KRESET "\33[0m" -#define TEST_PRINT_ERROR(descr) \ - printf("[" KRED "%s - %s:%d" KRESET "] %s\n", __func__, __FILE__, __LINE__, descr) +#define TEST_PRINT_ERROR(descr) printf("[" KRED "%s - %s:%d" KRESET "] %s\n", __func__, __FILE__, __LINE__, descr) -#define TEST_PRINT_INFO(descr, ...) \ - printf("[" KBLUE "%s - %s:%d" KRESET "] " descr "\n", __func__, __FILE__, __LINE__, __VA_ARGS__) +#define TEST_PRINT_INFO(descr, ...) \ + printf("[" KBLUE "%s - %s:%d" KRESET "] " descr "\n", __func__, __FILE__, __LINE__, __VA_ARGS__) -#define TEST_ASSERT_MESSAGE(descr, _c) do { \ - if (!(_c)) { \ - TEST_PRINT_ERROR(descr); \ - return 1; \ - } \ -} while(0) +#define TEST_ASSERT_MESSAGE(descr, _c) \ + do { \ + if (!(_c)) { \ + TEST_PRINT_ERROR(descr); \ + return 1; \ + } \ + } while (0) #define TEST_ASSERT(_c) TEST_ASSERT_MESSAGE("Failed assertion: " #_c, _c) diff --git a/src/unit/test_intset.c b/src/unit/test_intset.c index 8aa6a63928..f47b162184 100644 --- a/src/unit/test_intset.c +++ b/src/unit/test_intset.c @@ -14,39 +14,39 @@ static long long usec(void) { struct timeval tv; - gettimeofday(&tv,NULL); - return (((long long)tv.tv_sec)*1000000)+tv.tv_usec; + gettimeofday(&tv, NULL); + return (((long long)tv.tv_sec) * 1000000) + tv.tv_usec; } static intset *createSet(int bits, int size) { - uint64_t mask = (1< 32) { - value = (rand()*rand()) & mask; + value = (rand() * rand()) & mask; } else { value = rand() & mask; } - is = intsetAdd(is,value,NULL); + is = intsetAdd(is, value, NULL); } return is; } static int checkConsistency(intset *is) { - for (uint32_t i = 0; i < (intrev32ifbe(is->length)-1); i++) { + for (uint32_t i = 0; i < (intrev32ifbe(is->length) - 1); i++) { uint32_t encoding = intrev32ifbe(is->encoding); if (encoding == INTSET_ENC_INT16) { - int16_t *i16 = 
(int16_t*)is->contents; - TEST_ASSERT(i16[i] < i16[i+1]); + int16_t *i16 = (int16_t *)is->contents; + TEST_ASSERT(i16[i] < i16[i + 1]); } else if (encoding == INTSET_ENC_INT32) { - int32_t *i32 = (int32_t*)is->contents; - TEST_ASSERT(i32[i] < i32[i+1]); + int32_t *i32 = (int32_t *)is->contents; + TEST_ASSERT(i32[i] < i32[i + 1]); } else { - int64_t *i64 = (int64_t*)is->contents; - TEST_ASSERT(i64[i] < i64[i+1]); + int64_t *i64 = (int64_t *)is->contents; + TEST_ASSERT(i64[i] < i64[i + 1]); } } return 1; @@ -65,10 +65,8 @@ int test_intsetValueEncodings(int argc, char **argv, int flags) { TEST_ASSERT(_intsetValueEncoding(+2147483647) == INTSET_ENC_INT32); TEST_ASSERT(_intsetValueEncoding(-2147483649) == INTSET_ENC_INT64); TEST_ASSERT(_intsetValueEncoding(+2147483648) == INTSET_ENC_INT64); - TEST_ASSERT(_intsetValueEncoding(-9223372036854775808ull) == - INTSET_ENC_INT64); - TEST_ASSERT(_intsetValueEncoding(+9223372036854775807ull) == - INTSET_ENC_INT64); + TEST_ASSERT(_intsetValueEncoding(-9223372036854775808ull) == INTSET_ENC_INT64); + TEST_ASSERT(_intsetValueEncoding(+9223372036854775807ull) == INTSET_ENC_INT64); return 0; } @@ -80,10 +78,14 @@ int test_intsetBasicAdding(int argc, char **argv, int flags) { intset *is = intsetNew(); uint8_t success; - is = intsetAdd(is,5,&success); TEST_ASSERT(success); - is = intsetAdd(is,6,&success); TEST_ASSERT(success); - is = intsetAdd(is,4,&success); TEST_ASSERT(success); - is = intsetAdd(is,4,&success); TEST_ASSERT(!success); + is = intsetAdd(is, 5, &success); + TEST_ASSERT(success); + is = intsetAdd(is, 6, &success); + TEST_ASSERT(success); + is = intsetAdd(is, 4, &success); + TEST_ASSERT(success); + is = intsetAdd(is, 4, &success); + TEST_ASSERT(!success); TEST_ASSERT(6 == intsetMax(is)); TEST_ASSERT(4 == intsetMin(is)); zfree(is); @@ -100,7 +102,7 @@ int test_intsetLargeNumberRandomAdd(int argc, char **argv, int flags) { uint8_t success; intset *is = intsetNew(); for (int i = 0; i < 1024; i++) { - is = 
intsetAdd(is,rand()%0x800,&success); + is = intsetAdd(is, rand() % 0x800, &success); if (success) inserts++; } TEST_ASSERT(intrev32ifbe(is->length) == inserts); @@ -115,22 +117,22 @@ int test_intsetUpgradeFromint16Toint32(int argc, char **argv, int flags) { UNUSED(flags); intset *is = intsetNew(); - is = intsetAdd(is,32,NULL); + is = intsetAdd(is, 32, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); - is = intsetAdd(is,65535,NULL); + is = intsetAdd(is, 65535, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); - TEST_ASSERT(intsetFind(is,32)); - TEST_ASSERT(intsetFind(is,65535)); + TEST_ASSERT(intsetFind(is, 32)); + TEST_ASSERT(intsetFind(is, 65535)); TEST_ASSERT(checkConsistency(is) == 1); zfree(is); is = intsetNew(); - is = intsetAdd(is,32,NULL); + is = intsetAdd(is, 32, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); - is = intsetAdd(is,-65535,NULL); + is = intsetAdd(is, -65535, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); - TEST_ASSERT(intsetFind(is,32)); - TEST_ASSERT(intsetFind(is,-65535)); + TEST_ASSERT(intsetFind(is, 32)); + TEST_ASSERT(intsetFind(is, -65535)); TEST_ASSERT(checkConsistency(is) == 1); zfree(is); @@ -143,22 +145,22 @@ int test_intsetUpgradeFromint16Toint64(int argc, char **argv, int flags) { UNUSED(flags); intset *is = intsetNew(); - is = intsetAdd(is,32,NULL); + is = intsetAdd(is, 32, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); - is = intsetAdd(is,4294967295,NULL); + is = intsetAdd(is, 4294967295, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); - TEST_ASSERT(intsetFind(is,32)); - TEST_ASSERT(intsetFind(is,4294967295)); + TEST_ASSERT(intsetFind(is, 32)); + TEST_ASSERT(intsetFind(is, 4294967295)); TEST_ASSERT(checkConsistency(is) == 1); zfree(is); is = intsetNew(); - is = intsetAdd(is,32,NULL); + is = intsetAdd(is, 32, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); - is = intsetAdd(is,-4294967295,NULL); 
+ is = intsetAdd(is, -4294967295, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); - TEST_ASSERT(intsetFind(is,32)); - TEST_ASSERT(intsetFind(is,-4294967295)); + TEST_ASSERT(intsetFind(is, 32)); + TEST_ASSERT(intsetFind(is, -4294967295)); TEST_ASSERT(checkConsistency(is) == 1); zfree(is); @@ -171,22 +173,22 @@ int test_intsetUpgradeFromint32Toint64(int argc, char **argv, int flags) { UNUSED(flags); intset *is = intsetNew(); - is = intsetAdd(is,65535,NULL); + is = intsetAdd(is, 65535, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); - is = intsetAdd(is,4294967295,NULL); + is = intsetAdd(is, 4294967295, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); - TEST_ASSERT(intsetFind(is,65535)); - TEST_ASSERT(intsetFind(is,4294967295)); + TEST_ASSERT(intsetFind(is, 65535)); + TEST_ASSERT(intsetFind(is, 4294967295)); TEST_ASSERT(checkConsistency(is) == 1); zfree(is); is = intsetNew(); - is = intsetAdd(is,65535,NULL); + is = intsetAdd(is, 65535, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); - is = intsetAdd(is,-4294967295,NULL); + is = intsetAdd(is, -4294967295, NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); - TEST_ASSERT(intsetFind(is,65535)); - TEST_ASSERT(intsetFind(is,-4294967295)); + TEST_ASSERT(intsetFind(is, 65535)); + TEST_ASSERT(intsetFind(is, -4294967295)); TEST_ASSERT(checkConsistency(is) == 1); zfree(is); @@ -201,13 +203,12 @@ int test_intsetStressLookups(int argc, char **argv, int flags) { long num = 100000, size = 10000; int i, bits = 20; long long start; - intset *is = createSet(bits,size); + intset *is = createSet(bits, size); TEST_ASSERT(checkConsistency(is) == 1); start = usec(); - for (i = 0; i < num; i++) intsetSearch(is,rand() % ((1<filename); - printf("%d tests, %d passed, %d failed\n", test_num, - test_num - failed_tests, failed_tests); + printf("%d tests, %d passed, %d failed\n", test_num, test_num - failed_tests, failed_tests); return !failed_tests; } @@ 
-43,15 +42,17 @@ int main(int argc, char **argv) { char *file = NULL; for (int j = 1; j < argc; j++) { char *arg = argv[j]; - if (!strcasecmp(arg, "--accurate")) flags |= UNIT_TEST_ACCURATE; - else if (!strcasecmp(arg, "--large-memory")) flags |= UNIT_TEST_LARGE_MEMORY; + if (!strcasecmp(arg, "--accurate")) + flags |= UNIT_TEST_ACCURATE; + else if (!strcasecmp(arg, "--large-memory")) + flags |= UNIT_TEST_LARGE_MEMORY; else if (!strcasecmp(arg, "--single") && (j + 1 < argc)) { flags |= UNIT_TEST_SINGLE; file = argv[j + 1]; } } - int numtests = sizeof(unitTestSuite)/sizeof(struct unitTestSuite); + int numtests = sizeof(unitTestSuite) / sizeof(struct unitTestSuite); int failed_num = 0, suites_executed = 0; for (int j = 0; j < numtests; j++) { if (file && strcasecmp(file, unitTestSuite[j].filename)) continue; @@ -60,8 +61,8 @@ int main(int argc, char **argv) { } suites_executed++; } - printf("%d test suites executed, %d passed, %d failed\n", suites_executed, - suites_executed-failed_num, failed_num); + printf("%d test suites executed, %d passed, %d failed\n", suites_executed, suites_executed - failed_num, + failed_num); return failed_num == 0 ? 
0 : 1; } diff --git a/src/unit/test_sds.c b/src/unit/test_sds.c index 9826750391..adf3d37f2c 100644 --- a/src/unit/test_sds.c +++ b/src/unit/test_sds.c @@ -10,9 +10,12 @@ static sds sdsTestTemplateCallback(sds varname, void *arg) { static const char *_var1 = "variable1"; static const char *_var2 = "variable2"; - if (!strcmp(varname, _var1)) return sdsnew("value1"); - else if (!strcmp(varname, _var2)) return sdsnew("value2"); - else return NULL; + if (!strcmp(varname, _var1)) + return sdsnew("value1"); + else if (!strcmp(varname, _var2)) + return sdsnew("value2"); + else + return NULL; } int test_sds(int argc, char **argv, int flags) { @@ -22,36 +25,32 @@ int test_sds(int argc, char **argv, int flags) { sds x = sdsnew("foo"), y; - TEST_ASSERT_MESSAGE("Create a string and obtain the length", - sdslen(x) == 3 && memcmp(x, "foo\0", 4) == 0); + TEST_ASSERT_MESSAGE("Create a string and obtain the length", sdslen(x) == 3 && memcmp(x, "foo\0", 4) == 0); sdsfree(x); x = sdsnewlen("foo", 2); - TEST_ASSERT_MESSAGE("Create a string with specified length", - sdslen(x) == 2 && memcmp(x, "fo\0", 3) == 0); + TEST_ASSERT_MESSAGE("Create a string with specified length", sdslen(x) == 2 && memcmp(x, "fo\0", 3) == 0); x = sdscat(x, "bar"); - TEST_ASSERT_MESSAGE("Strings concatenation", - sdslen(x) == 5 && memcmp(x, "fobar\0", 6) == 0); + TEST_ASSERT_MESSAGE("Strings concatenation", sdslen(x) == 5 && memcmp(x, "fobar\0", 6) == 0); x = sdscpy(x, "a"); - TEST_ASSERT_MESSAGE("sdscpy() against an originally longer string", - sdslen(x) == 1 && memcmp(x, "a\0", 2) == 0); + TEST_ASSERT_MESSAGE("sdscpy() against an originally longer string", sdslen(x) == 1 && memcmp(x, "a\0", 2) == 0); x = sdscpy(x, "xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk"); TEST_ASSERT_MESSAGE("sdscpy() against an originally shorter string", - sdslen(x) == 33 && - memcmp(x, "xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk\0", 33) == 0); + sdslen(x) == 33 && memcmp(x, "xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk\0", 33) == 0); sdsfree(x); x = 
sdscatprintf(sdsempty(), "%d", 123); - TEST_ASSERT_MESSAGE("sdscatprintf() seems working in the base case", - sdslen(x) == 3 && memcmp(x, "123\0", 4) == 0); + TEST_ASSERT_MESSAGE("sdscatprintf() seems working in the base case", sdslen(x) == 3 && memcmp(x, "123\0", 4) == 0); sdsfree(x); x = sdscatprintf(sdsempty(), "a%cb", 0); - TEST_ASSERT_MESSAGE("sdscatprintf() seems working with \\0 inside of result", - sdslen(x) == 3 && memcmp(x, "a\0""b\0", 4) == 0); + TEST_ASSERT_MESSAGE("sdscatprintf() seems working with \\0 inside of result", sdslen(x) == 3 && memcmp(x, + "a\0" + "b\0", + 4) == 0); sdsfree(x); char etalon[1024 * 1024]; @@ -60,112 +59,100 @@ int test_sds(int argc, char **argv, int flags) { } x = sdscatprintf(sdsempty(), "%0*d", (int)sizeof(etalon), 0); TEST_ASSERT_MESSAGE("sdscatprintf() can print 1MB", - sdslen(x) == sizeof(etalon) && memcmp(x, etalon, sizeof(etalon)) == 0); + sdslen(x) == sizeof(etalon) && memcmp(x, etalon, sizeof(etalon)) == 0); sdsfree(x); x = sdsnew("--"); - x = sdscatfmt(x, "Hello %s World %I,%I--", "Hi!", LLONG_MIN,LLONG_MAX); + x = sdscatfmt(x, "Hello %s World %I,%I--", "Hi!", LLONG_MIN, LLONG_MAX); TEST_ASSERT_MESSAGE("sdscatfmt() seems working in the base case", - sdslen(x) == 60 && - memcmp(x,"--Hello Hi! World -9223372036854775808," - "9223372036854775807--",60) == 0); + sdslen(x) == 60 && memcmp(x, + "--Hello Hi! 
World -9223372036854775808," + "9223372036854775807--", + 60) == 0); sdsfree(x); x = sdsnew("--"); x = sdscatfmt(x, "%u,%U--", UINT_MAX, ULLONG_MAX); TEST_ASSERT_MESSAGE("sdscatfmt() seems working with unsigned numbers", - sdslen(x) == 35 && - memcmp(x, "--4294967295,18446744073709551615--", 35) == 0); + sdslen(x) == 35 && memcmp(x, "--4294967295,18446744073709551615--", 35) == 0); sdsfree(x); x = sdsnew(" x "); sdstrim(x, " x"); - TEST_ASSERT_MESSAGE("sdstrim() works when all chars match", - sdslen(x) == 0); + TEST_ASSERT_MESSAGE("sdstrim() works when all chars match", sdslen(x) == 0); sdsfree(x); x = sdsnew(" x "); - sdstrim(x," "); - TEST_ASSERT_MESSAGE("sdstrim() works when a single char remains", - sdslen(x) == 1 && x[0] == 'x'); + sdstrim(x, " "); + TEST_ASSERT_MESSAGE("sdstrim() works when a single char remains", sdslen(x) == 1 && x[0] == 'x'); sdsfree(x); x = sdsnew("xxciaoyyy"); sdstrim(x, "xy"); - TEST_ASSERT_MESSAGE("sdstrim() correctly trims characters", - sdslen(x) == 4 && memcmp(x, "ciao\0", 5) == 0); + TEST_ASSERT_MESSAGE("sdstrim() correctly trims characters", sdslen(x) == 4 && memcmp(x, "ciao\0", 5) == 0); y = sdsdup(x); sdsrange(y, 1, 1); - TEST_ASSERT_MESSAGE("sdsrange(...,1,1)", - sdslen(y) == 1 && memcmp(y, "i\0", 2) == 0); + TEST_ASSERT_MESSAGE("sdsrange(...,1,1)", sdslen(y) == 1 && memcmp(y, "i\0", 2) == 0); sdsfree(y); y = sdsdup(x); sdsrange(y, 1, -1); - TEST_ASSERT_MESSAGE("sdsrange(...,1,-1)", - sdslen(y) == 3 && memcmp(y, "iao\0", 4) == 0); + TEST_ASSERT_MESSAGE("sdsrange(...,1,-1)", sdslen(y) == 3 && memcmp(y, "iao\0", 4) == 0); sdsfree(y); y = sdsdup(x); sdsrange(y, -2, -1); - TEST_ASSERT_MESSAGE("sdsrange(...,-2,-1)", - sdslen(y) == 2 && memcmp(y, "ao\0", 3) == 0); + TEST_ASSERT_MESSAGE("sdsrange(...,-2,-1)", sdslen(y) == 2 && memcmp(y, "ao\0", 3) == 0); sdsfree(y); y = sdsdup(x); sdsrange(y, 2, 1); - TEST_ASSERT_MESSAGE("sdsrange(...,2,1)", - sdslen(y) == 0 && memcmp(y, "\0", 1) == 0); + TEST_ASSERT_MESSAGE("sdsrange(...,2,1)", 
sdslen(y) == 0 && memcmp(y, "\0", 1) == 0); sdsfree(y); y = sdsdup(x); sdsrange(y, 1, 100); - TEST_ASSERT_MESSAGE("sdsrange(...,1,100)", - sdslen(y) == 3 && memcmp(y, "iao\0", 4) == 0); + TEST_ASSERT_MESSAGE("sdsrange(...,1,100)", sdslen(y) == 3 && memcmp(y, "iao\0", 4) == 0); sdsfree(y); y = sdsdup(x); sdsrange(y, 100, 100); - TEST_ASSERT_MESSAGE("sdsrange(...,100,100)", - sdslen(y) == 0 && memcmp(y, "\0", 1) == 0); + TEST_ASSERT_MESSAGE("sdsrange(...,100,100)", sdslen(y) == 0 && memcmp(y, "\0", 1) == 0); sdsfree(y); y = sdsdup(x); - sdsrange(y,4,6); - TEST_ASSERT_MESSAGE("sdsrange(...,4,6)", - sdslen(y) == 0 && memcmp(y, "\0", 1) == 0); + sdsrange(y, 4, 6); + TEST_ASSERT_MESSAGE("sdsrange(...,4,6)", sdslen(y) == 0 && memcmp(y, "\0", 1) == 0); sdsfree(y); y = sdsdup(x); sdsrange(y, 3, 6); - TEST_ASSERT_MESSAGE("sdsrange(...,3,6)", - sdslen(y) == 1 && memcmp(y, "o\0", 2) == 0); + TEST_ASSERT_MESSAGE("sdsrange(...,3,6)", sdslen(y) == 1 && memcmp(y, "o\0", 2) == 0); sdsfree(y); sdsfree(x); x = sdsnew("foo"); y = sdsnew("foa"); - TEST_ASSERT_MESSAGE("sdscmp(foo,foa)", sdscmp(x,y) > 0); + TEST_ASSERT_MESSAGE("sdscmp(foo,foa)", sdscmp(x, y) > 0); sdsfree(y); sdsfree(x); x = sdsnew("bar"); y = sdsnew("bar"); - TEST_ASSERT_MESSAGE("sdscmp(bar,bar)", sdscmp(x,y) == 0); + TEST_ASSERT_MESSAGE("sdscmp(bar,bar)", sdscmp(x, y) == 0); sdsfree(y); sdsfree(x); x = sdsnew("aar"); y = sdsnew("bar"); - TEST_ASSERT_MESSAGE("sdscmp(bar,bar)", sdscmp(x,y) < 0); + TEST_ASSERT_MESSAGE("sdscmp(bar,bar)", sdscmp(x, y) < 0); sdsfree(y); sdsfree(x); x = sdsnewlen("\a\n\0foo\r", 7); y = sdscatrepr(sdsempty(), x, sdslen(x)); - TEST_ASSERT_MESSAGE("sdscatrepr(...data...)", - memcmp(y, "\"\\a\\n\\x00foo\\r\"", 15) == 0); + TEST_ASSERT_MESSAGE("sdscatrepr(...data...)", memcmp(y, "\"\\a\\n\\x00foo\\r\"", 15) == 0); unsigned int oldfree; char *p; @@ -175,8 +162,7 @@ int test_sds(int argc, char **argv, int flags) { sdsfree(x); sdsfree(y); x = sdsnew("0"); - TEST_ASSERT_MESSAGE("sdsnew() free/len 
buffers", - sdslen(x) == 1 && sdsavail(x) == 0); + TEST_ASSERT_MESSAGE("sdsnew() free/len buffers", sdslen(x) == 1 && sdsavail(x) == 0); /* Run the test a few times in order to hit the first two * SDS header types. */ @@ -191,22 +177,22 @@ int test_sds(int argc, char **argv, int flags) { oldfree = sdsavail(x); UNUSED(oldfree); } - p = x+oldlen; + p = x + oldlen; for (j = 0; j < step; j++) { p[j] = 'A' + j; } sdsIncrLen(x, step); } - TEST_ASSERT_MESSAGE("sdsMakeRoomFor() content", - memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ", x, 101) == 0); + TEST_ASSERT_MESSAGE("sdsMakeRoomFor() content", memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGH" + "IJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ", + x, 101) == 0); TEST_ASSERT_MESSAGE("sdsMakeRoomFor() final length", sdslen(x) == 101); sdsfree(x); /* Simple template */ x = sdstemplate("v1={variable1} v2={variable2}", sdsTestTemplateCallback, NULL); - TEST_ASSERT_MESSAGE("sdstemplate() normal flow", - memcmp(x, "v1=value1 v2=value2", 19) == 0); + TEST_ASSERT_MESSAGE("sdstemplate() normal flow", memcmp(x, "v1=value1 v2=value2", 19) == 0); sdsfree(x); /* Template with callback error */ @@ -223,8 +209,7 @@ int test_sds(int argc, char **argv, int flags) { /* Template with quoting */ x = sdstemplate("v1={{{variable1}} {{} v2={variable2}", sdsTestTemplateCallback, NULL); - TEST_ASSERT_MESSAGE("sdstemplate() with quoting", - memcmp(x, "v1={value1} {} v2=value2", 24) == 0); + TEST_ASSERT_MESSAGE("sdstemplate() with quoting", memcmp(x, "v1={value1} {} v2=value2", 24) == 0); sdsfree(x); /* Test sdsResize - extend */ diff --git a/src/unit/test_sha1.c b/src/unit/test_sha1.c index 68f18825a0..2f1e396d9e 100644 --- a/src/unit/test_sha1.c +++ b/src/unit/test_sha1.c @@ -6,20 +6,18 @@ int test_sha1(int argc, char **argv, int flags) { SHA1_CTX ctx; unsigned char hash[20], buf[BUFSIZE]; - unsigned char expected[20] = {0x15, 0xdd, 0x99, 0xa1, 0x99, 0x1e, 0x0b, 
0x38, - 0x26, 0xfe, 0xde, 0x3d, 0xef, 0xfc, 0x1f, 0xeb, 0xa4, 0x22, 0x78, 0xe6}; + unsigned char expected[20] = {0x15, 0xdd, 0x99, 0xa1, 0x99, 0x1e, 0x0b, 0x38, 0x26, 0xfe, + 0xde, 0x3d, 0xef, 0xfc, 0x1f, 0xeb, 0xa4, 0x22, 0x78, 0xe6}; int i; UNUSED(argc); UNUSED(argv); UNUSED(flags); - for(i=0;i= 0); /* test write file */ diff --git a/src/unit/test_ziplist.c b/src/unit/test_ziplist.c index 20c7c358db..d2f7ebe69c 100644 --- a/src/unit/test_ziplist.c +++ b/src/unit/test_ziplist.c @@ -6,10 +6,10 @@ static unsigned char *createList(void) { unsigned char *zl = ziplistNew(); - zl = ziplistPush(zl, (unsigned char *) "foo", 3, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "quux", 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "hello", 5, ZIPLIST_HEAD); - zl = ziplistPush(zl, (unsigned char *) "1024", 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"foo", 3, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"quux", 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"hello", 5, ZIPLIST_HEAD); + zl = ziplistPush(zl, (unsigned char *)"1024", 4, ZIPLIST_TAIL); return zl; } @@ -18,24 +18,24 @@ static unsigned char *createIntList(void) { char buf[32]; snprintf(buf, sizeof(buf), "100"); - zl = ziplistPush(zl, (unsigned char *) buf, strlen(buf), ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, strlen(buf), ZIPLIST_TAIL); snprintf(buf, sizeof(buf), "128000"); - zl = ziplistPush(zl, (unsigned char *) buf, strlen(buf), ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, strlen(buf), ZIPLIST_TAIL); snprintf(buf, sizeof(buf), "-100"); - zl = ziplistPush(zl, (unsigned char *) buf, strlen(buf), ZIPLIST_HEAD); + zl = ziplistPush(zl, (unsigned char *)buf, strlen(buf), ZIPLIST_HEAD); snprintf(buf, sizeof(buf), "4294967296"); - zl = ziplistPush(zl, (unsigned char *) buf, strlen(buf), ZIPLIST_HEAD); + zl = ziplistPush(zl, (unsigned char *)buf, strlen(buf), ZIPLIST_HEAD); snprintf(buf, sizeof(buf), "non integer"); - zl = 
ziplistPush(zl, (unsigned char *) buf, strlen(buf), ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, strlen(buf), ZIPLIST_TAIL); snprintf(buf, sizeof(buf), "much much longer non integer"); - zl = ziplistPush(zl, (unsigned char *) buf, strlen(buf), ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, strlen(buf), ZIPLIST_TAIL); return zl; } static long long usec(void) { struct timeval tv; gettimeofday(&tv, NULL); - return (((long long) tv.tv_sec) * 1000000) + tv.tv_usec; + return (((long long)tv.tv_sec) * 1000000) + tv.tv_usec; } static void stress(int pos, int num, int maxsize, int dnum) { @@ -44,12 +44,12 @@ static void stress(int pos, int num, int maxsize, int dnum) { for (i = 0; i < maxsize; i += dnum) { zl = ziplistNew(); for (j = 0; j < i; j++) { - zl = ziplistPush(zl, (unsigned char *) "quux", 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"quux", 4, ZIPLIST_TAIL); } /* Do num times a push+pop from pos */ for (k = 0; k < num; k++) { - zl = ziplistPush(zl, (unsigned char *) "quux", 4, pos); + zl = ziplistPush(zl, (unsigned char *)"quux", 4, pos); zl = ziplistDeleteRange(zl, 0, 1); } zfree(zl); @@ -74,24 +74,22 @@ static int randstring(char *target, unsigned int min, unsigned int max) { int len = min + rand() % (max - min + 1); int minval, maxval; switch (rand() % 3) { - case 0: - minval = 0; - maxval = 255; - break; - case 1: - minval = 48; - maxval = 122; - break; - case 2: - minval = 48; - maxval = 52; - break; - default: - assert(NULL); + case 0: + minval = 0; + maxval = 255; + break; + case 1: + minval = 48; + maxval = 122; + break; + case 2: + minval = 48; + maxval = 52; + break; + default: assert(NULL); } - while (p < len) - target[p++] = minval + rand() % (maxval - minval + 1); + while (p < len) target[p++] = minval + rand() % (maxval - minval + 1); return len; } @@ -144,8 +142,7 @@ int iteration; int test_ziplistCreateIntList(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if 
(argc >= 4) srand(atoi(argv[3])); zl = createIntList(); /* "4294967296", "-100", "100", "128000", "non integer", "much much longer non integer" */ @@ -174,8 +171,7 @@ int test_ziplistCreateIntList(int argc, char **argv, int flags) { int test_ziplistPop(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ @@ -211,8 +207,7 @@ int test_ziplistPop(int argc, char **argv, int flags) { int test_ziplistGetElementAtIndex3(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ p = ziplistIndex(zl, 3); TEST_ASSERT(p != NULL); @@ -223,8 +218,7 @@ int test_ziplistGetElementAtIndex3(int argc, char **argv, int flags) { int test_ziplistGetElementOutOfRange(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); p = ziplistIndex(zl, 4); TEST_ASSERT(p == NULL); @@ -234,8 +228,7 @@ int test_ziplistGetElementOutOfRange(int argc, char **argv, int flags) { int test_ziplistGetLastElement(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ p = ziplistIndex(zl, -1); TEST_ASSERT(p != NULL); @@ -246,8 +239,7 @@ int test_ziplistGetLastElement(int argc, char **argv, int flags) { int test_ziplistGetFirstElement(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ p = ziplistIndex(zl, -4); TEST_ASSERT(p != NULL); @@ -258,8 +250,7 @@ int test_ziplistGetFirstElement(int argc, char **argv, int flags) { int test_ziplistGetElementOutOfRangeReverse(int argc, char **argv, int flags) { 
UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ p = ziplistIndex(zl, -5); TEST_ASSERT(p == NULL); @@ -269,8 +260,7 @@ int test_ziplistGetElementOutOfRangeReverse(int argc, char **argv, int flags) { int test_ziplistIterateThroughFullList(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); p = ziplistIndex(zl, 0); while (ziplistGet(p, &entry, &elen, &value)) { @@ -283,8 +273,7 @@ int test_ziplistIterateThroughFullList(int argc, char **argv, int flags) { int test_ziplistIterateThroughListFrom1ToEnd(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); p = ziplistIndex(zl, 1); while (ziplistGet(p, &entry, &elen, &value)) { @@ -297,8 +286,7 @@ int test_ziplistIterateThroughListFrom1ToEnd(int argc, char **argv, int flags) { int test_ziplistIterateThroughListFrom2ToEnd(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); p = ziplistIndex(zl, 2); while (ziplistGet(p, &entry, &elen, &value)) { @@ -311,8 +299,7 @@ int test_ziplistIterateThroughListFrom2ToEnd(int argc, char **argv, int flags) { int test_ziplistIterateThroughStartOutOfRange(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); p = ziplistIndex(zl, 4); TEST_ASSERT(p == NULL); @@ -322,8 +309,7 @@ int test_ziplistIterateThroughStartOutOfRange(int argc, char **argv, int flags) int test_ziplistIterateBackToFront(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); p = ziplistIndex(zl, -1); while (ziplistGet(p, &entry, &elen, &value)) { @@ 
-336,8 +322,7 @@ int test_ziplistIterateBackToFront(int argc, char **argv, int flags) { int test_ziplistIterateBackToFrontDeletingAllItems(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); p = ziplistIndex(zl, -1); while (ziplistGet(p, &entry, &elen, &value)) { @@ -351,8 +336,7 @@ int test_ziplistIterateBackToFrontDeletingAllItems(int argc, char **argv, int fl int test_ziplistDeleteInclusiveRange0To0(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ p = ziplistIndex(zl, 0); @@ -370,8 +354,7 @@ int test_ziplistDeleteInclusiveRange0To0(int argc, char **argv, int flags) { int test_ziplistDeleteInclusiveRange0To1(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ p = ziplistIndex(zl, 0); @@ -394,8 +377,7 @@ int test_ziplistDeleteInclusiveRange0To1(int argc, char **argv, int flags) { int test_ziplistDeleteInclusiveRange1To2(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ p = ziplistIndex(zl, 1); @@ -416,8 +398,7 @@ int test_ziplistDeleteInclusiveRange1To2(int argc, char **argv, int flags) { int test_ziplistDeleteWithStartIndexOutOfRange(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); int orig_len = ziplistLen(zl); zl = ziplistDeleteRange(zl, 5, 1); @@ -429,8 +410,7 @@ int test_ziplistDeleteWithStartIndexOutOfRange(int argc, char **argv, int flags) int test_ziplistDeleteWithNumOverflow(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - 
srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ int orig_len = ziplistLen(zl); @@ -443,13 +423,12 @@ int test_ziplistDeleteWithNumOverflow(int argc, char **argv, int flags) { int test_ziplistDeleteFooWhileIterating(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ p = ziplistIndex(zl, 0); while (ziplistGet(p, &entry, &elen, &value)) { TEST_ASSERT(p != NULL); - if (entry && strncmp("foo", (char *) entry, elen) == 0) { + if (entry && strncmp("foo", (char *)entry, elen) == 0) { zl = ziplistDelete(zl, &p); } else { p = ziplistNext(zl, p); @@ -465,21 +444,22 @@ int test_ziplistDeleteFooWhileIterating(int argc, char **argv, int flags) { int test_ziplistReplaceWithSameSize(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ unsigned char *orig_zl = zl; p = ziplistIndex(zl, 0); - zl = ziplistReplace(zl, p, (unsigned char *) "zoink", 5); + zl = ziplistReplace(zl, p, (unsigned char *)"zoink", 5); p = ziplistIndex(zl, 3); - zl = ziplistReplace(zl, p, (unsigned char *) "yy", 2); + zl = ziplistReplace(zl, p, (unsigned char *)"yy", 2); p = ziplistIndex(zl, 1); - zl = ziplistReplace(zl, p, (unsigned char *) "65536", 5); + zl = ziplistReplace(zl, p, (unsigned char *)"65536", 5); p = ziplistIndex(zl, 0); - TEST_ASSERT(!memcmp((char *) p, + TEST_ASSERT(!memcmp((char *)p, "\x00\x05zoink" "\x07\xf0\x00\x00\x01" /* 65536 as int24 */ - "\x05\x04quux" "\x06\x02yy" "\xff", + "\x05\x04quux" + "\x06\x02yy" + "\xff", 23)); TEST_ASSERT(zl == orig_zl); /* no reallocations have happened */ zfree(zl); @@ -488,15 +468,17 @@ int test_ziplistReplaceWithSameSize(int argc, char **argv, int flags) { int test_ziplistReplaceWithDifferentSize(int argc, char **argv, int 
flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); /* "hello", "foo", "quux", "1024" */ p = ziplistIndex(zl, 1); - zl = ziplistReplace(zl, p, (unsigned char *) "squirrel", 8); + zl = ziplistReplace(zl, p, (unsigned char *)"squirrel", 8); p = ziplistIndex(zl, 0); - TEST_ASSERT(!strncmp((char *) p, - "\x00\x05hello" "\x07\x08squirrel" "\x0a\x04quux" - "\x06\xc0\x00\x04" "\xff", + TEST_ASSERT(!strncmp((char *)p, + "\x00\x05hello" + "\x07\x08squirrel" + "\x0a\x04quux" + "\x06\xc0\x00\x04" + "\xff", 28)); zfree(zl); return 0; @@ -504,33 +486,31 @@ int test_ziplistReplaceWithDifferentSize(int argc, char **argv, int flags) { int test_ziplistRegressionTestForOver255ByteStrings(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); char v1[257] = {0}, v2[257] = {0}; memset(v1, 'x', 256); memset(v2, 'y', 256); zl = ziplistNew(); - zl = ziplistPush(zl, (unsigned char *) v1, strlen(v1), ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) v2, strlen(v2), ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)v1, strlen(v1), ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)v2, strlen(v2), ZIPLIST_TAIL); /* Pop values again and compare their value. 
*/ p = ziplistIndex(zl, 0); TEST_ASSERT(ziplistGet(p, &entry, &elen, &value)); - TEST_ASSERT(strncmp(v1, (char *) entry, elen) == 0); + TEST_ASSERT(strncmp(v1, (char *)entry, elen) == 0); p = ziplistIndex(zl, 1); TEST_ASSERT(ziplistGet(p, &entry, &elen, &value)); - TEST_ASSERT(strncmp(v2, (char *) entry, elen) == 0); + TEST_ASSERT(strncmp(v2, (char *)entry, elen) == 0); zfree(zl); return 0; } int test_ziplistRegressionTestDeleteNextToLastEntries(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); char v[3][257] = {{0}}; - zlentry e[3] = {{.prevrawlensize = 0, .prevrawlen = 0, .lensize = 0, - .len = 0, .headersize = 0, .encoding = 0, .p = NULL}}; + zlentry e[3] = { + {.prevrawlensize = 0, .prevrawlen = 0, .lensize = 0, .len = 0, .headersize = 0, .encoding = 0, .p = NULL}}; size_t i; for (i = 0; i < (sizeof(v) / sizeof(v[0])); i++) { @@ -543,7 +523,7 @@ int test_ziplistRegressionTestDeleteNextToLastEntries(int argc, char **argv, int zl = ziplistNew(); for (i = 0; i < (sizeof(v) / sizeof(v[0])); i++) { - zl = ziplistPush(zl, (unsigned char *) v[i], strlen(v[i]), ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)v[i], strlen(v[i]), ZIPLIST_TAIL); } verify(zl, e); @@ -567,14 +547,13 @@ int test_ziplistRegressionTestDeleteNextToLastEntries(int argc, char **argv, int int test_ziplistCreateLongListAndCheckIndices(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = ziplistNew(); char buf[32]; int i, len; for (i = 0; i < 1000; i++) { len = snprintf(buf, sizeof(buf), "%d", i); - zl = ziplistPush(zl, (unsigned char *) buf, len, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, len, ZIPLIST_TAIL); } for (i = 0; i < 1000; i++) { p = ziplistIndex(zl, i); @@ -591,25 +570,23 @@ int test_ziplistCreateLongListAndCheckIndices(int argc, char **argv, int flags) int test_ziplistCompareStringWithZiplistEntries(int 
argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); zl = createList(); p = ziplistIndex(zl, 0); - TEST_ASSERT(ziplistCompare(p, (unsigned char *) "hello", 5)); - TEST_ASSERT(!ziplistCompare(p, (unsigned char *) "hella", 5)); + TEST_ASSERT(ziplistCompare(p, (unsigned char *)"hello", 5)); + TEST_ASSERT(!ziplistCompare(p, (unsigned char *)"hella", 5)); p = ziplistIndex(zl, 3); - TEST_ASSERT(ziplistCompare(p, (unsigned char *) "1024", 4)); - TEST_ASSERT(!ziplistCompare(p, (unsigned char *) "1025", 4)); + TEST_ASSERT(ziplistCompare(p, (unsigned char *)"1024", 4)); + TEST_ASSERT(!ziplistCompare(p, (unsigned char *)"1025", 4)); zfree(zl); return 0; } int test_ziplistMergeTest(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); /* create list gives us: [hello, foo, quux, 1024] */ zl = createList(); unsigned char *zl2 = createList(); @@ -630,28 +607,27 @@ int test_ziplistMergeTest(int argc, char **argv, int flags) { TEST_ASSERT(ziplistLen(zl2) == 8); p = ziplistIndex(zl2, 0); - TEST_ASSERT(ziplistCompare(p, (unsigned char *) "hello", 5)); - TEST_ASSERT(!ziplistCompare(p, (unsigned char *) "hella", 5)); + TEST_ASSERT(ziplistCompare(p, (unsigned char *)"hello", 5)); + TEST_ASSERT(!ziplistCompare(p, (unsigned char *)"hella", 5)); p = ziplistIndex(zl2, 3); - TEST_ASSERT(ziplistCompare(p, (unsigned char *) "1024", 4)); - TEST_ASSERT(!ziplistCompare(p, (unsigned char *) "1025", 4)); + TEST_ASSERT(ziplistCompare(p, (unsigned char *)"1024", 4)); + TEST_ASSERT(!ziplistCompare(p, (unsigned char *)"1025", 4)); p = ziplistIndex(zl2, 4); - TEST_ASSERT(ziplistCompare(p, (unsigned char *) "hello", 5)); - TEST_ASSERT(!ziplistCompare(p, (unsigned char *) "hella", 5)); + TEST_ASSERT(ziplistCompare(p, (unsigned char *)"hello", 5)); + TEST_ASSERT(!ziplistCompare(p, (unsigned char *)"hella", 5)); p = ziplistIndex(zl2, 7); - 
TEST_ASSERT(ziplistCompare(p, (unsigned char *) "1024", 4)); - TEST_ASSERT(!ziplistCompare(p, (unsigned char *) "1025", 4)); + TEST_ASSERT(ziplistCompare(p, (unsigned char *)"1024", 4)); + TEST_ASSERT(!ziplistCompare(p, (unsigned char *)"1025", 4)); zfree(zl); return 0; } int test_ziplistStressWithRandomPayloadsOfDifferentEncoding(int argc, char **argv, int flags) { - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); int accurate = (flags & UNIT_TEST_ACCURATE); int i, j, len, where; unsigned char *p; @@ -669,7 +645,7 @@ int test_ziplistStressWithRandomPayloadsOfDifferentEncoding(int argc, char **arg for (i = 0; i < iteration; i++) { zl = ziplistNew(); ref = listCreate(); - listSetFreeMethod(ref, (void (*)(void *)) sdsfree); + listSetFreeMethod(ref, (void (*)(void *))sdsfree); len = rand() % 256; /* Create lists */ @@ -679,22 +655,15 @@ int test_ziplistStressWithRandomPayloadsOfDifferentEncoding(int argc, char **arg buflen = randstring(buf, 1, sizeof(buf) - 1); } else { switch (rand() % 3) { - case 0: - buflen = snprintf(buf, sizeof(buf), "%lld", (0LL + rand()) >> 20); - break; - case 1: - buflen = snprintf(buf, sizeof(buf), "%lld", (0LL + rand())); - break; - case 2: - buflen = snprintf(buf, sizeof(buf), "%lld", (0LL + rand()) << 20); - break; - default: - TEST_ASSERT(NULL); + case 0: buflen = snprintf(buf, sizeof(buf), "%lld", (0LL + rand()) >> 20); break; + case 1: buflen = snprintf(buf, sizeof(buf), "%lld", (0LL + rand())); break; + case 2: buflen = snprintf(buf, sizeof(buf), "%lld", (0LL + rand()) << 20); break; + default: TEST_ASSERT(NULL); } } /* Add to ziplist */ - zl = ziplistPush(zl, (unsigned char *) buf, buflen, where); + zl = ziplistPush(zl, (unsigned char *)buf, buflen, where); /* Add to reference list */ if (where == ZIPLIST_HEAD) { @@ -725,22 +694,20 @@ int test_ziplistStressWithRandomPayloadsOfDifferentEncoding(int argc, char **arg } zfree(zl); listRelease(ref); - } return 0; } int test_ziplistCascadeUpdateEdgeCases(int 
argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); /* Inserting a entry with data length greater than ZIP_BIG_PREVLEN-4 * will leads to cascade update. */ size_t s1 = ZIP_BIG_PREVLEN - 4, s2 = ZIP_BIG_PREVLEN - 3; zl = ziplistNew(); - zlentry e[4] = {{.prevrawlensize = 0, .prevrawlen = 0, .lensize = 0, - .len = 0, .headersize = 0, .encoding = 0, .p = NULL}}; + zlentry e[4] = { + {.prevrawlensize = 0, .prevrawlen = 0, .lensize = 0, .len = 0, .headersize = 0, .encoding = 0, .p = NULL}}; zl = insertHelper(zl, 'a', s1, ZIPLIST_ENTRY_HEAD(zl)); verify(zl, e); @@ -811,9 +778,8 @@ int test_ziplistCascadeUpdateEdgeCases(int argc, char **argv, int flags) { int test_ziplistInsertEdgeCase(int argc, char **argv, int flags) { UNUSED(flags); - if (argc >= 4) - srand(atoi(argv[3])); - //From issue #7170 + if (argc >= 4) srand(atoi(argv[3])); + // From issue #7170 zl = ziplistNew(); /* We set some values to almost reach the critical point - 254 */ @@ -822,15 +788,15 @@ int test_ziplistInsertEdgeCase(int argc, char **argv, int flags) { memset(A_250, 'A', 250); /* After the rpush, the list look like: [one two A_252 A_250 three 10] */ - zl = ziplistPush(zl, (unsigned char *) "one", 3, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "two", 3, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) A_252, strlen(A_252), ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) A_250, strlen(A_250), ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "three", 5, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10", 2, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"one", 3, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"two", 3, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)A_252, strlen(A_252), ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)A_250, strlen(A_250), ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"three", 5, ZIPLIST_TAIL); + zl = 
ziplistPush(zl, (unsigned char *)"10", 2, ZIPLIST_TAIL); p = ziplistIndex(zl, 2); - TEST_ASSERT(ziplistCompare(p, (unsigned char *) A_252, strlen(A_252))); + TEST_ASSERT(ziplistCompare(p, (unsigned char *)A_252, strlen(A_252))); /* When we remove A_252, the list became: [one two A_250 three 10] * A_250's prev node became node two, because node two quite small @@ -840,23 +806,22 @@ int test_ziplistInsertEdgeCase(int argc, char **argv, int flags) { zl = ziplistDelete(zl, &p); p = ziplistIndex(zl, 3); - TEST_ASSERT(ziplistCompare(p, (unsigned char *) "three", 5)); + TEST_ASSERT(ziplistCompare(p, (unsigned char *)"three", 5)); /* We want to insert a node after A_250, the list became: [one two A_250 10 three 10] * Because the new node is quite small, node three prevlenSize will shrink to 1 */ - zl = ziplistInsert(zl, p, (unsigned char *) "10", 2); + zl = ziplistInsert(zl, p, (unsigned char *)"10", 2); /* Last element should equal 10 */ p = ziplistIndex(zl, -1); - TEST_ASSERT(ziplistCompare(p, (unsigned char *) "10", 2)); + TEST_ASSERT(ziplistCompare(p, (unsigned char *)"10", 2)); zfree(zl); return 0; } int test_ziplistStressWithVariableSize(int argc, char **argv, int flags) { - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); int accurate = (flags & UNIT_TEST_ACCURATE); unsigned long long start = usec(); @@ -872,30 +837,29 @@ int test_ziplistStressWithVariableSize(int argc, char **argv, int flags) { } int test_BenchmarkziplistFind(int argc, char **argv, int flags) { - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); int accurate = (flags & UNIT_TEST_ACCURATE); zl = ziplistNew(); iteration = accurate ? 
100000 : 100; for (int i = 0; i < iteration; i++) { char buf[4096] = "asdf"; - zl = ziplistPush(zl, (unsigned char *) buf, 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 40, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 400, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 4000, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1", 1, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10", 2, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100", 3, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1000", 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10000", 5, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100000", 6, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 40, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 400, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4000, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1", 1, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10", 2, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100", 3, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1000", 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10000", 5, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100000", 6, ZIPLIST_TAIL); } unsigned long long start = usec(); for (int i = 0; i < 2000; i++) { unsigned char *fptr = ziplistIndex(zl, ZIPLIST_HEAD); - fptr = ziplistFind(zl, fptr, (unsigned char *) "nothing", 7, 1); + fptr = ziplistFind(zl, fptr, (unsigned char *)"nothing", 7, 1); } TEST_PRINT_INFO("Benchmark ziplistFind: usec=%lld", usec() - start); @@ -904,24 +868,23 @@ int test_BenchmarkziplistFind(int argc, char **argv, int flags) { } int test_BenchmarkziplistIndex(int argc, char **argv, int flags) { - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); int accurate = (flags & UNIT_TEST_ACCURATE); 
zl = ziplistNew(); iteration = accurate ? 100000 : 100; for (int i = 0; i < iteration; i++) { char buf[4096] = "asdf"; - zl = ziplistPush(zl, (unsigned char *) buf, 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 40, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 400, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 4000, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1", 1, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10", 2, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100", 3, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1000", 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10000", 5, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100000", 6, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 40, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 400, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4000, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1", 1, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10", 2, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100", 3, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1000", 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10000", 5, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100000", 6, ZIPLIST_TAIL); } unsigned long long start = usec(); @@ -935,23 +898,22 @@ int test_BenchmarkziplistIndex(int argc, char **argv, int flags) { } int test_BenchmarkziplistValidateIntegrity(int argc, char **argv, int flags) { - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); int accurate = (flags & UNIT_TEST_ACCURATE); zl = ziplistNew(); iteration = accurate ? 
100000 : 100; for (int i = 0; i < iteration; i++) { char buf[4096] = "asdf"; - zl = ziplistPush(zl, (unsigned char *) buf, 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 40, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 400, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 4000, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1", 1, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10", 2, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100", 3, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1000", 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10000", 5, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100000", 6, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 40, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 400, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4000, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1", 1, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10", 2, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100", 3, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1000", 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10000", 5, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100000", 6, ZIPLIST_TAIL); } unsigned long long start = usec(); for (int i = 0; i < 2000; i++) { @@ -964,29 +926,28 @@ int test_BenchmarkziplistValidateIntegrity(int argc, char **argv, int flags) { } int test_BenchmarkziplistCompareWithString(int argc, char **argv, int flags) { - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); int accurate = (flags & UNIT_TEST_ACCURATE); zl = ziplistNew(); iteration = accurate ? 
100000 : 100; for (int i = 0; i < iteration; i++) { char buf[4096] = "asdf"; - zl = ziplistPush(zl, (unsigned char *) buf, 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 40, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 400, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 4000, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1", 1, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10", 2, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100", 3, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1000", 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10000", 5, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100000", 6, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 40, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 400, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4000, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1", 1, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10", 2, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100", 3, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1000", 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10000", 5, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100000", 6, ZIPLIST_TAIL); } unsigned long long start = usec(); for (int i = 0; i < 2000; i++) { unsigned char *eptr = ziplistIndex(zl, 0); while (eptr != NULL) { - ziplistCompare(eptr, (unsigned char *) "nothing", 7); + ziplistCompare(eptr, (unsigned char *)"nothing", 7); eptr = ziplistNext(zl, eptr); } } @@ -997,29 +958,28 @@ int test_BenchmarkziplistCompareWithString(int argc, char **argv, int flags) { } int test_BenchmarkziplistCompareWithNumber(int argc, char **argv, int flags) { - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); int accurate = (flags & UNIT_TEST_ACCURATE); zl = ziplistNew(); 
iteration = accurate ? 100000 : 100; for (int i = 0; i < iteration; i++) { char buf[4096] = "asdf"; - zl = ziplistPush(zl, (unsigned char *) buf, 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 40, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 400, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) buf, 4000, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1", 1, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10", 2, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100", 3, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "1000", 4, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "10000", 5, ZIPLIST_TAIL); - zl = ziplistPush(zl, (unsigned char *) "100000", 6, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 40, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 400, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)buf, 4000, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1", 1, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10", 2, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100", 3, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"1000", 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"10000", 5, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)"100000", 6, ZIPLIST_TAIL); } unsigned long long start = usec(); for (int i = 0; i < 2000; i++) { unsigned char *eptr = ziplistIndex(zl, 0); while (eptr != NULL) { - ziplistCompare(eptr, (unsigned char *) "99999", 5); + ziplistCompare(eptr, (unsigned char *)"99999", 5); eptr = ziplistNext(zl, eptr); } } @@ -1030,17 +990,16 @@ int test_BenchmarkziplistCompareWithNumber(int argc, char **argv, int flags) { } int test_ziplistStress__ziplistCascadeUpdate(int argc, char **argv, int flags) { - if (argc >= 4) - srand(atoi(argv[3])); + if (argc >= 4) srand(atoi(argv[3])); int accurate = (flags & UNIT_TEST_ACCURATE); 
char data[ZIP_BIG_PREVLEN]; zl = ziplistNew(); iteration = accurate ? 100000 : 100; for (int i = 0; i < iteration; i++) { - zl = ziplistPush(zl, (unsigned char *) data, ZIP_BIG_PREVLEN - 4, ZIPLIST_TAIL); + zl = ziplistPush(zl, (unsigned char *)data, ZIP_BIG_PREVLEN - 4, ZIPLIST_TAIL); } unsigned long long start = usec(); - zl = ziplistPush(zl, (unsigned char *) data, ZIP_BIG_PREVLEN - 3, ZIPLIST_HEAD); + zl = ziplistPush(zl, (unsigned char *)data, ZIP_BIG_PREVLEN - 3, ZIPLIST_HEAD); TEST_PRINT_INFO("Stress __ziplistCascadeUpdate: usec=%lld", usec() - start); diff --git a/src/util.c b/src/util.c index 8f5e03614c..6006b071df 100644 --- a/src/util.c +++ b/src/util.c @@ -93,12 +93,12 @@ static int stringmatchlen_impl(const char *pattern, stringLen--; break; case '[': { - int not, match; + int not_op, match; pattern++; patternLen--; - not= pattern[0] == '^'; - if (not) { + not_op = pattern[0] == '^'; + if (not_op) { pattern++; patternLen--; } @@ -141,7 +141,7 @@ static int stringmatchlen_impl(const char *pattern, pattern++; patternLen--; } - if (not) match = !match; + if (not_op) match = !match; if (!match) return 0; /* no match */ string++; stringLen--; diff --git a/src/valkey-benchmark.c b/src/valkey-benchmark.c index 2eec75a10a..cb944fac02 100644 --- a/src/valkey-benchmark.c +++ b/src/valkey-benchmark.c @@ -115,8 +115,8 @@ static struct config { int cluster_node_count; struct clusterNode **cluster_nodes; struct serverConfig *redis_config; - struct hdr_histogram* latency_histogram; - struct hdr_histogram* current_sec_latency_histogram; + struct hdr_histogram *latency_histogram; + struct hdr_histogram *current_sec_latency_histogram; _Atomic int is_fetching_slots; _Atomic int is_updating_slots; _Atomic int slots_last_update; @@ -344,7 +344,7 @@ static void freeClient(client c) { aeDeleteFileEvent(el, c->context->fd, AE_WRITABLE); aeDeleteFileEvent(el, c->context->fd, AE_READABLE); if (c->thread_id >= 0) { - int requests_finished = 
atomic_load_explicit(&config.requests_finished,memory_order_relaxed); + int requests_finished = atomic_load_explicit(&config.requests_finished, memory_order_relaxed); if (requests_finished >= config.requests) { aeStop(el); } @@ -402,7 +402,7 @@ static void setClusterKeyHashTag(client c) { assert(c->thread_id >= 0); clusterNode *node = c->cluster_node; assert(node); - int is_updating_slots = atomic_load_explicit(&config.is_updating_slots,memory_order_relaxed); + int is_updating_slots = atomic_load_explicit(&config.is_updating_slots, memory_order_relaxed); /* If updateClusterSlotsConfiguration is updating the slots array, * call updateClusterSlotsConfiguration is order to block the thread * since the mutex is locked. When the slots will be updated by the @@ -423,7 +423,7 @@ static void setClusterKeyHashTag(client c) { } static void clientDone(client c) { - int requests_finished = atomic_load_explicit(&config.requests_finished,memory_order_relaxed); + int requests_finished = atomic_load_explicit(&config.requests_finished, memory_order_relaxed); if (requests_finished >= config.requests) { freeClient(c); if (!config.num_threads && config.el) aeStop(config.el); @@ -517,23 +517,27 @@ static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) { } continue; } - int requests_finished = atomic_fetch_add_explicit(&config.requests_finished,1,memory_order_relaxed); - if (requests_finished < config.requests){ - if (config.num_threads == 0) { - hdr_record_value( - config.latency_histogram, // Histogram to record to - (long)c->latency<=CONFIG_LATENCY_HISTOGRAM_MAX_VALUE ? (long)c->latency : CONFIG_LATENCY_HISTOGRAM_MAX_VALUE); // Value to record - hdr_record_value( - config.current_sec_latency_histogram, // Histogram to record to - (long)c->latency<=CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE ? 
(long)c->latency : CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE); // Value to record - } else { - hdr_record_value_atomic( - config.latency_histogram, // Histogram to record to - (long)c->latency<=CONFIG_LATENCY_HISTOGRAM_MAX_VALUE ? (long)c->latency : CONFIG_LATENCY_HISTOGRAM_MAX_VALUE); // Value to record - hdr_record_value_atomic( - config.current_sec_latency_histogram, // Histogram to record to - (long)c->latency<=CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE ? (long)c->latency : CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE); // Value to record - } + int requests_finished = atomic_fetch_add_explicit(&config.requests_finished, 1, memory_order_relaxed); + if (requests_finished < config.requests) { + if (config.num_threads == 0) { + hdr_record_value(config.latency_histogram, // Histogram to record to + (long)c->latency <= CONFIG_LATENCY_HISTOGRAM_MAX_VALUE + ? (long)c->latency + : CONFIG_LATENCY_HISTOGRAM_MAX_VALUE); // Value to record + hdr_record_value(config.current_sec_latency_histogram, // Histogram to record to + (long)c->latency <= CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE + ? (long)c->latency + : CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE); // Value to record + } else { + hdr_record_value_atomic(config.latency_histogram, // Histogram to record to + (long)c->latency <= CONFIG_LATENCY_HISTOGRAM_MAX_VALUE + ? (long)c->latency + : CONFIG_LATENCY_HISTOGRAM_MAX_VALUE); // Value to record + hdr_record_value_atomic(config.current_sec_latency_histogram, // Histogram to record to + (long)c->latency <= CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE + ? (long)c->latency + : CONFIG_LATENCY_HISTOGRAM_INSTANT_MAX_VALUE); // Value to record + } } c->pending--; if (c->pending == 0) { @@ -556,7 +560,7 @@ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* Initialize request when nothing was written. */ if (c->written == 0) { /* Enforce upper bound to number of requests. 
*/ - int requests_issued = atomic_fetch_add_explicit(&config.requests_issued,config.pipeline,memory_order_relaxed); + int requests_issued = atomic_fetch_add_explicit(&config.requests_issued, config.pipeline, memory_order_relaxed); if (requests_issued >= config.requests) { return; } @@ -794,7 +798,7 @@ static client createClient(char *cmd, size_t len, client from, int thread_id) { /* In idle mode, clients still need to register readHandler for catching errors */ aeCreateFileEvent(el, c->context->fd, AE_READABLE, readHandler, c); - listAddNodeTail(config.clients,c); + listAddNodeTail(config.clients, c); atomic_fetch_add_explicit(&config.liveclients, 1, memory_order_relaxed); c->slots_last_update = atomic_load_explicit(&config.slots_last_update, memory_order_relaxed); @@ -1232,10 +1236,9 @@ static int fetchClusterSlotsConfiguration(client c) { redisReply *reply = NULL; is_fetching_slots = atomic_fetch_add_explicit(&config.is_fetching_slots, 1, memory_order_relaxed); - if (is_fetching_slots) return -1; //TODO: use other codes || errno ? + if (is_fetching_slots) return -1; // TODO: use other codes || errno ? 
atomic_store_explicit(&config.is_fetching_slots, 1, memory_order_relaxed); - fprintf(stderr, - "WARNING: Cluster slots configuration changed, fetching new one...\n"); + fprintf(stderr, "WARNING: Cluster slots configuration changed, fetching new one...\n"); const char *errmsg = "Failed to update cluster slots configuration"; static dictType dtype = { dictSdsHash, /* hash function */ diff --git a/src/zmalloc.c b/src/zmalloc.c index 8819f0c518..0117d8d91a 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -388,7 +388,7 @@ char *zstrdup(const char *s) { } size_t zmalloc_used_memory(void) { - size_t um = atomic_load_explicit(&used_memory,memory_order_relaxed); + size_t um = atomic_load_explicit(&used_memory, memory_order_relaxed); return um; } diff --git a/utils/generate-unit-test-header.py b/utils/generate-unit-test-header.py index 00cd852f2d..1b2619e40f 100755 --- a/utils/generate-unit-test-header.py +++ b/utils/generate-unit-test-header.py @@ -26,6 +26,7 @@ test_suites.append({'file': file, 'tests': tests}) test_suites.sort(key=lambda test_suite: test_suite['file']) output.write("""/* Do not modify this file, it's automatically generated from utils/generate-unit-test-header.py */ +/* clang-format off */ typedef int unitTestProc(int argc, char **argv, int flags); typedef struct unitTest { @@ -56,4 +57,4 @@ """) for test_suite in test_suites: output.write(' {{"{0}", __{1}}},\n'.format(test_suite['file'], test_suite['file'].replace('.c', '_c'))) - output.write('};\n') \ No newline at end of file + output.write('};\n') From 7ba7e4d05365a9f5374120838b26c99b8f129d08 Mon Sep 17 00:00:00 2001 From: Shivshankar Date: Tue, 28 May 2024 13:36:54 -0400 Subject: [PATCH 10/42] Update zfree on data in test_crc64combine before return. 
(#548) Signed-off-by: Shivshankar-Reddy Signed-off-by: Madelyn Olson Co-authored-by: Madelyn Olson --- src/unit/test_crc64combine.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/src/unit/test_crc64combine.c b/src/unit/test_crc64combine.c index 67ef4ade43..ba5c29a42d 100644 --- a/src/unit/test_crc64combine.c +++ b/src/unit/test_crc64combine.c @@ -135,19 +135,35 @@ int test_crc64combine(int argc, char **argv, int flags) { /* get the single-character version for single-byte Redis behavior */ set_crc64_cutoffs(0, crc64_test_size + 1); - if (bench_crc64(data, crc64_test_size, passes, expect, "crc_1byte", csv)) return 1; + if (bench_crc64(data, crc64_test_size, passes, expect, "crc_1byte", csv)) { + zfree(data); + data = NULL; + return 1; + } set_crc64_cutoffs(crc64_test_size + 1, crc64_test_size + 1); /* run with 8-byte "single" path, crcfaster */ - if (bench_crc64(data, crc64_test_size, passes, expect, "crcspeed", csv)) return 1; + if (bench_crc64(data, crc64_test_size, passes, expect, "crcspeed", csv)) { + zfree(data); + data = NULL; + return 1; + } /* run with dual 8-byte paths */ set_crc64_cutoffs(1, crc64_test_size + 1); - if (bench_crc64(data, crc64_test_size, passes, expect, "crcdual", csv)) return 1; + if (bench_crc64(data, crc64_test_size, passes, expect, "crcdual", csv)) { + zfree(data); + data = NULL; + return 1; + } /* run with tri 8-byte paths */ set_crc64_cutoffs(1, 1); - if (bench_crc64(data, crc64_test_size, passes, expect, "crctri", csv)) return 1; + if (bench_crc64(data, crc64_test_size, passes, expect, "crctri", csv)) { + zfree(data); + data = NULL; + return 1; + } /* Be free memory region, be free. 
*/ zfree(data); From fd58b73f0ae895bf9de3810d799da20bb75a2b4f Mon Sep 17 00:00:00 2001 From: uriyage <78144248+uriyage@users.noreply.github.com> Date: Tue, 28 May 2024 21:09:37 +0300 Subject: [PATCH 11/42] Introduce shared query buffer for client reads (#258) This PR optimizes client query buffer handling in Valkey by introducing a shared query buffer that is used by default for client reads. This reduces memory usage by ~20KB per client by avoiding allocations for most clients using short (<16KB) complete commands. For larger or partial commands, the client still gets its own private buffer. The primary changes are: * Adding a shared query buffer `shared_qb` that clients use by default * Modifying client querybuf initialization and reset logic * Copying any partial query from shared to private buffer before command execution * Freeing idle client query buffers when empty to allow reuse of shared buffer * Master client query buffers are kept private as their contents need to be preserved for replication stream In addition to the memory savings, this change shows a 3% improvement in latency and throughput when running with 1000 active clients. The memory reduction may also help reduce the need to evict clients when reaching max memory limit, as the query buffer is the main memory consumer per client. 
--------- Signed-off-by: Uri Yagelnik Signed-off-by: Madelyn Olson Co-authored-by: Madelyn Olson --- src/networking.c | 95 +++++++++++++++++++++++++++++++----- src/replication.c | 3 ++ src/server.c | 24 +++++++-- src/server.h | 1 + tests/unit/introspection.tcl | 4 +- tests/unit/querybuf.tcl | 23 ++++++++- 6 files changed, 130 insertions(+), 20 deletions(-) diff --git a/src/networking.c b/src/networking.c index 2ff9a7e366..e062bc3aba 100644 --- a/src/networking.c +++ b/src/networking.c @@ -43,6 +43,7 @@ static void pauseClientsByClient(mstime_t end, int isPauseClientAll); int postponeClientRead(client *c); char *getClientSockname(client *c); int ProcessingEventsWhileBlocked = 0; /* See processEventsWhileBlocked(). */ +__thread sds thread_shared_qb = NULL; /* Return the size consumed from the allocator, for the specified SDS string, * including internal fragmentation. This function is used in order to compute @@ -147,7 +148,7 @@ client *createClient(connection *conn) { c->ref_repl_buf_node = NULL; c->ref_block_pos = 0; c->qb_pos = 0; - c->querybuf = sdsempty(); + c->querybuf = NULL; c->querybuf_peak = 0; c->reqtype = 0; c->argc = 0; @@ -1608,7 +1609,11 @@ void freeClient(client *c) { } /* Free the query buffer */ - sdsfree(c->querybuf); + if (c->querybuf && c->querybuf == thread_shared_qb) { + sdsclear(c->querybuf); + } else { + sdsfree(c->querybuf); + } c->querybuf = NULL; /* Deallocate structures used to block on blocking ops. */ @@ -2093,6 +2098,48 @@ void resetClient(client *c) { } } +/* Initializes the shared query buffer to a new sds with the default capacity */ +void initSharedQueryBuf(void) { + thread_shared_qb = sdsnewlen(NULL, PROTO_IOBUF_LEN); + sdsclear(thread_shared_qb); +} + +/* Resets the shared query buffer used by the given client. + * If any data remained in the buffer, the client will take ownership of the buffer + * and a new empty buffer will be allocated for the shared buffer. 
*/ +void resetSharedQueryBuf(client *c) { + serverAssert(c->querybuf == thread_shared_qb); + size_t remaining = sdslen(c->querybuf) - c->qb_pos; + + if (remaining > 0) { + /* Let the client take ownership of the shared buffer. */ + initSharedQueryBuf(); + return; + } + + c->querybuf = NULL; + sdsclear(thread_shared_qb); + c->qb_pos = 0; +} + +/* Trims the client query buffer to the current position. */ +void trimClientQueryBuffer(client *c) { + if (c->querybuf == thread_shared_qb) { + resetSharedQueryBuf(c); + } + + if (c->querybuf == NULL) { + return; + } + + serverAssert(c->qb_pos <= sdslen(c->querybuf)); + + if (c->qb_pos > 0) { + sdsrange(c->querybuf, c->qb_pos, -1); + c->qb_pos = 0; + } +} + /* This function is used when we want to re-enter the event loop but there * is the risk that the client we are dealing with will be freed in some * way. This happens for instance in: @@ -2348,6 +2395,10 @@ int processMultibulkBuffer(client *c) { * ll+2, trimming querybuf is just a waste of time, because * at this time the querybuf contains not only our bulk. */ if (sdslen(c->querybuf) - c->qb_pos <= (size_t)ll + 2) { + if (c->querybuf == thread_shared_qb) { + /* Let the client take the ownership of the shared buffer. */ + initSharedQueryBuf(); + } sdsrange(c->querybuf, c->qb_pos, -1); c->qb_pos = 0; /* Hint the sds library about the amount of bytes this string is @@ -2508,7 +2559,7 @@ int processPendingCommandAndInputBuffer(client *c) { * return C_ERR in case the client was freed during the processing */ int processInputBuffer(client *c) { /* Keep processing while there is something in the input buffer */ - while (c->qb_pos < sdslen(c->querybuf)) { + while (c->querybuf && c->qb_pos < sdslen(c->querybuf)) { /* Immediately abort if the client is in the middle of something. 
*/ if (c->flags & CLIENT_BLOCKED) break; @@ -2559,6 +2610,13 @@ int processInputBuffer(client *c) { break; } + if (c->querybuf == thread_shared_qb) { + /* Before processing the command, reset the shared query buffer to its default state. + * This avoids unintentionally modifying the shared qb during processCommand as we may use + * the shared qb for other clients during processEventsWhileBlocked */ + resetSharedQueryBuf(c); + } + /* We are finally ready to execute the command. */ if (processCommandAndResetClient(c) == C_ERR) { /* If the client is no longer valid, we avoid exiting this @@ -2587,10 +2645,8 @@ int processInputBuffer(client *c) { c->qb_pos -= c->repl_applied; c->repl_applied = 0; } - } else if (c->qb_pos) { - /* Trim to pos */ - sdsrange(c->querybuf, c->qb_pos, -1); - c->qb_pos = 0; + } else { + trimClientQueryBuffer(c); } /* Update client memory usage after processing the query buffer, this is @@ -2614,14 +2670,16 @@ void readQueryFromClient(connection *conn) { atomic_fetch_add_explicit(&server.stat_total_reads_processed, 1, memory_order_relaxed); readlen = PROTO_IOBUF_LEN; + qblen = c->querybuf ? sdslen(c->querybuf) : 0; /* If this is a multi bulk request, and we are processing a bulk reply * that is large enough, try to maximize the probability that the query * buffer contains exactly the SDS string representing the object, even * at the risk of requiring more read(2) calls. This way the function * processMultiBulkBuffer() can avoid copying buffers to create the * robj representing the argument. 
*/ + if (c->reqtype == PROTO_REQ_MULTIBULK && c->multibulklen && c->bulklen != -1 && c->bulklen >= PROTO_MBULK_BIG_ARG) { - ssize_t remaining = (size_t)(c->bulklen + 2) - (sdslen(c->querybuf) - c->qb_pos); + ssize_t remaining = (size_t)(c->bulklen + 2) - (qblen - c->qb_pos); big_arg = 1; /* Note that the 'remaining' variable may be zero in some edge case, @@ -2633,7 +2691,12 @@ void readQueryFromClient(connection *conn) { if (c->flags & CLIENT_MASTER && readlen < PROTO_IOBUF_LEN) readlen = PROTO_IOBUF_LEN; } - qblen = sdslen(c->querybuf); + if (c->querybuf == NULL) { + serverAssert(sdslen(thread_shared_qb) == 0); + c->querybuf = big_arg ? sdsempty() : thread_shared_qb; + qblen = sdslen(c->querybuf); + } + if (!(c->flags & CLIENT_MASTER) && // master client's querybuf can grow greedy. (big_arg || sdsalloc(c->querybuf) < PROTO_IOBUF_LEN)) { /* When reading a BIG_ARG we won't be reading more than that one arg @@ -2654,7 +2717,7 @@ void readQueryFromClient(connection *conn) { nread = connRead(c->conn, c->querybuf + qblen, readlen); if (nread == -1) { if (connGetState(conn) == CONN_STATE_CONNECTED) { - return; + goto done; } else { serverLog(LL_VERBOSE, "Reading from client: %s", connGetLastError(c->conn)); freeClientAsync(c); @@ -2707,6 +2770,10 @@ void readQueryFromClient(connection *conn) { if (processInputBuffer(c) == C_ERR) c = NULL; done: + if (c && c->querybuf == thread_shared_qb) { + sdsclear(thread_shared_qb); + c->querybuf = NULL; + } beforeNextClient(c); } @@ -2824,8 +2891,8 @@ sds catClientInfoString(sds s, client *client) { " ssub=%i", (int) dictSize(client->pubsubshard_channels), " multi=%i", (client->flags & CLIENT_MULTI) ? client->mstate.count : -1, " watch=%i", (int) listLength(client->watched_keys), - " qbuf=%U", (unsigned long long) sdslen(client->querybuf), - " qbuf-free=%U", (unsigned long long) sdsavail(client->querybuf), + " qbuf=%U", client->querybuf ? (unsigned long long) sdslen(client->querybuf) : 0, + " qbuf-free=%U", client->querybuf ? 
(unsigned long long) sdsavail(client->querybuf) : 0, " argv-mem=%U", (unsigned long long) client->argv_len_sum, " multi-mem=%U", (unsigned long long) client->mstate.argv_len_sums, " rbs=%U", (unsigned long long) client->buf_usable_size, @@ -3780,8 +3847,9 @@ size_t getClientOutputBufferMemoryUsage(client *c) { * the client output buffer memory usage portion of the total. */ size_t getClientMemoryUsage(client *c, size_t *output_buffer_mem_usage) { size_t mem = getClientOutputBufferMemoryUsage(c); + if (output_buffer_mem_usage != NULL) *output_buffer_mem_usage = mem; - mem += sdsZmallocSize(c->querybuf); + mem += c->querybuf ? sdsZmallocSize(c->querybuf) : 0; mem += zmalloc_size(c); mem += c->buf_usable_size; /* For efficiency (less work keeping track of the argv memory), it doesn't include the used memory @@ -4168,6 +4236,7 @@ void *IOThreadMain(void *myid) { valkey_set_thread_title(thdname); serverSetCpuAffinity(server.server_cpulist); makeThreadKillable(); + initSharedQueryBuf(); while (1) { /* Wait for start */ diff --git a/src/replication.c b/src/replication.c index 0c561f1204..1d5e0fe290 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1722,6 +1722,9 @@ void replicationCreateMasterClient(connection *conn, int dbid) { * connection. */ server.master->flags |= CLIENT_MASTER; + /* Allocate a private query buffer for the master client instead of using the shared query buffer. + * This is done because the master's query buffer data needs to be preserved for my sub-replicas to use. */ + server.master->querybuf = sdsempty(); server.master->authenticated = 1; server.master->reploff = server.master_initial_offset; server.master->read_reploff = server.master->reploff; diff --git a/src/server.c b/src/server.c index e0590706d3..f87193b74c 100644 --- a/src/server.c +++ b/src/server.c @@ -714,6 +714,8 @@ long long getInstantaneousMetric(int metric) { * * The function always returns 0 as it never terminates the client. 
*/ int clientsCronResizeQueryBuffer(client *c) { + /* If the client query buffer is NULL, it is using the shared query buffer and there is nothing to do. */ + if (c->querybuf == NULL) return 0; size_t querybuf_size = sdsalloc(c->querybuf); time_t idletime = server.unixtime - c->lastinteraction; @@ -723,7 +725,18 @@ int clientsCronResizeQueryBuffer(client *c) { /* There are two conditions to resize the query buffer: */ if (idletime > 2) { /* 1) Query is idle for a long time. */ - c->querybuf = sdsRemoveFreeSpace(c->querybuf, 1); + size_t remaining = sdslen(c->querybuf) - c->qb_pos; + if (!(c->flags & CLIENT_MASTER) && !remaining) { + /* If the client is not a master and no data is pending, + * The client can safely use the shared query buffer in the next read - free the client's querybuf. */ + sdsfree(c->querybuf); + /* By setting the querybuf to NULL, the client will use the shared query buffer in the next read. + * We don't move the client to the shared query buffer immediately, because if we allocated a private + * query buffer for the client, it's likely that the client will use it again soon. */ + c->querybuf = NULL; + } else { + c->querybuf = sdsRemoveFreeSpace(c->querybuf, 1); + } } else if (querybuf_size > PROTO_RESIZE_THRESHOLD && querybuf_size / 2 > c->querybuf_peak) { /* 2) Query buffer is too big for latest peak and is larger than * resize threshold. Trim excess space but only up to a limit, @@ -739,7 +752,7 @@ int clientsCronResizeQueryBuffer(client *c) { /* Reset the peak again to capture the peak memory usage in the next * cycle. */ - c->querybuf_peak = sdslen(c->querybuf); + c->querybuf_peak = c->querybuf ? sdslen(c->querybuf) : 0; /* We reset to either the current used, or currently processed bulk size, * which ever is bigger. 
*/ if (c->bulklen != -1 && (size_t)c->bulklen + 2 > c->querybuf_peak) c->querybuf_peak = c->bulklen + 2; @@ -807,7 +820,9 @@ size_t ClientsPeakMemInput[CLIENTS_PEAK_MEM_USAGE_SLOTS] = {0}; size_t ClientsPeakMemOutput[CLIENTS_PEAK_MEM_USAGE_SLOTS] = {0}; int clientsCronTrackExpansiveClients(client *c, int time_idx) { - size_t in_usage = sdsZmallocSize(c->querybuf) + c->argv_len_sum + (c->argv ? zmalloc_size(c->argv) : 0); + size_t qb_size = c->querybuf ? sdsZmallocSize(c->querybuf) : 0; + size_t argv_size = c->argv ? zmalloc_size(c->argv) : 0; + size_t in_usage = qb_size + c->argv_len_sum + argv_size; size_t out_usage = getClientOutputBufferMemoryUsage(c); /* Track the biggest values observed so far in this slot. */ @@ -2711,6 +2726,7 @@ void initServer(void) { } slowlogInit(); latencyMonitorInit(); + initSharedQueryBuf(); /* Initialize ACL default password if it exists */ ACLUpdateDefaultUserPassword(server.requirepass); @@ -6310,7 +6326,7 @@ void dismissMemory(void *ptr, size_t size_hint) { void dismissClientMemory(client *c) { /* Dismiss client query buffer and static reply buffer. */ dismissMemory(c->buf, c->buf_usable_size); - dismissSds(c->querybuf); + if (c->querybuf) dismissSds(c->querybuf); /* Dismiss argv array only if we estimate it contains a big buffer. 
*/ if (c->argc && c->argv_len_sum / c->argc >= server.page_size) { for (int i = 0; i < c->argc; i++) { diff --git a/src/server.h b/src/server.h index bdf8b12574..249d896d35 100644 --- a/src/server.h +++ b/src/server.h @@ -2721,6 +2721,7 @@ void linkClient(client *c); void protectClient(client *c); void unprotectClient(client *c); void initThreadedIO(void); +void initSharedQueryBuf(void); client *lookupClientByID(uint64_t id); int authRequired(client *c); void putClientInPendingWriteQueue(client *c); diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 9a0f3d7b31..02aca9e97d 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -7,7 +7,7 @@ start_server {tags {"introspection"}} { test {CLIENT LIST} { r client list - } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 watch=0 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|list user=* redir=-1 resp=* lib-name=* lib-ver=* tot-net-in=* tot-net-out=* tot-cmds=*} + } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 watch=0 qbuf=0 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|list user=* redir=-1 resp=* lib-name=* lib-ver=* tot-net-in=* tot-net-out=* tot-cmds=*} test {CLIENT LIST with IDs} { set myid [r client id] @@ -17,7 +17,7 @@ start_server {tags {"introspection"}} { test {CLIENT INFO} { r client info - } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 watch=0 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|info user=* redir=-1 resp=* lib-name=* lib-ver=* tot-net-in=* tot-net-out=* tot-cmds=*} + } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 watch=0 qbuf=0 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* 
events=r cmd=client|info user=* redir=-1 resp=* lib-name=* lib-ver=* tot-net-in=* tot-net-out=* tot-cmds=*} proc get_field_in_client_info {info field} { set info [string trim $info] diff --git a/tests/unit/querybuf.tcl b/tests/unit/querybuf.tcl index 0394b72c00..66942a5bd1 100644 --- a/tests/unit/querybuf.tcl +++ b/tests/unit/querybuf.tcl @@ -24,8 +24,24 @@ start_server {tags {"querybuf slow"}} { # The test will run at least 2s to check if client query # buffer will be resized when client idle 2s. test "query buffer resized correctly" { - set rd [valkey_client] + + set rd [valkey_deferring_client] + $rd client setname test_client + $rd read + + # Make sure query buff has size of 0 bytes at start as the client uses the shared qb. + assert {[client_query_buffer test_client] == 0} + + # Send partial command to client to make sure it doesn't use the shared qb. + $rd write "*3\r\n\$3\r\nset\r\n\$2\r\na" + $rd flush + after 100 + # send the rest of the command + $rd write "a\r\n\$1\r\nb\r\n" + $rd flush + assert_equal {OK} [$rd read] + set orig_test_client_qbuf [client_query_buffer test_client] # Make sure query buff has less than the peak resize threshold (PROTO_RESIZE_THRESHOLD) 32k # but at least the basic IO reading buffer size (PROTO_IOBUF_LEN) 16k @@ -78,6 +94,11 @@ start_server {tags {"querybuf slow"}} { $rd write "*3\r\n\$3\r\nset\r\n\$1\r\na\r\n\$1000000\r\n" $rd flush + after 200 + # Send the start of the arg and make sure the client is not using shared qb for it rather a private buf of > 1000000 size. 
+ $rd write "a" + $rd flush + after 20 if {[client_query_buffer test_client] < 1000000} { fail "query buffer should not be resized when client idle time smaller than 2s" From 96dcd1183afa803d30ec71c574e80e3dc01c1f6e Mon Sep 17 00:00:00 2001 From: LiiNen Date: Wed, 29 May 2024 04:01:28 +0900 Subject: [PATCH 12/42] Change BITCOUNT 'end' as optional like BITPOS (#118) _This change is the thing I suggested to redis when it was BSD, and is not just migration - this is of course more advanced_ ### Issue There is weird difference in syntax between BITPOS and BITCOUNT: ``` BITPOS key bit [start [end [BYTE | BIT]]] BITCOUNT key [start end [BYTE | BIT]] ``` I think this might cause confusion in terms of usability. It was not just a syntax typo error, and really works differently. The results below are with unstable build: ``` > get TEST:ABCD "ABCD" > BITPOS TEST:ABCD 1 0 -1 (integer) 1 > BITCOUNT TEST:ABCD 0 -1 (integer) 9 > BITPOS TEST:ABCD 1 0 (integer) 1 > BITCOUNT TEST:ABCD 0 (error) ERR syntax error ``` ### What did I fix simply changes logic, to accept BITCOUNT also without 'end' - 'end' become optional, like BITPOS ``` > GET TEST:ABCD "ABCD" > BITPOS TEST:ABCD 1 0 -1 (integer) 1 > BITCOUNT TEST:ABCD 0 -1 (integer) 9 > BITPOS TEST:ABCD 1 0 (integer) 1 > BITCOUNT TEST:ABCD 0 (integer) 9 ``` Of course, I also fixed syntax hint: ``` # ASIS > BITCOUNT key [start end [BYTE|BIT]] # TOBE > BITCOUNT key [start [end [BYTE|BIT]]] ``` ![image](https://github.com/valkey-io/valkey/assets/38001238/8485f58e-6785-4106-9f3f-45e62f90d24b) ### Moreover ... I hadn't noticed that there was very small dead code in these command logic, when I wrote PR to redis. I found it now, when write code again, so I wrote it in valkey. ``` c /* asis unstable */ /* bitcountCommand() */ if (!strcasecmp(c->argv[4]->ptr,"bit")) isbit = 1; // ... if (c->argc < 4) { if (isbit) end = (totlen<<3) + 7; else end = totlen-1; } /* bitposCommand() */ if (!strcasecmp(c->argv[5]->ptr,"bit")) isbit = 1; // ... 
if (c->argc < 5) { if (isbit) end = (totlen<<3) + 7; else end = totlen-1; } ``` Bit variable (actually int) "isbit" is only being set as 1, when 'BIT' is declared. But we were checking whether 'isbit' is true or false in this 'if' phrase, even if isbit could never be 1, because argc is always less than 4 (or 5 in bitpos). I think this minor fixes will make valkey command operation more consistent. Of course, this PR contains just changing args from "required" to "optional", so it will never hurt previous users. Thanks, --------- Signed-off-by: LiiNen Co-authored-by: Madelyn Olson <34459052+madolson@users.noreply.github.com> --- src/bitops.c | 19 ++++++++++--------- src/commands.def | 18 ++++++++++++------ src/commands/bitcount.json | 37 ++++++++++++++++++++++++------------- tests/unit/bitops.tcl | 19 ++++++++++++++++--- 4 files changed, 62 insertions(+), 31 deletions(-) diff --git a/src/bitops.c b/src/bitops.c index db975e4dfe..7a385812d0 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -784,7 +784,7 @@ void bitopCommand(client *c) { addReplyLongLong(c, maxlen); /* Return the output string length in bytes. */ } -/* BITCOUNT key [start end [BIT|BYTE]] */ +/* BITCOUNT key [start [end [BIT|BYTE]]] */ void bitcountCommand(client *c) { robj *o; long long start, end; @@ -795,9 +795,8 @@ void bitcountCommand(client *c) { unsigned char first_byte_neg_mask = 0, last_byte_neg_mask = 0; /* Parse start/end range if any. */ - if (c->argc == 4 || c->argc == 5) { + if (c->argc == 3 || c->argc == 4 || c->argc == 5) { if (getLongLongFromObjectOrReply(c, c->argv[2], &start, NULL) != C_OK) return; - if (getLongLongFromObjectOrReply(c, c->argv[3], &end, NULL) != C_OK) return; if (c->argc == 5) { if (!strcasecmp(c->argv[4]->ptr, "bit")) isbit = 1; @@ -808,6 +807,11 @@ void bitcountCommand(client *c) { return; } } + if (c->argc >= 4) { + if (getLongLongFromObjectOrReply(c,c->argv[3],&end,NULL) != C_OK) + return; + } + /* Lookup, check for type. 
*/ o = lookupKeyRead(c->db, c->argv[1]); if (checkType(c, o, OBJ_STRING)) return; @@ -817,6 +821,8 @@ void bitcountCommand(client *c) { /* Make sure we will not overflow */ serverAssert(totlen <= LLONG_MAX >> 3); + if (c->argc < 4) end = totlen-1; + /* Convert negative indexes */ if (start < 0 && end < 0 && start > end) { addReply(c, shared.czero); @@ -921,12 +927,7 @@ void bitposCommand(client *c) { long long totlen = strlen; serverAssert(totlen <= LLONG_MAX >> 3); - if (c->argc < 5) { - if (isbit) - end = (totlen << 3) + 7; - else - end = totlen - 1; - } + if (c->argc < 5) end = totlen - 1; if (isbit) totlen <<= 3; /* Convert negative indexes */ diff --git a/src/commands.def b/src/commands.def index f76e21f2f3..c59cb01dc1 100644 --- a/src/commands.def +++ b/src/commands.def @@ -36,6 +36,7 @@ const char *commandGroupStr(int index) { /* BITCOUNT history */ commandHistory BITCOUNT_History[] = { {"7.0.0","Added the `BYTE|BIT` option."}, +{"8.0.0","`end` made optional; when called without argument the command reports the last BYTE."}, }; #endif @@ -51,23 +52,28 @@ keySpec BITCOUNT_Keyspecs[1] = { }; #endif -/* BITCOUNT range unit argument table */ -struct COMMAND_ARG BITCOUNT_range_unit_Subargs[] = { +/* BITCOUNT range end_unit_block unit argument table */ +struct COMMAND_ARG BITCOUNT_range_end_unit_block_unit_Subargs[] = { {MAKE_ARG("byte",ARG_TYPE_PURE_TOKEN,-1,"BYTE",NULL,NULL,CMD_ARG_NONE,0,NULL)}, {MAKE_ARG("bit",ARG_TYPE_PURE_TOKEN,-1,"BIT",NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; +/* BITCOUNT range end_unit_block argument table */ +struct COMMAND_ARG BITCOUNT_range_end_unit_block_Subargs[] = { +{MAKE_ARG("end",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("unit",ARG_TYPE_ONEOF,-1,NULL,NULL,"7.0.0",CMD_ARG_OPTIONAL,2,NULL),.subargs=BITCOUNT_range_end_unit_block_unit_Subargs}, +}; + /* BITCOUNT range argument table */ struct COMMAND_ARG BITCOUNT_range_Subargs[] = { {MAKE_ARG("start",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, 
-{MAKE_ARG("end",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, -{MAKE_ARG("unit",ARG_TYPE_ONEOF,-1,NULL,NULL,"7.0.0",CMD_ARG_OPTIONAL,2,NULL),.subargs=BITCOUNT_range_unit_Subargs}, +{MAKE_ARG("end-unit-block",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,2,NULL),.subargs=BITCOUNT_range_end_unit_block_Subargs}, }; /* BITCOUNT argument table */ struct COMMAND_ARG BITCOUNT_Args[] = { {MAKE_ARG("key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, -{MAKE_ARG("range",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,3,NULL),.subargs=BITCOUNT_range_Subargs}, +{MAKE_ARG("range",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,2,NULL),.subargs=BITCOUNT_range_Subargs}, }; /********** BITFIELD ********************/ @@ -10661,7 +10667,7 @@ struct COMMAND_ARG WATCH_Args[] = { /* Main command table */ struct COMMAND_STRUCT serverCommandTable[] = { /* bitmap */ -{MAKE_CMD("bitcount","Counts the number of set bits (population counting) in a string.","O(N)","2.6.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITCOUNT_History,1,BITCOUNT_Tips,0,bitcountCommand,-2,CMD_READONLY,ACL_CATEGORY_BITMAP,BITCOUNT_Keyspecs,1,NULL,2),.args=BITCOUNT_Args}, +{MAKE_CMD("bitcount","Counts the number of set bits (population counting) in a string.","O(N)","2.6.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITCOUNT_History,2,BITCOUNT_Tips,0,bitcountCommand,-2,CMD_READONLY,ACL_CATEGORY_BITMAP,BITCOUNT_Keyspecs,1,NULL,2),.args=BITCOUNT_Args}, {MAKE_CMD("bitfield","Performs arbitrary bitfield integer operations on strings.","O(1) for each subcommand specified","3.2.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITFIELD_History,0,BITFIELD_Tips,0,bitfieldCommand,-2,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_BITMAP,BITFIELD_Keyspecs,1,bitfieldGetKeys,2),.args=BITFIELD_Args}, {MAKE_CMD("bitfield_ro","Performs arbitrary read-only bitfield integer operations on strings.","O(1) for each subcommand 
specified","6.0.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITFIELD_RO_History,0,BITFIELD_RO_Tips,0,bitfieldroCommand,-2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_BITMAP,BITFIELD_RO_Keyspecs,1,NULL,2),.args=BITFIELD_RO_Args}, {MAKE_CMD("bitop","Performs bitwise operations on multiple strings, and stores the result.","O(N)","2.6.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITOP_History,0,BITOP_Tips,0,bitopCommand,-4,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_BITMAP,BITOP_Keyspecs,2,NULL,3),.args=BITOP_Args}, diff --git a/src/commands/bitcount.json b/src/commands/bitcount.json index 2d277a8551..ad90b39bad 100644 --- a/src/commands/bitcount.json +++ b/src/commands/bitcount.json @@ -10,6 +10,10 @@ [ "7.0.0", "Added the `BYTE|BIT` option." + ], + [ + "8.0.0", + "`end` made optional; when called without argument the command reports the last BYTE." ] ], "command_flags": [ @@ -54,24 +58,31 @@ "type": "integer" }, { - "name": "end", - "type": "integer" - }, - { - "name": "unit", - "type": "oneof", + "name": "end-unit-block", + "type": "block", "optional": true, - "since": "7.0.0", "arguments": [ { - "name": "byte", - "type": "pure-token", - "token": "BYTE" + "name": "end", + "type": "integer" }, { - "name": "bit", - "type": "pure-token", - "token": "BIT" + "name": "unit", + "type": "oneof", + "optional": true, + "since": "7.0.0", + "arguments": [ + { + "name": "byte", + "type": "pure-token", + "token": "BYTE" + }, + { + "name": "bit", + "type": "pure-token", + "token": "BIT" + } + ] } ] } diff --git a/tests/unit/bitops.tcl b/tests/unit/bitops.tcl index edcafdee07..125b0a3d1f 100644 --- a/tests/unit/bitops.tcl +++ b/tests/unit/bitops.tcl @@ -128,13 +128,26 @@ start_server {tags {"bitops"}} { } } + test {BITCOUNT with just start} { + set s "foobar" + r set s $s + assert_equal [r bitcount s 0] [count_bits "foobar"] + assert_equal [r bitcount s 1] [count_bits "oobar"] + assert_equal [r bitcount s 1000] 0 + assert_equal [r bitcount s -1] [count_bits "r"] + assert_equal 
[r bitcount s -2] [count_bits "ar"] + assert_equal [r bitcount s -1000] [count_bits "foobar"] + } + test {BITCOUNT with start, end} { set s "foobar" r set s $s assert_equal [r bitcount s 0 -1] [count_bits "foobar"] assert_equal [r bitcount s 1 -2] [count_bits "ooba"] - assert_equal [r bitcount s -2 1] [count_bits ""] + assert_equal [r bitcount s -2 1] 0 + assert_equal [r bitcount s -1000 0] [count_bits "f"] assert_equal [r bitcount s 0 1000] [count_bits "foobar"] + assert_equal [r bitcount s -1000 1000] [count_bits "foobar"] assert_equal [r bitcount s 0 -1 bit] [count_bits $s] assert_equal [r bitcount s 10 14 bit] [count_bits_start_end $s 10 14] @@ -144,18 +157,18 @@ start_server {tags {"bitops"}} { assert_equal [r bitcount s 3 -34 bit] [count_bits_start_end $s 3 14] assert_equal [r bitcount s 3 -19 bit] [count_bits_start_end $s 3 29] assert_equal [r bitcount s -2 1 bit] 0 + assert_equal [r bitcount s -1000 14 bit] [count_bits_start_end $s 0 14] assert_equal [r bitcount s 0 1000 bit] [count_bits $s] + assert_equal [r bitcount s -1000 1000 bit] [count_bits $s] } test {BITCOUNT with illegal arguments} { # Used to return 0 for non-existing key instead of errors r del s - assert_error {ERR *syntax*} {r bitcount s 0} assert_error {ERR *syntax*} {r bitcount s 0 1 hello} assert_error {ERR *syntax*} {r bitcount s 0 1 hello hello2} r set s 1 - assert_error {ERR *syntax*} {r bitcount s 0} assert_error {ERR *syntax*} {r bitcount s 0 1 hello} assert_error {ERR *syntax*} {r bitcount s 0 1 hello hello2} } From 168da8b52eaae4346b71e44a26a6f5ae8a3089f1 Mon Sep 17 00:00:00 2001 From: LiiNen Date: Wed, 29 May 2024 13:49:50 +0900 Subject: [PATCH 13/42] Fix bitops.c clang-format properly (#570) ref: - https://github.com/valkey-io/valkey/pull/118 (my pervious change) - https://github.com/valkey-io/valkey/pull/461 (issuing that clang format checker fails due to my change) There was an issue that clang-format cheker failed. I don't know why I missed it and why it didn't catch. 
just running `clang-format -i bitops.c` was all. Signed-off-by: LiiNen --- src/bitops.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/bitops.c b/src/bitops.c index 7a385812d0..2094bb0ea9 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -808,10 +808,9 @@ void bitcountCommand(client *c) { } } if (c->argc >= 4) { - if (getLongLongFromObjectOrReply(c,c->argv[3],&end,NULL) != C_OK) - return; + if (getLongLongFromObjectOrReply(c, c->argv[3], &end, NULL) != C_OK) return; } - + /* Lookup, check for type. */ o = lookupKeyRead(c->db, c->argv[1]); if (checkType(c, o, OBJ_STRING)) return; @@ -821,7 +820,7 @@ void bitcountCommand(client *c) { /* Make sure we will not overflow */ serverAssert(totlen <= LLONG_MAX >> 3); - if (c->argc < 4) end = totlen-1; + if (c->argc < 4) end = totlen - 1; /* Convert negative indexes */ if (start < 0 && end < 0 && start > end) { From 6bab2d7968dd9d71f2ce200386ecf4286e333a76 Mon Sep 17 00:00:00 2001 From: Binbin Date: Thu, 30 May 2024 10:44:12 +0800 Subject: [PATCH 14/42] Make sure clear the CLUSTER SLOTS cache on time when updating hostname (#564) In #53, we will cache the CLUSTER SLOTS response to improve the throughput and reduct the latency. In the code snippet below, the second cluster slots will use the old hostname: ``` config set cluster-preferred-endpoint-type hostname config set cluster-announce-hostname old-hostname.com multi cluster slots config set cluster-announce-hostname new-hostname.com cluster slots exec ``` When updating the hostname, in updateAnnouncedHostname, we will set CLUSTER_TODO_SAVE_CONFIG and we will do a clearCachedClusterSlotsResponse in clusterSaveConfigOrDie, so harmless in most cases. Move the clearCachedClusterSlotsResponse call to clusterDoBeforeSleep instead of scheduling it to be called in clusterSaveConfigOrDie. 
Signed-off-by: Binbin --- src/cluster_legacy.c | 4 +++- tests/unit/cluster/hostnames.tcl | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index 63364d3596..22fdb20cf7 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -752,7 +752,6 @@ void clusterSaveConfigOrDie(int do_fsync) { serverLog(LL_WARNING, "Fatal: can't update cluster config file."); exit(1); } - clearCachedClusterSlotsResponse(); } /* Lock the cluster config using flock(), and retain the file descriptor used to @@ -4847,6 +4846,9 @@ void clusterBeforeSleep(void) { } void clusterDoBeforeSleep(int flags) { + /* Clear the cache if there are config changes here. */ + if (flags & CLUSTER_TODO_SAVE_CONFIG) clearCachedClusterSlotsResponse(); + server.cluster->todo_before_sleep |= flags; } diff --git a/tests/unit/cluster/hostnames.tcl b/tests/unit/cluster/hostnames.tcl index 7be4b42aa3..98a6385c6f 100644 --- a/tests/unit/cluster/hostnames.tcl +++ b/tests/unit/cluster/hostnames.tcl @@ -73,6 +73,20 @@ test "Verify cluster-preferred-endpoint-type behavior for redirects and info" { # Verify prefer hostname behavior R 0 config set cluster-preferred-endpoint-type hostname + # Make sure the cache is cleared when updating hostname. + R 0 multi + R 0 cluster slots + R 0 config set cluster-announce-hostname "new-me.com" + R 0 cluster slots + set multi_result [R 0 exec] + set slot_result1 [lindex $multi_result 0] + set slot_result2 [lindex $multi_result 2] + assert_equal "me.com" [get_slot_field $slot_result1 0 2 0] + assert_equal "new-me.com" [get_slot_field $slot_result2 0 2 0] + + # Set it back to its original value. 
+ R 0 config set cluster-announce-hostname "me.com" + set slot_result [R 0 cluster slots] assert_equal "me.com" [get_slot_field $slot_result 0 2 0] assert_equal "them.com" [get_slot_field $slot_result 2 2 0] From 0d2ba9b94d28d4022ea475a2b83157830982c941 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Thu, 30 May 2024 13:09:29 -0400 Subject: [PATCH 15/42] Update redis legacy word when run TLS cert file (#572) Reference: https://github.com/valkey-io/valkey-doc/blob/main/topics/encryption.md Before we runtest --tls, we need first run utils/gen-test-certs.sh I found there are some redis legacy word there, update them. Signed-off-by: hwware --- utils/gen-test-certs.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/gen-test-certs.sh b/utils/gen-test-certs.sh index 2036a9b85f..411ce83d2b 100755 --- a/utils/gen-test-certs.sh +++ b/utils/gen-test-certs.sh @@ -19,7 +19,7 @@ generate_cert() { [ -f $keyfile ] || openssl genrsa -out $keyfile 2048 openssl req \ -new -sha256 \ - -subj "/O=Redis Test/CN=$cn" \ + -subj "/O=Valkey Test/CN=$cn" \ -key $keyfile | \ openssl x509 \ -req -sha256 \ @@ -38,7 +38,7 @@ openssl req \ -x509 -new -nodes -sha256 \ -key tests/tls/ca.key \ -days 3650 \ - -subj '/O=Redis Test/CN=Certificate Authority' \ + -subj '/O=Valkey Test/CN=Certificate Authority' \ -out tests/tls/ca.crt cat > tests/tls/openssl.cnf <<_END_ From 6fb90adf4be1c3b703040c9f744bc9cf17d3d577 Mon Sep 17 00:00:00 2001 From: nitaicaro <42576749+nitaicaro@users.noreply.github.com> Date: Thu, 30 May 2024 22:55:00 +0300 Subject: [PATCH 16/42] =?UTF-8?q?Fix=20crash=20where=20command=20duration?= =?UTF-8?q?=20is=20not=20reset=20when=20client=20is=20blocked=20=E2=80=A6?= =?UTF-8?q?=20(#526)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In #11012, we changed the way command durations were computed to handle the same command being executed multiple times. 
In #11970, we added an assert if the duration is not properly reset, potentially indicating that a call to report statistics was missed. I found an edge case where this happens - easily reproduced by blocking a client on `XGROUPREAD` and migrating the stream's slot. This causes the engine to process the `XGROUPREAD` command twice: 1. First time, we are blocked on the stream, so we wait for unblock to come back to it a second time. In most cases, when we come back to process the command second time after unblock, we process the command normally, which includes recording the duration and then resetting it. 2. After unblocking we come back to process the command, and this is where we hit the edge case - at this point, we had already migrated the slot to another node, so we return a `MOVED` response. But when we do that, we don’t reset the duration field. Fix: also reset the duration when returning a `MOVED` response. I think this is right, because the client should redirect the command to the right node, which in turn will calculate the execution duration. Also wrote a test which reproduces this, it fails without the fix and passes with it. 
--------- Signed-off-by: Nitai Caro Co-authored-by: Nitai Caro --- src/server.c | 1 + tests/unit/cluster/slot-migration.tcl | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/src/server.c b/src/server.c index f87193b74c..bf4967c106 100644 --- a/src/server.c +++ b/src/server.c @@ -3883,6 +3883,7 @@ int processCommand(client *c) { flagTransaction(c); } clusterRedirectClient(c, n, c->slot, error_code); + c->duration = 0; c->cmd->rejected_calls++; return C_OK; } diff --git a/tests/unit/cluster/slot-migration.tcl b/tests/unit/cluster/slot-migration.tcl index 008e97e037..d4f0d43b3b 100644 --- a/tests/unit/cluster/slot-migration.tcl +++ b/tests/unit/cluster/slot-migration.tcl @@ -422,3 +422,24 @@ start_cluster 3 3 {tags {external:skip cluster} overrides {cluster-allow-replica resume_process [srv -3 pid] } } + +start_cluster 2 0 {tags {external:skip cluster regression} overrides {cluster-allow-replica-migration no cluster-node-timeout 1000} } { + # Issue #563 regression test + test "Client blocked on XREADGROUP while stream's slot is migrated" { + set stream_name aga + set slot 609 + + # Start a deferring client to simulate a blocked client on XREADGROUP + R 0 XGROUP CREATE $stream_name mygroup $ MKSTREAM + set rd [valkey_deferring_client] + $rd xreadgroup GROUP mygroup consumer BLOCK 0 streams $stream_name > + wait_for_blocked_client + + # Migrate the slot to the target node + R 0 CLUSTER SETSLOT $slot MIGRATING [dict get [cluster_get_myself 1] id] + R 1 CLUSTER SETSLOT $slot IMPORTING [dict get [cluster_get_myself 0] id] + + # This line should cause the crash + R 0 MIGRATE 127.0.0.1 [lindex [R 1 CONFIG GET port] 1] $stream_name 0 5000 + } +} From f927565d28229691b39f873f40d2d8b9cacc425d Mon Sep 17 00:00:00 2001 From: Ping Xie Date: Thu, 30 May 2024 23:45:47 -0700 Subject: [PATCH 17/42] Consolidate various BLOCKED_WAIT* states (#562) There are currently three block types: BLOCKED_WAIT, BLOCKED_WAITAOF, and BLOCKED_WAIT_PREREPL, used to block clients 
executing `WAIT`, `WAITAOF`, and `CLUSTER SETSLOT`, respectively. They share the same workflow: the client is blocked until replication to the expected number of replicas completes. However, they provide different responses depending on the commands involved. Using distinct block types leads to code duplication and reduced readability. This PR consolidates the three types into a single WAIT type, differentiating them using the pending command to ensure the appropriate response is returned. Fix #427 --------- Signed-off-by: Ping Xie --- .gitignore | 2 ++ src/blocked.c | 50 ++++++++++++++++---------------------------- src/cluster_legacy.c | 6 ++++-- src/networking.c | 2 +- src/replication.c | 11 +++++----- src/server.h | 28 +++++++++++-------------- 6 files changed, 42 insertions(+), 57 deletions(-) diff --git a/.gitignore b/.gitignore index 8ed98aa326..e745f76a04 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,5 @@ Makefile.dep compile_commands.json redis.code-workspace .cache +.cscope* +.swp diff --git a/src/blocked.c b/src/blocked.c index 0291505cb9..85ef9170a0 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -183,8 +183,7 @@ void queueClientForReprocessing(client *c) { void unblockClient(client *c, int queue_for_reprocessing) { if (c->bstate.btype == BLOCKED_LIST || c->bstate.btype == BLOCKED_ZSET || c->bstate.btype == BLOCKED_STREAM) { unblockClientWaitingData(c); - } else if (c->bstate.btype == BLOCKED_WAIT || c->bstate.btype == BLOCKED_WAITAOF || - c->bstate.btype == BLOCKED_WAIT_PREREPL) { + } else if (c->bstate.btype == BLOCKED_WAIT) { unblockClientWaitingReplicas(c); } else if (c->bstate.btype == BLOCKED_MODULE) { if (moduleClientIsBlockedOnKeys(c)) unblockClientWaitingData(c); @@ -200,8 +199,7 @@ void unblockClient(client *c, int queue_for_reprocessing) { /* Reset the client for a new query, unless the client has pending command to process * or in case a shutdown operation was canceled and we are still in the processCommand sequence */ - if 
(!(c->flags & CLIENT_PENDING_COMMAND) && c->bstate.btype != BLOCKED_SHUTDOWN && - c->bstate.btype != BLOCKED_WAIT_PREREPL) { + if (!(c->flags & CLIENT_PENDING_COMMAND) && c->bstate.btype != BLOCKED_SHUTDOWN) { freeClientOriginalArgv(c); /* Clients that are not blocked on keys are not reprocessed so we must * call reqresAppendResponse here (for clients blocked on key, @@ -211,11 +209,11 @@ void unblockClient(client *c, int queue_for_reprocessing) { resetClient(c); } + /* We count blocked client stats on regular clients and not on module clients */ + if (!(c->flags & CLIENT_MODULE)) server.blocked_clients--; + server.blocked_clients_by_type[c->bstate.btype]--; /* Clear the flags, and put the client in the unblocked list so that * we'll process new commands in its query buffer ASAP. */ - if (!(c->flags & CLIENT_MODULE)) - server.blocked_clients--; /* We count blocked client stats on regular clients and not on module clients */ - server.blocked_clients_by_type[c->bstate.btype]--; c->flags &= ~CLIENT_BLOCKED; c->bstate.btype = BLOCKED_NONE; c->bstate.unblock_on_nokey = 0; @@ -231,15 +229,19 @@ void replyToBlockedClientTimedOut(client *c) { addReplyNullArray(c); updateStatsOnUnblock(c, 0, 0, 0); } else if (c->bstate.btype == BLOCKED_WAIT) { - addReplyLongLong(c, replicationCountAcksByOffset(c->bstate.reploffset)); - } else if (c->bstate.btype == BLOCKED_WAITAOF) { - addReplyArrayLen(c, 2); - addReplyLongLong(c, server.fsynced_reploff >= c->bstate.reploffset); - addReplyLongLong(c, replicationCountAOFAcksByOffset(c->bstate.reploffset)); + if (c->cmd->proc == waitCommand) { + addReplyLongLong(c, replicationCountAcksByOffset(c->bstate.reploffset)); + } else if (c->cmd->proc == waitaofCommand) { + addReplyArrayLen(c, 2); + addReplyLongLong(c, server.fsynced_reploff >= c->bstate.reploffset); + addReplyLongLong(c, replicationCountAOFAcksByOffset(c->bstate.reploffset)); + } else if (c->cmd->proc == clusterCommand) { + addReplyErrorObject(c, shared.noreplicaserr); + } else { + 
serverPanic("Unknown wait command %s in replyToBlockedClientTimedOut().", c->cmd->declared_name); + } } else if (c->bstate.btype == BLOCKED_MODULE) { moduleBlockedClientTimedOut(c, 0); - } else if (c->bstate.btype == BLOCKED_WAIT_PREREPL) { - addReplyErrorObject(c, shared.noreplicaserr); } else { serverPanic("Unknown btype in replyToBlockedClientTimedOut()."); } @@ -585,29 +587,13 @@ static void handleClientsBlockedOnKey(readyList *rl) { } /* block a client for replica acknowledgement */ -void blockClientForReplicaAck(client *c, mstime_t timeout, long long offset, long numreplicas, int btype, int numlocal) { +void blockClientForReplicaAck(client *c, mstime_t timeout, long long offset, long numreplicas, int numlocal) { c->bstate.timeout = timeout; c->bstate.reploffset = offset; c->bstate.numreplicas = numreplicas; c->bstate.numlocal = numlocal; listAddNodeHead(server.clients_waiting_acks, c); - blockClient(c, btype); -} - -/* block a client due to pre-replication */ -void blockForPreReplication(client *c, mstime_t timeout, long long offset, long numreplicas) { - blockClientForReplicaAck(c, timeout, offset, numreplicas, BLOCKED_WAIT_PREREPL, 0); - c->flags |= CLIENT_PENDING_COMMAND; -} - -/* block a client due to wait command */ -void blockForReplication(client *c, mstime_t timeout, long long offset, long numreplicas) { - blockClientForReplicaAck(c, timeout, offset, numreplicas, BLOCKED_WAIT, 0); -} - -/* block a client due to waitaof command */ -void blockForAofFsync(client *c, mstime_t timeout, long long offset, int numlocal, long numreplicas) { - blockClientForReplicaAck(c, timeout, offset, numreplicas, BLOCKED_WAITAOF, numlocal); + blockClient(c, BLOCKED_WAIT); } /* Postpone client from executing a command. 
For example the server might be busy diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index 22fdb20cf7..0de6351e90 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -6018,7 +6018,7 @@ void clusterCommandSetSlot(client *c) { * This ensures that all replicas have the latest topology information, enabling * a reliable slot ownership transfer even if the primary node went down during * the process. */ - if (nodeIsMaster(myself) && myself->numslaves != 0 && (c->flags & CLIENT_PREREPL_DONE) == 0) { + if (nodeIsMaster(myself) && myself->numslaves != 0 && (c->flags & CLIENT_REPLICATION_DONE) == 0) { forceCommandPropagation(c, PROPAGATE_REPL); /* We are a primary and this is the first time we see this `SETSLOT` * command. Force-replicate the command to all of our replicas @@ -6028,7 +6028,9 @@ void clusterCommandSetSlot(client *c) { * 2. The repl offset target is set to the master's current repl offset + 1. * There is no concern of partial replication because replicas always * ack the repl offset at the command boundary. */ - blockForPreReplication(c, timeout_ms, server.master_repl_offset + 1, myself->numslaves); + blockClientForReplicaAck(c, timeout_ms, server.master_repl_offset + 1, myself->numslaves, 0); + /* Mark client as pending command for execution after replication to replicas. */ + c->flags |= CLIENT_PENDING_COMMAND; replicationRequestAckFromSlaves(); return; } diff --git a/src/networking.c b/src/networking.c index e062bc3aba..9274f21c05 100644 --- a/src/networking.c +++ b/src/networking.c @@ -2068,7 +2068,7 @@ void resetClient(client *c) { c->multibulklen = 0; c->bulklen = -1; c->slot = -1; - c->flags &= ~(CLIENT_EXECUTING_COMMAND | CLIENT_PREREPL_DONE); + c->flags &= ~(CLIENT_EXECUTING_COMMAND | CLIENT_REPLICATION_DONE); /* Make sure the duration has been recorded to some command. 
*/ serverAssert(c->duration == 0); diff --git a/src/replication.c b/src/replication.c index 1d5e0fe290..375b637f61 100644 --- a/src/replication.c +++ b/src/replication.c @@ -3457,7 +3457,7 @@ void waitCommand(client *c) { /* Otherwise block the client and put it into our list of clients * waiting for ack from slaves. */ - blockForReplication(c, timeout, offset, numreplicas); + blockClientForReplicaAck(c, timeout, offset, numreplicas, 0); /* Make sure that the server will send an ACK request to all the slaves * before returning to the event loop. */ @@ -3497,7 +3497,7 @@ void waitaofCommand(client *c) { /* Otherwise block the client and put it into our list of clients * waiting for ack from slaves. */ - blockForAofFsync(c, timeout, c->woff, numlocal, numreplicas); + blockClientForReplicaAck(c, timeout, c->woff, numreplicas, numlocal); /* Make sure that the server will send an ACK request to all the slaves * before returning to the event loop. */ @@ -3532,8 +3532,7 @@ void processClientsWaitingReplicas(void) { int numreplicas = 0; client *c = ln->value; - int is_wait_aof = c->bstate.btype == BLOCKED_WAITAOF; - int is_wait_prerepl = c->bstate.btype == BLOCKED_WAIT_PREREPL; + int is_wait_aof = c->cmd->proc == waitaofCommand; if (is_wait_aof && c->bstate.numlocal && !server.aof_enabled) { addReplyError(c, "WAITAOF cannot be used when numlocal is set but appendonly is disabled."); @@ -3580,8 +3579,8 @@ void processClientsWaitingReplicas(void) { addReplyArrayLen(c, 2); addReplyLongLong(c, numlocal); addReplyLongLong(c, numreplicas); - } else if (is_wait_prerepl) { - c->flags |= CLIENT_PREREPL_DONE; + } else if (c->flags & CLIENT_PENDING_COMMAND) { + c->flags |= CLIENT_REPLICATION_DONE; } else { addReplyLongLong(c, numreplicas); } diff --git a/src/server.h b/src/server.h index 249d896d35..2bacc991e1 100644 --- a/src/server.h +++ b/src/server.h @@ -426,23 +426,21 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; #define CLIENT_MODULE_PREVENT_AOF_PROP (1ULL << 
48) /* Module client do not want to propagate to AOF */ #define CLIENT_MODULE_PREVENT_REPL_PROP (1ULL << 49) /* Module client do not want to propagate to replica */ #define CLIENT_REPROCESSING_COMMAND (1ULL << 50) /* The client is re-processing the command. */ -#define CLIENT_PREREPL_DONE (1ULL << 51) /* Indicate that pre-replication has been done on the client */ +#define CLIENT_REPLICATION_DONE (1ULL << 51) /* Indicate that replication has been done on the client */ /* Client block type (btype field in client structure) * if CLIENT_BLOCKED flag is set. */ typedef enum blocking_type { - BLOCKED_NONE, /* Not blocked, no CLIENT_BLOCKED flag set. */ - BLOCKED_LIST, /* BLPOP & co. */ - BLOCKED_WAIT, /* WAIT for synchronous replication. */ - BLOCKED_WAITAOF, /* WAITAOF for AOF file fsync. */ - BLOCKED_MODULE, /* Blocked by a loadable module. */ - BLOCKED_STREAM, /* XREAD. */ - BLOCKED_ZSET, /* BZPOP et al. */ - BLOCKED_POSTPONE, /* Blocked by processCommand, re-try processing later. */ - BLOCKED_SHUTDOWN, /* SHUTDOWN. */ - BLOCKED_WAIT_PREREPL, /* WAIT for pre-replication and then run the command. */ - BLOCKED_NUM, /* Number of blocked states. */ - BLOCKED_END /* End of enumeration */ + BLOCKED_NONE, /* Not blocked, no CLIENT_BLOCKED flag set. */ + BLOCKED_LIST, /* BLPOP & co. */ + BLOCKED_WAIT, /* WAIT for synchronous replication. */ + BLOCKED_MODULE, /* Blocked by a loadable module. */ + BLOCKED_STREAM, /* XREAD. */ + BLOCKED_ZSET, /* BZPOP et al. */ + BLOCKED_POSTPONE, /* Blocked by processCommand, re-try processing later. */ + BLOCKED_SHUTDOWN, /* SHUTDOWN. */ + BLOCKED_NUM, /* Number of blocked states. 
*/ + BLOCKED_END /* End of enumeration */ } blocking_type; /* Client request types */ @@ -3498,9 +3496,7 @@ void signalKeyAsReady(serverDb *db, robj *key, int type); void blockForKeys(client *c, int btype, robj **keys, int numkeys, mstime_t timeout, int unblock_on_nokey); void blockClientShutdown(client *c); void blockPostponeClient(client *c); -void blockForReplication(client *c, mstime_t timeout, long long offset, long numreplicas); -void blockForPreReplication(client *c, mstime_t timeout, long long offset, long numreplicas); -void blockForAofFsync(client *c, mstime_t timeout, long long offset, int numlocal, long numreplicas); +void blockClientForReplicaAck(client *c, mstime_t timeout, long long offset, long numreplicas, int numlocal); void replicationRequestAckFromSlaves(void); void signalDeletedKeyAsReady(serverDb *db, robj *key, int type); void updateStatsOnUnblock(client *c, long blocked_us, long reply_us, int had_errors); From 2b97aa61710f2b64245bf734abaa1984efe454e8 Mon Sep 17 00:00:00 2001 From: Ping Xie Date: Fri, 31 May 2024 22:50:08 -0700 Subject: [PATCH 18/42] Introduce `enable-debug-assert` to enable/disable debug asserts at runtime (#584) Introduce a new hidden server configuration, `enable-debug-assert`, which allows selectively enabling or disabling, at runtime, expensive or risky assertions used primarily for debugging and testing. 
Fix #569 --------- Signed-off-by: Ping Xie --- .github/workflows/ci.yml | 2 +- .github/workflows/daily.yml | 2 +- src/config.c | 2 ++ src/server.h | 13 ++++++------- tests/assets/default.conf | 2 ++ 5 files changed, 12 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 40376f1628..508565d296 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,7 +37,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make # build with TLS module just for compilation coverage - run: make SANITIZER=address SERVER_CFLAGS='-Werror -DDEBUG_ASSERTIONS' BUILD_TLS=module + run: make SANITIZER=address SERVER_CFLAGS='-Werror' BUILD_TLS=module - name: testprep run: sudo apt-get install tcl8.6 tclx -y - name: test diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 658e58b235..ff7a9ad67b 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -598,7 +598,7 @@ jobs: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} - name: make - run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror -DDEBUG_ASSERTIONS' + run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror' - name: testprep run: | sudo apt-get update diff --git a/src/config.c b/src/config.c index 539d8fdf20..7dc86fbf39 100644 --- a/src/config.c +++ b/src/config.c @@ -3191,7 +3191,9 @@ standardConfig static_configs[] = { createTimeTConfig("repl-backlog-ttl", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.repl_backlog_time_limit, 60 * 60, INTEGER_CONFIG, NULL, NULL), /* Default: 1 hour */ createOffTConfig("auto-aof-rewrite-min-size", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.aof_rewrite_min_size, 64 * 1024 * 1024, MEMORY_CONFIG, NULL, NULL), createOffTConfig("loading-process-events-interval-bytes", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 1024, INT_MAX, 
server.loading_process_events_interval_bytes, 1024 * 1024 * 2, INTEGER_CONFIG, NULL, NULL), + createBoolConfig("enable-debug-assert", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, server.enable_debug_assert, 0, NULL, NULL), + /* Tls configs */ createIntConfig("tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, applyTLSPort), /* TCP port. */ createIntConfig("tls-session-cache-size", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_size, 20 * 1024, INTEGER_CONFIG, NULL, applyTlsCfg), createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, applyTlsCfg), diff --git a/src/server.h b/src/server.h index 2bacc991e1..bac4c33c96 100644 --- a/src/server.h +++ b/src/server.h @@ -701,13 +701,11 @@ typedef enum { #define serverAssert(_e) (likely(_e) ? (void)0 : (_serverAssert(#_e, __FILE__, __LINE__), valkey_unreachable())) #define serverPanic(...) _serverPanic(__FILE__, __LINE__, __VA_ARGS__), valkey_unreachable() -/* The following macros provide assertions that are only executed during test builds and should be used to add - * assertions that are too computationally expensive or dangerous to run during normal operations. */ -#ifdef DEBUG_ASSERTIONS -#define debugServerAssertWithInfo(...) serverAssertWithInfo(__VA_ARGS__) -#else -#define debugServerAssertWithInfo(...) -#endif +/* The following macro provides a conditional assertion that is only executed + * when the server config 'enable-debug-assert' is true. This is useful for adding + * assertions that are too computationally expensive or risky to run in normal + * operation, but are valuable for debugging or testing. */ +#define debugServerAssertWithInfo(...) (server.enable_debug_assert ? 
serverAssertWithInfo(__VA_ARGS__) : (void)0) /* latency histogram per command init settings */ #define LATENCY_HISTOGRAM_MIN_VALUE 1L /* >= 1 nanosec */ @@ -1680,6 +1678,7 @@ struct valkeyServer { int enable_protected_configs; /* Enable the modification of protected configs, see PROTECTED_ACTION_ALLOWED_* */ int enable_debug_cmd; /* Enable DEBUG commands, see PROTECTED_ACTION_ALLOWED_* */ int enable_module_cmd; /* Enable MODULE commands, see PROTECTED_ACTION_ALLOWED_* */ + int enable_debug_assert; /* Enable debug asserts */ /* RDB / AOF loading information */ volatile sig_atomic_t loading; /* We are loading data from disk if true */ diff --git a/tests/assets/default.conf b/tests/assets/default.conf index 6e8156ce37..1a59b5bcae 100644 --- a/tests/assets/default.conf +++ b/tests/assets/default.conf @@ -35,3 +35,5 @@ propagation-error-behavior panic # Make sure shutdown doesn't fail if there's an initial AOFRW shutdown-on-sigterm force + +enable-debug-assert yes From d16b4ec1b9a2309f17601abb1e238f06dfb736a9 Mon Sep 17 00:00:00 2001 From: Chen Tianjie Date: Sat, 1 Jun 2024 16:09:20 +0800 Subject: [PATCH 19/42] Unshare object to avoid LRU/LFU being messed up (#250) When LRU/LFU enabled, Valkey does not allow using shared objects, as value objects may be shared among many different keys and they can't share LRU/LFU information. However `maxmemory-policy` is modifiable at runtime. If LRU/LFU is not enabled at start, but then enabled when some shared objects are already used, there could be some confusion in LRU/LFU information. For `set` command it is OK since it is going to create a new object when LRU/LFU enabled, but `get` command will not unshare the object and just update LRU/LFU information. So we may duplicate the object in this case. It is a one-time task for each key using shared objects, unless this is the case for so many keys, there should be no serious performance degradation. 
Still, LRU will be updated anyway, no matter LRU/LFU is enabled or not, because `OBJECT IDLETIME` needs it, unless `maxmemory-policy` is set to LFU. So idle time of a key may still be messed up. --------- Signed-off-by: chentianjie.ctj Signed-off-by: Chen Tianjie --- src/db.c | 4 ++++ src/object.c | 6 ++++-- src/server.h | 3 +++ tests/unit/maxmemory.tcl | 15 +++++++++++++++ 4 files changed, 26 insertions(+), 2 deletions(-) diff --git a/src/db.c b/src/db.c index a78c8bad2b..2e6d85cf4e 100644 --- a/src/db.c +++ b/src/db.c @@ -122,6 +122,10 @@ robj *lookupKey(serverDb *db, robj *key, int flags) { server.current_client->cmd->proc != touchCommand) flags |= LOOKUP_NOTOUCH; if (!hasActiveChildProcess() && !(flags & LOOKUP_NOTOUCH)) { + if (!canUseSharedObject() && val->refcount == OBJ_SHARED_REFCOUNT) { + val = dupStringObject(val); + kvstoreDictSetVal(db->keys, getKeySlot(key->ptr), de, val); + } if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { updateLFU(val); } else { diff --git a/src/object.c b/src/object.c index 1a335edd6d..ea3538b9ee 100644 --- a/src/object.c +++ b/src/object.c @@ -647,8 +647,10 @@ robj *tryObjectEncodingEx(robj *o, int try_trim) { * Note that we avoid using shared integers when maxmemory is used * because every object needs to have a private LRU field for the LRU * algorithm to work well. 
*/ - if ((server.maxmemory == 0 || !(server.maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS)) && value >= 0 && - value < OBJ_SHARED_INTEGERS) { + if (canUseSharedObject() && + value >= 0 && + value < OBJ_SHARED_INTEGERS) + { decrRefCount(o); return shared.integers[value]; } else { diff --git a/src/server.h b/src/server.h index bac4c33c96..8f273db1e8 100644 --- a/src/server.h +++ b/src/server.h @@ -2855,6 +2855,9 @@ int collateStringObjects(const robj *a, const robj *b); int equalStringObjects(robj *a, robj *b); unsigned long long estimateObjectIdleTime(robj *o); void trimStringObjectIfNeeded(robj *o, int trim_small_values); +static inline int canUseSharedObject(void) { + return server.maxmemory == 0 || !(server.maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS); +} #define sdsEncodedObject(objptr) (objptr->encoding == OBJ_ENCODING_RAW || objptr->encoding == OBJ_ENCODING_EMBSTR) /* Synchronous I/O with timeout */ diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl index 92e68ac1ed..ee1232796d 100644 --- a/tests/unit/maxmemory.tcl +++ b/tests/unit/maxmemory.tcl @@ -168,6 +168,21 @@ start_server {tags {"maxmemory external:skip"}} { r config set maxmemory 0 } + test "Shared integers are unshared with maxmemory and LRU policy" { + r set a 1 + r set b 1 + assert_refcount_morethan a 1 + assert_refcount_morethan b 1 + r config set maxmemory 1073741824 + r config set maxmemory-policy allkeys-lru + r get a + assert_refcount 1 a + r config set maxmemory-policy volatile-lru + r get b + assert_refcount 1 b + r config set maxmemory 0 + } + foreach policy { allkeys-random allkeys-lru allkeys-lfu volatile-lru volatile-lfu volatile-random volatile-ttl } { From 30f277a86d35e3f70314f50ca7b59562838bd429 Mon Sep 17 00:00:00 2001 From: Ping Xie Date: Sun, 2 Jun 2024 13:15:08 -0700 Subject: [PATCH 20/42] Enable debug asserts for cluster and sentinel tests (#588) Also make `enable-debug-assert` an immutable config Address review comments in #584 --------- 
Signed-off-by: Ping Xie --- .gitignore | 1 + src/config.c | 2 +- src/object.c | 5 +---- src/server.h | 3 ++- tests/instances.tcl | 2 ++ 5 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index e745f76a04..c660bc3600 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,4 @@ redis.code-workspace .cache .cscope* .swp +tests/cluster/tmp/* diff --git a/src/config.c b/src/config.c index 7dc86fbf39..3cd0cc5def 100644 --- a/src/config.c +++ b/src/config.c @@ -3054,6 +3054,7 @@ standardConfig static_configs[] = { createBoolConfig("aof-disable-auto-gc", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, server.aof_disable_auto_gc, 0, NULL, updateAofAutoGCEnabled), createBoolConfig("replica-ignore-disk-write-errors", NULL, MODIFIABLE_CONFIG, server.repl_ignore_disk_write_error, 0, NULL, NULL), createBoolConfig("extended-redis-compatibility", NULL, MODIFIABLE_CONFIG, server.extended_redis_compat, 0, NULL, updateExtendedRedisCompat), + createBoolConfig("enable-debug-assert", NULL, IMMUTABLE_CONFIG | HIDDEN_CONFIG, server.enable_debug_assert, 0, NULL, NULL), /* String Configs */ createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL), @@ -3191,7 +3192,6 @@ standardConfig static_configs[] = { createTimeTConfig("repl-backlog-ttl", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.repl_backlog_time_limit, 60 * 60, INTEGER_CONFIG, NULL, NULL), /* Default: 1 hour */ createOffTConfig("auto-aof-rewrite-min-size", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.aof_rewrite_min_size, 64 * 1024 * 1024, MEMORY_CONFIG, NULL, NULL), createOffTConfig("loading-process-events-interval-bytes", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 1024, INT_MAX, server.loading_process_events_interval_bytes, 1024 * 1024 * 2, INTEGER_CONFIG, NULL, NULL), - createBoolConfig("enable-debug-assert", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, server.enable_debug_assert, 0, NULL, NULL), /* Tls configs */ createIntConfig("tls-port", NULL, 
MODIFIABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, applyTLSPort), /* TCP port. */ diff --git a/src/object.c b/src/object.c index ea3538b9ee..814b8de3a9 100644 --- a/src/object.c +++ b/src/object.c @@ -647,10 +647,7 @@ robj *tryObjectEncodingEx(robj *o, int try_trim) { * Note that we avoid using shared integers when maxmemory is used * because every object needs to have a private LRU field for the LRU * algorithm to work well. */ - if (canUseSharedObject() && - value >= 0 && - value < OBJ_SHARED_INTEGERS) - { + if (canUseSharedObject() && value >= 0 && value < OBJ_SHARED_INTEGERS) { decrRefCount(o); return shared.integers[value]; } else { diff --git a/src/server.h b/src/server.h index 8f273db1e8..66f2c81c4d 100644 --- a/src/server.h +++ b/src/server.h @@ -701,10 +701,11 @@ typedef enum { #define serverAssert(_e) (likely(_e) ? (void)0 : (_serverAssert(#_e, __FILE__, __LINE__), valkey_unreachable())) #define serverPanic(...) _serverPanic(__FILE__, __LINE__, __VA_ARGS__), valkey_unreachable() -/* The following macro provides a conditional assertion that is only executed +/* The following macros provide a conditional assertion that is only executed * when the server config 'enable-debug-assert' is true. This is useful for adding * assertions that are too computationally expensive or risky to run in normal * operation, but are valuable for debugging or testing. */ +#define debugServerAssert(...) (server.enable_debug_assert ? serverAssert(__VA_ARGS__) : (void)0) #define debugServerAssertWithInfo(...) (server.enable_debug_assert ? 
serverAssertWithInfo(__VA_ARGS__) : (void)0) /* latency histogram per command init settings */ diff --git a/tests/instances.tcl b/tests/instances.tcl index 3b487423f8..782804ddae 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -118,6 +118,8 @@ proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} { puts $cfg "repl-diskless-sync-delay 0" puts $cfg "dir ./$dirname" puts $cfg "logfile log.txt" + puts $cfg "enable-debug-assert yes" + # Add additional config files foreach directive $conf { puts $cfg $directive From 28e055af0b94ce28ba22a17fc0494e94c8e9081e Mon Sep 17 00:00:00 2001 From: naglera <58042354+naglera@users.noreply.github.com> Date: Mon, 3 Jun 2024 06:53:39 +0300 Subject: [PATCH 21/42] Deflake chained replicas disconnect (#574) Deflake chained replicas disconnect when replica re-connect with the same master. sync_partial_ok counter might get incremented if replica timed out during test. Signed-off-by: naglera --- tests/integration/psync2-pingoff.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/psync2-pingoff.tcl b/tests/integration/psync2-pingoff.tcl index 87c02bb067..5a541ee353 100644 --- a/tests/integration/psync2-pingoff.tcl +++ b/tests/integration/psync2-pingoff.tcl @@ -195,6 +195,7 @@ start_server {} { set R_host($j) [srv [expr 0-$j] host] set R_port($j) [srv [expr 0-$j] port] $R($j) CONFIG SET repl-ping-replica-period 1 + $R($j) config set repl-timeout 300 } test "Chained replicas disconnect when replica re-connect with the same master" { From 417660449f9c1fce998e1563f30f7bfd9cb5197f Mon Sep 17 00:00:00 2001 From: poiuj <1099644+poiuj@users.noreply.github.com> Date: Mon, 3 Jun 2024 06:55:54 +0300 Subject: [PATCH 22/42] Adjust sds types (#502) sds type should be determined based on the size of the underlying buffer, not the logical length of the sds. Currently we truncate the alloc field in case buffer is larger than we can handle. 
It leads to a mismatch between alloc field and the actual size of the buffer. Even considering that alloc doesn't include header size and the null terminator. It also leads to a waste of memory with jemalloc. For example, let's consider creation of sds of length 253. According to the length, the appropriate type is SDS_TYPE_8. But we allocate `253 + sizeof(struct sdshdr8) + 1` bytes, which sums to 257 bytes. In this case jemalloc allocates buffer from the next size bucket. With current configuration on Linux it's 320 bytes. So we end up with 320 bytes buffer, while we can't address more than 255. The same happens with other types and length close enough to the appropriate powers of 2. The downside of the adjustment is that with allocators that do not allocate larger than requested chunks (like GNU allocator), we switch to a larger type "too early". It leads to small waste of memory. Specifically: sds of length 31 takes 35 bytes instead of 33 (2 bytes wasted) sds of length 255 takes 261 bytes instead of 259 (2 bytes wasted) sds of length 65,535 takes 65,545 bytes instead of 65,541 (4 bytes wasted) sds of length 4,294,967,295 takes 4,294,967,313 bytes instead of 4,294,967,305 (8 bytes wasted) --------- Signed-off-by: Vadym Khoptynets --- src/sds.c | 85 +++++++++++++++++++++++++++++++------------ src/sdsalloc.h | 1 + src/unit/test_files.h | 4 +- src/unit/test_sds.c | 79 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 145 insertions(+), 24 deletions(-) diff --git a/src/sds.c b/src/sds.c index d957ef4bab..6793c46caa 100644 --- a/src/sds.c +++ b/src/sds.c @@ -55,10 +55,10 @@ static inline int sdsHdrSize(char type) { static inline char sdsReqType(size_t string_size) { if (string_size < 1 << 5) return SDS_TYPE_5; - if (string_size < 1 << 8) return SDS_TYPE_8; - if (string_size < 1 << 16) return SDS_TYPE_16; + if (string_size <= (1 << 8) - sizeof(struct sdshdr8) - 1) return SDS_TYPE_8; + if (string_size <= (1 << 16) - sizeof(struct sdshdr16) - 1) return 
SDS_TYPE_16; #if (LONG_MAX == LLONG_MAX) - if (string_size < 1ll << 32) return SDS_TYPE_32; + if (string_size <= (1ll << 32) - sizeof(struct sdshdr32) - 1) return SDS_TYPE_32; return SDS_TYPE_64; #else return SDS_TYPE_32; @@ -75,6 +75,16 @@ static inline size_t sdsTypeMaxSize(char type) { return -1; /* this is equivalent to the max SDS_TYPE_64 or SDS_TYPE_32 */ } +static inline int adjustTypeIfNeeded(char *type, int *hdrlen, size_t bufsize) { + size_t usable = bufsize - *hdrlen - 1; + if (*type != SDS_TYPE_5 && usable > sdsTypeMaxSize(*type)) { + *type = sdsReqType(usable); + *hdrlen = sdsHdrSize(*type); + return 1; + } + return 0; +} + /* Create a new sds string with the content specified by the 'init' pointer * and 'initlen'. * If NULL is used for 'init' the string is initialized with zero bytes. @@ -97,19 +107,23 @@ sds _sdsnewlen(const void *init, size_t initlen, int trymalloc) { if (type == SDS_TYPE_5 && initlen == 0) type = SDS_TYPE_8; int hdrlen = sdsHdrSize(type); unsigned char *fp; /* flags pointer. */ - size_t usable; + size_t bufsize, usable; assert(initlen + hdrlen + 1 > initlen); /* Catch size_t overflow */ - sh = trymalloc ? s_trymalloc_usable(hdrlen + initlen + 1, &usable) : s_malloc_usable(hdrlen + initlen + 1, &usable); + sh = trymalloc ? 
s_trymalloc_usable(hdrlen + initlen + 1, &bufsize) + : s_malloc_usable(hdrlen + initlen + 1, &bufsize); if (sh == NULL) return NULL; if (init == SDS_NOINIT) init = NULL; else if (!init) memset(sh, 0, hdrlen + initlen + 1); + + adjustTypeIfNeeded(&type, &hdrlen, bufsize); + usable = bufsize - hdrlen - 1; + s = (char *)sh + hdrlen; fp = ((unsigned char *)s) - 1; - usable = usable - hdrlen - 1; - if (usable > sdsTypeMaxSize(type)) usable = sdsTypeMaxSize(type); + switch (type) { case SDS_TYPE_5: { *fp = type | (initlen << SDS_TYPE_BITS); @@ -118,6 +132,7 @@ sds _sdsnewlen(const void *init, size_t initlen, int trymalloc) { case SDS_TYPE_8: { SDS_HDR_VAR(8, s); sh->len = initlen; + assert(usable <= sdsTypeMaxSize(type)); sh->alloc = usable; *fp = type; break; @@ -125,6 +140,7 @@ sds _sdsnewlen(const void *init, size_t initlen, int trymalloc) { case SDS_TYPE_16: { SDS_HDR_VAR(16, s); sh->len = initlen; + assert(usable <= sdsTypeMaxSize(type)); sh->alloc = usable; *fp = type; break; @@ -132,6 +148,7 @@ sds _sdsnewlen(const void *init, size_t initlen, int trymalloc) { case SDS_TYPE_32: { SDS_HDR_VAR(32, s); sh->len = initlen; + assert(usable <= sdsTypeMaxSize(type)); sh->alloc = usable; *fp = type; break; @@ -139,6 +156,7 @@ sds _sdsnewlen(const void *init, size_t initlen, int trymalloc) { case SDS_TYPE_64: { SDS_HDR_VAR(64, s); sh->len = initlen; + assert(usable <= sdsTypeMaxSize(type)); sh->alloc = usable; *fp = type; break; @@ -226,7 +244,8 @@ sds _sdsMakeRoomFor(sds s, size_t addlen, int greedy) { size_t len, newlen, reqlen; char type, oldtype = s[-1] & SDS_TYPE_MASK; int hdrlen; - size_t usable; + size_t bufsize, usable; + int use_realloc; /* Return ASAP if there is enough space left. 
*/ if (avail >= addlen) return s; @@ -251,23 +270,32 @@ sds _sdsMakeRoomFor(sds s, size_t addlen, int greedy) { hdrlen = sdsHdrSize(type); assert(hdrlen + newlen + 1 > reqlen); /* Catch size_t overflow */ - if (oldtype == type) { - newsh = s_realloc_usable(sh, hdrlen + newlen + 1, &usable); + use_realloc = (oldtype == type); + if (use_realloc) { + newsh = s_realloc_usable(sh, hdrlen + newlen + 1, &bufsize); if (newsh == NULL) return NULL; s = (char *)newsh + hdrlen; + + if (adjustTypeIfNeeded(&type, &hdrlen, bufsize)) { + memmove((char *)newsh + hdrlen, s, len + 1); + s = (char *)newsh + hdrlen; + s[-1] = type; + sdssetlen(s, len); + } } else { /* Since the header size changes, need to move the string forward, * and can't use realloc */ - newsh = s_malloc_usable(hdrlen + newlen + 1, &usable); + newsh = s_malloc_usable(hdrlen + newlen + 1, &bufsize); if (newsh == NULL) return NULL; + adjustTypeIfNeeded(&type, &hdrlen, bufsize); memcpy((char *)newsh + hdrlen, s, len + 1); s_free(sh); s = (char *)newsh + hdrlen; s[-1] = type; sdssetlen(s, len); } - usable = usable - hdrlen - 1; - if (usable > sdsTypeMaxSize(type)) usable = sdsTypeMaxSize(type); + usable = bufsize - hdrlen - 1; + assert(type == SDS_TYPE_5 || usable <= sdsTypeMaxSize(type)); sdssetalloc(s, usable); return s; } @@ -303,7 +331,7 @@ sds sdsRemoveFreeSpace(sds s, int would_regrow) { * allocation size, this is done in order to avoid repeated calls to this * function when the caller detects that it has excess space. */ sds sdsResize(sds s, size_t size, int would_regrow) { - void *sh, *newsh; + void *sh, *newsh = NULL; char type, oldtype = s[-1] & SDS_TYPE_MASK; int hdrlen, oldhdrlen = sdsHdrSize(oldtype); size_t len = sdslen(s); @@ -331,7 +359,8 @@ sds sdsResize(sds s, size_t size, int would_regrow) { * type. */ int use_realloc = (oldtype == type || (type < oldtype && type > SDS_TYPE_8)); size_t newlen = use_realloc ? 
oldhdrlen + size + 1 : hdrlen + size + 1; - size_t newsize = 0; + size_t bufsize = 0; + size_t newsize; if (use_realloc) { int alloc_already_optimal = 0; @@ -340,27 +369,37 @@ sds sdsResize(sds s, size_t size, int would_regrow) { * We aim to avoid calling realloc() when using Jemalloc if there is no * change in the allocation size, as it incurs a cost even if the * allocation size stays the same. */ - newsize = zmalloc_size(sh); - alloc_already_optimal = (je_nallocx(newlen, 0) == newsize); + bufsize = zmalloc_size(sh); + alloc_already_optimal = (je_nallocx(newlen, 0) == bufsize); #endif if (!alloc_already_optimal) { - newsh = s_realloc_usable(sh, newlen, &newsize); + newsh = s_realloc_usable(sh, newlen, &bufsize); if (newsh == NULL) return NULL; s = (char *)newsh + oldhdrlen; - newsize -= (oldhdrlen + 1); + + if (adjustTypeIfNeeded(&oldtype, &oldhdrlen, bufsize)) { + memmove((char *)newsh + oldhdrlen, s, len + 1); + s = (char *)newsh + oldhdrlen; + s[-1] = oldtype; + sdssetlen(s, len); + } } + newsize = bufsize - oldhdrlen - 1; + assert(oldtype == SDS_TYPE_5 || newsize <= sdsTypeMaxSize(oldtype)); } else { - newsh = s_malloc_usable(newlen, &newsize); + newsh = s_malloc_usable(newlen, &bufsize); if (newsh == NULL) return NULL; - memcpy((char *)newsh + hdrlen, s, len); + adjustTypeIfNeeded(&type, &hdrlen, bufsize); + memcpy((char *)newsh + hdrlen, s, len + 1); s_free(sh); s = (char *)newsh + hdrlen; s[-1] = type; - newsize -= (hdrlen + 1); + newsize = bufsize - hdrlen - 1; + assert(type == SDS_TYPE_5 || newsize <= sdsTypeMaxSize(type)); } + s[len] = '\0'; sdssetlen(s, len); - if (newsize > sdsTypeMaxSize(s[-1])) newsize = sdsTypeMaxSize(s[-1]); sdssetalloc(s, newsize); return s; } diff --git a/src/sdsalloc.h b/src/sdsalloc.h index 1252c7af81..6644eb3c83 100644 --- a/src/sdsalloc.h +++ b/src/sdsalloc.h @@ -49,5 +49,6 @@ #define s_realloc_usable zrealloc_usable #define s_trymalloc_usable ztrymalloc_usable #define s_tryrealloc_usable ztryrealloc_usable +#define 
s_malloc_size zmalloc_size #endif diff --git a/src/unit/test_files.h b/src/unit/test_files.h index 4a67f67052..a087e6fe44 100644 --- a/src/unit/test_files.h +++ b/src/unit/test_files.h @@ -24,6 +24,8 @@ int test_kvstoreIteratorRemoveAllKeysDeleteEmptyDict(int argc, char **argv, int int test_kvstoreDictIteratorRemoveAllKeysNoDeleteEmptyDict(int argc, char **argv, int flags); int test_kvstoreDictIteratorRemoveAllKeysDeleteEmptyDict(int argc, char **argv, int flags); int test_sds(int argc, char **argv, int flags); +int test_typesAndAllocSize(int argc, char **argv, int flags); +int test_sdsHeaderSizes(int argc, char **argv, int flags); int test_sha1(int argc, char **argv, int flags); int test_string2ll(int argc, char **argv, int flags); int test_string2l(int argc, char **argv, int flags); @@ -77,7 +79,7 @@ unitTest __test_crc64combine_c[] = {{"test_crc64combine", test_crc64combine}, {N unitTest __test_endianconv_c[] = {{"test_endianconv", test_endianconv}, {NULL, NULL}}; unitTest __test_intset_c[] = {{"test_intsetValueEncodings", test_intsetValueEncodings}, {"test_intsetBasicAdding", test_intsetBasicAdding}, {"test_intsetLargeNumberRandomAdd", test_intsetLargeNumberRandomAdd}, {"test_intsetUpgradeFromint16Toint32", test_intsetUpgradeFromint16Toint32}, {"test_intsetUpgradeFromint16Toint64", test_intsetUpgradeFromint16Toint64}, {"test_intsetUpgradeFromint32Toint64", test_intsetUpgradeFromint32Toint64}, {"test_intsetStressLookups", test_intsetStressLookups}, {"test_intsetStressAddDelete", test_intsetStressAddDelete}, {NULL, NULL}}; unitTest __test_kvstore_c[] = {{"test_kvstoreAdd16Keys", test_kvstoreAdd16Keys}, {"test_kvstoreIteratorRemoveAllKeysNoDeleteEmptyDict", test_kvstoreIteratorRemoveAllKeysNoDeleteEmptyDict}, {"test_kvstoreIteratorRemoveAllKeysDeleteEmptyDict", test_kvstoreIteratorRemoveAllKeysDeleteEmptyDict}, {"test_kvstoreDictIteratorRemoveAllKeysNoDeleteEmptyDict", test_kvstoreDictIteratorRemoveAllKeysNoDeleteEmptyDict}, 
{"test_kvstoreDictIteratorRemoveAllKeysDeleteEmptyDict", test_kvstoreDictIteratorRemoveAllKeysDeleteEmptyDict}, {NULL, NULL}}; -unitTest __test_sds_c[] = {{"test_sds", test_sds}, {NULL, NULL}}; +unitTest __test_sds_c[] = {{"test_sds", test_sds}, {"test_typesAndAllocSize", test_typesAndAllocSize}, {"test_sdsHeaderSizes", test_sdsHeaderSizes}, {NULL, NULL}}; unitTest __test_sha1_c[] = {{"test_sha1", test_sha1}, {NULL, NULL}}; unitTest __test_util_c[] = {{"test_string2ll", test_string2ll}, {"test_string2l", test_string2l}, {"test_ll2string", test_ll2string}, {"test_ld2string", test_ld2string}, {"test_fixedpoint_d2string", test_fixedpoint_d2string}, {"test_version2num", test_version2num}, {"test_reclaimFilePageCache", test_reclaimFilePageCache}, {NULL, NULL}}; unitTest __test_ziplist_c[] = {{"test_ziplistCreateIntList", test_ziplistCreateIntList}, {"test_ziplistPop", test_ziplistPop}, {"test_ziplistGetElementAtIndex3", test_ziplistGetElementAtIndex3}, {"test_ziplistGetElementOutOfRange", test_ziplistGetElementOutOfRange}, {"test_ziplistGetLastElement", test_ziplistGetLastElement}, {"test_ziplistGetFirstElement", test_ziplistGetFirstElement}, {"test_ziplistGetElementOutOfRangeReverse", test_ziplistGetElementOutOfRangeReverse}, {"test_ziplistIterateThroughFullList", test_ziplistIterateThroughFullList}, {"test_ziplistIterateThroughListFrom1ToEnd", test_ziplistIterateThroughListFrom1ToEnd}, {"test_ziplistIterateThroughListFrom2ToEnd", test_ziplistIterateThroughListFrom2ToEnd}, {"test_ziplistIterateThroughStartOutOfRange", test_ziplistIterateThroughStartOutOfRange}, {"test_ziplistIterateBackToFront", test_ziplistIterateBackToFront}, {"test_ziplistIterateBackToFrontDeletingAllItems", test_ziplistIterateBackToFrontDeletingAllItems}, {"test_ziplistDeleteInclusiveRange0To0", test_ziplistDeleteInclusiveRange0To0}, {"test_ziplistDeleteInclusiveRange0To1", test_ziplistDeleteInclusiveRange0To1}, {"test_ziplistDeleteInclusiveRange1To2", test_ziplistDeleteInclusiveRange1To2}, 
{"test_ziplistDeleteWithStartIndexOutOfRange", test_ziplistDeleteWithStartIndexOutOfRange}, {"test_ziplistDeleteWithNumOverflow", test_ziplistDeleteWithNumOverflow}, {"test_ziplistDeleteFooWhileIterating", test_ziplistDeleteFooWhileIterating}, {"test_ziplistReplaceWithSameSize", test_ziplistReplaceWithSameSize}, {"test_ziplistReplaceWithDifferentSize", test_ziplistReplaceWithDifferentSize}, {"test_ziplistRegressionTestForOver255ByteStrings", test_ziplistRegressionTestForOver255ByteStrings}, {"test_ziplistRegressionTestDeleteNextToLastEntries", test_ziplistRegressionTestDeleteNextToLastEntries}, {"test_ziplistCreateLongListAndCheckIndices", test_ziplistCreateLongListAndCheckIndices}, {"test_ziplistCompareStringWithZiplistEntries", test_ziplistCompareStringWithZiplistEntries}, {"test_ziplistMergeTest", test_ziplistMergeTest}, {"test_ziplistStressWithRandomPayloadsOfDifferentEncoding", test_ziplistStressWithRandomPayloadsOfDifferentEncoding}, {"test_ziplistCascadeUpdateEdgeCases", test_ziplistCascadeUpdateEdgeCases}, {"test_ziplistInsertEdgeCase", test_ziplistInsertEdgeCase}, {"test_ziplistStressWithVariableSize", test_ziplistStressWithVariableSize}, {"test_BenchmarkziplistFind", test_BenchmarkziplistFind}, {"test_BenchmarkziplistIndex", test_BenchmarkziplistIndex}, {"test_BenchmarkziplistValidateIntegrity", test_BenchmarkziplistValidateIntegrity}, {"test_BenchmarkziplistCompareWithString", test_BenchmarkziplistCompareWithString}, {"test_BenchmarkziplistCompareWithNumber", test_BenchmarkziplistCompareWithNumber}, {"test_ziplistStress__ziplistCascadeUpdate", test_ziplistStress__ziplistCascadeUpdate}, {NULL, NULL}}; diff --git a/src/unit/test_sds.c b/src/unit/test_sds.c index adf3d37f2c..19b5c7d73f 100644 --- a/src/unit/test_sds.c +++ b/src/unit/test_sds.c @@ -4,6 +4,7 @@ #include "test_help.h" #include "../sds.h" +#include "../sdsalloc.h" static sds sdsTestTemplateCallback(sds varname, void *arg) { UNUSED(arg); @@ -247,5 +248,83 @@ int test_sds(int argc, char **argv, 
int flags) { TEST_ASSERT_MESSAGE("sdsReszie() crop strlen", strlen(x) == 4); TEST_ASSERT_MESSAGE("sdsReszie() crop alloc", sdsalloc(x) >= 4); sdsfree(x); + + return 0; +} + +int test_typesAndAllocSize(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + + sds x = sdsnewlen(NULL, 31); + TEST_ASSERT_MESSAGE("len 31 type", (x[-1] & SDS_TYPE_MASK) == SDS_TYPE_5); + sdsfree(x); + + x = sdsnewlen(NULL, 32); + TEST_ASSERT_MESSAGE("len 32 type", (x[-1] & SDS_TYPE_MASK) >= SDS_TYPE_8); + TEST_ASSERT_MESSAGE("len 32 sdsAllocSize", sdsAllocSize(x) == s_malloc_size(sdsAllocPtr(x))); + sdsfree(x); + + x = sdsnewlen(NULL, 252); + TEST_ASSERT_MESSAGE("len 252 type", (x[-1] & SDS_TYPE_MASK) >= SDS_TYPE_8); + TEST_ASSERT_MESSAGE("len 252 sdsAllocSize", sdsAllocSize(x) == s_malloc_size(sdsAllocPtr(x))); + sdsfree(x); + + x = sdsnewlen(NULL, 253); + TEST_ASSERT_MESSAGE("len 253 type", (x[-1] & SDS_TYPE_MASK) == SDS_TYPE_16); + TEST_ASSERT_MESSAGE("len 253 sdsAllocSize", sdsAllocSize(x) == s_malloc_size(sdsAllocPtr(x))); + sdsfree(x); + + x = sdsnewlen(NULL, 65530); + TEST_ASSERT_MESSAGE("len 65530 type", (x[-1] & SDS_TYPE_MASK) >= SDS_TYPE_16); + TEST_ASSERT_MESSAGE("len 65530 sdsAllocSize", sdsAllocSize(x) == s_malloc_size(sdsAllocPtr(x))); + sdsfree(x); + + x = sdsnewlen(NULL, 65531); + TEST_ASSERT_MESSAGE("len 65531 type", (x[-1] & SDS_TYPE_MASK) >= SDS_TYPE_32); + TEST_ASSERT_MESSAGE("len 65531 sdsAllocSize", sdsAllocSize(x) == s_malloc_size(sdsAllocPtr(x))); + sdsfree(x); + +#if (LONG_MAX == LLONG_MAX) + if (flags & UNIT_TEST_LARGE_MEMORY) { + x = sdsnewlen(NULL, 4294967286); + TEST_ASSERT_MESSAGE("len 4294967286 type", (x[-1] & SDS_TYPE_MASK) >= SDS_TYPE_32); + TEST_ASSERT_MESSAGE("len 4294967286 sdsAllocSize", sdsAllocSize(x) == s_malloc_size(sdsAllocPtr(x))); + sdsfree(x); + + x = sdsnewlen(NULL, 4294967287); + TEST_ASSERT_MESSAGE("len 4294967287 type", (x[-1] & SDS_TYPE_MASK) == SDS_TYPE_64); + TEST_ASSERT_MESSAGE("len 4294967287 
sdsAllocSize", sdsAllocSize(x) == s_malloc_size(sdsAllocPtr(x))); + sdsfree(x); + } +#endif + + return 0; +} + +/* The test verifies that we can adjust SDS types if an allocator returned + * larger buffer. The maximum length for type SDS_TYPE_X is + * 2^X - header_size(SDS_TYPE_X) - 1. The maximum value to be stored in alloc + * field is 2^X - 1. When allocated buffer is larger than + * 2^X + header_size(SDS_TYPE_X), we "move" to a larger type SDS_TYPE_Y. To be + * sure SDS_TYPE_Y header fits into 2^X + header_size(SDS_TYPE_X) + 1 bytes, the + * difference between header sizes must be smaller than + * header_size(SDS_TYPE_X) + 1. + * We ignore SDS_TYPE_5 as it doesn't have alloc field. */ +int test_sdsHeaderSizes(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + + TEST_ASSERT_MESSAGE("can't always adjust SDS_TYPE_8 with SDS_TYPE_16", + sizeof(struct sdshdr16) <= 2 * sizeof(struct sdshdr8) + 1); + TEST_ASSERT_MESSAGE("can't always adjust SDS_TYPE_16 with SDS_TYPE_32", + sizeof(struct sdshdr32) <= 2 * sizeof(struct sdshdr16) + 1); +#if (LONG_MAX == LLONG_MAX) + TEST_ASSERT_MESSAGE("can't always adjust SDS_TYPE_32 with SDS_TYPE_64", + sizeof(struct sdshdr64) <= 2 * sizeof(struct sdshdr32) + 1); +#endif + return 0; } From b72e43ed165b86bbd61061e181bae11dcc617ee0 Mon Sep 17 00:00:00 2001 From: uriyage <78144248+uriyage@users.noreply.github.com> Date: Mon, 3 Jun 2024 21:15:28 +0300 Subject: [PATCH 23/42] Adjust query buffer resized correctly test to non-jemalloc allocators. (#593) Test `query buffer resized correctly` start to fail (https://github.com/valkey-io/valkey/actions/runs/9278013807) with non-jemalloc allocators after https://github.com/valkey-io/valkey/pull/258 PR. 
With Jemalloc we allocate ~20K for the query buffer, in the test we read 1 byte in the first read, in the second read we make sure we have at least 16KB free place in the query buffer and we have as Jemalloc allocated 20KB, But with non jemalloc we allocate in the first read exactly 16KB. in the second read we check and see that we don't have 16KB free space as we already read 1 byte hence we reallocate this time greedly (*2 of the requested size of 16KB+1) hence the test condition that the querybuf size is < 32KB is no longer true The `query buffer resized correctly test` starts [failing](https://github.com/valkey-io/valkey/actions/runs/9278013807) with non-jemalloc allocators after PR #258 . With jemalloc, we allocate ~20KB for the query buffer. In the test, we read 1 byte initially and then ensure there is at least 16KB of free space in the buffer for the second read, which is satisfied by jemalloc's 20KB allocation. However, with non-jemalloc allocators, the first read allocates exactly 16KB. When we check again, we don't have 16KB free due to the 1 byte already read. This triggers a greedy reallocation (doubling the requested size of 16KB+1), causing the query buffer size to exceed the 32KB limit, thus failing the test condition. This PR adjusted the test query buffer upper limit to be 32KB +2. 
Signed-off-by: Uri Yagelnik --- tests/unit/querybuf.tcl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/querybuf.tcl b/tests/unit/querybuf.tcl index 66942a5bd1..7e04b87905 100644 --- a/tests/unit/querybuf.tcl +++ b/tests/unit/querybuf.tcl @@ -45,7 +45,8 @@ start_server {tags {"querybuf slow"}} { set orig_test_client_qbuf [client_query_buffer test_client] # Make sure query buff has less than the peak resize threshold (PROTO_RESIZE_THRESHOLD) 32k # but at least the basic IO reading buffer size (PROTO_IOBUF_LEN) 16k - assert {$orig_test_client_qbuf >= 16384 && $orig_test_client_qbuf < 32768} + set MAX_QUERY_BUFFER_SIZE [expr 32768 + 2] ; # 32k + 2, allowing for potential greedy allocation of (16k + 1) * 2 bytes for the query buffer. + assert {$orig_test_client_qbuf >= 16384 && $orig_test_client_qbuf <= $MAX_QUERY_BUFFER_SIZE} # Check that the initial query buffer is resized after 2 sec wait_for_condition 1000 10 { From b95e7c384f61be9b493b98835dbb5038f90b92f6 Mon Sep 17 00:00:00 2001 From: Madelyn Olson Date: Mon, 3 Jun 2024 11:49:15 -0700 Subject: [PATCH 24/42] Skip tls for xgroup read regression since it doesn't matter (#595) "Client blocked on XREADGROUP while stream's slot is migrated" uses the migrate command, which requires special handling for TLS and non-tls. This was not being handled, so was throwing an error. 
Signed-off-by: Madelyn Olson --- tests/unit/cluster/slot-migration.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/cluster/slot-migration.tcl b/tests/unit/cluster/slot-migration.tcl index d4f0d43b3b..5aec2ce14e 100644 --- a/tests/unit/cluster/slot-migration.tcl +++ b/tests/unit/cluster/slot-migration.tcl @@ -423,7 +423,7 @@ start_cluster 3 3 {tags {external:skip cluster} overrides {cluster-allow-replica } } -start_cluster 2 0 {tags {external:skip cluster regression} overrides {cluster-allow-replica-migration no cluster-node-timeout 1000} } { +start_cluster 2 0 {tags {tls:skip external:skip cluster regression} overrides {cluster-allow-replica-migration no cluster-node-timeout 1000} } { # Issue #563 regression test test "Client blocked on XREADGROUP while stream's slot is migrated" { set stream_name aga From 0700c441c60dc150157a82af057e71b6a6234326 Mon Sep 17 00:00:00 2001 From: Eran Liberty Date: Mon, 3 Jun 2024 22:22:06 +0300 Subject: [PATCH 25/42] Remove unused valDup (#443) Remove the unused value duplicate API from dict. It's unused in the codebase and introduces unnecessary overhead. 
--------- Signed-off-by: Eran Liberty --- src/cluster_legacy.c | 3 --- src/config.c | 2 -- src/dict.c | 5 ++++- src/dict.h | 1 - src/eval.c | 1 - src/expire.c | 1 - src/functions.c | 5 ----- src/kvstore.c | 5 +++-- src/latency.c | 1 - src/module.c | 2 -- src/sentinel.c | 3 --- src/server.c | 17 ----------------- src/t_zset.c | 1 - src/unit/test_kvstore.c | 2 +- src/valkey-benchmark.c | 1 - src/valkey-cli.c | 4 ---- 16 files changed, 8 insertions(+), 46 deletions(-) diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index 0de6351e90..ee328e558e 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -144,7 +144,6 @@ static inline int defaultClientPort(void) { dictType clusterNodesDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -157,7 +156,6 @@ dictType clusterNodesDictType = { dictType clusterNodesBlackListDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -168,7 +166,6 @@ dictType clusterNodesBlackListDictType = { dictType clusterSdsToListType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor, /* val destructor */ diff --git a/src/config.c b/src/config.c index 3cd0cc5def..e46d01cf6f 100644 --- a/src/config.c +++ b/src/config.c @@ -986,7 +986,6 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state); dictType optionToLineDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor, /* val destructor */ @@ -996,7 +995,6 @@ dictType optionToLineDictType = { dictType optionSetDictType = { 
dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ diff --git a/src/dict.c b/src/dict.c index 119c60ab57..ab53b4922c 100644 --- a/src/dict.c +++ b/src/dict.c @@ -48,6 +48,8 @@ #include "serverassert.h" #include "monotonic.h" +#define UNUSED(V) ((void)V) + /* Using dictSetResizeEnabled() we make possible to disable * resizing and rehashing of the hash table as needed. This is very important * for us, as we use copy-on-write and don't want to move too much memory @@ -800,8 +802,9 @@ void dictSetKey(dict *d, dictEntry *de, void *key) { } void dictSetVal(dict *d, dictEntry *de, void *val) { + UNUSED(d); assert(entryHasValue(de)); - de->v.val = d->type->valDup ? d->type->valDup(d, val) : val; + de->v.val = val; } void dictSetSignedIntegerVal(dictEntry *de, int64_t val) { diff --git a/src/dict.h b/src/dict.h index 7ba22edf1e..723e5a54c2 100644 --- a/src/dict.h +++ b/src/dict.h @@ -54,7 +54,6 @@ typedef struct dictType { /* Callbacks */ uint64_t (*hashFunction)(const void *key); void *(*keyDup)(dict *d, const void *key); - void *(*valDup)(dict *d, const void *obj); int (*keyCompare)(dict *d, const void *key1, const void *key2); void (*keyDestructor)(dict *d, void *key); void (*valDestructor)(dict *d, void *obj); diff --git a/src/eval.c b/src/eval.c index d9c2c183d6..10372be329 100644 --- a/src/eval.c +++ b/src/eval.c @@ -71,7 +71,6 @@ static uint64_t dictStrCaseHash(const void *key) { dictType shaScriptObjectDictType = { dictStrCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictLuaScriptDestructor, /* val destructor */ diff --git a/src/expire.c b/src/expire.c index 8d81473209..9261fbee28 100644 --- a/src/expire.c +++ b/src/expire.c @@ -470,7 +470,6 @@ void rememberSlaveKeyWithExpire(serverDb *db, robj *key) { static dictType dt = { 
dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ diff --git a/src/functions.c b/src/functions.c index 3076f3b90a..08d869f026 100644 --- a/src/functions.c +++ b/src/functions.c @@ -65,7 +65,6 @@ typedef struct functionsLibMetaData { dictType engineDictType = { dictSdsCaseHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -75,7 +74,6 @@ dictType engineDictType = { dictType functionDictType = { dictSdsCaseHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -85,7 +83,6 @@ dictType functionDictType = { dictType engineStatsDictType = { dictSdsCaseHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ engineStatsDispose, /* val destructor */ @@ -95,7 +92,6 @@ dictType engineStatsDictType = { dictType libraryFunctionDictType = { dictSdsHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ engineFunctionDispose, /* val destructor */ @@ -105,7 +101,6 @@ dictType libraryFunctionDictType = { dictType librariesDictType = { dictSdsHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ engineLibraryDispose, /* val destructor */ diff --git a/src/kvstore.c b/src/kvstore.c index 70e2043157..8de74c724b 100644 --- a/src/kvstore.c +++ b/src/kvstore.c @@ -794,8 +794,9 @@ void kvstoreDictSetKey(kvstore *kvs, int didx, dictEntry *de, void *key) { } void kvstoreDictSetVal(kvstore *kvs, int didx, dictEntry *de, 
void *val) { - dict *d = kvstoreGetDict(kvs, didx); - dictSetVal(d, de, val); + UNUSED(kvs); + UNUSED(didx); + dictSetVal(NULL, de, val); } dictEntry * diff --git a/src/latency.c b/src/latency.c index 78f3cc3edd..f9ab4905d0 100644 --- a/src/latency.c +++ b/src/latency.c @@ -51,7 +51,6 @@ void dictVanillaFree(dict *d, void *val); dictType latencyTimeSeriesDictType = { dictStringHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictStringKeyCompare, /* key compare */ dictVanillaFree, /* key destructor */ dictVanillaFree, /* val destructor */ diff --git a/src/module.c b/src/module.c index f82f30326d..ebb3d0e6c6 100644 --- a/src/module.c +++ b/src/module.c @@ -11780,7 +11780,6 @@ int dictCStringKeyCompare(dict *d, const void *key1, const void *key2) { dictType moduleAPIDictType = { dictCStringKeyHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictCStringKeyCompare, /* key compare */ NULL, /* key destructor */ NULL, /* val destructor */ @@ -11811,7 +11810,6 @@ void moduleInitModulesSystemLast(void) { dictType sdsKeyValueHashDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictSdsDestructor, /* val destructor */ diff --git a/src/sentinel.c b/src/sentinel.c index f4becdca27..e705b653d4 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -430,7 +430,6 @@ void dictInstancesValDestructor(dict *d, void *obj) { dictType instancesDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor */ dictInstancesValDestructor, /* val destructor */ @@ -444,7 +443,6 @@ dictType instancesDictType = { dictType leaderVotesDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor */ NULL, /* val destructor */ @@ -455,7 +453,6 @@ dictType 
leaderVotesDictType = { dictType renamedCommandsDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictSdsDestructor, /* val destructor */ diff --git a/src/server.c b/src/server.c index bf4967c106..e01c125d9a 100644 --- a/src/server.c +++ b/src/server.c @@ -424,7 +424,6 @@ int dictResizeAllowed(size_t moreMem, double usedRatio) { dictType objectKeyPointerValueDictType = { dictEncObjHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictEncObjKeyCompare, /* key compare */ dictObjectDestructor, /* key destructor */ NULL, /* val destructor */ @@ -436,7 +435,6 @@ dictType objectKeyPointerValueDictType = { dictType objectKeyHeapPointerValueDictType = { dictEncObjHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictEncObjKeyCompare, /* key compare */ dictObjectDestructor, /* key destructor */ dictVanillaFree, /* val destructor */ @@ -447,7 +445,6 @@ dictType objectKeyHeapPointerValueDictType = { dictType setDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -460,7 +457,6 @@ dictType setDictType = { dictType zsetDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* Note: SDS string shared & freed by skiplist */ NULL, /* val destructor */ @@ -471,7 +467,6 @@ dictType zsetDictType = { dictType dbDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictObjectDestructor, /* val destructor */ @@ -482,7 +477,6 @@ dictType dbDictType = { dictType dbExpiresDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key 
destructor */ NULL, /* val destructor */ @@ -493,7 +487,6 @@ dictType dbExpiresDictType = { dictType commandTableDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -504,7 +497,6 @@ dictType commandTableDictType = { dictType hashDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictSdsDestructor, /* val destructor */ @@ -515,7 +507,6 @@ dictType hashDictType = { dictType sdsReplyDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor */ NULL, /* val destructor */ @@ -528,7 +519,6 @@ dictType sdsReplyDictType = { dictType keylistDictType = { dictObjHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictObjKeyCompare, /* key compare */ dictObjectDestructor, /* key destructor */ dictListDestructor, /* val destructor */ @@ -540,7 +530,6 @@ dictType keylistDictType = { dictType objToDictDictType = { dictObjHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictObjKeyCompare, /* key compare */ dictObjectDestructor, /* key destructor */ dictDictDestructor, /* val destructor */ @@ -552,7 +541,6 @@ dictType objToDictDictType = { dictType modulesDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -563,7 +551,6 @@ dictType modulesDictType = { dictType migrateCacheDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -575,7 +562,6 @@ dictType migrateCacheDictType = { dictType stringSetDictType = { 
dictCStrCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictCStrKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -587,7 +573,6 @@ dictType stringSetDictType = { dictType externalStringType = { dictCStrCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictCStrKeyCaseCompare, /* key compare */ NULL, /* key destructor */ NULL, /* val destructor */ @@ -599,7 +584,6 @@ dictType externalStringType = { dictType sdsHashDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictVanillaFree, /* val destructor */ @@ -610,7 +594,6 @@ dictType sdsHashDictType = { dictType clientDictType = { dictClientHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictClientKeyCompare, /* key compare */ .no_value = 1 /* no values in this dict */ }; diff --git a/src/t_zset.c b/src/t_zset.c index 3ebc9b86d8..216ed165d2 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -2527,7 +2527,6 @@ static void zdiff(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen, dictType setAccumulatorDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor */ NULL, /* val destructor */ diff --git a/src/unit/test_kvstore.c b/src/unit/test_kvstore.c index f7c0b1a4fb..abf620a4f2 100644 --- a/src/unit/test_kvstore.c +++ b/src/unit/test_kvstore.c @@ -10,7 +10,7 @@ void freeTestCallback(dict *d, void *val) { zfree(val); } -dictType KvstoreDictTestType = {hashTestCallback, NULL, NULL, NULL, freeTestCallback, NULL, NULL}; +dictType KvstoreDictTestType = {hashTestCallback, NULL, NULL, freeTestCallback, NULL, NULL}; char *stringFromInt(int value) { char buf[32]; diff --git a/src/valkey-benchmark.c b/src/valkey-benchmark.c index cb944fac02..47d9483475 100644 --- a/src/valkey-benchmark.c +++ 
b/src/valkey-benchmark.c @@ -1243,7 +1243,6 @@ static int fetchClusterSlotsConfiguration(client c) { static dictType dtype = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor */ NULL, /* val destructor */ diff --git a/src/valkey-cli.c b/src/valkey-cli.c index 5a3d66fc4f..333a39752f 100644 --- a/src/valkey-cli.c +++ b/src/valkey-cli.c @@ -882,7 +882,6 @@ static void cliInitHelp(void) { dictType groupsdt = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -3544,7 +3543,6 @@ typedef struct clusterManagerLink { static dictType clusterManagerDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor */ dictSdsDestructor, /* val destructor */ @@ -3554,7 +3552,6 @@ static dictType clusterManagerDictType = { static dictType clusterManagerLinkDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor, /* val destructor */ @@ -8628,7 +8625,6 @@ void type_free(dict *d, void *val) { static dictType typeinfoDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor (owned by the value)*/ type_free, /* val destructor */ From 9319f7aeca5d726d07e3e967d2f2f7b9d7126a46 Mon Sep 17 00:00:00 2001 From: Shivshankar Date: Tue, 4 Jun 2024 14:46:59 -0400 Subject: [PATCH 26/42] Replace valkey in log and panic messages (#550) Part of #207 --------- Signed-off-by: Shivshankar-Reddy --- src/cluster_legacy.c | 2 +- src/rdb.c | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index ee328e558e..90f31e3cd5 100644 
--- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -543,7 +543,7 @@ int clusterLoadConfig(char *filename) { } else if (!strcasecmp(s, "noflags")) { /* nothing to do */ } else { - serverPanic("Unknown flag in redis cluster config file"); + serverPanic("Unknown flag in %s cluster config file", SERVER_TITLE); } if (p) s = p + 1; } diff --git a/src/rdb.c b/src/rdb.c index fe297cb7a9..6384021350 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -3021,10 +3021,9 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin if ((dbid = rdbLoadLen(rdb, NULL)) == RDB_LENERR) goto eoferr; if (dbid >= (unsigned)server.dbnum) { serverLog(LL_WARNING, - "FATAL: Data file was created with a Redis " - "server configured to handle more than %d " - "databases. Exiting\n", - server.dbnum); + "FATAL: Data file was created with a %s server configured to handle " + "more than %d databases. Exiting\n", + SERVER_TITLE, server.dbnum); exit(1); } db = rdb_loading_ctx->dbarray + dbid; From 60c10a5a4daeb60dece86550c47a220fc401bb57 Mon Sep 17 00:00:00 2001 From: Eran Liberty Date: Tue, 4 Jun 2024 20:00:53 -0700 Subject: [PATCH 27/42] Remove valdup from BenchmarkDictType (#600) makes SERVER_CFLAGS='-DSERVER_TEST' compile as well Introduced in #443. 
Signed-off-by: Eran Liberty Co-authored-by: Eran Liberty --- src/dict.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dict.c b/src/dict.c index ab53b4922c..dc227dbee9 100644 --- a/src/dict.c +++ b/src/dict.c @@ -1748,7 +1748,7 @@ char *stringFromLongLong(long long value) { return s; } -dictType BenchmarkDictType = {hashCallback, NULL, NULL, compareCallback, freeCallback, NULL, NULL}; +dictType BenchmarkDictType = {hashCallback, NULL, compareCallback, freeCallback, NULL, NULL}; #define start_benchmark() start = timeInMilliseconds() #define end_benchmark(msg) \ From 278ce0cae04342c392ebb865d155cbecd6672313 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20S=C3=B6derqvist?= Date: Thu, 6 Jun 2024 10:53:17 -0700 Subject: [PATCH 28/42] Rebrand the Lua debugger (#603) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Viktor Söderqvist --- src/eval.c | 32 ++++++++++++++++++-------------- src/script_lua.c | 2 +- src/valkey-cli.c | 1 + tests/unit/scripting.tcl | 8 ++++---- 4 files changed, 24 insertions(+), 19 deletions(-) diff --git a/src/eval.c b/src/eval.c index 10372be329..e50acd179c 100644 --- a/src/eval.c +++ b/src/eval.c @@ -1497,19 +1497,19 @@ void ldbRedis(lua_State *lua, sds *argv, int argc) { /* Increase the Lua stack if needed to make sure there is enough room * to push 'argc + 1' elements to the stack. On failure, return error. 
* Notice that we need, in worst case, 'argc + 1' elements because we push all the arguments - * given by the user (without the first argument) and we also push the 'redis' global table and - * 'redis.call' function so: - * (1 (redis table)) + (1 (redis.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/ + * given by the user (without the first argument) and we also push the 'server' global table and + * 'server.call' function so: + * (1 (server table)) + (1 (server.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/ ldbLogRedisReply("max lua stack reached"); return; } - lua_getglobal(lua, "redis"); + lua_getglobal(lua, "server"); lua_pushstring(lua, "call"); - lua_gettable(lua, -2); /* Stack: redis, redis.call */ + lua_gettable(lua, -2); /* Stack: server, server.call */ for (j = 1; j < argc; j++) lua_pushlstring(lua, argv[j], sdslen(argv[j])); - ldb.step = 1; /* Force redis.call() to log. */ - lua_pcall(lua, argc - 1, 1, 0); /* Stack: redis, result */ + ldb.step = 1; /* Force server.call() to log. */ + lua_pcall(lua, argc - 1, 1, 0); /* Stack: server, result */ ldb.step = 0; /* Disable logging. */ lua_pop(lua, 2); /* Discard the result and clean the stack. */ } @@ -1612,15 +1612,15 @@ int ldbRepl(lua_State *lua) { ldbLog(sdsnew("[b]reak 0 Remove all breakpoints.")); ldbLog(sdsnew("[t]race Show a backtrace.")); ldbLog(sdsnew("[e]val Execute some Lua code (in a different callframe).")); - ldbLog(sdsnew("[r]edis Execute a Redis command.")); - ldbLog(sdsnew("[m]axlen [len] Trim logged Redis replies and Lua var dumps to len.")); + ldbLog(sdsnew("[v]alkey Execute a command.")); + ldbLog(sdsnew("[m]axlen [len] Trim logged replies and Lua var dumps to len.")); ldbLog(sdsnew(" Specifying zero as means unlimited.")); ldbLog(sdsnew("[a]bort Stop the execution of the script. 
In sync")); ldbLog(sdsnew(" mode dataset changes will be retained.")); ldbLog(sdsnew("")); ldbLog(sdsnew("Debugger functions you can call from Lua scripts:")); - ldbLog(sdsnew("redis.debug() Produce logs in the debugger console.")); - ldbLog(sdsnew("redis.breakpoint() Stop execution like if there was a breakpoint in the")); + ldbLog(sdsnew("server.debug() Produce logs in the debugger console.")); + ldbLog(sdsnew("server.breakpoint() Stop execution like if there was a breakpoint in the")); ldbLog(sdsnew(" next line of code.")); ldbSendLogs(); } else if (!strcasecmp(argv[0], "s") || !strcasecmp(argv[0], "step") || !strcasecmp(argv[0], "n") || @@ -1644,8 +1644,12 @@ int ldbRepl(lua_State *lua) { } else if (!strcasecmp(argv[0], "a") || !strcasecmp(argv[0], "abort")) { luaPushError(lua, "script aborted for user request"); luaError(lua); - } else if (argc > 1 && (!strcasecmp(argv[0], "r") || !strcasecmp(argv[0], REDIS_API_NAME) || + } else if (argc > 1 && ((!strcasecmp(argv[0], "r") || !strcasecmp(argv[0], "redis")) || + (!strcasecmp(argv[0], "v") || !strcasecmp(argv[0], "valkey")) || !strcasecmp(argv[0], SERVER_API_NAME))) { + /* [r]redis or [v]alkey calls a command. We accept "server" too, but + * not "s" because that's "step". Neither can we use [c]all because + * "c" is continue. */ ldbRedis(lua, argv, argc); ldbSendLogs(); } else if ((!strcasecmp(argv[0], "p") || !strcasecmp(argv[0], "print"))) { @@ -1667,7 +1671,7 @@ int ldbRepl(lua_State *lua) { ldbList(1, 1000000); ldbSendLogs(); } else { - ldbLog(sdsnew(" Unknown Redis Lua debugger command or " + ldbLog(sdsnew(" Unknown Lua debugger command or " "wrong number of arguments.")); ldbSendLogs(); } @@ -1711,7 +1715,7 @@ void luaLdbLineHook(lua_State *lua, lua_Debug *ar) { if (ldb.step || bp) { char *reason = "step over"; if (bp) - reason = ldb.luabp ? "redis.breakpoint() called" : "break point"; + reason = ldb.luabp ? 
"server.breakpoint() called" : "break point"; else if (timeout) reason = "timeout reached, infinite loop?"; ldb.step = 0; diff --git a/src/script_lua.c b/src/script_lua.c index cc5b2d472e..cfe1959259 100644 --- a/src/script_lua.c +++ b/src/script_lua.c @@ -897,7 +897,7 @@ static int luaRedisGenericCommand(lua_State *lua, int raise_error) { /* Log the command if debugging is active. */ if (ldbIsEnabled()) { - sds cmdlog = sdsnew(""); + sds cmdlog = sdsnew(""); for (j = 0; j < c->argc; j++) { if (j == 10) { cmdlog = sdscatprintf(cmdlog, " ... (%d more)", c->argc - j - 1); diff --git a/src/valkey-cli.c b/src/valkey-cli.c index 333a39752f..e3fb7fb5d2 100644 --- a/src/valkey-cli.c +++ b/src/valkey-cli.c @@ -1907,6 +1907,7 @@ sds sdsCatColorizedLdbReply(sds o, char *s, size_t len) { char *color = "white"; if (strstr(s, "")) color = "bold"; + if (strstr(s, "")) color = "green"; if (strstr(s, "")) color = "green"; if (strstr(s, "")) color = "cyan"; if (strstr(s, "")) color = "red"; diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index ea5c1b899f..a4ab545803 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -1525,13 +1525,13 @@ start_server {tags {"scripting needs:debug external:skip"}} { r script debug sync r eval {return 'hello'} 0 catch {r 'hello\0world'} e - assert_match {*Unknown Redis Lua debugger command*} $e + assert_match {*Unknown Lua debugger command*} $e catch {r 'hello\0'} e - assert_match {*Unknown Redis Lua debugger command*} $e + assert_match {*Unknown Lua debugger command*} $e catch {r '\0hello'} e - assert_match {*Unknown Redis Lua debugger command*} $e + assert_match {*Unknown Lua debugger command*} $e catch {r '\0hello\0'} e - assert_match {*Unknown Redis Lua debugger command*} $e + assert_match {*Unknown Lua debugger command*} $e } test {Test scripting debug lua stack overflow} { From ad5fd5b95ced743c577724276e3a182e0d3be255 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20S=C3=B6derqvist?= Date: Thu, 6 Jun 2024 
16:40:55 -0700 Subject: [PATCH 29/42] More rebranding (#606) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit More rebranding of * Log messages (#252) * The DENIED error reply * Internal function names and comments, mainly Lua API --------- Signed-off-by: Viktor Söderqvist --- src/cluster_legacy.c | 2 +- src/debug.c | 2 +- src/eval.c | 116 ++++++++++----------- src/function_lua.c | 28 ++--- src/networking.c | 10 +- src/rdb.c | 2 +- src/script_lua.c | 86 ++++++++-------- src/script_lua.h | 2 +- src/server.h | 2 +- tests/integration/replication-4.tcl | 4 +- tests/integration/replication.tcl | 6 +- tests/integration/valkey-cli.tcl | 4 +- tests/unit/aofrw.tcl | 2 +- tests/unit/cluster/cli.tcl | 4 +- tests/unit/cluster/scripting.tcl | 6 +- tests/unit/functions.tcl | 154 ++++++++++++++-------------- tests/unit/introspection.tcl | 2 +- tests/unit/moduleapi/misc.tcl | 2 +- tests/unit/pause.tcl | 2 +- tests/unit/scripting.tcl | 2 +- 20 files changed, 219 insertions(+), 219 deletions(-) diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index 90f31e3cd5..685bafcbe4 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -561,7 +561,7 @@ int clusterLoadConfig(char *filename) { clusterAddNode(master); } /* shard_id can be absent if we are loading a nodes.conf generated - * by an older version of Redis; we should follow the primary's + * by an older version; we should follow the primary's * shard_id in this case */ if (auxFieldHandlers[af_shard_id].isPresent(n) == 0) { memcpy(n->shard_id, master->shard_id, CLUSTER_NAMELEN); diff --git a/src/debug.c b/src/debug.c index 51e9c6e9f6..fc2a49cca8 100644 --- a/src/debug.c +++ b/src/debug.c @@ -925,7 +925,7 @@ void debugCommand(client *c) { addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "stringmatch-test") && c->argc == 2) { stringmatchlen_fuzz_test(); - addReplyStatus(c, "Apparently Redis did not crash: test passed"); + addReplyStatus(c, "Apparently the server did 
not crash: test passed"); } else if (!strcasecmp(c->argv[1]->ptr, "set-disable-deny-scripts") && c->argc == 3) { server.script_disable_deny_script = atoi(c->argv[2]->ptr); addReply(c, shared.ok); diff --git a/src/eval.c b/src/eval.c index e50acd179c..464c8ef487 100644 --- a/src/eval.c +++ b/src/eval.c @@ -99,7 +99,7 @@ struct ldbState { int bp[LDB_BREAKPOINTS_MAX]; /* An array of breakpoints line numbers. */ int bpcount; /* Number of valid entries inside bp. */ int step; /* Stop at next line regardless of breakpoints. */ - int luabp; /* Stop at next line because redis.breakpoint() was called. */ + int luabp; /* Stop at next line because server.breakpoint() was called. */ sds *src; /* Lua script source code split by line. */ int lines; /* Number of lines in 'src'. */ int currentline; /* Current line number. */ @@ -114,7 +114,7 @@ struct ldbState { /* Perform the SHA1 of the input string. We use this both for hashing script * bodies in order to obtain the Lua function name, and in the implementation - * of redis.sha1(). + * of server.sha1(). * * 'digest' should point to a 41 bytes buffer: 40 for SHA1 converted into an * hexadecimal number, plus 1 byte for null term. */ @@ -135,12 +135,12 @@ void sha1hex(char *digest, char *script, size_t len) { digest[40] = '\0'; } -/* redis.breakpoint() +/* server.breakpoint() * * Allows to stop execution during a debugging session from within * the Lua code implementation, like if a breakpoint was set in the code * immediately after the function. */ -int luaRedisBreakpointCommand(lua_State *lua) { +int luaServerBreakpointCommand(lua_State *lua) { if (ldb.active) { ldb.luabp = 1; lua_pushboolean(lua, 1); @@ -150,12 +150,12 @@ int luaRedisBreakpointCommand(lua_State *lua) { return 1; } -/* redis.debug() +/* server.debug() * * Log a string message into the output console. * Can take multiple arguments that will be separated by commas. * Nothing is returned to the caller. 
*/ -int luaRedisDebugCommand(lua_State *lua) { +int luaServerDebugCommand(lua_State *lua) { if (!ldb.active) return 0; int argc = lua_gettop(lua); sds log = sdscatprintf(sdsempty(), " line %d: ", ldb.currentline); @@ -167,14 +167,14 @@ int luaRedisDebugCommand(lua_State *lua) { return 0; } -/* redis.replicate_commands() +/* server.replicate_commands() * * DEPRECATED: Now do nothing and always return true. * Turn on single commands replication if the script never called * a write command so far, and returns true. Otherwise if the script * already started to write, returns false and stick to whole scripts * replication, which is our default. */ -int luaRedisReplicateCommandsCommand(lua_State *lua) { +int luaServerReplicateCommandsCommand(lua_State *lua) { lua_pushboolean(lua, 1); return 1; } @@ -205,27 +205,27 @@ void scriptingInit(int setup) { lctx.lua_scripts_lru_list = listCreate(); lctx.lua_scripts_mem = 0; - luaRegisterRedisAPI(lua); + luaRegisterServerAPI(lua); /* register debug commands */ - lua_getglobal(lua, "redis"); + lua_getglobal(lua, "server"); - /* redis.breakpoint */ + /* server.breakpoint */ lua_pushstring(lua, "breakpoint"); - lua_pushcfunction(lua, luaRedisBreakpointCommand); + lua_pushcfunction(lua, luaServerBreakpointCommand); lua_settable(lua, -3); - /* redis.debug */ + /* server.debug */ lua_pushstring(lua, "debug"); - lua_pushcfunction(lua, luaRedisDebugCommand); + lua_pushcfunction(lua, luaServerDebugCommand); lua_settable(lua, -3); - /* redis.replicate_commands */ + /* server.replicate_commands */ lua_pushstring(lua, "replicate_commands"); - lua_pushcfunction(lua, luaRedisReplicateCommandsCommand); + lua_pushcfunction(lua, luaServerReplicateCommandsCommand); lua_settable(lua, -3); - lua_setglobal(lua, "redis"); + lua_setglobal(lua, "server"); /* Add a helper function we use for pcall error reporting. 
* Note that when the error is in the C function we want to report the @@ -1204,50 +1204,50 @@ void ldbLogStackValue(lua_State *lua, char *prefix) { ldbLogWithMaxLen(s); } -char *ldbRedisProtocolToHuman_Int(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Bulk(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Status(sds *o, char *reply); -char *ldbRedisProtocolToHuman_MultiBulk(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Set(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Map(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Null(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Bool(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Double(sds *o, char *reply); +char *ldbRespToHuman_Int(sds *o, char *reply); +char *ldbRespToHuman_Bulk(sds *o, char *reply); +char *ldbRespToHuman_Status(sds *o, char *reply); +char *ldbRespToHuman_MultiBulk(sds *o, char *reply); +char *ldbRespToHuman_Set(sds *o, char *reply); +char *ldbRespToHuman_Map(sds *o, char *reply); +char *ldbRespToHuman_Null(sds *o, char *reply); +char *ldbRespToHuman_Bool(sds *o, char *reply); +char *ldbRespToHuman_Double(sds *o, char *reply); /* Get RESP from 'reply' and appends it in human readable form to * the passed SDS string 'o'. * * Note that the SDS string is passed by reference (pointer of pointer to * char*) so that we can return a modified pointer, as for SDS semantics. 
*/ -char *ldbRedisProtocolToHuman(sds *o, char *reply) { +char *ldbRespToHuman(sds *o, char *reply) { char *p = reply; /* clang-format off */ switch(*p) { - case ':': p = ldbRedisProtocolToHuman_Int(o,reply); break; - case '$': p = ldbRedisProtocolToHuman_Bulk(o,reply); break; - case '+': p = ldbRedisProtocolToHuman_Status(o,reply); break; - case '-': p = ldbRedisProtocolToHuman_Status(o,reply); break; - case '*': p = ldbRedisProtocolToHuman_MultiBulk(o,reply); break; - case '~': p = ldbRedisProtocolToHuman_Set(o,reply); break; - case '%': p = ldbRedisProtocolToHuman_Map(o,reply); break; - case '_': p = ldbRedisProtocolToHuman_Null(o,reply); break; - case '#': p = ldbRedisProtocolToHuman_Bool(o,reply); break; - case ',': p = ldbRedisProtocolToHuman_Double(o,reply); break; + case ':': p = ldbRespToHuman_Int(o,reply); break; + case '$': p = ldbRespToHuman_Bulk(o,reply); break; + case '+': p = ldbRespToHuman_Status(o,reply); break; + case '-': p = ldbRespToHuman_Status(o,reply); break; + case '*': p = ldbRespToHuman_MultiBulk(o,reply); break; + case '~': p = ldbRespToHuman_Set(o,reply); break; + case '%': p = ldbRespToHuman_Map(o,reply); break; + case '_': p = ldbRespToHuman_Null(o,reply); break; + case '#': p = ldbRespToHuman_Bool(o,reply); break; + case ',': p = ldbRespToHuman_Double(o,reply); break; } /* clang-format on */ return p; } -/* The following functions are helpers for ldbRedisProtocolToHuman(), each +/* The following functions are helpers for ldbRespToHuman(), each * take care of a given RESP return type. 
*/ -char *ldbRedisProtocolToHuman_Int(sds *o, char *reply) { +char *ldbRespToHuman_Int(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); *o = sdscatlen(*o, reply + 1, p - reply - 1); return p + 2; } -char *ldbRedisProtocolToHuman_Bulk(sds *o, char *reply) { +char *ldbRespToHuman_Bulk(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); long long bulklen; @@ -1261,14 +1261,14 @@ char *ldbRedisProtocolToHuman_Bulk(sds *o, char *reply) { } } -char *ldbRedisProtocolToHuman_Status(sds *o, char *reply) { +char *ldbRespToHuman_Status(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); *o = sdscatrepr(*o, reply, p - reply); return p + 2; } -char *ldbRedisProtocolToHuman_MultiBulk(sds *o, char *reply) { +char *ldbRespToHuman_MultiBulk(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); long long mbulklen; int j = 0; @@ -1281,14 +1281,14 @@ char *ldbRedisProtocolToHuman_MultiBulk(sds *o, char *reply) { } *o = sdscatlen(*o, "[", 1); for (j = 0; j < mbulklen; j++) { - p = ldbRedisProtocolToHuman(o, p); + p = ldbRespToHuman(o, p); if (j != mbulklen - 1) *o = sdscatlen(*o, ",", 1); } *o = sdscatlen(*o, "]", 1); return p; } -char *ldbRedisProtocolToHuman_Set(sds *o, char *reply) { +char *ldbRespToHuman_Set(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); long long mbulklen; int j = 0; @@ -1297,14 +1297,14 @@ char *ldbRedisProtocolToHuman_Set(sds *o, char *reply) { p += 2; *o = sdscatlen(*o, "~(", 2); for (j = 0; j < mbulklen; j++) { - p = ldbRedisProtocolToHuman(o, p); + p = ldbRespToHuman(o, p); if (j != mbulklen - 1) *o = sdscatlen(*o, ",", 1); } *o = sdscatlen(*o, ")", 1); return p; } -char *ldbRedisProtocolToHuman_Map(sds *o, char *reply) { +char *ldbRespToHuman_Map(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); long long mbulklen; int j = 0; @@ -1313,22 +1313,22 @@ char *ldbRedisProtocolToHuman_Map(sds *o, char *reply) { p += 2; *o = sdscatlen(*o, "{", 1); for (j = 0; j < mbulklen; j++) { - p = ldbRedisProtocolToHuman(o, p); + p = 
ldbRespToHuman(o, p); *o = sdscatlen(*o, " => ", 4); - p = ldbRedisProtocolToHuman(o, p); + p = ldbRespToHuman(o, p); if (j != mbulklen - 1) *o = sdscatlen(*o, ",", 1); } *o = sdscatlen(*o, "}", 1); return p; } -char *ldbRedisProtocolToHuman_Null(sds *o, char *reply) { +char *ldbRespToHuman_Null(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); *o = sdscatlen(*o, "(null)", 6); return p + 2; } -char *ldbRedisProtocolToHuman_Bool(sds *o, char *reply) { +char *ldbRespToHuman_Bool(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); if (reply[1] == 't') *o = sdscatlen(*o, "#true", 5); @@ -1337,7 +1337,7 @@ char *ldbRedisProtocolToHuman_Bool(sds *o, char *reply) { return p + 2; } -char *ldbRedisProtocolToHuman_Double(sds *o, char *reply) { +char *ldbRespToHuman_Double(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); *o = sdscatlen(*o, "(double) ", 9); *o = sdscatlen(*o, reply + 1, p - reply - 1); @@ -1347,9 +1347,9 @@ char *ldbRedisProtocolToHuman_Double(sds *o, char *reply) { /* Log a RESP reply as debugger output, in a human readable format. * If the resulting string is longer than 'len' plus a few more chars * used as prefix, it gets truncated. */ -void ldbLogRedisReply(char *reply) { +void ldbLogRespReply(char *reply) { sds log = sdsnew(" "); - ldbRedisProtocolToHuman(&log, reply); + ldbRespToHuman(&log, reply); ldbLogWithMaxLen(log); } @@ -1487,10 +1487,10 @@ void ldbEval(lua_State *lua, sds *argv, int argc) { } /* Implement the debugger "server" command. We use a trick in order to make - * the implementation very simple: we just call the Lua redis.call() command + * the implementation very simple: we just call the Lua server.call() command * implementation, with ldb.step enabled, so as a side effect the command * and its reply are logged. 
*/ -void ldbRedis(lua_State *lua, sds *argv, int argc) { +void ldbServer(lua_State *lua, sds *argv, int argc) { int j; if (!lua_checkstack(lua, argc + 1)) { @@ -1500,7 +1500,7 @@ void ldbRedis(lua_State *lua, sds *argv, int argc) { * given by the user (without the first argument) and we also push the 'server' global table and * 'server.call' function so: * (1 (server table)) + (1 (server.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/ - ldbLogRedisReply("max lua stack reached"); + ldbLogRespReply("max lua stack reached"); return; } @@ -1592,7 +1592,7 @@ int ldbRepl(lua_State *lua) { /* Execute the command. */ if (!strcasecmp(argv[0], "h") || !strcasecmp(argv[0], "help")) { - ldbLog(sdsnew("Redis Lua debugger help:")); + ldbLog(sdsnew("Lua debugger help:")); ldbLog(sdsnew("[h]elp Show this help.")); ldbLog(sdsnew("[s]tep Run current line and stop again.")); ldbLog(sdsnew("[n]ext Alias for step.")); @@ -1650,7 +1650,7 @@ int ldbRepl(lua_State *lua) { /* [r]redis or [v]alkey calls a command. We accept "server" too, but * not "s" because that's "step". Neither can we use [c]all because * "c" is continue. 
*/ - ldbRedis(lua, argv, argc); + ldbServer(lua, argv, argc); ldbSendLogs(); } else if ((!strcasecmp(argv[0], "p") || !strcasecmp(argv[0], "print"))) { if (argc == 2) diff --git a/src/function_lua.c b/src/function_lua.c index 54453a8f35..685485e37e 100644 --- a/src/function_lua.c +++ b/src/function_lua.c @@ -274,7 +274,7 @@ static int luaRegisterFunctionReadNamedArgs(lua_State *lua, registerFunctionArgs luaFunctionCtx *lua_f_ctx = NULL; uint64_t flags = 0; if (!lua_istable(lua, 1)) { - err = "calling redis.register_function with a single argument is only applicable to Lua table (representing " + err = "calling server.register_function with a single argument is only applicable to Lua table (representing " "named arguments)."; goto error; } @@ -284,23 +284,23 @@ static int luaRegisterFunctionReadNamedArgs(lua_State *lua, registerFunctionArgs while (lua_next(lua, -2)) { /* Stack now: table, key, value */ if (!lua_isstring(lua, -2)) { - err = "named argument key given to redis.register_function is not a string"; + err = "named argument key given to server.register_function is not a string"; goto error; } const char *key = lua_tostring(lua, -2); if (!strcasecmp(key, "function_name")) { if (!(name = luaGetStringSds(lua, -1))) { - err = "function_name argument given to redis.register_function must be a string"; + err = "function_name argument given to server.register_function must be a string"; goto error; } } else if (!strcasecmp(key, "description")) { if (!(desc = luaGetStringSds(lua, -1))) { - err = "description argument given to redis.register_function must be a string"; + err = "description argument given to server.register_function must be a string"; goto error; } } else if (!strcasecmp(key, "callback")) { if (!lua_isfunction(lua, -1)) { - err = "callback argument given to redis.register_function must be a function"; + err = "callback argument given to server.register_function must be a function"; goto error; } int lua_function_ref = luaL_ref(lua, 
LUA_REGISTRYINDEX); @@ -310,7 +310,7 @@ static int luaRegisterFunctionReadNamedArgs(lua_State *lua, registerFunctionArgs continue; /* value was already popped, so no need to pop it out. */ } else if (!strcasecmp(key, "flags")) { if (!lua_istable(lua, -1)) { - err = "flags argument to redis.register_function must be a table representing function flags"; + err = "flags argument to server.register_function must be a table representing function flags"; goto error; } if (luaRegisterFunctionReadFlags(lua, &flags) != C_OK) { @@ -319,19 +319,19 @@ static int luaRegisterFunctionReadNamedArgs(lua_State *lua, registerFunctionArgs } } else { /* unknown argument was given, raise an error */ - err = "unknown argument given to redis.register_function"; + err = "unknown argument given to server.register_function"; goto error; } lua_pop(lua, 1); /* pop the value to continue the iteration */ } if (!name) { - err = "redis.register_function must get a function name argument"; + err = "server.register_function must get a function name argument"; goto error; } if (!lua_f_ctx) { - err = "redis.register_function must get a callback argument"; + err = "server.register_function must get a callback argument"; goto error; } @@ -355,12 +355,12 @@ static int luaRegisterFunctionReadPositionalArgs(lua_State *lua, registerFunctio sds name = NULL; luaFunctionCtx *lua_f_ctx = NULL; if (!(name = luaGetStringSds(lua, 1))) { - err = "first argument to redis.register_function must be a string"; + err = "first argument to server.register_function must be a string"; goto error; } if (!lua_isfunction(lua, 2)) { - err = "second argument to redis.register_function must be a function"; + err = "second argument to server.register_function must be a function"; goto error; } @@ -382,7 +382,7 @@ static int luaRegisterFunctionReadPositionalArgs(lua_State *lua, registerFunctio static int luaRegisterFunctionReadArgs(lua_State *lua, registerFunctionArgs *register_f_args) { int argc = lua_gettop(lua); if (argc < 1 || 
argc > 2) { - luaPushError(lua, "wrong number of arguments to redis.register_function"); + luaPushError(lua, "wrong number of arguments to server.register_function"); return C_ERR; } @@ -398,7 +398,7 @@ static int luaRegisterFunction(lua_State *lua) { loadCtx *load_ctx = luaGetFromRegistry(lua, REGISTRY_LOAD_CTX_NAME); if (!load_ctx) { - luaPushError(lua, "redis.register_function can only be called on FUNCTION LOAD command"); + luaPushError(lua, "server.register_function can only be called on FUNCTION LOAD command"); return luaError(lua); } @@ -423,7 +423,7 @@ int luaEngineInitEngine(void) { luaEngineCtx *lua_engine_ctx = zmalloc(sizeof(*lua_engine_ctx)); lua_engine_ctx->lua = lua_open(); - luaRegisterRedisAPI(lua_engine_ctx->lua); + luaRegisterServerAPI(lua_engine_ctx->lua); /* Register the library commands table and fields and store it to registry */ lua_newtable(lua_engine_ctx->lua); /* load library globals */ diff --git a/src/networking.c b/src/networking.c index 9274f21c05..cfe98a2c0e 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1291,19 +1291,19 @@ void clientAcceptHandler(connection *conn) { * user what to do to fix it if needed. */ if (server.protected_mode && DefaultUser->flags & USER_FLAG_NOPASS) { if (connIsLocal(conn) != 1) { - char *err = "-DENIED Redis is running in protected mode because protected " + char *err = "-DENIED Running in protected mode because protected " "mode is enabled and no password is set for the default user. " "In this mode connections are only accepted from the loopback interface. 
" - "If you want to connect from external computers to Redis you " + "If you want to connect from external computers, you " "may adopt one of the following solutions: " "1) Just disable protected mode sending the command " "'CONFIG SET protected-mode no' from the loopback interface " - "by connecting to Redis from the same host the server is " - "running, however MAKE SURE Redis is not publicly accessible " + "by connecting from the same host the server is " + "running, however MAKE SURE it's not publicly accessible " "from internet if you do so. Use CONFIG REWRITE to make this " "change permanent. " "2) Alternatively you can just disable the protected mode by " - "editing the Redis configuration file, and setting the protected " + "editing the configuration file, and setting the protected " "mode option to 'no', and then restarting the server. " "3) If you started the server manually just for testing, restart " "it with the '--protected-mode no' option. " diff --git a/src/rdb.c b/src/rdb.c index 6384021350..b57cf44e7a 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -3081,7 +3081,7 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin } else if (!strcasecmp(auxkey->ptr, "redis-ver")) { serverLog(LL_NOTICE, "Loading RDB produced by Redis version %s", (char *)auxval->ptr); } else if (!strcasecmp(auxkey->ptr, "valkey-ver")) { - serverLog(LL_NOTICE, "Loading RDB produced by valkey version %s", (char *)auxval->ptr); + serverLog(LL_NOTICE, "Loading RDB produced by Valkey version %s", (char *)auxval->ptr); } else if (!strcasecmp(auxkey->ptr, "ctime")) { time_t age = time(NULL) - strtol(auxval->ptr, NULL, 10); if (age < 0) age = 0; diff --git a/src/script_lua.c b/src/script_lua.c index cfe1959259..ff75b679cf 100644 --- a/src/script_lua.c +++ b/src/script_lua.c @@ -224,7 +224,7 @@ static void redisProtocolToLuaType_Int(void *ctx, long long val, const char *pro if (!lua_checkstack(lua, 1)) { /* Increase the Lua stack if needed, to make sure there is 
enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_pushnumber(lua, (lua_Number)val); } @@ -240,7 +240,7 @@ static void redisProtocolToLuaType_NullBulkString(void *ctx, const char *proto, if (!lua_checkstack(lua, 1)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_pushboolean(lua, 0); } @@ -255,7 +255,7 @@ static void redisProtocolToLuaType_NullArray(void *ctx, const char *proto, size_ if (!lua_checkstack(lua, 1)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_pushboolean(lua, 0); } @@ -273,7 +273,7 @@ redisProtocolToLuaType_BulkString(void *ctx, const char *str, size_t len, const if (!lua_checkstack(lua, 1)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_pushlstring(lua, str, len); } @@ -289,7 +289,7 @@ static void redisProtocolToLuaType_Status(void *ctx, const char *str, size_t len if (!lua_checkstack(lua, 3)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. 
*/ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_newtable(lua); lua_pushstring(lua, "ok"); @@ -308,7 +308,7 @@ static void redisProtocolToLuaType_Error(void *ctx, const char *str, size_t len, if (!lua_checkstack(lua, 3)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } sds err_msg = sdscatlen(sdsnew("-"), str, len); luaPushErrorBuff(lua, err_msg); @@ -326,7 +326,7 @@ static void redisProtocolToLuaType_Map(struct ReplyParser *parser, void *ctx, si if (!lua_checkstack(lua, 3)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_newtable(lua); lua_pushstring(lua, "map"); @@ -348,7 +348,7 @@ static void redisProtocolToLuaType_Set(struct ReplyParser *parser, void *ctx, si if (!lua_checkstack(lua, 3)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_newtable(lua); lua_pushstring(lua, "set"); @@ -362,7 +362,7 @@ static void redisProtocolToLuaType_Set(struct ReplyParser *parser, void *ctx, si * to push elements to the stack. On failure, exit with panic. 
* Notice that here we need to check the stack again because the recursive * call to redisProtocolToLuaType might have use the room allocated in the stack*/ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_pushboolean(lua, 1); lua_settable(lua, -3); @@ -379,7 +379,7 @@ static void redisProtocolToLuaType_Array(struct ReplyParser *parser, void *ctx, if (!lua_checkstack(lua, 2)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_newtable(lua); } @@ -422,7 +422,7 @@ static void redisProtocolToLuaType_VerbatimString(void *ctx, if (!lua_checkstack(lua, 5)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_newtable(lua); lua_pushstring(lua, "verbatim_string"); @@ -448,7 +448,7 @@ redisProtocolToLuaType_BigNumber(void *ctx, const char *str, size_t len, const c if (!lua_checkstack(lua, 3)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_newtable(lua); lua_pushstring(lua, "big_number"); @@ -467,7 +467,7 @@ static void redisProtocolToLuaType_Null(void *ctx, const char *proto, size_t pro if (!lua_checkstack(lua, 1)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. 
*/ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_pushnil(lua); } @@ -483,7 +483,7 @@ static void redisProtocolToLuaType_Bool(void *ctx, int val, const char *proto, s if (!lua_checkstack(lua, 1)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_pushboolean(lua, val); } @@ -499,7 +499,7 @@ static void redisProtocolToLuaType_Double(void *ctx, double d, const char *proto if (!lua_checkstack(lua, 3)) { /* Increase the Lua stack if needed, to make sure there is enough room * to push elements to the stack. On failure, exit with panic. */ - serverPanic("lua stack limit reach when parsing redis.call reply"); + serverPanic("lua stack limit reach when parsing server.call reply"); } lua_newtable(lua); lua_pushstring(lua, "double"); @@ -508,7 +508,7 @@ static void redisProtocolToLuaType_Double(void *ctx, double d, const char *proto } /* This function is used in order to push an error on the Lua stack in the - * format used by redis.pcall to return errors, which is a lua table + * format used by server.pcall to return errors, which is a lua table * with an "err" field set to the error string including the error code. * Note that this table is never a valid reply by proper commands, * since the returned tables are otherwise always indexed by integers, never by strings. @@ -527,7 +527,7 @@ void luaPushErrorBuff(lua_State *lua, sds err_buffer) { /* There are two possible formats for the received `error` string: * 1) "-CODE msg": in this case we remove the leading '-' since we don't store it as part of the lua error format. * 2) "msg": in this case we prepend a generic 'ERR' code since all error statuses need some error code. 
- * We support format (1) so this function can reuse the error messages used in other places in redis. + * We support format (1) so this function can reuse the error messages used in other places. * We support format (2) so it'll be easy to pass descriptive errors to this function without worrying about format. */ if (err_buffer[0] == '-') { @@ -565,7 +565,7 @@ void luaPushError(lua_State *lua, const char *error) { } /* In case the error set into the Lua stack by luaPushError() was generated - * by the non-error-trapping version of redis.pcall(), which is redis.call(), + * by the non-error-trapping version of server.pcall(), which is server.call(), * this function will raise the Lua error so that the execution of the * script will be halted. */ int luaError(lua_State *lua) { @@ -759,7 +759,7 @@ static void luaReplyToRedisReply(client *c, client *script_client, lua_State *lu } /* --------------------------------------------------------------------------- - * Lua redis.* functions implementations. + * Lua server.* functions implementations. * ------------------------------------------------------------------------- */ void freeLuaRedisArgv(robj **argv, int argc, int argv_len); @@ -946,7 +946,7 @@ static int luaRedisGenericCommand(lua_State *lua, int raise_error) { redisProtocolToLuaType(lua, reply); /* If the debugger is active, log the reply from the server. 
*/ - if (ldbIsEnabled()) ldbLogRedisReply(reply); + if (ldbIsEnabled()) ldbLogRespReply(reply); if (reply != c->buf) sdsfree(reply); c->reply_bytes = 0; @@ -998,17 +998,17 @@ static int luaRedisPcall(lua_State *lua) { return lua_gettop(lua); } -/* redis.call() */ +/* server.call() */ static int luaRedisCallCommand(lua_State *lua) { return luaRedisGenericCommand(lua, 1); } -/* redis.pcall() */ +/* server.pcall() */ static int luaRedisPCallCommand(lua_State *lua) { return luaRedisGenericCommand(lua, 0); } -/* This adds redis.sha1hex(string) to Lua scripts using the same hashing +/* This adds server.sha1hex(string) to Lua scripts using the same hashing * function used for sha1ing lua scripts. */ static int luaRedisSha1hexCommand(lua_State *lua) { int argc = lua_gettop(lua); @@ -1031,8 +1031,8 @@ static int luaRedisSha1hexCommand(lua_State *lua) { * passed as argument. This helper function is handy when returning * a RESP error or status reply from Lua: * - * return redis.error_reply("ERR Some Error") - * return redis.status_reply("ERR Some Error") + * return server.error_reply("ERR Some Error") + * return server.status_reply("ERR Some Error") */ static int luaRedisReturnSingleFieldTable(lua_State *lua, char *field) { if (lua_gettop(lua) != 1 || lua_type(lua, -1) != LUA_TSTRING) { @@ -1047,7 +1047,7 @@ static int luaRedisReturnSingleFieldTable(lua_State *lua, char *field) { return 1; } -/* redis.error_reply() */ +/* server.error_reply() */ static int luaRedisErrorReplyCommand(lua_State *lua) { if (lua_gettop(lua) != 1 || lua_type(lua, -1) != LUA_TSTRING) { luaPushError(lua, "wrong number or type of arguments"); @@ -1066,12 +1066,12 @@ static int luaRedisErrorReplyCommand(lua_State *lua) { return 1; } -/* redis.status_reply() */ +/* server.status_reply() */ static int luaRedisStatusReplyCommand(lua_State *lua) { return luaRedisReturnSingleFieldTable(lua, "ok"); } -/* redis.set_repl() +/* server.set_repl() * * Set the propagation of write commands executed in the context 
of the * script to on/off for AOF and slaves. */ @@ -1082,7 +1082,7 @@ static int luaRedisSetReplCommand(lua_State *lua) { serverAssert(rctx); /* Only supported inside script invocation */ if (argc != 1) { - luaPushError(lua, "redis.set_repl() requires one argument."); + luaPushError(lua, "server.set_repl() requires one argument."); return luaError(lua); } @@ -1096,7 +1096,7 @@ static int luaRedisSetReplCommand(lua_State *lua) { return 0; } -/* redis.acl_check_cmd() +/* server.acl_check_cmd() * * Checks ACL permissions for given command for the current user. */ static int luaRedisAclCheckCmdPermissionsCommand(lua_State *lua) { @@ -1132,14 +1132,14 @@ static int luaRedisAclCheckCmdPermissionsCommand(lua_State *lua) { } -/* redis.log() */ +/* server.log() */ static int luaLogCommand(lua_State *lua) { int j, argc = lua_gettop(lua); int level; sds log; if (argc < 2) { - luaPushError(lua, "redis.log() requires two arguments or more."); + luaPushError(lua, "server.log() requires two arguments or more."); return luaError(lua); } else if (!lua_isnumber(lua, -argc)) { luaPushError(lua, "First argument must be a number (log level)."); @@ -1169,14 +1169,14 @@ static int luaLogCommand(lua_State *lua) { return 0; } -/* redis.setresp() */ +/* server.setresp() */ static int luaSetResp(lua_State *lua) { scriptRunCtx *rctx = luaGetFromRegistry(lua, REGISTRY_RUN_CTX_NAME); serverAssert(rctx); /* Only supported inside script invocation */ int argc = lua_gettop(lua); if (argc != 1) { - luaPushError(lua, "redis.setresp() requires one argument."); + luaPushError(lua, "server.setresp() requires one argument."); return luaError(lua); } @@ -1378,7 +1378,7 @@ void luaRegisterVersion(lua_State *lua) { } void luaRegisterLogFunction(lua_State *lua) { - /* redis.log and log levels. */ + /* server.log and log levels. 
*/ lua_pushstring(lua, "log"); lua_pushcfunction(lua, luaLogCommand); lua_settable(lua, -3); @@ -1400,7 +1400,7 @@ void luaRegisterLogFunction(lua_State *lua) { lua_settable(lua, -3); } -void luaRegisterRedisAPI(lua_State *lua) { +void luaRegisterServerAPI(lua_State *lua) { lua_pushvalue(lua, LUA_GLOBALSINDEX); luaSetAllowListProtection(lua); lua_pop(lua, 1); @@ -1413,12 +1413,12 @@ void luaRegisterRedisAPI(lua_State *lua) { /* Register the commands table and fields */ lua_newtable(lua); - /* redis.call */ + /* server.call */ lua_pushstring(lua, "call"); lua_pushcfunction(lua, luaRedisCallCommand); lua_settable(lua, -3); - /* redis.pcall */ + /* server.pcall */ lua_pushstring(lua, "pcall"); lua_pushcfunction(lua, luaRedisPCallCommand); lua_settable(lua, -3); @@ -1427,17 +1427,17 @@ void luaRegisterRedisAPI(lua_State *lua) { luaRegisterVersion(lua); - /* redis.setresp */ + /* server.setresp */ lua_pushstring(lua, "setresp"); lua_pushcfunction(lua, luaSetResp); lua_settable(lua, -3); - /* redis.sha1hex */ + /* server.sha1hex */ lua_pushstring(lua, "sha1hex"); lua_pushcfunction(lua, luaRedisSha1hexCommand); lua_settable(lua, -3); - /* redis.error_reply and redis.status_reply */ + /* server.error_reply and server.status_reply */ lua_pushstring(lua, "error_reply"); lua_pushcfunction(lua, luaRedisErrorReplyCommand); lua_settable(lua, -3); @@ -1445,7 +1445,7 @@ void luaRegisterRedisAPI(lua_State *lua) { lua_pushcfunction(lua, luaRedisStatusReplyCommand); lua_settable(lua, -3); - /* redis.set_repl and associated flags. */ + /* server.set_repl and associated flags. 
*/ lua_pushstring(lua, "set_repl"); lua_pushcfunction(lua, luaRedisSetReplCommand); lua_settable(lua, -3); @@ -1470,7 +1470,7 @@ void luaRegisterRedisAPI(lua_State *lua) { lua_pushnumber(lua, PROPAGATE_AOF | PROPAGATE_REPL); lua_settable(lua, -3); - /* redis.acl_check_cmd */ + /* server.acl_check_cmd */ lua_pushstring(lua, "acl_check_cmd"); lua_pushcfunction(lua, luaRedisAclCheckCmdPermissionsCommand); lua_settable(lua, -3); diff --git a/src/script_lua.h b/src/script_lua.h index 8eec4b04f5..35edf46af6 100644 --- a/src/script_lua.h +++ b/src/script_lua.h @@ -66,7 +66,7 @@ typedef struct errorInfo { int ignore_err_stats_update; } errorInfo; -void luaRegisterRedisAPI(lua_State *lua); +void luaRegisterServerAPI(lua_State *lua); sds luaGetStringSds(lua_State *lua, int index); void luaRegisterGlobalProtectionFunction(lua_State *lua); void luaSetErrorMetatable(lua_State *lua); diff --git a/src/server.h b/src/server.h index 66f2c81c4d..30a2cace94 100644 --- a/src/server.h +++ b/src/server.h @@ -3465,7 +3465,7 @@ void freeLuaScriptsAsync(dict *lua_scripts, list *lua_scripts_lru_list, lua_Stat void freeFunctionsAsync(functionsLibCtx *lib_ctx); int ldbIsEnabled(void); void ldbLog(sds entry); -void ldbLogRedisReply(char *reply); +void ldbLogRespReply(char *reply); void sha1hex(char *digest, char *script, size_t len); unsigned long evalMemory(void); dict *evalScriptsDict(void); diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl index 4370080b0f..d5b55b3b57 100644 --- a/tests/integration/replication-4.tcl +++ b/tests/integration/replication-4.tcl @@ -61,8 +61,8 @@ start_server {tags {"repl external:skip"}} { # Load some functions to be used later $master FUNCTION load replace {#!lua name=test - redis.register_function{function_name='f_default_flags', callback=function(keys, args) return redis.call('get',keys[1]) end, flags={}} - redis.register_function{function_name='f_no_writes', callback=function(keys, args) return redis.call('get',keys[1]) 
end, flags={'no-writes'}} + server.register_function{function_name='f_default_flags', callback=function(keys, args) return redis.call('get',keys[1]) end, flags={}} + server.register_function{function_name='f_no_writes', callback=function(keys, args) return redis.call('get',keys[1]) end, flags={'no-writes'}} } test {First server should have role slave after SLAVEOF} { diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl index 2118a8acdb..e64394ad1b 100644 --- a/tests/integration/replication.tcl +++ b/tests/integration/replication.tcl @@ -574,12 +574,12 @@ foreach testType {Successful Aborted} { # Set a function value on replica to check status during loading, on failure and after swapping db $replica function load {#!lua name=test - redis.register_function('test', function() return 'hello1' end) + server.register_function('test', function() return 'hello1' end) } # Set a function value on master to check it reaches the replica when replication ends $master function load {#!lua name=test - redis.register_function('test', function() return 'hello2' end) + server.register_function('test', function() return 'hello2' end) } # Remember the sync_full stat before the client kill. 
@@ -727,7 +727,7 @@ test {diskless loading short read} { # Set a function value to check short read handling on functions r function load {#!lua name=test - redis.register_function('test', function() return 'hello1' end) + server.register_function('test', function() return 'hello1' end) } for {set k 0} {$k < 3} {incr k} { diff --git a/tests/integration/valkey-cli.tcl b/tests/integration/valkey-cli.tcl index 066a70a6a8..153c527055 100644 --- a/tests/integration/valkey-cli.tcl +++ b/tests/integration/valkey-cli.tcl @@ -451,7 +451,7 @@ if {!$::tls} { ;# fake_redis_node doesn't support TLS set dir [lindex [r config get dir] 1] assert_equal "OK" [r debug populate 100000 key 1000] - assert_equal "lib1" [r function load "#!lua name=lib1\nredis.register_function('func1', function() return 123 end)"] + assert_equal "lib1" [r function load "#!lua name=lib1\nserver.register_function('func1', function() return 123 end)"] if {$functions_only} { set args "--functions-rdb $dir/cli.rdb" } else { @@ -464,7 +464,7 @@ if {!$::tls} { ;# fake_redis_node doesn't support TLS file rename "$dir/cli.rdb" "$dir/dump.rdb" assert_equal "OK" [r set should-not-exist 1] - assert_equal "should_not_exist_func" [r function load "#!lua name=should_not_exist_func\nredis.register_function('should_not_exist_func', function() return 456 end)"] + assert_equal "should_not_exist_func" [r function load "#!lua name=should_not_exist_func\nserver.register_function('should_not_exist_func', function() return 456 end)"] assert_equal "OK" [r debug reload nosave] assert_equal {} [r get should-not-exist] assert_equal {{library_name lib1 engine LUA functions {{name func1 description {} flags {}}}}} [r function list] diff --git a/tests/unit/aofrw.tcl b/tests/unit/aofrw.tcl index cc7545265a..b5310edae4 100644 --- a/tests/unit/aofrw.tcl +++ b/tests/unit/aofrw.tcl @@ -194,7 +194,7 @@ start_server {tags {"aofrw external:skip"} overrides {aof-use-rdb-preamble no}} test "AOF rewrite functions" { r flushall r FUNCTION LOAD 
{#!lua name=test - redis.register_function('test', function() return 1 end) + server.register_function('test', function() return 1 end) } r bgrewriteaof waitForBgrewriteaof r diff --git a/tests/unit/cluster/cli.tcl b/tests/unit/cluster/cli.tcl index 62f0328352..f5599f6c5a 100644 --- a/tests/unit/cluster/cli.tcl +++ b/tests/unit/cluster/cli.tcl @@ -181,7 +181,7 @@ start_multiple_servers 5 [list overrides $base_conf] { # upload a function to all the cluster exec src/valkey-cli --cluster-yes --cluster call 127.0.0.1:[srv 0 port] \ FUNCTION LOAD {#!lua name=TEST - redis.register_function('test', function() return 'hello' end) + server.register_function('test', function() return 'hello' end) } # adding node to the cluster @@ -205,7 +205,7 @@ start_multiple_servers 5 [list overrides $base_conf] { # add function to node 5 assert_equal {TEST} [$node5_rd FUNCTION LOAD {#!lua name=TEST - redis.register_function('test', function() return 'hello' end) + server.register_function('test', function() return 'hello' end) }] # make sure functions was added to node 5 diff --git a/tests/unit/cluster/scripting.tcl b/tests/unit/cluster/scripting.tcl index 76aa882e83..1cf1421079 100644 --- a/tests/unit/cluster/scripting.tcl +++ b/tests/unit/cluster/scripting.tcl @@ -17,7 +17,7 @@ start_cluster 1 0 {tags {external:skip cluster}} { return 'OK' end - redis.register_function('test_cross_slot', test_cross_slot)} + server.register_function('test_cross_slot', test_cross_slot)} assert_error "ERR Script attempted to access keys that do not hash to the same slot*" {r FCALL test_cross_slot 0} } @@ -45,7 +45,7 @@ start_cluster 1 0 {tags {external:skip cluster}} { return 'OK' end - redis.register_function{function_name='test_cross_slot', callback=test_cross_slot, flags={ 'allow-cross-slot-keys' }}} + server.register_function{function_name='test_cross_slot', callback=test_cross_slot, flags={ 'allow-cross-slot-keys' }}} r FCALL test_cross_slot 0 # Retrieve data from different slot to verify data has 
been stored in the correct dictionary in cluster-enabled setup @@ -73,7 +73,7 @@ start_cluster 1 0 {tags {external:skip cluster}} { test "Function no-cluster flag" { R 0 function load {#!lua name=test - redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-cluster'}} + server.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-cluster'}} } catch {R 0 fcall f1 0} e assert_match {*Can not run script on cluster, 'no-cluster' flag is set*} $e diff --git a/tests/unit/functions.tcl b/tests/unit/functions.tcl index 4415b60e57..6a720419d3 100644 --- a/tests/unit/functions.tcl +++ b/tests/unit/functions.tcl @@ -1,9 +1,9 @@ proc get_function_code {args} { - return [format "#!%s name=%s\nredis.register_function('%s', function(KEYS, ARGV)\n %s \nend)" [lindex $args 0] [lindex $args 1] [lindex $args 2] [lindex $args 3]] + return [format "#!%s name=%s\nserver.register_function('%s', function(KEYS, ARGV)\n %s \nend)" [lindex $args 0] [lindex $args 1] [lindex $args 2] [lindex $args 3]] } proc get_no_writes_function_code {args} { - return [format "#!%s name=%s\nredis.register_function{function_name='%s', callback=function(KEYS, ARGV)\n %s \nend, flags={'no-writes'}}" [lindex $args 0] [lindex $args 1] [lindex $args 2] [lindex $args 3]] + return [format "#!%s name=%s\nserver.register_function{function_name='%s', callback=function(KEYS, ARGV)\n %s \nend, flags={'no-writes'}}" [lindex $args 0] [lindex $args 1] [lindex $args 2] [lindex $args 3]] } start_server {tags {"scripting"}} { @@ -433,7 +433,7 @@ test {FUNCTION can processes create, delete and flush commands in AOF when doing start_server {} { r config set appendonly yes waitForBgrewriteaof r - r FUNCTION LOAD "#!lua name=test\nredis.register_function('test', function() return 'hello' end)" + r FUNCTION LOAD "#!lua name=test\nserver.register_function('test', function() return 'hello' end)" r config set slave-read-only yes r slaveof 127.0.0.1 0 r debug 
loadaof @@ -447,7 +447,7 @@ test {FUNCTION can processes create, delete and flush commands in AOF when doing r slaveof no one assert_equal [r function list] {} - r FUNCTION LOAD "#!lua name=test\nredis.register_function('test', function() return 'hello' end)" + r FUNCTION LOAD "#!lua name=test\nserver.register_function('test', function() return 'hello' end)" r FUNCTION FLUSH r slaveof 127.0.0.1 0 @@ -463,7 +463,7 @@ start_server {tags {"scripting"}} { local function ping() return redis.call('ping') end - redis.register_function( + server.register_function( 'f1', function(keys, args) return ping() @@ -478,13 +478,13 @@ start_server {tags {"scripting"}} { local function add1(a) return a + 1 end - redis.register_function( + server.register_function( 'f1', function(keys, args) return add1(1) end ) - redis.register_function( + server.register_function( 'f2', function(keys, args) return add1(2) @@ -502,19 +502,19 @@ start_server {tags {"scripting"}} { local function add1(a) return a + 2 end - redis.register_function( + server.register_function( 'f1', function(keys, args) return add1(1) end ) - redis.register_function( + server.register_function( 'f2', 'not a function' ) } } e - assert_match {*second argument to redis.register_function must be a function*} $e + assert_match {*second argument to server.register_function must be a function*} $e assert_equal [r fcall f1 0] {2} assert_equal [r fcall f2 0] {3} } @@ -522,7 +522,7 @@ start_server {tags {"scripting"}} { test {LIBRARIES - test registration function name collision} { catch { r function load replace {#!lua name=lib2 - redis.register_function( + server.register_function( 'f1', function(keys, args) return 1 @@ -538,13 +538,13 @@ start_server {tags {"scripting"}} { test {LIBRARIES - test registration function name collision on same library} { catch { r function load replace {#!lua name=lib2 - redis.register_function( + server.register_function( 'f1', function(keys, args) return 1 end ) - redis.register_function( + 
server.register_function( 'f1', function(keys, args) return 1 @@ -558,43 +558,43 @@ start_server {tags {"scripting"}} { test {LIBRARIES - test registration with no argument} { catch { r function load replace {#!lua name=lib2 - redis.register_function() + server.register_function() } } e set _ $e - } {*wrong number of arguments to redis.register_function*} + } {*wrong number of arguments to server.register_function*} test {LIBRARIES - test registration with only name} { catch { r function load replace {#!lua name=lib2 - redis.register_function('f1') + server.register_function('f1') } } e set _ $e - } {*calling redis.register_function with a single argument is only applicable to Lua table*} + } {*calling server.register_function with a single argument is only applicable to Lua table*} test {LIBRARIES - test registration with to many arguments} { catch { r function load replace {#!lua name=lib2 - redis.register_function('f1', function() return 1 end, {}, 'description', 'extra arg') + server.register_function('f1', function() return 1 end, {}, 'description', 'extra arg') } } e set _ $e - } {*wrong number of arguments to redis.register_function*} + } {*wrong number of arguments to server.register_function*} test {LIBRARIES - test registration with no string name} { catch { r function load replace {#!lua name=lib2 - redis.register_function(nil, function() return 1 end) + server.register_function(nil, function() return 1 end) } } e set _ $e - } {*first argument to redis.register_function must be a string*} + } {*first argument to server.register_function must be a string*} test {LIBRARIES - test registration with wrong name format} { catch { r function load replace {#!lua name=lib2 - redis.register_function('test\0test', function() return 1 end) + server.register_function('test\0test', function() return 1 end) } } e set _ $e @@ -603,7 +603,7 @@ start_server {tags {"scripting"}} { test {LIBRARIES - test registration with empty name} { catch { r function load replace {#!lua 
name=lib2 - redis.register_function('', function() return 1 end) + server.register_function('', function() return 1 end) } } e set _ $e @@ -703,10 +703,10 @@ start_server {tags {"scripting"}} { test {LIBRARIES - register function inside a function} { r function load {#!lua name=lib - redis.register_function( + server.register_function( 'f1', function(keys, args) - redis.register_function( + server.register_function( 'f2', function(key, args) return 2 @@ -751,7 +751,7 @@ start_server {tags {"scripting"}} { test {LIBRARIES - named arguments} { r function load {#!lua name=lib - redis.register_function{ + server.register_function{ function_name='f1', callback=function() return 'hello' @@ -765,7 +765,7 @@ start_server {tags {"scripting"}} { test {LIBRARIES - named arguments, bad function name} { catch { r function load replace {#!lua name=lib - redis.register_function{ + server.register_function{ function_name=function() return 1 end, callback=function() return 'hello' @@ -775,12 +775,12 @@ start_server {tags {"scripting"}} { } } e set _ $e - } {*function_name argument given to redis.register_function must be a string*} + } {*function_name argument given to server.register_function must be a string*} test {LIBRARIES - named arguments, bad callback type} { catch { r function load replace {#!lua name=lib - redis.register_function{ + server.register_function{ function_name='f1', callback='bad', description='some desc' @@ -788,12 +788,12 @@ start_server {tags {"scripting"}} { } } e set _ $e - } {*callback argument given to redis.register_function must be a function*} + } {*callback argument given to server.register_function must be a function*} test {LIBRARIES - named arguments, bad description} { catch { r function load replace {#!lua name=lib - redis.register_function{ + server.register_function{ function_name='f1', callback=function() return 'hello' @@ -803,12 +803,12 @@ start_server {tags {"scripting"}} { } } e set _ $e - } {*description argument given to 
redis.register_function must be a string*} + } {*description argument given to server.register_function must be a string*} test {LIBRARIES - named arguments, unknown argument} { catch { r function load replace {#!lua name=lib - redis.register_function{ + server.register_function{ function_name='f1', callback=function() return 'hello' @@ -819,12 +819,12 @@ start_server {tags {"scripting"}} { } } e set _ $e - } {*unknown argument given to redis.register_function*} + } {*unknown argument given to server.register_function*} test {LIBRARIES - named arguments, missing function name} { catch { r function load replace {#!lua name=lib - redis.register_function{ + server.register_function{ callback=function() return 'hello' end, @@ -833,19 +833,19 @@ start_server {tags {"scripting"}} { } } e set _ $e - } {*redis.register_function must get a function name argument*} + } {*server.register_function must get a function name argument*} test {LIBRARIES - named arguments, missing callback} { catch { r function load replace {#!lua name=lib - redis.register_function{ + server.register_function{ function_name='f1', description='desc' } } } e set _ $e - } {*redis.register_function must get a callback argument*} + } {*server.register_function must get a callback argument*} test {FUNCTION - test function restore with function name collision} { r function flush @@ -853,19 +853,19 @@ start_server {tags {"scripting"}} { local function add1(a) return a + 1 end - redis.register_function( + server.register_function( 'f1', function(keys, args) return add1(1) end ) - redis.register_function( + server.register_function( 'f2', function(keys, args) return add1(2) end ) - redis.register_function( + server.register_function( 'f3', function(keys, args) return add1(3) @@ -877,7 +877,7 @@ start_server {tags {"scripting"}} { # load a library with different name but with the same function name r function load {#!lua name=lib1 - redis.register_function( + server.register_function( 'f6', function(keys, 
args) return 7 @@ -888,19 +888,19 @@ start_server {tags {"scripting"}} { local function add1(a) return a + 1 end - redis.register_function( + server.register_function( 'f4', function(keys, args) return add1(4) end ) - redis.register_function( + server.register_function( 'f5', function(keys, args) return add1(5) end ) - redis.register_function( + server.register_function( 'f3', function(keys, args) return add1(3) @@ -926,14 +926,14 @@ start_server {tags {"scripting"}} { test {FUNCTION - test function list with code} { r function flush r function load {#!lua name=library1 - redis.register_function('f6', function(keys, args) return 7 end) + server.register_function('f6', function(keys, args) return 7 end) } r function list withcode - } {{library_name library1 engine LUA functions {{name f6 description {} flags {}}} library_code {*redis.register_function('f6', function(keys, args) return 7 end)*}}} + } {{library_name library1 engine LUA functions {{name f6 description {} flags {}}} library_code {*server.register_function('f6', function(keys, args) return 7 end)*}}} test {FUNCTION - test function list with pattern} { r function load {#!lua name=lib1 - redis.register_function('f7', function(keys, args) return 7 end) + server.register_function('f7', function(keys, args) return 7 end) } r function list libraryname library* } {{library_name library1 engine LUA functions {{name f6 description {} flags {}}}}} @@ -961,14 +961,14 @@ start_server {tags {"scripting"}} { test {FUNCTION - verify OOM on function load and function restore} { r function flush r function load replace {#!lua name=test - redis.register_function('f1', function() return 1 end) + server.register_function('f1', function() return 1 end) } set payload [r function dump] r config set maxmemory 1 r function flush catch {r function load replace {#!lua name=test - redis.register_function('f1', function() return 1 end) + server.register_function('f1', function() return 1 end) }} e assert_match {*command not allowed 
when used memory*} $e @@ -981,7 +981,7 @@ start_server {tags {"scripting"}} { test {FUNCTION - verify allow-omm allows running any command} { r FUNCTION load replace {#!lua name=f1 - redis.register_function{ + server.register_function{ function_name='f1', callback=function() return redis.call('set', 'x', '1') end, flags={'allow-oom'} @@ -1000,18 +1000,18 @@ start_server {tags {"scripting"}} { start_server {tags {"scripting"}} { test {FUNCTION - wrong flags type named arguments} { catch {r function load replace {#!lua name=test - redis.register_function{ + server.register_function{ function_name = 'f1', callback = function() return 1 end, flags = 'bad flags type' } }} e set _ $e - } {*flags argument to redis.register_function must be a table representing function flags*} + } {*flags argument to server.register_function must be a table representing function flags*} test {FUNCTION - wrong flag type} { catch {r function load replace {#!lua name=test - redis.register_function{ + server.register_function{ function_name = 'f1', callback = function() return 1 end, flags = {function() return 1 end} @@ -1022,7 +1022,7 @@ start_server {tags {"scripting"}} { test {FUNCTION - unknown flag} { catch {r function load replace {#!lua name=test - redis.register_function{ + server.register_function{ function_name = 'f1', callback = function() return 1 end, flags = {'unknown'} @@ -1033,7 +1033,7 @@ start_server {tags {"scripting"}} { test {FUNCTION - write script on fcall_ro} { r function load replace {#!lua name=test - redis.register_function{ + server.register_function{ function_name = 'f1', callback = function() return redis.call('set', 'x', 1) end } @@ -1044,7 +1044,7 @@ start_server {tags {"scripting"}} { test {FUNCTION - write script with no-writes flag} { r function load replace {#!lua name=test - redis.register_function{ + server.register_function{ function_name = 'f1', callback = function() return redis.call('set', 'x', 1) end, flags = {'no-writes'} @@ -1056,7 +1056,7 @@ 
start_server {tags {"scripting"}} { test {FUNCTION - deny oom} { r FUNCTION load replace {#!lua name=test - redis.register_function('f1', function() return redis.call('set', 'x', '1') end) + server.register_function('f1', function() return redis.call('set', 'x', '1') end) } r config set maxmemory 1 @@ -1069,7 +1069,7 @@ start_server {tags {"scripting"}} { test {FUNCTION - deny oom on no-writes function} { r FUNCTION load replace {#!lua name=test - redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-writes'}} + server.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-writes'}} } r config set maxmemory 1 @@ -1082,10 +1082,10 @@ start_server {tags {"scripting"}} { test {FUNCTION - allow stale} { r FUNCTION load replace {#!lua name=test - redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-writes'}} - redis.register_function{function_name='f2', callback=function() return 'hello' end, flags={'allow-stale', 'no-writes'}} - redis.register_function{function_name='f3', callback=function() return redis.call('get', 'x') end, flags={'allow-stale', 'no-writes'}} - redis.register_function{function_name='f4', callback=function() return redis.call('info', 'server') end, flags={'allow-stale', 'no-writes'}} + server.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-writes'}} + server.register_function{function_name='f2', callback=function() return 'hello' end, flags={'allow-stale', 'no-writes'}} + server.register_function{function_name='f3', callback=function() return redis.call('get', 'x') end, flags={'allow-stale', 'no-writes'}} + server.register_function{function_name='f4', callback=function() return redis.call('info', 'server') end, flags={'allow-stale', 'no-writes'}} } r config set replica-serve-stale-data no @@ -1110,13 +1110,13 @@ start_server {tags {"scripting"}} { r FUNCTION load replace {#!lua name=test local 
version = redis.REDIS_VERSION_NUM - redis.register_function{function_name='get_version_v1', callback=function() + server.register_function{function_name='get_version_v1', callback=function() return string.format('%s.%s.%s', bit.band(bit.rshift(version, 16), 0x000000ff), bit.band(bit.rshift(version, 8), 0x000000ff), bit.band(version, 0x000000ff)) end} - redis.register_function{function_name='get_version_v2', callback=function() return redis.REDIS_VERSION end} + server.register_function{function_name='get_version_v2', callback=function() return redis.REDIS_VERSION end} } catch {[r fcall f1 0]} e @@ -1127,12 +1127,12 @@ start_server {tags {"scripting"}} { r FUNCTION FLUSH r FUNCTION load {#!lua name=test1 - redis.register_function('f1', function() return 1 end) - redis.register_function('f2', function() return 1 end) + server.register_function('f1', function() return 1 end) + server.register_function('f2', function() return 1 end) } r FUNCTION load {#!lua name=test2 - redis.register_function('f3', function() return 1 end) + server.register_function('f3', function() return 1 end) } r function stats @@ -1152,12 +1152,12 @@ start_server {tags {"scripting"}} { r FUNCTION FLUSH r FUNCTION load {#!lua name=test1 - redis.register_function('f1', function() return 1 end) - redis.register_function('f2', function() return 1 end) + server.register_function('f1', function() return 1 end) + server.register_function('f2', function() return 1 end) } catch {r FUNCTION load {#!lua name=test1 - redis.register_function('f3', function() return 1 end) + server.register_function('f3', function() return 1 end) }} e assert_match "*Library 'test1' already exists*" $e @@ -1172,35 +1172,35 @@ start_server {tags {"scripting"}} { test {FUNCTION - function test empty engine} { catch {r function load replace {#! 
name=test - redis.register_function('foo', function() return 1 end) + server.register_function('foo', function() return 1 end) }} e set _ $e } {ERR Engine '' not found} test {FUNCTION - function test unknown metadata value} { catch {r function load replace {#!lua name=test foo=bar - redis.register_function('foo', function() return 1 end) + server.register_function('foo', function() return 1 end) }} e set _ $e } {ERR Invalid metadata value given: foo=bar} test {FUNCTION - function test no name} { catch {r function load replace {#!lua - redis.register_function('foo', function() return 1 end) + server.register_function('foo', function() return 1 end) }} e set _ $e } {ERR Library name was not given} test {FUNCTION - function test multiple names} { catch {r function load replace {#!lua name=foo name=bar - redis.register_function('foo', function() return 1 end) + server.register_function('foo', function() return 1 end) }} e set _ $e } {ERR Invalid metadata value, name argument was given multiple times} test {FUNCTION - function test name with quotes} { r function load replace {#!lua name="foo" - redis.register_function('foo', function() return 1 end) + server.register_function('foo', function() return 1 end) } } {foo} @@ -1208,7 +1208,7 @@ start_server {tags {"scripting"}} { r FUNCTION FLUSH r FUNCTION load {#!lua name=test1 - redis.register_function('f1', function() + server.register_function('f1', function() mt = getmetatable(_G) original_globals = mt.__index original_globals['redis'] = function() return 1 end diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 02aca9e97d..8ceb03b502 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -311,7 +311,7 @@ start_server {tags {"introspection"}} { test {MONITOR can log commands issued by functions} { r function load replace {#!lua name=test - redis.register_function('test', function() return redis.call('set', 'foo', 'bar') end) + server.register_function('test', 
function() return redis.call('set', 'foo', 'bar') end) } set rd [valkey_deferring_client] $rd monitor diff --git a/tests/unit/moduleapi/misc.tcl b/tests/unit/moduleapi/misc.tcl index 041149eb80..9e0ea72456 100644 --- a/tests/unit/moduleapi/misc.tcl +++ b/tests/unit/moduleapi/misc.tcl @@ -45,7 +45,7 @@ start_server {overrides {save {900 1}} tags {"modules"}} { test {test RedisModule_ResetDataset do not reset functions} { r function load {#!lua name=lib - redis.register_function('test', function() return 1 end) + server.register_function('test', function() return 1 end) } assert_equal [r function list] {{library_name lib engine LUA functions {{name test description {} flags {}}}}} r test.flushall diff --git a/tests/unit/pause.tcl b/tests/unit/pause.tcl index 3698e9e064..38c13afc46 100644 --- a/tests/unit/pause.tcl +++ b/tests/unit/pause.tcl @@ -133,7 +133,7 @@ start_server {tags {"pause network"}} { r set x y # create a function for later r FUNCTION load replace {#!lua name=f1 - redis.register_function{ + server.register_function{ function_name='f1', callback=function() return "hello" end, flags={'no-writes'} diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index a4ab545803..0b7206e2a0 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -1189,7 +1189,7 @@ start_server {tags {"scripting"}} { set buf "*3\r\n\$4\r\neval\r\n\$33\r\nwhile 1 do redis.call('ping') end\r\n\$1\r\n0\r\n" append buf "*1\r\n\$4\r\nping\r\n" } else { - set buf "*4\r\n\$8\r\nfunction\r\n\$4\r\nload\r\n\$7\r\nreplace\r\n\$97\r\n#!lua name=test\nredis.register_function('test', function() while 1 do redis.call('ping') end end)\r\n" + set buf "*4\r\n\$8\r\nfunction\r\n\$4\r\nload\r\n\$7\r\nreplace\r\n\$99\r\n#!lua name=test\nserver.register_function('test', function() while 1 do server.call('ping') end end)\r\n" append buf "*3\r\n\$5\r\nfcall\r\n\$4\r\ntest\r\n\$1\r\n0\r\n" append buf "*1\r\n\$4\r\nping\r\n" } From bce240eab7aa02ace5ed76f61122647f9ef719d7 Mon Sep 17 
00:00:00 2001 From: Madelyn Olson Date: Fri, 7 Jun 2024 00:46:52 -0700 Subject: [PATCH 30/42] Replace masteruser and masterauth with primaryuser and primaryauth (#598) Make the one backwards compatible config change we are allowed to replace for removing master from our API. `masterauth` and `masteruser` are still used as an alias, but aren't explicitly referenced. As an addendum to https://github.com/valkey-io/valkey/pull/591, it would be good to have this in 8. Given the related PR for updated other references for master, I just updated the ones around this specific change. Signed-off-by: Madelyn Olson --- src/config.c | 4 ++-- src/valkey-cli.c | 1 + tests/integration/valkey-benchmark.tcl | 4 ++-- tests/sentinel/tests/03-runtime-reconf.tcl | 12 ++++++------ tests/unit/auth.tcl | 12 ++++++------ tests/unit/slowlog.tcl | 8 ++++---- valkey.conf | 8 ++++---- 7 files changed, 25 insertions(+), 24 deletions(-) diff --git a/src/config.c b/src/config.c index e46d01cf6f..bd01d9156a 100644 --- a/src/config.c +++ b/src/config.c @@ -3059,7 +3059,7 @@ standardConfig static_configs[] = { createStringConfig("unixsocket", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.unixsocket, NULL, NULL, NULL), createStringConfig("pidfile", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.pidfile, NULL, NULL, NULL), createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.slave_announce_ip, NULL, NULL, NULL), - createStringConfig("masteruser", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masteruser, NULL, NULL, NULL), + createStringConfig("primaryuser", "masteruser", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masteruser, NULL, NULL, NULL), createStringConfig("cluster-announce-ip", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_ip, NULL, NULL, updateClusterIp), createStringConfig("cluster-config-file", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, 
server.cluster_configfile, "nodes.conf", NULL, NULL), createStringConfig("cluster-announce-hostname", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_hostname, NULL, isValidAnnouncedHostname, updateClusterHostname), @@ -3082,7 +3082,7 @@ standardConfig static_configs[] = { createStringConfig("locale-collate", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.locale_collate, "", NULL, updateLocaleCollate), /* SDS Configs */ - createSDSConfig("masterauth", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masterauth, NULL, NULL, NULL), + createSDSConfig("primaryauth", "masterauth", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masterauth, NULL, NULL, NULL), createSDSConfig("requirepass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.requirepass, NULL, NULL, updateRequirePass), /* Enum Configs */ diff --git a/src/valkey-cli.c b/src/valkey-cli.c index e3fb7fb5d2..a02a171465 100644 --- a/src/valkey-cli.c +++ b/src/valkey-cli.c @@ -3161,6 +3161,7 @@ static int isSensitiveCommand(int argc, char **argv) { } else if (argc > 2 && !strcasecmp(argv[0], "config") && !strcasecmp(argv[1], "set")) { for (int j = 2; j < argc; j = j + 2) { if (!strcasecmp(argv[j], "masterauth") || !strcasecmp(argv[j], "masteruser") || + !strcasecmp(argv[j], "primaryuser") || !strcasecmp(argv[j], "primaryauth") || !strcasecmp(argv[j], "tls-key-file-pass") || !strcasecmp(argv[j], "tls-client-key-file-pass") || !strcasecmp(argv[j], "requirepass")) { return 1; diff --git a/tests/integration/valkey-benchmark.tcl b/tests/integration/valkey-benchmark.tcl index 74d26b7577..7310e2bd1c 100644 --- a/tests/integration/valkey-benchmark.tcl +++ b/tests/integration/valkey-benchmark.tcl @@ -44,7 +44,7 @@ tags {"benchmark network external:skip logreqres:skip"} { } test {benchmark: connecting using URI with authentication set,get} { - r config set masterauth pass + r config set primaryauth pass set cmd 
[valkeybenchmarkuriuserpass $master_host $master_port "default" pass "-c 5 -n 10 -t set,get"] common_bench_setup $cmd default_set_get_checks @@ -145,7 +145,7 @@ tags {"benchmark network external:skip logreqres:skip"} { } test {benchmark: tls connecting using URI with authentication set,get} { - r config set masterauth pass + r config set primaryauth pass set cmd [valkeybenchmarkuriuserpass $master_host $master_port "default" pass "-c 5 -n 10 -t set,get"] common_bench_setup $cmd default_set_get_checks diff --git a/tests/sentinel/tests/03-runtime-reconf.tcl b/tests/sentinel/tests/03-runtime-reconf.tcl index e2ac868103..c43333067a 100644 --- a/tests/sentinel/tests/03-runtime-reconf.tcl +++ b/tests/sentinel/tests/03-runtime-reconf.tcl @@ -9,14 +9,14 @@ proc server_set_password {} { foreach_valkey_id id { assert_equal {OK} [R $id CONFIG SET requirepass $::password] assert_equal {OK} [R $id AUTH $::password] - assert_equal {OK} [R $id CONFIG SET masterauth $::password] + assert_equal {OK} [R $id CONFIG SET primaryauth $::password] } } proc server_reset_password {} { foreach_valkey_id id { assert_equal {OK} [R $id CONFIG SET requirepass ""] - assert_equal {OK} [R $id CONFIG SET masterauth ""] + assert_equal {OK} [R $id CONFIG SET primaryauth ""] } } @@ -26,16 +26,16 @@ proc server_set_acl {id} { R $id CLIENT KILL USER default SKIPME no assert_equal {OK} [R $id AUTH $::user $::password] - assert_equal {OK} [R $id CONFIG SET masteruser $::user] - assert_equal {OK} [R $id CONFIG SET masterauth $::password] + assert_equal {OK} [R $id CONFIG SET primaryuser $::user] + assert_equal {OK} [R $id CONFIG SET primaryauth $::password] } proc server_reset_acl {id} { assert_equal {OK} [R $id ACL SETUSER default on] assert_equal {1} [R $id ACL DELUSER $::user] - assert_equal {OK} [R $id CONFIG SET masteruser ""] - assert_equal {OK} [R $id CONFIG SET masterauth ""] + assert_equal {OK} [R $id CONFIG SET primaryuser ""] + assert_equal {OK} [R $id CONFIG SET primaryauth ""] } proc 
verify_sentinel_connect_replicas {id} { diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl index d3c6156e6b..ee5d2db0fc 100644 --- a/tests/unit/auth.tcl +++ b/tests/unit/auth.tcl @@ -59,26 +59,26 @@ start_server {tags {"auth_binary_password external:skip"}} { r auth "abc\x00def" } {OK} - start_server {tags {"masterauth"}} { + start_server {tags {"primaryauth"}} { set master [srv -1 client] set master_host [srv -1 host] set master_port [srv -1 port] set slave [srv 0 client] - test {MASTERAUTH test with binary password} { + test {primaryauth test with binary password} { $master config set requirepass "abc\x00def" - # Configure the replica with masterauth + # Configure the replica with primaryauth set loglines [count_log_lines 0] - $slave config set masterauth "abc" + $slave config set primaryauth "abc" $slave slaveof $master_host $master_port # Verify replica is not able to sync with master wait_for_log_messages 0 {"*Unable to AUTH to MASTER*"} $loglines 1000 10 assert_equal {down} [s 0 master_link_status] - # Test replica with the correct masterauth - $slave config set masterauth "abc\x00def" + # Test replica with the correct primaryauth + $slave config set primaryauth "abc\x00def" wait_for_condition 50 100 { [s 0 master_link_status] eq {up} } else { diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl index 547fa2d452..f1acbaa0ff 100644 --- a/tests/unit/slowlog.tcl +++ b/tests/unit/slowlog.tcl @@ -64,8 +64,8 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { r config set slowlog-log-slower-than 0 r slowlog reset catch {r acl setuser "slowlog test user" +get +set} _ - r config set masteruser "" - r config set masterauth "" + r config set primaryuser "" + r config set primaryauth "" r config set requirepass "" r config set tls-key-file-pass "" r config set tls-client-key-file-pass "" @@ -81,8 +81,8 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { assert_equal 11 [llength $slowlog_resp] 
assert_equal {slowlog reset} [lindex [lindex $slowlog_resp 10] 3] assert_equal {acl setuser (redacted) (redacted) (redacted)} [lindex [lindex $slowlog_resp 9] 3] - assert_equal {config set masteruser (redacted)} [lindex [lindex $slowlog_resp 8] 3] - assert_equal {config set masterauth (redacted)} [lindex [lindex $slowlog_resp 7] 3] + assert_equal {config set primaryuser (redacted)} [lindex [lindex $slowlog_resp 8] 3] + assert_equal {config set primaryauth (redacted)} [lindex [lindex $slowlog_resp 7] 3] assert_equal {config set requirepass (redacted)} [lindex [lindex $slowlog_resp 6] 3] assert_equal {config set tls-key-file-pass (redacted)} [lindex [lindex $slowlog_resp 5] 3] assert_equal {config set tls-client-key-file-pass (redacted)} [lindex [lindex $slowlog_resp 4] 3] diff --git a/valkey.conf b/valkey.conf index 7a3c4458cb..2f01e3dd2d 100644 --- a/valkey.conf +++ b/valkey.conf @@ -544,17 +544,17 @@ dir ./ # starting the replication synchronization process, otherwise the master will # refuse the replica request. # -# masterauth +# primaryauth # # However this is not enough if you are using ACLs # and the default user is not capable of running the PSYNC # command and/or other commands needed for replication. In this case it's # better to configure a special user to use with replication, and specify the -# masteruser configuration as such: +# primaryuser configuration as such: # -# masteruser +# primaryuser # -# When masteruser is specified, the replica will authenticate against its +# When primaryuser is specified, the replica will authenticate against its # master using the new AUTH form: AUTH . # When a replica loses its connection with the master, or when the replication From 54c97479356ecf41b4b63733494a1be2ab919e17 Mon Sep 17 00:00:00 2001 From: Ping Xie Date: Fri, 7 Jun 2024 14:21:33 -0700 Subject: [PATCH 31/42] Remove `master` and `slave` from source code (#591) External facing interfaces are not affected. 
--------- Signed-off-by: Ping Xie --- .github/workflows/clang-format.yml | 2 +- src/aof.c | 23 +- src/blocked.c | 6 +- src/cluster.c | 55 +- src/cluster.h | 12 +- src/cluster_legacy.c | 1070 +++++++++---------- src/cluster_legacy.h | 80 +- src/config.c | 62 +- src/db.c | 44 +- src/debug.c | 4 +- src/evict.c | 16 +- src/expire.c | 76 +- src/hyperloglog.c | 4 +- src/logreqres.c | 6 +- src/module.c | 112 +- src/modules/helloworld.c | 2 +- src/networking.c | 287 ++--- src/object.c | 26 +- src/rdb.c | 110 +- src/rdb.h | 2 +- src/replication.c | 1486 +++++++++++++------------- src/script.c | 28 +- src/script_lua.c | 2 +- src/sentinel.c | 1593 ++++++++++++++-------------- src/server.c | 278 ++--- src/server.h | 316 +++--- src/stream.h | 30 +- src/syncio.c | 2 +- src/t_stream.c | 148 +-- src/timeout.c | 6 +- src/valkey-benchmark.c | 16 +- src/valkey-cli.c | 330 +++--- src/valkeymodule.h | 6 +- tests/modules/hooks.c | 2 +- valkey.conf | 142 +-- 35 files changed, 3203 insertions(+), 3181 deletions(-) diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml index b851ffe926..cb76216d4e 100644 --- a/.github/workflows/clang-format.yml +++ b/.github/workflows/clang-format.yml @@ -41,7 +41,7 @@ jobs: - name: Check for formatting changes if: ${{ steps.clang-format.outputs.diff }} run: | - echo "Code is not formatted correctly. Here is the diff:" + echo "ERROR: Code is not formatted correctly. Here is the diff:" # Decode the Base64 diff to display it echo "${{ steps.clang-format.outputs.diff }}" | base64 --decode exit 1 diff --git a/src/aof.c b/src/aof.c index f3538f64fa..a88b28d827 100644 --- a/src/aof.c +++ b/src/aof.c @@ -904,12 +904,12 @@ int aofFsyncInProgress(void) { /* Starts a background task that performs fsync() against the specified * file descriptor (the one of the AOF file) in another thread. 
*/ void aof_background_fsync(int fd) { - bioCreateFsyncJob(fd, server.master_repl_offset, 1); + bioCreateFsyncJob(fd, server.primary_repl_offset, 1); } /* Close the fd on the basis of aof_background_fsync. */ void aof_background_fsync_and_close(int fd) { - bioCreateCloseAofJob(fd, server.master_repl_offset, 1); + bioCreateCloseAofJob(fd, server.primary_repl_offset, 1); } /* Kills an AOFRW child process if exists */ @@ -1069,11 +1069,12 @@ void flushAppendOnlyFile(int force) { } else { /* All data is fsync'd already: Update fsynced_reploff_pending just in case. * This is needed to avoid a WAITAOF hang in case a module used RM_Call with the NO_AOF flag, - * in which case master_repl_offset will increase but fsynced_reploff_pending won't be updated + * in which case primary_repl_offset will increase but fsynced_reploff_pending won't be updated * (because there's no reason, from the AOF POV, to call fsync) and then WAITAOF may wait on * the higher offset (which contains data that was only propagated to replicas, and not to AOF) */ if (!sync_in_progress && server.aof_fsync != AOF_FSYNC_NO) - atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); + atomic_store_explicit(&server.fsynced_reploff_pending, server.primary_repl_offset, + memory_order_relaxed); return; } } @@ -1243,7 +1244,7 @@ void flushAppendOnlyFile(int force) { latencyAddSampleIfNeeded("aof-fsync-always", latency); server.aof_last_incr_fsync_offset = server.aof_last_incr_size; server.aof_last_fsync = server.mstime; - atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); + atomic_store_explicit(&server.fsynced_reploff_pending, server.primary_repl_offset, memory_order_relaxed); } else if (server.aof_fsync == AOF_FSYNC_EVERYSEC && server.mstime - server.aof_last_fsync >= 1000) { if (!sync_in_progress) { aof_background_fsync(server.aof_fd); @@ -1355,7 +1356,7 @@ struct client *createAOFClient(void) { c->id = 
CLIENT_ID_AOF; /* So modules can identify it's the AOF client. */ /* - * The AOF client should never be blocked (unlike master + * The AOF client should never be blocked (unlike primary * replication connection). * This is because blocking the AOF client might cause * deadlock (because potentially no one will unblock it). @@ -1365,9 +1366,9 @@ struct client *createAOFClient(void) { */ c->flags = CLIENT_DENY_BLOCKING; - /* We set the fake client as a slave waiting for the synchronization + /* We set the fake client as a replica waiting for the synchronization * so that the server will not try to send replies to this client. */ - c->replstate = SLAVE_STATE_WAIT_BGSAVE_START; + c->repl_state = REPLICA_STATE_WAIT_BGSAVE_START; return c; } @@ -2320,7 +2321,7 @@ int rewriteAppendOnlyFile(char *filename) { if (server.aof_use_rdb_preamble) { int error; - if (rdbSaveRio(SLAVE_REQ_NONE, &aof, &error, RDBFLAGS_AOF_PREAMBLE, NULL) == C_ERR) { + if (rdbSaveRio(REPLICA_REQ_NONE, &aof, &error, RDBFLAGS_AOF_PREAMBLE, NULL) == C_ERR) { errno = error; goto werr; } @@ -2403,12 +2404,12 @@ int rewriteAppendOnlyFileBackground(void) { * between updates to `fsynced_reploff_pending` of the worker thread, belonging * to the previous AOF, and the new one. This concern is specific for a full * sync scenario where we don't wanna risk the ACKed replication offset - * jumping backwards or forward when switching to a different master. */ + * jumping backwards or forward when switching to a different primary. 
*/ bioDrainWorker(BIO_AOF_FSYNC); /* Set the initial repl_offset, which will be applied to fsynced_reploff * when AOFRW finishes (after possibly being updated by a bio thread) */ - atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); + atomic_store_explicit(&server.fsynced_reploff_pending, server.primary_repl_offset, memory_order_relaxed); server.fsynced_reploff = 0; } diff --git a/src/blocked.c b/src/blocked.c index 85ef9170a0..6d8d4fbc7c 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -87,7 +87,7 @@ void initClientBlockingState(client *c) { * and will be processed when the client is unblocked. */ void blockClient(client *c, int btype) { /* Master client should never be blocked unless pause or module */ - serverAssert(!(c->flags & CLIENT_MASTER && btype != BLOCKED_MODULE && btype != BLOCKED_POSTPONE)); + serverAssert(!(c->flags & CLIENT_PRIMARY && btype != BLOCKED_MODULE && btype != BLOCKED_POSTPONE)); c->flags |= CLIENT_BLOCKED; c->bstate.btype = btype; @@ -265,8 +265,8 @@ void replyToClientsBlockedOnShutdown(void) { /* Mass-unblock clients because something changed in the instance that makes * blocking no longer safe. For example clients blocked in list operations - * in an instance which turns from master to slave is unsafe, so this function - * is called when a master turns into a slave. + * in an instance which turns from master to replica is unsafe, so this function + * is called when a master turns into a replica. * * The semantics is to send an -UNBLOCKED error to the client, disconnecting * it at the same time. 
*/ diff --git a/src/cluster.c b/src/cluster.c index 71d1cc9124..3a4dccdff5 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -900,7 +900,6 @@ void clusterCommand(client *c) { } kvstoreReleaseDictIterator(kvs_di); } else if ((!strcasecmp(c->argv[1]->ptr, "slaves") || !strcasecmp(c->argv[1]->ptr, "replicas")) && c->argc == 3) { - /* CLUSTER SLAVES */ /* CLUSTER REPLICAS */ clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr)); int j; @@ -911,15 +910,15 @@ void clusterCommand(client *c) { return; } - if (clusterNodeIsSlave(n)) { + if (clusterNodeIsReplica(n)) { addReplyError(c, "The specified node is not a master"); return; } /* Report TLS ports to TLS client, and report non-TLS port to non-TLS client. */ - addReplyArrayLen(c, clusterNodeNumSlaves(n)); - for (j = 0; j < clusterNodeNumSlaves(n); j++) { - sds ni = clusterGenNodeDescription(c, clusterNodeGetSlave(n, j), shouldReturnTlsInfo()); + addReplyArrayLen(c, clusterNodeNumReplicas(n)); + for (j = 0; j < clusterNodeNumReplicas(n); j++) { + sds ni = clusterGenNodeDescription(c, clusterNodeGetReplica(n, j), shouldReturnTlsInfo()); addReplyBulkCString(c, ni); sdsfree(ni); } @@ -1048,8 +1047,8 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int * can safely serve the request, otherwise we return a TRYAGAIN * error). To do so we set the importing/migrating state and * increment a counter for every missing key. 
*/ - if (clusterNodeIsMaster(myself) || c->flags & CLIENT_READONLY) { - if (n == clusterNodeGetMaster(myself) && getMigratingSlotDest(slot) != NULL) { + if (clusterNodeIsPrimary(myself) || c->flags & CLIENT_READONLY) { + if (n == clusterNodeGetPrimary(myself) && getMigratingSlotDest(slot) != NULL) { migrating_slot = 1; } else if (getImportingSlotSource(slot) != NULL) { importing_slot = 1; @@ -1122,7 +1121,7 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int /* MIGRATE always works in the context of the local node if the slot * is open (migrating or importing state). We need to be able to freely * move keys among instances in this case. */ - if ((migrating_slot || importing_slot) && cmd->proc == migrateCommand && clusterNodeIsMaster(myself)) { + if ((migrating_slot || importing_slot) && cmd->proc == migrateCommand && clusterNodeIsPrimary(myself)) { return myself; } @@ -1152,13 +1151,13 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int } } - /* Handle the read-only client case reading from a slave: if this - * node is a slave and the request is about a hash slot our master + /* Handle the read-only client case reading from a replica: if this + * node is a replica and the request is about a hash slot our primary * is serving, we can reply without redirection. 
*/ int is_write_command = (cmd_flags & CMD_WRITE) || (c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_WRITE)); - if (((c->flags & CLIENT_READONLY) || pubsubshard_included) && !is_write_command && clusterNodeIsSlave(myself) && - clusterNodeGetMaster(myself) == n) { + if (((c->flags & CLIENT_READONLY) || pubsubshard_included) && !is_write_command && clusterNodeIsReplica(myself) && + clusterNodeGetPrimary(myself) == n) { return myself; } @@ -1204,7 +1203,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co * to detect timeouts, in order to handle the following case: * * 1) A client blocks with BLPOP or similar blocking operation. - * 2) The master migrates the hash slot elsewhere or turns into a slave. + * 2) The primary migrates the hash slot elsewhere or turns into a replica. * 3) The client may remain blocked forever (or up to the max timeout time) * waiting for a key change that will never happen. * @@ -1240,8 +1239,8 @@ int clusterRedirectBlockedClientIfNeeded(client *c) { /* if the client is read-only and attempting to access key that our * replica can handle, allow it. 
*/ - if ((c->flags & CLIENT_READONLY) && !(c->lastcmd->flags & CMD_WRITE) && clusterNodeIsSlave(myself) && - clusterNodeGetMaster(myself) == node) { + if ((c->flags & CLIENT_READONLY) && !(c->lastcmd->flags & CMD_WRITE) && clusterNodeIsReplica(myself) && + clusterNodeGetPrimary(myself) == node) { node = myself; } @@ -1331,9 +1330,9 @@ int isNodeAvailable(clusterNode *node) { } void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, int end_slot) { - int i, nested_elements = 3; /* slots (2) + master addr (1) */ - for (i = 0; i < clusterNodeNumSlaves(node); i++) { - if (!isNodeAvailable(clusterNodeGetSlave(node, i))) continue; + int i, nested_elements = 3; /* slots (2) + primary addr (1) */ + for (i = 0; i < clusterNodeNumReplicas(node); i++) { + if (!isNodeAvailable(clusterNodeGetReplica(node, i))) continue; nested_elements++; } addReplyArrayLen(c, nested_elements); @@ -1342,11 +1341,11 @@ void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, in addNodeToNodeReply(c, node); /* Remaining nodes in reply are replicas for slot range */ - for (i = 0; i < clusterNodeNumSlaves(node); i++) { + for (i = 0; i < clusterNodeNumReplicas(node); i++) { /* This loop is copy/pasted from clusterGenNodeDescription() * with modifications for per-slot node aggregation. 
*/ - if (!isNodeAvailable(clusterNodeGetSlave(node, i))) continue; - addNodeToNodeReply(c, clusterNodeGetSlave(node, i)); + if (!isNodeAvailable(clusterNodeGetReplica(node, i))) continue; + addNodeToNodeReply(c, clusterNodeGetReplica(node, i)); nested_elements--; } serverAssert(nested_elements == 3); /* Original 3 elements */ @@ -1364,7 +1363,7 @@ void clearCachedClusterSlotsResponse(void) { sds generateClusterSlotResponse(void) { client *recording_client = createCachedResponseClient(); clusterNode *n = NULL; - int num_masters = 0, start = -1; + int num_primaries = 0, start = -1; void *slot_replylen = addReplyDeferredLen(recording_client); for (int i = 0; i <= CLUSTER_SLOTS; i++) { @@ -1380,13 +1379,13 @@ sds generateClusterSlotResponse(void) { * or end of slot. */ if (i == CLUSTER_SLOTS || n != getNodeBySlot(i)) { addNodeReplyForClusterSlot(recording_client, n, start, i - 1); - num_masters++; + num_primaries++; if (i == CLUSTER_SLOTS) break; n = getNodeBySlot(i); start = i; } } - setDeferredArrayLen(recording_client, slot_replylen, num_masters); + setDeferredArrayLen(recording_client, slot_replylen, num_primaries); sds cluster_slot_response = aggregateClientOutputBuffer(recording_client); deleteCachedResponseClient(recording_client); return cluster_slot_response; @@ -1405,8 +1404,8 @@ int verifyCachedClusterSlotsResponse(sds cached_response) { void clusterCommandSlots(client *c) { /* Format: 1) 1) start slot * 2) end slot - * 3) 1) master IP - * 2) master port + * 3) 1) primary IP + * 2) primary port * 3) node ID * 4) 1) replica IP * 2) replica port @@ -1446,8 +1445,8 @@ void askingCommand(client *c) { } /* The READONLY command is used by clients to enter the read-only mode. - * In this mode slaves will not redirect clients as long as clients access - * with read-only commands to keys that are served by the slave's master. 
*/ + * In this mode replica will not redirect clients as long as clients access + * with read-only commands to keys that are served by the replica's primary. */ void readonlyCommand(client *c) { if (server.cluster_enabled == 0) { addReplyError(c, "This instance has cluster support disabled"); diff --git a/src/cluster.h b/src/cluster.h index de58486440..a42573bb3a 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -67,7 +67,7 @@ int clusterCommandSpecial(client *c); const char **clusterCommandExtendedHelp(void); int clusterAllowFailoverCmd(client *c); -void clusterPromoteSelfToMaster(void); +void clusterPromoteSelfToPrimary(void); int clusterManualFailoverTimeLimit(void); void clusterCommandSlots(client *c); @@ -83,18 +83,18 @@ int getClusterSize(void); int getMyShardSlotCount(void); int handleDebugClusterCommand(client *c); int clusterNodePending(clusterNode *node); -int clusterNodeIsMaster(clusterNode *n); +int clusterNodeIsPrimary(clusterNode *n); char **getClusterNodesList(size_t *numnodes); char *clusterNodeIp(clusterNode *node); -int clusterNodeIsSlave(clusterNode *node); -clusterNode *clusterNodeGetMaster(clusterNode *node); +int clusterNodeIsReplica(clusterNode *node); +clusterNode *clusterNodeGetPrimary(clusterNode *node); char *clusterNodeGetName(clusterNode *node); int clusterNodeTimedOut(clusterNode *node); int clusterNodeIsFailing(clusterNode *node); int clusterNodeIsNoFailover(clusterNode *node); char *clusterNodeGetShardId(clusterNode *node); -int clusterNodeNumSlaves(clusterNode *node); -clusterNode *clusterNodeGetSlave(clusterNode *node, int slave_idx); +int clusterNodeNumReplicas(clusterNode *node); +clusterNode *clusterNodeGetReplica(clusterNode *node, int slave_idx); clusterNode *getMigratingSlotDest(int slot); clusterNode *getImportingSlotSource(int slot); clusterNode *getNodeBySlot(int slot); diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index 685bafcbe4..e3585c537d 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ 
-63,14 +63,14 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request); void clusterUpdateState(void); int clusterNodeCoversSlot(clusterNode *n, int slot); list *clusterGetNodesInMyShard(clusterNode *node); -int clusterNodeAddSlave(clusterNode *master, clusterNode *slave); +int clusterNodeAddReplica(clusterNode *primary, clusterNode *replica); int clusterAddSlot(clusterNode *n, int slot); int clusterDelSlot(int slot); int clusterDelNodeSlots(clusterNode *node); int clusterNodeSetSlotBit(clusterNode *n, int slot); -void clusterSetMaster(clusterNode *n, int closeSlots); -void clusterHandleSlaveFailover(void); -void clusterHandleSlaveMigration(int max_slaves); +void clusterSetPrimary(clusterNode *n, int closeSlots); +void clusterHandleReplicaFailover(void); +void clusterHandleReplicaMigration(int max_replicas); int bitmapTestBit(unsigned char *bitmap, int pos); void bitmapSetBit(unsigned char *bitmap, int pos); void bitmapClearBit(unsigned char *bitmap, int pos); @@ -78,7 +78,7 @@ void clusterDoBeforeSleep(int flags); void clusterSendUpdate(clusterLink *link, clusterNode *node); void resetManualFailover(void); void clusterCloseAllSlots(void); -void clusterSetNodeAsMaster(clusterNode *n); +void clusterSetNodeAsPrimary(clusterNode *n); void clusterDelNode(clusterNode *delnode); sds representClusterNodeFlags(sds ci, uint16_t flags); sds representSlotInfo(sds ci, uint16_t *slot_info_pairs, int slot_info_pairs_count); @@ -224,8 +224,8 @@ int auxShardIdSetter(clusterNode *n, void *value, int length) { memcpy(n->shard_id, value, CLUSTER_NAMELEN); /* if n already has replicas, make sure they all agree * on the shard id */ - for (int i = 0; i < n->numslaves; i++) { - if (memcmp(n->slaves[i]->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { + for (int i = 0; i < n->num_replicas; i++) { + if (memcmp(n->replicas[i]->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { return C_ERR; } } @@ -358,7 +358,7 @@ int clusterLoadConfig(char *filename) { while (fgets(line, 
maxline, fp) != NULL) { int argc, aux_argc; sds *argv, *aux_argv; - clusterNode *n, *master; + clusterNode *n, *primary; char *p, *s; /* Skip blank lines, they can be created either by users manually @@ -526,9 +526,9 @@ int clusterLoadConfig(char *filename) { myself = server.cluster->myself = n; n->flags |= CLUSTER_NODE_MYSELF; } else if (!strcasecmp(s, "master")) { - n->flags |= CLUSTER_NODE_MASTER; + n->flags |= CLUSTER_NODE_PRIMARY; } else if (!strcasecmp(s, "slave")) { - n->flags |= CLUSTER_NODE_SLAVE; + n->flags |= CLUSTER_NODE_REPLICA; } else if (!strcasecmp(s, "fail?")) { n->flags |= CLUSTER_NODE_PFAIL; } else if (!strcasecmp(s, "fail")) { @@ -548,32 +548,32 @@ int clusterLoadConfig(char *filename) { if (p) s = p + 1; } - /* Get master if any. Set the master and populate master's - * slave list. */ + /* Get primary if any. Set the primary and populate primary's + * replica list. */ if (argv[3][0] != '-') { if (verifyClusterNodeId(argv[3], sdslen(argv[3])) == C_ERR) { sdsfreesplitres(argv, argc); goto fmterr; } - master = clusterLookupNode(argv[3], sdslen(argv[3])); - if (!master) { - master = createClusterNode(argv[3], 0); - clusterAddNode(master); + primary = clusterLookupNode(argv[3], sdslen(argv[3])); + if (!primary) { + primary = createClusterNode(argv[3], 0); + clusterAddNode(primary); } /* shard_id can be absent if we are loading a nodes.conf generated * by an older version; we should follow the primary's * shard_id in this case */ if (auxFieldHandlers[af_shard_id].isPresent(n) == 0) { - memcpy(n->shard_id, master->shard_id, CLUSTER_NAMELEN); - clusterAddNodeToShard(master->shard_id, n); - } else if (clusterGetNodesInMyShard(master) != NULL && - memcmp(master->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { + memcpy(n->shard_id, primary->shard_id, CLUSTER_NAMELEN); + clusterAddNodeToShard(primary->shard_id, n); + } else if (clusterGetNodesInMyShard(primary) != NULL && + memcmp(primary->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { /* If the primary 
has been added to a shard, make sure this * node has the same persisted shard id as the primary. */ goto fmterr; } - n->slaveof = master; - clusterNodeAddSlave(master, n); + n->replicaof = primary; + clusterNodeAddReplica(primary, n); } else if (auxFieldHandlers[af_shard_id].isPresent(n) == 0) { /* n is a primary but it does not have a persisted shard_id. * This happens if we are loading a nodes.conf generated by @@ -589,7 +589,7 @@ int clusterLoadConfig(char *filename) { /* Set configEpoch for this node. * If the node is a replica, set its config epoch to 0. * If it's a primary, load the config epoch from the configuration file. */ - n->configEpoch = (nodeIsSlave(n) && n->slaveof) ? 0 : strtoull(argv[6], NULL, 10); + n->configEpoch = (nodeIsReplica(n) && n->replicaof) ? 0 : strtoull(argv[6], NULL, 10); /* Populate hash slots served by this instance. */ for (j = 8; j < argc; j++) { @@ -828,7 +828,7 @@ void deriveAnnouncedPorts(int *announced_tcp_port, int *announced_tls_port, int void clusterUpdateMyselfFlags(void) { if (!myself) return; int oldflags = myself->flags; - int nofailover = server.cluster_slave_no_failover ? CLUSTER_NODE_NOFAILOVER : 0; + int nofailover = server.cluster_replica_no_failover ? CLUSTER_NODE_NOFAILOVER : 0; myself->flags &= ~CLUSTER_NODE_NOFAILOVER; myself->flags |= nofailover; if (myself->flags != oldflags) { @@ -916,7 +916,7 @@ static void updateShardId(clusterNode *node, const char *shard_id) { clusterAddNodeToShard(shard_id, node); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } - if (shard_id && myself != node && myself->slaveof == node) { + if (shard_id && myself != node && myself->replicaof == node) { if (memcmp(myself->shard_id, shard_id, CLUSTER_NAMELEN) != 0) { /* shard-id can diverge right after a rolling upgrade * from pre-7.2 releases */ @@ -933,7 +933,7 @@ static inline int areInSameShard(clusterNode *node1, clusterNode *node2) { } static inline uint64_t nodeEpoch(clusterNode *n) { - return n->slaveof ? 
n->slaveof->configEpoch : n->configEpoch; + return n->replicaof ? n->replicaof->configEpoch : n->configEpoch; } /* Update my hostname based on server configuration values */ @@ -988,7 +988,7 @@ void clusterInit(void) { if (clusterLoadConfig(server.cluster_configfile) == C_ERR) { /* No configuration found. We will just use the random name provided * by the createClusterNode() function. */ - myself = server.cluster->myself = createClusterNode(NULL, CLUSTER_NODE_MYSELF | CLUSTER_NODE_MASTER); + myself = server.cluster->myself = createClusterNode(NULL, CLUSTER_NODE_MYSELF | CLUSTER_NODE_PRIMARY); serverLog(LL_NOTICE, "No cluster configuration found, I'm %.40s", myself->name); clusterAddNode(myself); clusterAddNodeToShard(myself->shard_id, myself); @@ -1019,7 +1019,7 @@ void clusterInit(void) { deriveAnnouncedPorts(&myself->tcp_port, &myself->tls_port, &myself->cport); server.cluster->mf_end = 0; - server.cluster->mf_slave = NULL; + server.cluster->mf_replica = NULL; for (connTypeForCaching conn_type = CACHE_CONN_TCP; conn_type < CACHE_CONN_TYPE_MAX; conn_type++) { server.cached_cluster_slot_info[conn_type] = NULL; } @@ -1059,20 +1059,20 @@ void clusterInitLast(void) { * * 1) All other nodes are forgotten. * 2) All the assigned / open slots are released. - * 3) If the node is a slave, it turns into a master. + * 3) If the node is a replica, it turns into a primary. * 4) Only for hard reset: a new Node ID is generated. * 5) Only for hard reset: currentEpoch and configEpoch are set to 0. * 6) The new configuration is saved and the cluster state updated. - * 7) If the node was a slave, the whole data set is flushed away. */ + * 7) If the node was a replica, the whole data set is flushed away. */ void clusterReset(int hard) { dictIterator *di; dictEntry *de; int j; - /* Turn into master. */ - if (nodeIsSlave(myself)) { - clusterSetNodeAsMaster(myself); - replicationUnsetMaster(); + /* Turn into primary. 
*/ + if (nodeIsReplica(myself)) { + clusterSetNodeAsPrimary(myself); + replicationUnsetPrimary(); emptyData(-1, EMPTYDB_NO_FLAGS, NULL); } @@ -1247,7 +1247,7 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* If the server is starting up, don't accept cluster connections: * UPDATE messages may interact with the database content. */ - if (server.masterhost == NULL && server.loading) return; + if (server.primary_host == NULL && server.loading) return; while (max--) { cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); @@ -1318,9 +1318,9 @@ clusterNode *createClusterNode(char *nodename, int flags) { node->slot_info_pairs = NULL; node->slot_info_pairs_count = 0; node->numslots = 0; - node->numslaves = 0; - node->slaves = NULL; - node->slaveof = NULL; + node->num_replicas = 0; + node->replicas = NULL; + node->replicaof = NULL; node->last_in_ping_gossip = 0; node->ping_sent = node->pong_received = 0; node->data_received = 0; @@ -1441,43 +1441,44 @@ static int clusterNodeNameComparator(const void *node1, const void *node2) { return strncasecmp((*(clusterNode **)node1)->name, (*(clusterNode **)node2)->name, CLUSTER_NAMELEN); } -int clusterNodeRemoveSlave(clusterNode *master, clusterNode *slave) { +int clusterNodeRemoveReplica(clusterNode *primary, clusterNode *replica) { int j; - for (j = 0; j < master->numslaves; j++) { - if (master->slaves[j] == slave) { - if ((j + 1) < master->numslaves) { - int remaining_slaves = (master->numslaves - j) - 1; - memmove(master->slaves + j, master->slaves + (j + 1), (sizeof(*master->slaves) * remaining_slaves)); + for (j = 0; j < primary->num_replicas; j++) { + if (primary->replicas[j] == replica) { + if ((j + 1) < primary->num_replicas) { + int remaining_replicas = (primary->num_replicas - j) - 1; + memmove(primary->replicas + j, primary->replicas + (j + 1), + (sizeof(*primary->replicas) * remaining_replicas)); } - master->numslaves--; - if (master->numslaves == 0) master->flags &= 
~CLUSTER_NODE_MIGRATE_TO; + primary->num_replicas--; + if (primary->num_replicas == 0) primary->flags &= ~CLUSTER_NODE_MIGRATE_TO; return C_OK; } } return C_ERR; } -int clusterNodeAddSlave(clusterNode *master, clusterNode *slave) { +int clusterNodeAddReplica(clusterNode *primary, clusterNode *replica) { int j; - /* If it's already a slave, don't add it again. */ - for (j = 0; j < master->numslaves; j++) - if (master->slaves[j] == slave) return C_ERR; - master->slaves = zrealloc(master->slaves, sizeof(clusterNode *) * (master->numslaves + 1)); - master->slaves[master->numslaves] = slave; - master->numslaves++; - qsort(master->slaves, master->numslaves, sizeof(clusterNode *), clusterNodeNameComparator); - master->flags |= CLUSTER_NODE_MIGRATE_TO; + /* If it's already a replica, don't add it again. */ + for (j = 0; j < primary->num_replicas; j++) + if (primary->replicas[j] == replica) return C_ERR; + primary->replicas = zrealloc(primary->replicas, sizeof(clusterNode *) * (primary->num_replicas + 1)); + primary->replicas[primary->num_replicas] = replica; + primary->num_replicas++; + qsort(primary->replicas, primary->num_replicas, sizeof(clusterNode *), clusterNodeNameComparator); + primary->flags |= CLUSTER_NODE_MIGRATE_TO; return C_OK; } -int clusterCountNonFailingSlaves(clusterNode *n) { - int j, okslaves = 0; +int clusterCountNonFailingReplicas(clusterNode *n) { + int j, ok_replicas = 0; - for (j = 0; j < n->numslaves; j++) - if (!nodeFailed(n->slaves[j])) okslaves++; - return okslaves; + for (j = 0; j < n->num_replicas; j++) + if (!nodeFailed(n->replicas[j])) ok_replicas++; + return ok_replicas; } /* Low level cleanup of the node structure. Only called by clusterDelNode(). */ @@ -1485,12 +1486,12 @@ void freeClusterNode(clusterNode *n) { sds nodename; int j; - /* If the node has associated slaves, we have to set - * all the slaves->slaveof fields to NULL (unknown). 
*/ - for (j = 0; j < n->numslaves; j++) n->slaves[j]->slaveof = NULL; + /* If the node has associated replicas, we have to set + * all the replicas->replicaof fields to NULL (unknown). */ + for (j = 0; j < n->num_replicas; j++) n->replicas[j]->replicaof = NULL; - /* Remove this node from the list of slaves of its master. */ - if (nodeIsSlave(n) && n->slaveof) clusterNodeRemoveSlave(n->slaveof, n); + /* Remove this node from the list of replicas of its primary. */ + if (nodeIsReplica(n) && n->replicaof) clusterNodeRemoveReplica(n->replicaof, n); /* Unlink from the set of nodes. */ nodename = sdsnewlen(n->name, CLUSTER_NAMELEN); @@ -1503,7 +1504,7 @@ void freeClusterNode(clusterNode *n) { if (n->link) freeClusterLink(n->link); if (n->inbound_link) freeClusterLink(n->inbound_link); listRelease(n->fail_reports); - zfree(n->slaves); + zfree(n->replicas); zfree(n); } @@ -1524,8 +1525,8 @@ void clusterAddNode(clusterNode *node) { * other nodes. * 3) Remove the node from the owning shard * 4) Free the node with freeClusterNode() that will in turn remove it - * from the hash table and from the list of slaves of its master, if - * it is a slave node. + * from the hash table and from the list of replicas of its primary, if + * it is a replica node. */ void clusterDelNode(clusterNode *delnode) { int j; @@ -1568,7 +1569,7 @@ clusterNode *clusterLookupNode(const char *name, int length) { /* Get all the nodes in my shard. * Note that the list returned is not computed on the fly - * via slaveof; rather, it is maintained permanently to + * via replicaof; rather, it is maintained permanently to * track the shard membership and its life cycle is tied * to this process. Therefore, the caller must not * release the list. */ @@ -1668,8 +1669,8 @@ uint64_t clusterGetMaxEpoch(void) { * * 1) When slots are closed after importing. Otherwise resharding would be * too expensive. 
- * 2) When CLUSTER FAILOVER is called with options that force a slave to - * failover its master even if there is not master majority able to + * 2) When CLUSTER FAILOVER is called with options that force a replica to + * failover its primary even if there is not primary majority able to * create a new configuration epoch. * * The cluster will not explode using this function, even in the case of @@ -1692,14 +1693,14 @@ int clusterBumpConfigEpochWithoutConsensus(void) { } } -/* This function is called when this node is a master, and we receive from - * another master a configuration epoch that is equal to our configuration +/* This function is called when this node is a primary, and we receive from + * another primary a configuration epoch that is equal to our configuration * epoch. * * BACKGROUND * - * It is not possible that different slaves get the same config - * epoch during a failover election, because the slaves need to get voted + * It is not possible that different replicas get the same config + * epoch during a failover election, because the replicas need to get voted * by a majority. However when we perform a manual resharding of the cluster * the node will assign a configuration epoch to itself without to ask * for agreement. Usually resharding happens when the cluster is working well @@ -1718,13 +1719,13 @@ int clusterBumpConfigEpochWithoutConsensus(void) { * end with a different configEpoch at startup automatically. * * In all the cases, we want a mechanism that resolves this issue automatically - * as a safeguard. The same configuration epoch for masters serving different + * as a safeguard. The same configuration epoch for primaries serving different * set of slots is not harmful, but it is if the nodes end serving the same * slots for some reason (manual errors or software bugs) without a proper * failover procedure. 
* * In general we want a system that eventually always ends with different - * masters having different configuration epochs whatever happened, since + * primaries having different configuration epochs whatever happened, since * nothing is worse than a split-brain condition in a distributed system. * * BEHAVIOR @@ -1739,8 +1740,8 @@ int clusterBumpConfigEpochWithoutConsensus(void) { * end with a different configuration epoch. */ void clusterHandleConfigEpochCollision(clusterNode *sender) { - /* Prerequisites: nodes have the same configEpoch and are both masters. */ - if (sender->configEpoch != myself->configEpoch || !clusterNodeIsMaster(sender) || !clusterNodeIsMaster(myself)) + /* Prerequisites: nodes have the same configEpoch and are both primaries. */ + if (sender->configEpoch != myself->configEpoch || !clusterNodeIsPrimary(sender) || !clusterNodeIsPrimary(myself)) return; /* Don't act if the colliding node has a smaller Node ID. */ if (memcmp(sender->name, myself->name, CLUSTER_NAMELEN) <= 0) return; @@ -1834,8 +1835,8 @@ int clusterBlacklistExists(char *nodeid) { /* This function checks if a given node should be marked as FAIL. * It happens if the following conditions are met: * - * 1) We received enough failure reports from other master nodes via gossip. - * Enough means that the majority of the masters signaled the node is + * 1) We received enough failure reports from other primary nodes via gossip. + * Enough means that the majority of the primaries signaled the node is * down recently. * 2) We believe this node is in PFAIL state. * @@ -1843,13 +1844,13 @@ int clusterBlacklistExists(char *nodeid) { * event trying to force every other node to set the FAIL flag for the node. 
* * Note that the form of agreement used here is weak, as we collect the majority - * of masters state during some time, and even if we force agreement by + * of primaries state during some time, and even if we force agreement by * propagating the FAIL message, because of partitions we may not reach every * node. However: * * 1) Either we reach the majority and eventually the FAIL state will propagate * to all the cluster. - * 2) Or there is no majority so no slave promotion will be authorized and the + * 2) Or there is no majority so no replica promotion will be authorized and the * FAIL flag will be cleared after some time. */ void markNodeAsFailingIfNeeded(clusterNode *node) { @@ -1860,9 +1861,9 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { if (nodeFailed(node)) return; /* Already FAILing. */ failures = clusterNodeFailureReportsCount(node); - /* Also count myself as a voter if I'm a master. */ - if (clusterNodeIsMaster(myself)) failures++; - if (failures < needed_quorum) return; /* No weak agreement from masters. */ + /* Also count myself as a voter if I'm a primary. */ + if (clusterNodeIsPrimary(myself)) failures++; + if (failures < needed_quorum) return; /* No weak agreement from primaries. */ serverLog(LL_NOTICE, "Marking node %.40s (%s) as failing (quorum reached).", node->name, node->human_nodename); @@ -1873,8 +1874,8 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { /* Broadcast the failing node name to everybody, forcing all the other * reachable nodes to flag the node as FAIL. - * We do that even if this node is a replica and not a master: anyway - * the failing state is triggered collecting failure reports from masters, + * We do that even if this node is a replica and not a primary: anyway + * the failing state is triggered collecting failure reports from primaries, * so here the replica is only helping propagating this status. 
*/ clusterSendFail(node->name); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); @@ -1888,20 +1889,20 @@ void clearNodeFailureIfNeeded(clusterNode *node) { serverAssert(nodeFailed(node)); - /* For slaves we always clear the FAIL flag if we can contact the + /* For replicas we always clear the FAIL flag if we can contact the * node again. */ - if (nodeIsSlave(node) || node->numslots == 0) { + if (nodeIsReplica(node) || node->numslots == 0) { serverLog(LL_NOTICE, "Clear FAIL state for node %.40s (%s):%s is reachable again.", node->name, - node->human_nodename, nodeIsSlave(node) ? "replica" : "master without slots"); + node->human_nodename, nodeIsReplica(node) ? "replica" : "master without slots"); node->flags &= ~CLUSTER_NODE_FAIL; clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); } - /* If it is a master and... + /* If it is a primary and... * 1) The FAIL state is old enough. * 2) It is yet serving slots from our point of view (not failed over). * Apparently no one is going to fix these slots, clear the FAIL flag. */ - if (clusterNodeIsMaster(node) && node->numslots > 0 && + if (clusterNodeIsPrimary(node) && node->numslots > 0 && (now - node->fail_time) > (server.cluster_node_timeout * CLUSTER_FAIL_UNDO_TIME_MULT)) { serverLog( LL_NOTICE, @@ -2083,8 +2084,8 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { /* Ignore gossips about self. */ if (node && node != myself) { /* We already know this node. - Handle failure reports, only when the sender is a master. */ - if (sender && clusterNodeIsMaster(sender)) { + Handle failure reports, only when the sender is a primary. 
*/ + if (sender && clusterNodeIsPrimary(sender)) { if (flags & (CLUSTER_NODE_FAIL | CLUSTER_NODE_PFAIL)) { if (clusterNodeAddFailureReport(node, sender)) { serverLog(LL_VERBOSE, "Node %.40s (%s) reported node %.40s (%s) as not reachable.", @@ -2224,32 +2225,32 @@ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link, clusterMsg * serverLog(LL_NOTICE, "Address updated for node %.40s (%s), now %s:%d", node->name, node->human_nodename, node->ip, getNodeDefaultClientPort(node)); - /* Check if this is our master and we have to change the + /* Check if this is our primary and we have to change the * replication target as well. */ - if (nodeIsSlave(myself) && myself->slaveof == node) - replicationSetMaster(node->ip, getNodeDefaultReplicationPort(node)); + if (nodeIsReplica(myself) && myself->replicaof == node) + replicationSetPrimary(node->ip, getNodeDefaultReplicationPort(node)); return 1; } -/* Reconfigure the specified node 'n' as a master. This function is called when - * a node that we believed to be a slave is now acting as master in order to +/* Reconfigure the specified node 'n' as a primary. This function is called when + * a node that we believed to be a replica is now acting as primary in order to * update the state of the node. */ -void clusterSetNodeAsMaster(clusterNode *n) { - if (clusterNodeIsMaster(n)) return; +void clusterSetNodeAsPrimary(clusterNode *n) { + if (clusterNodeIsPrimary(n)) return; - if (n->slaveof) { - clusterNodeRemoveSlave(n->slaveof, n); + if (n->replicaof) { + clusterNodeRemoveReplica(n->replicaof, n); if (n != myself) n->flags |= CLUSTER_NODE_MIGRATE_TO; } - n->flags &= ~CLUSTER_NODE_SLAVE; - n->flags |= CLUSTER_NODE_MASTER; - n->slaveof = NULL; + n->flags &= ~CLUSTER_NODE_REPLICA; + n->flags |= CLUSTER_NODE_PRIMARY; + n->replicaof = NULL; /* Update config and state. 
*/ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE); } -/* This function is called when we receive a master configuration via a +/* This function is called when we receive a primary configuration via a * PING, PONG or UPDATE packet. What we receive is a node, a configEpoch of the * node, and the set of slots claimed under this configEpoch. * @@ -2262,27 +2263,27 @@ void clusterSetNodeAsMaster(clusterNode *n) { * case we receive the info via an UPDATE packet. */ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoch, unsigned char *slots) { int j; - clusterNode *curmaster = NULL, *newmaster = NULL; + clusterNode *cur_primary = NULL, *new_primary = NULL; /* The dirty slots list is a list of slots for which we lose the ownership * while having still keys inside. This usually happens after a failover * or after a manual cluster reconfiguration operated by the admin. * - * If the update message is not able to demote a master to slave (in this - * case we'll resync with the master updating the whole key space), we + * If the update message is not able to demote a primary to replica (in this + * case we'll resync with the primary updating the whole key space), we * need to delete all the keys in the slots we lost ownership. */ uint16_t dirty_slots[CLUSTER_SLOTS]; int dirty_slots_count = 0; - /* We should detect if sender is new master of our shard. + /* We should detect if sender is new primary of our shard. * We will know it if all our slots were migrated to sender, and sender * has no slots except ours */ int sender_slots = 0; int migrated_our_slots = 0; - /* Here we set curmaster to this node or the node this node - * replicates to if it's a slave. In the for loop we are - * interested to check if slots are taken away from curmaster. */ - curmaster = clusterNodeIsMaster(myself) ? myself : myself->slaveof; + /* Here we set cur_primary to this node or the node this node + * replicates to if it's a replica. 
In the for loop we are + * interested to check if slots are taken away from cur_primary. */ + cur_primary = clusterNodeIsPrimary(myself) ? myself : myself->replicaof; if (sender == myself) { serverLog(LL_NOTICE, "Discarding UPDATE message about myself."); @@ -2319,8 +2320,8 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc dirty_slots_count++; } - if (server.cluster->slots[j] == curmaster) { - newmaster = sender; + if (server.cluster->slots[j] == cur_primary) { + new_primary = sender; migrated_our_slots++; } @@ -2386,7 +2387,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * sender if it has just taken over the primary role. */ if (server.cluster->migrating_slots_to[j] != NULL && server.cluster->migrating_slots_to[j] != sender && (server.cluster->migrating_slots_to[j]->configEpoch < senderConfigEpoch || - nodeIsSlave(server.cluster->migrating_slots_to[j])) && + nodeIsReplica(server.cluster->migrating_slots_to[j])) && areInSameShard(server.cluster->migrating_slots_to[j], sender)) { serverLog(LL_NOTICE, "Failover occurred in migration target." @@ -2412,7 +2413,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * 1. Remove the importing state for the specific slot. * 2. Finalize the slot's ownership, if I am not already the owner of * the slot. */ - if (nodeIsMaster(myself) && server.cluster->importing_slots_from[j] == sender) { + if (nodeIsPrimary(myself) && server.cluster->importing_slots_from[j] == sender) { serverLog(LL_NOTICE, "Slot %d is no longer being imported from node %.40s (%s) in shard %.40s;" " Clear my importing source for the slot.", @@ -2447,13 +2448,13 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * keys redirections. 
*/ if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return; - /* Handle a special case where newmaster is not set but both sender + /* Handle a special case where new_primary is not set but both sender * and myself own no slots and in the same shard. Set the sender as * the new primary if my current config epoch is lower than the * sender's. */ - if (!newmaster && myself->slaveof != sender && sender_slots == 0 && myself->numslots == 0 && + if (!new_primary && myself->replicaof != sender && sender_slots == 0 && myself->numslots == 0 && nodeEpoch(myself) < senderConfigEpoch && areInSameShard(sender, myself)) { - newmaster = sender; + new_primary = sender; } /* If the shard to which this node (myself) belongs loses all of @@ -2475,7 +2476,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * shard and our primary just had its last slot migrated to the * sender. In this case we don't reconfigure ourselves as a replica * of the sender. */ - if (newmaster && curmaster->numslots == 0) { + if (new_primary && cur_primary->numslots == 0) { if (server.cluster_allow_replica_migration || areInSameShard(sender, myself)) { serverLog(LL_NOTICE, "Configuration change detected. Reconfiguring myself " @@ -2483,7 +2484,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc sender->name, sender->human_nodename, sender->shard_id); /* Don't clear the migrating/importing states if this is a replica that * just gets promoted to the new primary in the shard. 
*/ - clusterSetMaster(sender, !areInSameShard(sender, myself)); + clusterSetPrimary(sender, !areInSameShard(sender, myself)); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } else if ((sender_slots >= migrated_our_slots) && !areInSameShard(sender, myself)) { /* When all our slots are lost to the sender and the sender belongs to @@ -2491,14 +2492,14 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * migration. Don't reconfigure this node to migrate to the new shard * in this case. */ serverLog(LL_NOTICE, - "My last slot was migrated to node %.40s (%s) in shard %.40s. I am now an empty master.", + "My last slot was migrated to node %.40s (%s) in shard %.40s. I am now an empty primary.", sender->name, sender->human_nodename, sender->shard_id); } } else if (dirty_slots_count) { /* If we are here, we received an update message which removed * ownership for certain slots we still have keys about, but still - * we are serving some slots, so this master node was not demoted to - * a slave. + * we are serving some slots, so this primary node was not demoted to + * a replica. * * In order to maintain a consistent state between keys and slots * we need to remove all the keys from the slots we lost. 
*/ @@ -2686,7 +2687,7 @@ void clusterProcessPingExtensions(clusterMsg *hdr, clusterLink *link) { } else if (type == CLUSTERMSG_EXT_TYPE_FORGOTTEN_NODE) { clusterMsgPingExtForgottenNode *forgotten_node_ext = &(ext->ext[0].forgotten_node); clusterNode *n = clusterLookupNode(forgotten_node_ext->name, CLUSTER_NAMELEN); - if (n && n != myself && !(nodeIsSlave(myself) && myself->slaveof == n)) { + if (n && n != myself && !(nodeIsReplica(myself) && myself->replicaof == n)) { sds id = sdsnewlen(forgotten_node_ext->name, CLUSTER_NAMELEN); dictEntry *de = dictAddOrFind(server.cluster->nodes_black_list, id); uint64_t expire = server.unixtime + ntohu64(forgotten_node_ext->ttl); @@ -2718,9 +2719,9 @@ void clusterProcessPingExtensions(clusterMsg *hdr, clusterLink *link) { * As the cluster progressively upgrades to version 7.2, we can expect the shard_ids * across all nodes to naturally converge and align. * - * If sender is a replica, set the shard_id to the shard_id of its master. + * If sender is a replica, set the shard_id to the shard_id of its primary. * Otherwise, we'll set it now. */ - if (ext_shardid == NULL) ext_shardid = clusterNodeGetMaster(sender)->shard_id; + if (ext_shardid == NULL) ext_shardid = clusterNodeGetPrimary(sender)->shard_id; updateShardId(sender, ext_shardid); } @@ -2867,7 +2868,7 @@ int clusterProcessPacket(clusterLink *link) { senderConfigEpoch = ntohu64(hdr->configEpoch); if (senderCurrentEpoch > server.cluster->currentEpoch) server.cluster->currentEpoch = senderCurrentEpoch; /* Update the sender configEpoch if it is a primary publishing a newer one. 
*/ - if (!memcmp(hdr->slaveof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->slaveof)) && + if (!memcmp(hdr->replicaof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->replicaof)) && senderConfigEpoch > sender->configEpoch) { sender->configEpoch = senderConfigEpoch; clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_FSYNC_CONFIG); @@ -2875,16 +2876,16 @@ int clusterProcessPacket(clusterLink *link) { /* Update the replication offset info for this node. */ sender->repl_offset = ntohu64(hdr->offset); sender->repl_offset_time = now; - /* If we are a slave performing a manual failover and our master + /* If we are a replica performing a manual failover and our primary * sent its offset while already paused, populate the MF state. */ - if (server.cluster->mf_end && nodeIsSlave(myself) && myself->slaveof == sender && - hdr->mflags[0] & CLUSTERMSG_FLAG0_PAUSED && server.cluster->mf_master_offset == -1) { - server.cluster->mf_master_offset = sender->repl_offset; + if (server.cluster->mf_end && nodeIsReplica(myself) && myself->replicaof == sender && + hdr->mflags[0] & CLUSTERMSG_FLAG0_PAUSED && server.cluster->mf_primary_offset == -1) { + server.cluster->mf_primary_offset = sender->repl_offset; clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_MANUALFAILOVER); serverLog(LL_NOTICE, "Received replication offset for paused " - "master manual failover: %lld", - server.cluster->mf_master_offset); + "primary manual failover: %lld", + server.cluster->mf_primary_offset); } } @@ -2913,7 +2914,7 @@ int clusterProcessPacket(clusterLink *link) { /* Add this node if it is new for us and the msg type is MEET. * In this stage we don't try to add the node with the right - * flags, slaveof pointer, and so forth, as this details will be + * flags, replicaof pointer, and so forth, as this details will be * resolved when we'll receive PONGs from the node. 
*/ if (!sender && type == CLUSTERMSG_TYPE_MEET) { clusterNode *node; @@ -2962,7 +2963,7 @@ int clusterProcessPacket(clusterLink *link) { clusterRenameNode(link->node, hdr->sender); serverLog(LL_DEBUG, "Handshake with node %.40s completed.", link->node->name); link->node->flags &= ~CLUSTER_NODE_HANDSHAKE; - link->node->flags |= flags & (CLUSTER_NODE_MASTER | CLUSTER_NODE_SLAVE); + link->node->flags |= flags & (CLUSTER_NODE_PRIMARY | CLUSTER_NODE_REPLICA); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } else if (memcmp(link->node->name, hdr->sender, CLUSTER_NAMELEN) != 0) { /* If the reply has a non matching node ID we @@ -2987,8 +2988,8 @@ int clusterProcessPacket(clusterLink *link) { /* Copy the CLUSTER_NODE_NOFAILOVER flag from what the sender * announced. This is a dynamic flag that we receive from the * sender, and the latest status must be trusted. We need it to - * be propagated because the slave ranking used to understand the - * delay of each slave in the voting process, needs to know + * be propagated because the replica ranking used to understand the + * delay of each replica in the voting process, needs to know * what are the instances really competing. */ if (sender) { int nofailover = flags & CLUSTER_NODE_NOFAILOVER; @@ -3021,23 +3022,23 @@ int clusterProcessPacket(clusterLink *link) { } } - /* Check for role switch: slave -> master or master -> slave. */ + /* Check for role switch: replica -> primary or primary -> replica. */ if (sender) { serverLog(LL_DEBUG, "node %.40s (%s) announces that it is a %s in shard %.40s", sender->name, sender->human_nodename, - !memcmp(hdr->slaveof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->slaveof)) ? "master" : "slave", + !memcmp(hdr->replicaof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->replicaof)) ? "primary" : "replica", sender->shard_id); - if (!memcmp(hdr->slaveof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->slaveof))) { - /* Node is a master. 
*/ - clusterSetNodeAsMaster(sender); + if (!memcmp(hdr->replicaof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->replicaof))) { + /* Node is a primary. */ + clusterSetNodeAsPrimary(sender); } else { - /* Node is a slave. */ - clusterNode *master = clusterLookupNode(hdr->slaveof, CLUSTER_NAMELEN); + /* Node is a replica. */ + clusterNode *primary = clusterLookupNode(hdr->replicaof, CLUSTER_NAMELEN); - if (clusterNodeIsMaster(sender)) { - /* Master turned into a slave! Reconfigure the node. */ - if (master && areInSameShard(master, sender)) { - /* `sender` was a primary and was in the same shard as `master`, its new primary */ + if (clusterNodeIsPrimary(sender)) { + /* Primary turned into a replica! Reconfigure the node. */ + if (primary && areInSameShard(primary, sender)) { + /* `sender` was a primary and was in the same shard as its new primary */ if (sender->configEpoch > senderConfigEpoch) { serverLog(LL_NOTICE, "Ignore stale message from %.40s (%s) in shard %.40s;" @@ -3045,48 +3046,48 @@ int clusterProcessPacket(clusterLink *link) { sender->name, sender->human_nodename, sender->shard_id, (unsigned long long)senderConfigEpoch, (unsigned long long)sender->configEpoch); } else { - /* `master` is still a `slave` in this observer node's view; update its role and configEpoch - */ - clusterSetNodeAsMaster(master); - master->configEpoch = senderConfigEpoch; + /* `primary` is still a `replica` in this observer node's view; + * update its role and configEpoch */ + clusterSetNodeAsPrimary(primary); + primary->configEpoch = senderConfigEpoch; serverLog(LL_NOTICE, "A failover occurred in shard %.40s; node %.40s (%s)" " failed over to node %.40s (%s) with a config epoch of %llu", - sender->shard_id, sender->name, sender->human_nodename, master->name, - master->human_nodename, (unsigned long long)master->configEpoch); + sender->shard_id, sender->name, sender->human_nodename, primary->name, + primary->human_nodename, (unsigned long long)primary->configEpoch); } } else { /* `sender` was 
moved to another shard and has become a replica, remove its slot assignment */ int slots = clusterDelNodeSlots(sender); serverLog(LL_NOTICE, - "Node %.40s (%s) is no longer master of shard %.40s;" + "Node %.40s (%s) is no longer primary of shard %.40s;" " removed all %d slot(s) it used to own", sender->name, sender->human_nodename, sender->shard_id, slots); - if (master != NULL) { + if (primary != NULL) { serverLog(LL_NOTICE, "Node %.40s (%s) is now part of shard %.40s", sender->name, - sender->human_nodename, master->shard_id); + sender->human_nodename, primary->shard_id); } } - sender->flags &= ~(CLUSTER_NODE_MASTER | CLUSTER_NODE_MIGRATE_TO); - sender->flags |= CLUSTER_NODE_SLAVE; + sender->flags &= ~(CLUSTER_NODE_PRIMARY | CLUSTER_NODE_MIGRATE_TO); + sender->flags |= CLUSTER_NODE_REPLICA; /* Update config and state. */ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE); } - /* Master node changed for this slave? */ - if (master && sender->slaveof != master) { - if (sender->slaveof) clusterNodeRemoveSlave(sender->slaveof, sender); + /* Primary node changed for this replica? */ + if (primary && sender->replicaof != primary) { + if (sender->replicaof) clusterNodeRemoveReplica(sender->replicaof, sender); serverLog(LL_NOTICE, "Node %.40s (%s) is now a replica of node %.40s (%s) in shard %.40s", - sender->name, sender->human_nodename, master->name, master->human_nodename, + sender->name, sender->human_nodename, primary->name, primary->human_nodename, sender->shard_id); - clusterNodeAddSlave(master, sender); - sender->slaveof = master; + clusterNodeAddReplica(primary, sender); + sender->replicaof = primary; /* Update the shard_id when a replica is connected to its * primary in the very first time. */ - updateShardId(sender, master->shard_id); + updateShardId(sender, primary->shard_id); /* Update config. 
*/ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); @@ -3096,32 +3097,33 @@ int clusterProcessPacket(clusterLink *link) { /* Update our info about served slots. * - * Note: this MUST happen after we update the master/slave state - * so that CLUSTER_NODE_MASTER flag will be set. */ + * Note: this MUST happen after we update the primary/replica state + * so that CLUSTER_NODE_PRIMARY flag will be set. */ /* Many checks are only needed if the set of served slots this * instance claims is different compared to the set of slots we have * for it. Check this ASAP to avoid other computational expansive * checks later. */ - clusterNode *sender_master = NULL; /* Sender or its master if slave. */ - int dirty_slots = 0; /* Sender claimed slots don't match my view? */ + clusterNode *sender_primary = NULL; /* Sender or its primary if replica. */ + int dirty_slots = 0; /* Sender claimed slots don't match my view? */ if (sender) { - sender_master = clusterNodeIsMaster(sender) ? sender : sender->slaveof; - if (sender_master) { - dirty_slots = memcmp(sender_master->slots, hdr->myslots, sizeof(hdr->myslots)) != 0; + sender_primary = clusterNodeIsPrimary(sender) ? sender : sender->replicaof; + if (sender_primary) { + dirty_slots = memcmp(sender_primary->slots, hdr->myslots, sizeof(hdr->myslots)) != 0; /* Force dirty when the sending shard owns no slots so that * we have a chance to examine and repair slot migrating/importing * states that involve empty shards. */ - dirty_slots |= sender_master->numslots == 0; + dirty_slots |= sender_primary->numslots == 0; } } - /* 1) If the sender of the message is a master, and we detected that + /* 1) If the sender of the message is a primary, and we detected that * the set of slots it claims changed, scan the slots to see if we * need to update our configuration. 
*/ - if (sender_master && dirty_slots) clusterUpdateSlotsConfigWith(sender_master, senderConfigEpoch, hdr->myslots); + if (sender_primary && dirty_slots) + clusterUpdateSlotsConfigWith(sender_primary, senderConfigEpoch, hdr->myslots); /* Explicitly check for a replication loop before attempting the replication * chain folding logic. @@ -3152,38 +3154,38 @@ int clusterProcessPacket(clusterLink *link) { * epoch than B has on slot 1. This leads to B sending an UPDATE to * A directly saying A* is the new owner of slot 1 with a higher epoch. * d. A receives the UPDATE from B and executes clusterUpdateSlotsConfigWith. - * A now realizes that it is a replica of A* hence setting myself->slaveof + * A now realizes that it is a replica of A* hence setting myself->replicaof * to A*. * e. Finally, the pre-failover PING message queued up in A*'s outgoing * buffer to A is delivered and processed, out of order though, to A. * f. This stale PING message creates the replication loop */ - if (myself->slaveof && myself->slaveof->slaveof && myself->slaveof->slaveof != myself) { - /* Safeguard against sub-replicas. A replica's master can turn itself + if (myself->replicaof && myself->replicaof->replicaof && myself->replicaof->replicaof != myself) { + /* Safeguard against sub-replicas. A replica's primary can turn itself * into a replica if its last slot is removed. If no other node takes * over the slot, there is nothing else to trigger replica migration. */ serverLog(LL_NOTICE, "I'm a sub-replica! 
Reconfiguring myself as a replica of %.40s from %.40s", - myself->slaveof->slaveof->name, myself->slaveof->name); - clusterSetMaster(myself->slaveof->slaveof, 1); + myself->replicaof->replicaof->name, myself->replicaof->name); + clusterSetPrimary(myself->replicaof->replicaof, 1); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } /* 2) We also check for the reverse condition, that is, the sender - * claims to serve slots we know are served by a master with a + * claims to serve slots we know are served by a primary with a * greater configEpoch. If this happens we inform the sender. * * This is useful because sometimes after a partition heals, a - * reappearing master may be the last one to claim a given set of + * reappearing primary may be the last one to claim a given set of * hash slots, but with a configuration that other instances know to * be deprecated. Example: * - * A and B are master and slave for slots 1,2,3. + * A and B are primary and replica for slots 1,2,3. * A is partitioned away, B gets promoted. * B is partitioned away, and A returns available. * * Usually B would PING A publishing its set of served slots and its * configEpoch, but because of the partition B can't inform A of the * new configuration, so other nodes that have an updated table must - * do it. In this way A will stop to act as a master (or can try to + * do it. In this way A will stop to act as a primary (or can try to * failover if there are the conditions to win the election). */ if (sender && dirty_slots) { int j; @@ -3209,7 +3211,7 @@ int clusterProcessPacket(clusterLink *link) { /* If our config epoch collides with the sender's try to fix * the problem. 
*/ - if (sender && clusterNodeIsMaster(myself) && clusterNodeIsMaster(sender) && + if (sender && clusterNodeIsPrimary(myself) && clusterNodeIsPrimary(sender) && senderConfigEpoch == myself->configEpoch) { clusterHandleConfigEpochCollision(sender); } @@ -3259,10 +3261,10 @@ int clusterProcessPacket(clusterLink *link) { clusterSendFailoverAuthIfNeeded(sender, hdr); } else if (type == CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK) { if (!sender) return 1; /* We don't know that node. */ - /* We consider this vote only if the sender is a master serving + /* We consider this vote only if the sender is a primary serving * a non zero number of slots, and its currentEpoch is greater or * equal to epoch where this node started the election. */ - if (clusterNodeIsMaster(sender) && sender->numslots > 0 && + if (clusterNodeIsPrimary(sender) && sender->numslots > 0 && senderCurrentEpoch >= server.cluster->failover_auth_epoch) { server.cluster->failover_auth_count++; /* Maybe we reached a quorum here, set a flag to make sure @@ -3270,20 +3272,20 @@ int clusterProcessPacket(clusterLink *link) { clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); } } else if (type == CLUSTERMSG_TYPE_MFSTART) { - /* This message is acceptable only if I'm a master and the sender - * is one of my slaves. */ - if (!sender || sender->slaveof != myself) return 1; - /* Manual failover requested from slaves. Initialize the state + /* This message is acceptable only if I'm a primary and the sender + * is one of my replicas. */ + if (!sender || sender->replicaof != myself) return 1; + /* Manual failover requested from replicas. Initialize the state * accordingly. 
*/ resetManualFailover(); server.cluster->mf_end = now + CLUSTER_MF_TIMEOUT; - server.cluster->mf_slave = sender; + server.cluster->mf_replica = sender; pauseActions(PAUSE_DURING_FAILOVER, now + (CLUSTER_MF_TIMEOUT * CLUSTER_MF_PAUSE_MULT), PAUSE_ACTIONS_CLIENT_WRITE_SET); serverLog(LL_NOTICE, "Manual failover requested by replica %.40s (%s).", sender->name, sender->human_nodename); /* We need to send a ping message to the replica, as it would carry - * `server.cluster->mf_master_offset`, which means the master paused clients - * at offset `server.cluster->mf_master_offset`, so that the replica would + * `server.cluster->mf_primary_offset`, which means the primary paused clients + * at offset `server.cluster->mf_primary_offset`, so that the replica would * know that it is safe to set its `server.cluster->mf_can_start` to 1 so as * to complete failover as quickly as possible. */ clusterSendPing(link, CLUSTERMSG_TYPE_PING); @@ -3296,8 +3298,8 @@ int clusterProcessPacket(clusterLink *link) { if (!n) return 1; /* We don't know the reported node. */ if (n->configEpoch >= reportedConfigEpoch) return 1; /* Nothing new. */ - /* If in our current config the node is a slave, set it as a master. */ - if (nodeIsSlave(n)) clusterSetNodeAsMaster(n); + /* If in our current config the node is a replica, set it as a primary. */ + if (nodeIsReplica(n)) clusterSetNodeAsPrimary(n); /* Update the node's configEpoch. */ n->configEpoch = reportedConfigEpoch; @@ -3547,13 +3549,13 @@ void clusterBroadcastMessage(clusterMsgSendBlock *msgblock) { * sizeof(clusterMsg) in bytes. */ static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen) { uint64_t offset; - clusterNode *master; + clusterNode *primary; - /* If this node is a master, we send its slots bitmap and configEpoch. 
- * If this node is a slave we send the master's information instead (the - * node is flagged as slave so the receiver knows that it is NOT really + /* If this node is a primary, we send its slots bitmap and configEpoch. + * If this node is a replica we send the primary's information instead (the + * node is flagged as replica so the receiver knows that it is NOT really * in charge for this slots. */ - master = (nodeIsSlave(myself) && myself->slaveof) ? myself->slaveof : myself; + primary = (nodeIsReplica(myself) && myself->replicaof) ? myself->replicaof : myself; hdr->ver = htons(CLUSTER_PROTO_VER); hdr->sig[0] = 'R'; @@ -3575,9 +3577,9 @@ static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen) { int announced_tcp_port, announced_tls_port, announced_cport; deriveAnnouncedPorts(&announced_tcp_port, &announced_tls_port, &announced_cport); - memcpy(hdr->myslots, master->slots, sizeof(hdr->myslots)); - memset(hdr->slaveof, 0, CLUSTER_NAMELEN); - if (myself->slaveof != NULL) memcpy(hdr->slaveof, myself->slaveof->name, CLUSTER_NAMELEN); + memcpy(hdr->myslots, primary->slots, sizeof(hdr->myslots)); + memset(hdr->replicaof, 0, CLUSTER_NAMELEN); + if (myself->replicaof != NULL) memcpy(hdr->replicaof, myself->replicaof->name, CLUSTER_NAMELEN); if (server.tls_cluster) { hdr->port = htons(announced_tls_port); hdr->pport = htons(announced_tcp_port); @@ -3591,17 +3593,17 @@ static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen) { /* Set the currentEpoch and configEpochs. */ hdr->currentEpoch = htonu64(server.cluster->currentEpoch); - hdr->configEpoch = htonu64(master->configEpoch); + hdr->configEpoch = htonu64(primary->configEpoch); /* Set the replication offset. 
*/ - if (nodeIsSlave(myself)) - offset = replicationGetSlaveOffset(); + if (nodeIsReplica(myself)) + offset = replicationGetReplicaOffset(); else - offset = server.master_repl_offset; + offset = server.primary_repl_offset; hdr->offset = htonu64(offset); /* Set the message flags. */ - if (clusterNodeIsMaster(myself) && server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_PAUSED; + if (clusterNodeIsPrimary(myself) && server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_PAUSED; hdr->totlen = htonl(msglen); } @@ -3644,7 +3646,7 @@ void clusterSendPing(clusterLink *link, int type) { /* How many gossip sections we want to add? 1/10 of the number of nodes * and anyway at least 3. Why 1/10? * - * If we have N masters, with N/10 entries, and we consider that in + * If we have N primaries, with N/10 entries, and we consider that in * node_timeout we exchange with each other node at least 4 packets * (we ping in the worst case in node_timeout/2 time, and we also * receive two pings from the host), we have a total of 8 packets @@ -3657,14 +3659,14 @@ void clusterSendPing(clusterLink *link, int type) { * PROB = probability of being featured in a single gossip entry, * which is 1 / NUM_OF_NODES. * ENTRIES = 10. - * TOTAL_PACKETS = 2 * 4 * NUM_OF_MASTERS. + * TOTAL_PACKETS = 2 * 4 * NUM_OF_PRIMARIES. * - * If we assume we have just masters (so num of nodes and num of masters + * If we assume we have just primaries (so num of nodes and num of primaries * is the same), with 1/10 we always get over the majority, and specifically - * 80% of the number of nodes, to account for many masters failing at the + * 80% of the number of nodes, to account for many primaries failing at the * same time. * - * Since we have non-voting slaves that lower the probability of an entry + * Since we have non-voting replicas that lower the probability of an entry * to feature our node, we set the number of entries per packet as * 10% of the total nodes we have. 
*/ wanted = floor(dictSize(server.cluster->nodes) / 10); @@ -3773,16 +3775,16 @@ void clusterSendPing(clusterLink *link, int type) { * In Cluster mode, pongs are not used just for failure detection, but also * to carry important configuration information. So broadcasting a pong is * useful when something changes in the configuration and we want to make - * the cluster aware ASAP (for instance after a slave promotion). + * the cluster aware ASAP (for instance after a replica promotion). * * The 'target' argument specifies the receiving instances using the * defines below: * * CLUSTER_BROADCAST_ALL -> All known instances. - * CLUSTER_BROADCAST_LOCAL_SLAVES -> All slaves in my master-slaves ring. + * CLUSTER_BROADCAST_LOCAL_REPLICAS -> All replicas in my primary-replicas ring. */ #define CLUSTER_BROADCAST_ALL 0 -#define CLUSTER_BROADCAST_LOCAL_SLAVES 1 +#define CLUSTER_BROADCAST_LOCAL_REPLICAS 1 void clusterBroadcastPong(int target) { dictIterator *di; dictEntry *de; @@ -3793,10 +3795,10 @@ void clusterBroadcastPong(int target) { if (!node->link) continue; if (node == myself || nodeInHandshake(node)) continue; - if (target == CLUSTER_BROADCAST_LOCAL_SLAVES) { - int local_slave = - nodeIsSlave(node) && node->slaveof && (node->slaveof == myself || node->slaveof == myself->slaveof); - if (!local_slave) continue; + if (target == CLUSTER_BROADCAST_LOCAL_REPLICAS) { + int local_replica = nodeIsReplica(node) && node->replicaof && + (node->replicaof == myself || node->replicaof == myself->replicaof); + if (!local_replica) continue; } clusterSendPing(node->link, CLUSTERMSG_TYPE_PONG); } @@ -3952,15 +3954,15 @@ void clusterPropagatePublish(robj *channel, robj *message, int sharded) { } /* ----------------------------------------------------------------------------- - * SLAVE node specific functions + * REPLICA node specific functions * -------------------------------------------------------------------------- */ /* This function sends a FAILOVER_AUTH_REQUEST message to every 
node in order to - * see if there is the quorum for this slave instance to failover its failing - * master. + * see if there is the quorum for this replica instance to failover its failing + * primary. * - * Note that we send the failover request to everybody, master and slave nodes, - * but only the masters are supposed to reply to our query. */ + * Note that we send the failover request to everybody, primary and replica nodes, + * but only the primaries are supposed to reply to our query. */ void clusterRequestFailoverAuth(void) { uint32_t msglen = sizeof(clusterMsg) - sizeof(union clusterMsgData); clusterMsgSendBlock *msgblock = createClusterMsgSendBlock(CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST, msglen); @@ -3968,7 +3970,7 @@ void clusterRequestFailoverAuth(void) { clusterMsg *hdr = &msgblock->msg; /* If this is a manual failover, set the CLUSTERMSG_FLAG0_FORCEACK bit * in the header to communicate the nodes receiving the message that - * they should authorized the failover even if the master is working. */ + * they should authorized the failover even if the primary is working. */ if (server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_FORCEACK; clusterBroadcastMessage(msgblock); clusterMsgSendBlockDecrRefCount(msgblock); @@ -3998,18 +4000,18 @@ void clusterSendMFStart(clusterNode *node) { /* Vote for the node asking for our vote if there are the conditions. 
*/ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { - clusterNode *master = node->slaveof; + clusterNode *primary = node->replicaof; uint64_t requestCurrentEpoch = ntohu64(request->currentEpoch); uint64_t requestConfigEpoch = ntohu64(request->configEpoch); unsigned char *claimed_slots = request->myslots; int force_ack = request->mflags[0] & CLUSTERMSG_FLAG0_FORCEACK; int j; - /* IF we are not a master serving at least 1 slot, we don't have the + /* IF we are not a primary serving at least 1 slot, we don't have the * right to vote, as the cluster size is the number - * of masters serving at least one slot, and quorum is the cluster + * of primariies serving at least one slot, and quorum is the cluster * size + 1 */ - if (nodeIsSlave(myself) || myself->numslots == 0) return; + if (nodeIsReplica(myself) || myself->numslots == 0) return; /* Request epoch must be >= our currentEpoch. * Note that it is impossible for it to actually be greater since @@ -4029,37 +4031,37 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { return; } - /* Node must be a slave and its master down. - * The master can be non failing if the request is flagged + /* Node must be a replica and its primary down. + * The primary can be non failing if the request is flagged * with CLUSTERMSG_FLAG0_FORCEACK (manual failover). 
*/ - if (clusterNodeIsMaster(node) || master == NULL || (!nodeFailed(master) && !force_ack)) { - if (clusterNodeIsMaster(node)) { - serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): it is a master node", node->name, + if (clusterNodeIsPrimary(node) || primary == NULL || (!nodeFailed(primary) && !force_ack)) { + if (clusterNodeIsPrimary(node)) { + serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): it is a primary node", node->name, node->human_nodename); - } else if (master == NULL) { - serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): I don't know its master", node->name, + } else if (primary == NULL) { + serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): I don't know its primary", node->name, node->human_nodename); - } else if (!nodeFailed(master)) { - serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): its master is up", node->name, + } else if (!nodeFailed(primary)) { + serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): its primary is up", node->name, node->human_nodename); } return; } - /* We did not voted for a slave about this master for two + /* We did not voted for a replica about this primary for two * times the node timeout. This is not strictly needed for correctness * of the algorithm but makes the base case more linear. 
*/ - if (mstime() - node->slaveof->voted_time < server.cluster_node_timeout * 2) { + if (mstime() - node->replicaof->voted_time < server.cluster_node_timeout * 2) { serverLog(LL_WARNING, "Failover auth denied to %.40s %s: " - "can't vote about this master before %lld milliseconds", + "can't vote about this primary before %lld milliseconds", node->name, node->human_nodename, - (long long)((server.cluster_node_timeout * 2) - (mstime() - node->slaveof->voted_time))); + (long long)((server.cluster_node_timeout * 2) - (mstime() - node->replicaof->voted_time))); return; } - /* The slave requesting the vote must have a configEpoch for the claimed - * slots that is >= the one of the masters currently serving the same + /* The replica requesting the vote must have a configEpoch for the claimed + * slots that is >= the one of the primaries currently serving the same * slots in the current configuration. */ for (j = 0; j < CLUSTER_SLOTS; j++) { if (bitmapTestBit(claimed_slots, j) == 0) continue; @@ -4067,8 +4069,8 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { continue; } /* If we reached this point we found a slot that in our current slots - * is served by a master with a greater configEpoch than the one claimed - * by the slave requesting our vote. Refuse to vote for this slave. */ + * is served by a primary with a greater configEpoch than the one claimed + * by the replica requesting our vote. Refuse to vote for this replica. */ serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): " "slot %d epoch (%llu) > reqEpoch (%llu)", @@ -4077,46 +4079,46 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { return; } - /* We can vote for this slave. */ + /* We can vote for this replica. 
*/ server.cluster->lastVoteEpoch = server.cluster->currentEpoch; - node->slaveof->voted_time = mstime(); + node->replicaof->voted_time = mstime(); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_FSYNC_CONFIG); clusterSendFailoverAuth(node); serverLog(LL_NOTICE, "Failover auth granted to %.40s (%s) for epoch %llu", node->name, node->human_nodename, (unsigned long long)server.cluster->currentEpoch); } -/* This function returns the "rank" of this instance, a slave, in the context - * of its master-slaves ring. The rank of the slave is given by the number of - * other slaves for the same master that have a better replication offset +/* This function returns the "rank" of this instance, a replica, in the context + * of its primar-replicas ring. The rank of the replica is given by the number of + * other replicas for the same primary that have a better replication offset * compared to the local one (better means, greater, so they claim more data). * - * A slave with rank 0 is the one with the greatest (most up to date) + * A replica with rank 0 is the one with the greatest (most up to date) * replication offset, and so forth. Note that because how the rank is computed - * multiple slaves may have the same rank, in case they have the same offset. + * multiple replicas may have the same rank, in case they have the same offset. * - * The slave rank is used to add a delay to start an election in order to - * get voted and replace a failing master. Slaves with better replication + * The replica rank is used to add a delay to start an election in order to + * get voted and replace a failing primary. Replicas with better replication * offsets are more likely to win. */ -int clusterGetSlaveRank(void) { +int clusterGetReplicaRank(void) { long long myoffset; int j, rank = 0; - clusterNode *master; + clusterNode *primary; - serverAssert(nodeIsSlave(myself)); - master = myself->slaveof; - if (master == NULL) return 0; /* Never called by slaves without master. 
*/ + serverAssert(nodeIsReplica(myself)); + primary = myself->replicaof; + if (primary == NULL) return 0; /* Never called by replicas without primary. */ - myoffset = replicationGetSlaveOffset(); - for (j = 0; j < master->numslaves; j++) - if (master->slaves[j] != myself && !nodeCantFailover(master->slaves[j]) && - master->slaves[j]->repl_offset > myoffset) + myoffset = replicationGetReplicaOffset(); + for (j = 0; j < primary->num_replicas; j++) + if (primary->replicas[j] != myself && !nodeCantFailover(primary->replicas[j]) && + primary->replicas[j]->repl_offset > myoffset) rank++; return rank; } -/* This function is called by clusterHandleSlaveFailover() in order to - * let the slave log why it is not able to failover. Sometimes there are +/* This function is called by clusterHandleReplicaFailover() in order to + * let the replica log why it is not able to failover. Sometimes there are * not the conditions, but since the failover function is called again and * again, we can't log the same things continuously. * @@ -4125,18 +4127,18 @@ int clusterGetSlaveRank(void) { * * 1) The reason for which the failover can't be initiated changed. * The reasons also include a NONE reason we reset the state to - * when the slave finds that its master is fine (no FAIL flag). - * 2) Also, the log is emitted again if the master is still down and + * when the replica finds that its primary is fine (no FAIL flag). + * 2) Also, the log is emitted again if the primary is still down and * the reason for not failing over is still the same, but more than * CLUSTER_CANT_FAILOVER_RELOG_PERIOD seconds elapsed. - * 3) Finally, the function only logs if the slave is down for more than + * 3) Finally, the function only logs if the replica is down for more than * five seconds + NODE_TIMEOUT. This way nothing is logged when a * failover starts in a reasonable time. 
* - * The function is called with the reason why the slave can't failover + * The function is called with the reason why the replica can't failover * which is one of the integer macros CLUSTER_CANT_FAILOVER_*. * - * The function is guaranteed to be called only if 'myself' is a slave. */ + * The function is guaranteed to be called only if 'myself' is a replica. */ void clusterLogCantFailover(int reason) { char *msg; static time_t lastlog_time = 0; @@ -4149,10 +4151,11 @@ void clusterLogCantFailover(int reason) { server.cluster->cant_failover_reason = reason; - /* We also don't emit any log if the master failed no long ago, the - * goal of this function is to log slaves in a stalled condition for + /* We also don't emit any log if the primary failed no long ago, the + * goal of this function is to log replicas in a stalled condition for * a long time. */ - if (myself->slaveof && nodeFailed(myself->slaveof) && (mstime() - myself->slaveof->fail_time) < nolog_fail_time) + if (myself->replicaof && nodeFailed(myself->replicaof) && + (mstime() - myself->replicaof->fail_time) < nolog_fail_time) return; switch (reason) { @@ -4178,24 +4181,24 @@ void clusterLogCantFailover(int reason) { } /* This function implements the final part of automatic and manual failovers, - * where the slave grabs its master's hash slots, and propagates the new + * where the replica grabs its primary's hash slots, and propagates the new * configuration. * * Note that it's up to the caller to be sure that the node got a new * configuration epoch already. */ -void clusterFailoverReplaceYourMaster(void) { +void clusterFailoverReplaceYourPrimary(void) { int j; - clusterNode *oldmaster = myself->slaveof; + clusterNode *old_primary = myself->replicaof; - if (clusterNodeIsMaster(myself) || oldmaster == NULL) return; + if (clusterNodeIsPrimary(myself) || old_primary == NULL) return; - /* 1) Turn this node into a master. 
*/ - clusterSetNodeAsMaster(myself); - replicationUnsetMaster(); + /* 1) Turn this node into a primary . */ + clusterSetNodeAsPrimary(myself); + replicationUnsetPrimary(); - /* 2) Claim all the slots assigned to our master. */ + /* 2) Claim all the slots assigned to our primary. */ for (j = 0; j < CLUSTER_SLOTS; j++) { - if (clusterNodeCoversSlot(oldmaster, j)) { + if (clusterNodeCoversSlot(old_primary, j)) { clusterDelSlot(j); clusterAddSlot(myself, j); } @@ -4206,22 +4209,22 @@ void clusterFailoverReplaceYourMaster(void) { clusterSaveConfigOrDie(1); /* 4) Pong all the other nodes so that they can update the state - * accordingly and detect that we switched to master role. */ + * accordingly and detect that we switched to primary role. */ clusterBroadcastPong(CLUSTER_BROADCAST_ALL); /* 5) If there was a manual failover in progress, clear the state. */ resetManualFailover(); } -/* This function is called if we are a slave node and our master serving +/* This function is called if we are a replica node and our primary serving * a non-zero amount of hash slots is in FAIL state. * * The goal of this function is: * 1) To check if we are able to perform a failover, is our data updated? - * 2) Try to get elected by masters. + * 2) Try to get elected by primaries. * 3) Perform the failover informing all the other nodes. */ -void clusterHandleSlaveFailover(void) { +void clusterHandleReplicaFailover(void) { mstime_t data_age; mstime_t auth_age = mstime() - server.cluster->failover_auth_time; int needed_quorum = (server.cluster->size / 2) + 1; @@ -4243,12 +4246,13 @@ void clusterHandleSlaveFailover(void) { /* Pre conditions to run the function, that must be met both in case * of an automatic or manual failover: - * 1) We are a slave. - * 2) Our master is flagged as FAIL, or this is a manual failover. + * 1) We are a replica. + * 2) Our primary is flagged as FAIL, or this is a manual failover. 
* 3) We don't have the no failover configuration set, and this is * not a manual failover. */ - if (clusterNodeIsMaster(myself) || myself->slaveof == NULL || (!nodeFailed(myself->slaveof) && !manual_failover) || - (server.cluster_slave_no_failover && !manual_failover)) { + if (clusterNodeIsPrimary(myself) || myself->replicaof == NULL || + (!nodeFailed(myself->replicaof) && !manual_failover) || + (server.cluster_replica_no_failover && !manual_failover)) { /* There are no reasons to failover, so we set the reason why we * are returning without failing over to NONE. */ server.cluster->cant_failover_reason = CLUSTER_CANT_FAILOVER_NONE; @@ -4256,25 +4260,25 @@ void clusterHandleSlaveFailover(void) { } /* Set data_age to the number of milliseconds we are disconnected from - * the master. */ + * the primary. */ if (server.repl_state == REPL_STATE_CONNECTED) { - data_age = (mstime_t)(server.unixtime - server.master->lastinteraction) * 1000; + data_age = (mstime_t)(server.unixtime - server.primary->last_interaction) * 1000; } else { data_age = (mstime_t)(server.unixtime - server.repl_down_since) * 1000; } /* Remove the node timeout from the data age as it is fine that we are - * disconnected from our master at least for the time it was down to be + * disconnected from our primary at least for the time it was down to be * flagged as FAIL, that's the baseline. */ if (data_age > server.cluster_node_timeout) data_age -= server.cluster_node_timeout; - /* Check if our data is recent enough according to the slave validity + /* Check if our data is recent enough according to the replica validity * factor configured by the user. * * Check bypassed for manual failovers. 
*/ - if (server.cluster_slave_validity_factor && - data_age > (((mstime_t)server.repl_ping_slave_period * 1000) + - (server.cluster_node_timeout * server.cluster_slave_validity_factor))) { + if (server.cluster_replica_validity_factor && + data_age > (((mstime_t)server.repl_ping_replica_period * 1000) + + (server.cluster_node_timeout * server.cluster_replica_validity_factor))) { if (!manual_failover) { clusterLogCantFailover(CLUSTER_CANT_FAILOVER_DATA_AGE); return; @@ -4289,9 +4293,9 @@ void clusterHandleSlaveFailover(void) { random() % 500; /* Random delay between 0 and 500 milliseconds. */ server.cluster->failover_auth_count = 0; server.cluster->failover_auth_sent = 0; - server.cluster->failover_auth_rank = clusterGetSlaveRank(); - /* We add another delay that is proportional to the slave rank. - * Specifically 1 second * rank. This way slaves that have a probably + server.cluster->failover_auth_rank = clusterGetReplicaRank(); + /* We add another delay that is proportional to the replica rank. + * Specifically 1 second * rank. This way replicas that have a probably * less updated replication offset, are penalized. */ server.cluster->failover_auth_time += server.cluster->failover_auth_rank * 1000; /* However if this is a manual failover, no delay is needed. */ @@ -4304,21 +4308,21 @@ void clusterHandleSlaveFailover(void) { "Start of election delayed for %lld milliseconds " "(rank #%d, offset %lld).", server.cluster->failover_auth_time - mstime(), server.cluster->failover_auth_rank, - replicationGetSlaveOffset()); + replicationGetReplicaOffset()); /* Now that we have a scheduled election, broadcast our offset - * to all the other slaves so that they'll updated their offsets + * to all the other replicas so that they'll updated their offsets * if our offset is better. 
*/ - clusterBroadcastPong(CLUSTER_BROADCAST_LOCAL_SLAVES); + clusterBroadcastPong(CLUSTER_BROADCAST_LOCAL_REPLICAS); return; } /* It is possible that we received more updated offsets from other - * slaves for the same master since we computed our election delay. + * replicas for the same primary since we computed our election delay. * Update the delay if our rank changed. * * Not performed if this is a manual failover. */ if (server.cluster->failover_auth_sent == 0 && server.cluster->mf_end == 0) { - int newrank = clusterGetSlaveRank(); + int newrank = clusterGetReplicaRank(); if (newrank > server.cluster->failover_auth_rank) { long long added_delay = (newrank - server.cluster->failover_auth_rank) * 1000; server.cluster->failover_auth_time += added_delay; @@ -4354,9 +4358,9 @@ void clusterHandleSlaveFailover(void) { /* Check if we reached the quorum. */ if (server.cluster->failover_auth_count >= needed_quorum) { - /* We have the quorum, we can finally failover the master. */ + /* We have the quorum, we can finally failover the primary. */ - serverLog(LL_NOTICE, "Failover election won: I'm the new master."); + serverLog(LL_NOTICE, "Failover election won: I'm the new primary."); /* Update my configEpoch to the epoch of the election. */ if (myself->configEpoch < server.cluster->failover_auth_epoch) { @@ -4366,99 +4370,99 @@ void clusterHandleSlaveFailover(void) { } /* Take responsibility for the cluster slots. */ - clusterFailoverReplaceYourMaster(); + clusterFailoverReplaceYourPrimary(); } else { clusterLogCantFailover(CLUSTER_CANT_FAILOVER_WAITING_VOTES); } } /* ----------------------------------------------------------------------------- - * CLUSTER slave migration + * CLUSTER replica migration * - * Slave migration is the process that allows a slave of a master that is - * already covered by at least another slave, to "migrate" to a master that - * is orphaned, that is, left with no working slaves. 
+ * Replica migration is the process that allows a replica of a primary that is + * already covered by at least another replica, to "migrate" to a primary that + * is orphaned, that is, left with no working replicas. * ------------------------------------------------------------------------- */ /* This function is responsible to decide if this replica should be migrated - * to a different (orphaned) master. It is called by the clusterCron() function + * to a different (orphaned) primary. It is called by the clusterCron() function * only if: * - * 1) We are a slave node. - * 2) It was detected that there is at least one orphaned master in + * 1) We are a replica node. + * 2) It was detected that there is at least one orphaned primary in * the cluster. - * 3) We are a slave of one of the masters with the greatest number of - * slaves. + * 3) We are a replica of one of the primaries with the greatest number of + * replicas. * * This checks are performed by the caller since it requires to iterate - * the nodes anyway, so we spend time into clusterHandleSlaveMigration() + * the nodes anyway, so we spend time into clusterHandleReplicaMigration() * if definitely needed. * - * The function is called with a pre-computed max_slaves, that is the max - * number of working (not in FAIL state) slaves for a single master. + * The function is called with a pre-computed max_replicas, that is the max + * number of working (not in FAIL state) replicas for a single primary. * * Additional conditions for migration are examined inside the function. */ -void clusterHandleSlaveMigration(int max_slaves) { - int j, okslaves = 0; - clusterNode *mymaster = myself->slaveof, *target = NULL, *candidate = NULL; +void clusterHandleReplicaMigration(int max_replicas) { + int j, ok_replicas = 0; + clusterNode *my_primary = myself->replicaof, *target = NULL, *candidate = NULL; dictIterator *di; dictEntry *de; /* Step 1: Don't migrate if the cluster state is not ok. 
*/ if (server.cluster->state != CLUSTER_OK) return; - /* Step 2: Don't migrate if my master will not be left with at least - * 'migration-barrier' slaves after my migration. */ - if (mymaster == NULL) return; - for (j = 0; j < mymaster->numslaves; j++) - if (!nodeFailed(mymaster->slaves[j]) && !nodeTimedOut(mymaster->slaves[j])) okslaves++; - if (okslaves <= server.cluster_migration_barrier) return; + /* Step 2: Don't migrate if my primary will not be left with at least + * 'migration-barrier' replicas after my migration. */ + if (my_primary == NULL) return; + for (j = 0; j < my_primary->num_replicas; j++) + if (!nodeFailed(my_primary->replicas[j]) && !nodeTimedOut(my_primary->replicas[j])) ok_replicas++; + if (ok_replicas <= server.cluster_migration_barrier) return; /* Step 3: Identify a candidate for migration, and check if among the - * masters with the greatest number of ok slaves, I'm the one with the - * smallest node ID (the "candidate slave"). + * primaries with the greatest number of ok replicas, I'm the one with the + * smallest node ID (the "candidate replica"). * * Note: this means that eventually a replica migration will occur - * since slaves that are reachable again always have their FAIL flag + * since replicas that are reachable again always have their FAIL flag * cleared, so eventually there must be a candidate. * There is a possible race condition causing multiple - * slaves to migrate at the same time, but this is unlikely to + * replicas to migrate at the same time, but this is unlikely to * happen and relatively harmless when it does. 
*/ candidate = myself; di = dictGetSafeIterator(server.cluster->nodes); while ((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); - int okslaves = 0, is_orphaned = 1; + int ok_replicas = 0, is_orphaned = 1; - /* We want to migrate only if this master is working, orphaned, and - * used to have slaves or if failed over a master that had slaves + /* We want to migrate only if this primary is working, orphaned, and + * used to have replicas or if failed over a primary that had replicas * (MIGRATE_TO flag). This way we only migrate to instances that were * supposed to have replicas. */ - if (nodeIsSlave(node) || nodeFailed(node)) is_orphaned = 0; + if (nodeIsReplica(node) || nodeFailed(node)) is_orphaned = 0; if (!(node->flags & CLUSTER_NODE_MIGRATE_TO)) is_orphaned = 0; - /* Check number of working slaves. */ - if (clusterNodeIsMaster(node)) okslaves = clusterCountNonFailingSlaves(node); - if (okslaves > 0) is_orphaned = 0; + /* Check number of working replicas. */ + if (clusterNodeIsPrimary(node)) ok_replicas = clusterCountNonFailingReplicas(node); + if (ok_replicas > 0) is_orphaned = 0; if (is_orphaned) { if (!target && node->numslots > 0) target = node; /* Track the starting time of the orphaned condition for this - * master. */ + * primary. */ if (!node->orphaned_time) node->orphaned_time = mstime(); } else { node->orphaned_time = 0; } - /* Check if I'm the slave candidate for the migration: attached - * to a master with the maximum number of slaves and with the smallest + /* Check if I'm the replica candidate for the migration: attached + * to a primary with the maximum number of replicas and with the smallest * node ID. 
*/ - if (okslaves == max_slaves) { - for (j = 0; j < node->numslaves; j++) { - if (memcmp(node->slaves[j]->name, candidate->name, CLUSTER_NAMELEN) < 0) { - candidate = node->slaves[j]; + if (ok_replicas == max_replicas) { + for (j = 0; j < node->num_replicas; j++) { + if (memcmp(node->replicas[j]->name, candidate->name, CLUSTER_NAMELEN) < 0) { + candidate = node->replicas[j]; } } } @@ -4466,62 +4470,62 @@ void clusterHandleSlaveMigration(int max_slaves) { dictReleaseIterator(di); /* Step 4: perform the migration if there is a target, and if I'm the - * candidate, but only if the master is continuously orphaned for a + * candidate, but only if the primary is continuously orphaned for a * couple of seconds, so that during failovers, we give some time to - * the natural slaves of this instance to advertise their switch from - * the old master to the new one. */ - if (target && candidate == myself && (mstime() - target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY && + * the natural replicas of this instance to advertise their switch from + * the old primary to the new one. */ + if (target && candidate == myself && (mstime() - target->orphaned_time) > CLUSTER_REPLICA_MIGRATION_DELAY && !(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) { - serverLog(LL_NOTICE, "Migrating to orphaned master %.40s (%s) in shard %.40s", target->name, + serverLog(LL_NOTICE, "Migrating to orphaned primary %.40s (%s) in shard %.40s", target->name, target->human_nodename, target->shard_id); - clusterSetMaster(target, 1); + clusterSetPrimary(target, 1); } } /* ----------------------------------------------------------------------------- * CLUSTER manual failover * - * This are the important steps performed by slaves during a manual failover: + * This are the important steps performed by replicas during a manual failover: * 1) User send CLUSTER FAILOVER command. The failover state is initialized * setting mf_end to the millisecond unix time at which we'll abort the * attempt. 
- * 2) Slave sends a MFSTART message to the master requesting to pause clients + * 2) Replica sends a MFSTART message to the primary requesting to pause clients * for two times the manual failover timeout CLUSTER_MF_TIMEOUT. - * When master is paused for manual failover, it also starts to flag + * When primary is paused for manual failover, it also starts to flag * packets with CLUSTERMSG_FLAG0_PAUSED. - * 3) Slave waits for master to send its replication offset flagged as PAUSED. - * 4) If slave received the offset from the master, and its offset matches, - * mf_can_start is set to 1, and clusterHandleSlaveFailover() will perform + * 3) Replica waits for primary to send its replication offset flagged as PAUSED. + * 4) If replica received the offset from the primary, and its offset matches, + * mf_can_start is set to 1, and clusterHandleReplicaFailover() will perform * the failover as usually, with the difference that the vote request - * will be modified to force masters to vote for a slave that has a - * working master. + * will be modified to force primaries to vote for a replica that has a + * working primary. * - * From the point of view of the master things are simpler: when a - * PAUSE_CLIENTS packet is received the master sets mf_end as well and - * the sender in mf_slave. During the time limit for the manual failover - * the master will just send PINGs more often to this slave, flagged with - * the PAUSED flag, so that the slave will set mf_master_offset when receiving - * a packet from the master with this flag set. + * From the point of view of the primary things are simpler: when a + * PAUSE_CLIENTS packet is received the primary sets mf_end as well and + * the sender in mf_replica. During the time limit for the manual failover + * the primary will just send PINGs more often to this replica, flagged with + * the PAUSED flag, so that the replica will set mf_primary_offset when receiving + * a packet from the primary with this flag set. 
* * The goal of the manual failover is to perform a fast failover without - * data loss due to the asynchronous master-slave replication. + * data loss due to the asynchronous primary-replica replication. * -------------------------------------------------------------------------- */ -/* Reset the manual failover state. This works for both masters and slaves +/* Reset the manual failover state. This works for both primaries and replicas * as all the state about manual failover is cleared. * * The function can be used both to initialize the manual failover state at * startup or to abort a manual failover in progress. */ void resetManualFailover(void) { - if (server.cluster->mf_slave) { - /* We were a master failing over, so we paused clients and related actions. + if (server.cluster->mf_replica) { + /* We were a primary failing over, so we paused clients and related actions. * Regardless of the outcome we unpause now to allow traffic again. */ unpauseActions(PAUSE_DURING_FAILOVER); } server.cluster->mf_end = 0; /* No manual failover in progress. */ server.cluster->mf_can_start = 0; - server.cluster->mf_slave = NULL; - server.cluster->mf_master_offset = -1; + server.cluster->mf_replica = NULL; + server.cluster->mf_primary_offset = -1; } /* If a manual failover timed out, abort it. */ @@ -4539,16 +4543,16 @@ void clusterHandleManualFailover(void) { if (server.cluster->mf_end == 0) return; /* If mf_can_start is non-zero, the failover was already triggered so the - * next steps are performed by clusterHandleSlaveFailover(). */ + * next steps are performed by clusterHandleReplicaFailover(). */ if (server.cluster->mf_can_start) return; - if (server.cluster->mf_master_offset == -1) return; /* Wait for offset... */ + if (server.cluster->mf_primary_offset == -1) return; /* Wait for offset... 
*/ - if (server.cluster->mf_master_offset == replicationGetSlaveOffset()) { - /* Our replication offset matches the master replication offset + if (server.cluster->mf_primary_offset == replicationGetReplicaOffset()) { + /* Our replication offset matches the primary replication offset * announced after clients were paused. We can start the failover. */ server.cluster->mf_can_start = 1; - serverLog(LL_NOTICE, "All master replication stream processed, " + serverLog(LL_NOTICE, "All primary replication stream processed, " "manual failover can start."); clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); return; @@ -4628,9 +4632,9 @@ void clusterCron(void) { dictIterator *di; dictEntry *de; int update_state = 0; - int orphaned_masters; /* How many masters there are without ok slaves. */ - int max_slaves; /* Max number of ok slaves for a single master. */ - int this_slaves; /* Number of ok slaves for our master (if we are slave). */ + int orphaned_primaries; /* How many primaries there are without ok replicas. */ + int max_replicas; /* Max number of ok replicas for a single primary. */ + int this_replicas; /* Number of ok replicas for our primary (if we are replica). */ mstime_t min_pong = 0, now = mstime(); clusterNode *min_pong_node = NULL; static unsigned long long iteration = 0; @@ -4690,13 +4694,13 @@ void clusterCron(void) { /* Iterate nodes to check if we need to flag something as failing. * This loop is also responsible to: - * 1) Check if there are orphaned masters (masters without non failing - * slaves). - * 2) Count the max number of non failing slaves for a single master. - * 3) Count the number of slaves for our master, if we are a slave. */ - orphaned_masters = 0; - max_slaves = 0; - this_slaves = 0; + * 1) Check if there are orphaned primaries (primaries without non failing + * replicas). + * 2) Count the max number of non failing replicas for a single primary. + * 3) Count the number of replicas for our primary, if we are a replica. 
*/ + orphaned_primaries = 0; + max_replicas = 0; + this_replicas = 0; di = dictGetSafeIterator(server.cluster->nodes); while ((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); @@ -4704,19 +4708,19 @@ void clusterCron(void) { if (node->flags & (CLUSTER_NODE_MYSELF | CLUSTER_NODE_NOADDR | CLUSTER_NODE_HANDSHAKE)) continue; - /* Orphaned master check, useful only if the current instance - * is a slave that may migrate to another master. */ - if (nodeIsSlave(myself) && clusterNodeIsMaster(node) && !nodeFailed(node)) { - int okslaves = clusterCountNonFailingSlaves(node); + /* Orphaned primary check, useful only if the current instance + * is a replica that may migrate to another primary. */ + if (nodeIsReplica(myself) && clusterNodeIsPrimary(node) && !nodeFailed(node)) { + int ok_replicas = clusterCountNonFailingReplicas(node); - /* A master is orphaned if it is serving a non-zero number of - * slots, have no working slaves, but used to have at least one - * slave, or failed over a master that used to have slaves. */ - if (okslaves == 0 && node->numslots > 0 && node->flags & CLUSTER_NODE_MIGRATE_TO) { - orphaned_masters++; + /* A primary is orphaned if it is serving a non-zero number of + * slots, have no working replicas, but used to have at least one + * replica, or failed over a primary that used to have replicas. */ + if (ok_replicas == 0 && node->numslots > 0 && node->flags & CLUSTER_NODE_MIGRATE_TO) { + orphaned_primaries++; } - if (okslaves > max_slaves) max_slaves = okslaves; - if (myself->slaveof == node) this_slaves = okslaves; + if (ok_replicas > max_replicas) max_replicas = ok_replicas; + if (myself->replicaof == node) this_replicas = ok_replicas; } /* If we are not receiving any data for more than half the cluster @@ -4746,9 +4750,10 @@ void clusterCron(void) { continue; } - /* If we are a master and one of the slaves requested a manual + /* If we are a primary and one of the replicas requested a manual * failover, ping it continuously. 
*/ - if (server.cluster->mf_end && clusterNodeIsMaster(myself) && server.cluster->mf_slave == node && node->link) { + if (server.cluster->mf_end && clusterNodeIsPrimary(myself) && server.cluster->mf_replica == node && + node->link) { clusterSendPing(node->link, CLUSTERMSG_TYPE_PING); continue; } @@ -4772,7 +4777,7 @@ void clusterCron(void) { if (!(node->flags & (CLUSTER_NODE_PFAIL | CLUSTER_NODE_FAIL))) { node->flags |= CLUSTER_NODE_PFAIL; update_state = 1; - if (clusterNodeIsMaster(myself) && server.cluster->size == 1) { + if (clusterNodeIsPrimary(myself) && server.cluster->size == 1) { markNodeAsFailingIfNeeded(node); } else { serverLog(LL_DEBUG, "*** NODE %.40s possibly failing", node->name); @@ -4782,26 +4787,27 @@ void clusterCron(void) { } dictReleaseIterator(di); - /* If we are a slave node but the replication is still turned off, - * enable it if we know the address of our master and it appears to + /* If we are a replica node but the replication is still turned off, + * enable it if we know the address of our primary and it appears to * be up. */ - if (nodeIsSlave(myself) && server.masterhost == NULL && myself->slaveof && nodeHasAddr(myself->slaveof)) { - replicationSetMaster(myself->slaveof->ip, getNodeDefaultReplicationPort(myself->slaveof)); + if (nodeIsReplica(myself) && server.primary_host == NULL && myself->replicaof && nodeHasAddr(myself->replicaof)) { + replicationSetPrimary(myself->replicaof->ip, getNodeDefaultReplicationPort(myself->replicaof)); } /* Abort a manual failover if the timeout is reached. */ manualFailoverCheckTimeout(); - if (nodeIsSlave(myself)) { + if (nodeIsReplica(myself)) { clusterHandleManualFailover(); - if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleSlaveFailover(); - /* If there are orphaned slaves, and we are a slave among the masters - * with the max number of non-failing slaves, consider migrating to - * the orphaned masters. 
Note that it does not make sense to try - * a migration if there is no master with at least *two* working - * slaves. */ - if (orphaned_masters && max_slaves >= 2 && this_slaves == max_slaves && server.cluster_allow_replica_migration) - clusterHandleSlaveMigration(max_slaves); + if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleReplicaFailover(); + /* If there are orphaned replicas, and we are a replica among the primaries + * with the max number of non-failing replicas, consider migrating to + * the orphaned primaries. Note that it does not make sense to try + * a migration if there is no primary with at least *two* working + * replicas. */ + if (orphaned_primaries && max_replicas >= 2 && this_replicas == max_replicas && + server.cluster_allow_replica_migration) + clusterHandleReplicaMigration(max_replicas); } if (update_state || server.cluster->state == CLUSTER_FAIL) clusterUpdateState(); @@ -4822,14 +4828,14 @@ void clusterBeforeSleep(void) { if (flags & CLUSTER_TODO_HANDLE_MANUALFAILOVER) { /* Handle manual failover as soon as possible so that won't have a 100ms * as it was handled only in clusterCron */ - if (nodeIsSlave(myself)) { + if (nodeIsReplica(myself)) { clusterHandleManualFailover(); - if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleSlaveFailover(); + if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleReplicaFailover(); } } else if (flags & CLUSTER_TODO_HANDLE_FAILOVER) { /* Handle failover, this is needed when it is likely that there is already - * the quorum from masters in order to react fast. */ - clusterHandleSlaveFailover(); + * the quorum from primaries in order to react fast. */ + clusterHandleReplicaFailover(); } /* Update the cluster state. */ @@ -4875,21 +4881,21 @@ void bitmapClearBit(unsigned char *bitmap, int pos) { bitmap[byte] &= ~(1 << bit); } -/* Return non-zero if there is at least one master with slaves in the cluster. 
+/* Return non-zero if there is at least one primary with replicas in the cluster. * Otherwise zero is returned. Used by clusterNodeSetSlotBit() to set the - * MIGRATE_TO flag the when a master gets the first slot. */ -int clusterMastersHaveSlaves(void) { - dictIterator *di = dictGetSafeIterator(server.cluster->nodes); + * MIGRATE_TO flag the when a primary gets the first slot. */ +int clusterPrimariesHaveReplicas(void) { + dictIterator di; + dictInitSafeIterator(&di, server.cluster->nodes); dictEntry *de; - int slaves = 0; - while ((de = dictNext(di)) != NULL) { + int replicas = 0; + while ((de = dictNext(&di)) != NULL) { clusterNode *node = dictGetVal(de); - if (nodeIsSlave(node)) continue; - slaves += node->numslaves; + if (nodeIsReplica(node)) continue; + replicas += node->num_replicas; } - dictReleaseIterator(di); - return slaves != 0; + return replicas != 0; } /* Set the slot bit and return the old value. */ @@ -4898,20 +4904,20 @@ int clusterNodeSetSlotBit(clusterNode *n, int slot) { if (!old) { bitmapSetBit(n->slots, slot); n->numslots++; - /* When a master gets its first slot, even if it has no slaves, - * it gets flagged with MIGRATE_TO, that is, the master is a valid + /* When a primary gets its first slot, even if it has no replicas, + * it gets flagged with MIGRATE_TO, that is, the primary is a valid * target for replicas migration, if and only if at least one of - * the other masters has slaves right now. + * the other primaries has replicas right now. * - * Normally masters are valid targets of replica migration if: - * 1. The used to have slaves (but no longer have). - * 2. They are slaves failing over a master that used to have slaves. + * Normally primaries are valid targets of replica migration if: + * 1. The used to have replicas (but no longer have). + * 2. They are replicas failing over a primary that used to have replicas. 
* - * However new masters with slots assigned are considered valid - * migration targets if the rest of the cluster is not a slave-less. + * However new primaries with slots assigned are considered valid + * migration targets if the rest of the cluster is not a replica-less. * * See https://github.com/redis/redis/issues/3043 for more info. */ - if (n->numslots == 1 && clusterMastersHaveSlaves()) n->flags |= CLUSTER_NODE_MIGRATE_TO; + if (n->numslots == 1 && clusterPrimariesHaveReplicas()) n->flags |= CLUSTER_NODE_MIGRATE_TO; } return old; } @@ -4951,7 +4957,7 @@ int clusterDelSlot(int slot) { if (!n) return C_ERR; - /* Cleanup the channels in master/replica as part of slot deletion. */ + /* Cleanup the channels in primary/replica as part of slot deletion. */ removeChannelsInSlot(slot); /* Clear the slot bit. */ serverAssert(clusterNodeClearSlotBit(n, slot) == 1); @@ -4976,7 +4982,7 @@ int clusterDelNodeSlots(clusterNode *node) { } /* Clear the migrating / importing state for all the slots. - * This is useful at initialization and when turning a master into slave. */ + * This is useful at initialization and when turning a primary into replica. 
*/ void clusterCloseAllSlots(void) { memset(server.cluster->migrating_slots_to, 0, sizeof(server.cluster->migrating_slots_to)); memset(server.cluster->importing_slots_from, 0, sizeof(server.cluster->importing_slots_from)); @@ -4996,20 +5002,20 @@ void clusterCloseAllSlots(void) { void clusterUpdateState(void) { int j, new_state; - int reachable_masters = 0; + int reachable_primaries = 0; static mstime_t among_minority_time; static mstime_t first_call_time = 0; server.cluster->todo_before_sleep &= ~CLUSTER_TODO_UPDATE_STATE; - /* If this is a master node, wait some time before turning the state + /* If this is a primary node, wait some time before turning the state * into OK, since it is not a good idea to rejoin the cluster as a writable - * master, after a reboot, without giving the cluster a chance to + * primary, after a reboot, without giving the cluster a chance to * reconfigure this node. Note that the delay is calculated starting from * the first call to this function and not since the server start, in order * to not count the DB loading time. */ if (first_call_time == 0) first_call_time = mstime(); - if (clusterNodeIsMaster(myself) && server.cluster->state == CLUSTER_FAIL && + if (clusterNodeIsPrimary(myself) && server.cluster->state == CLUSTER_FAIL && mstime() - first_call_time < CLUSTER_WRITABLE_DELAY) return; @@ -5027,10 +5033,10 @@ void clusterUpdateState(void) { } } - /* Compute the cluster size, that is the number of master nodes + /* Compute the cluster size, that is the number of primary nodes * serving at least a single slot. * - * At the same time count the number of reachable masters having + * At the same time count the number of reachable primaries having * at least one slot. 
*/ { dictIterator *di; @@ -5041,9 +5047,9 @@ void clusterUpdateState(void) { while ((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); - if (clusterNodeIsMaster(node) && node->numslots) { + if (clusterNodeIsPrimary(node) && node->numslots) { server.cluster->size++; - if ((node->flags & (CLUSTER_NODE_FAIL | CLUSTER_NODE_PFAIL)) == 0) reachable_masters++; + if ((node->flags & (CLUSTER_NODE_FAIL | CLUSTER_NODE_PFAIL)) == 0) reachable_primaries++; } } dictReleaseIterator(di); @@ -5054,7 +5060,7 @@ void clusterUpdateState(void) { { int needed_quorum = (server.cluster->size / 2) + 1; - if (reachable_masters < needed_quorum) { + if (reachable_primaries < needed_quorum) { new_state = CLUSTER_FAIL; among_minority_time = mstime(); } @@ -5064,14 +5070,14 @@ void clusterUpdateState(void) { if (new_state != server.cluster->state) { mstime_t rejoin_delay = server.cluster_node_timeout; - /* If the instance is a master and was partitioned away with the + /* If the instance is a primary and was partitioned away with the * minority, don't let it accept queries for some time after the * partition heals, to make sure there is enough time to receive * a configuration update. */ if (rejoin_delay > CLUSTER_MAX_REJOIN_DELAY) rejoin_delay = CLUSTER_MAX_REJOIN_DELAY; if (rejoin_delay < CLUSTER_MIN_REJOIN_DELAY) rejoin_delay = CLUSTER_MIN_REJOIN_DELAY; - if (new_state == CLUSTER_OK && clusterNodeIsMaster(myself) && mstime() - among_minority_time < rejoin_delay) { + if (new_state == CLUSTER_OK && clusterNodeIsPrimary(myself) && mstime() - among_minority_time < rejoin_delay) { return; } @@ -5104,12 +5110,12 @@ int verifyClusterConfigWithData(void) { int update_config = 0; /* Return ASAP if a module disabled cluster redirections. In that case - * every master can store keys about every possible hash slot. */ + * every primary can store keys about every possible hash slot. 
*/ if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return C_OK; - /* If this node is a slave, don't perform the check at all as we + /* If this node is a replica, don't perform the check at all as we * completely depend on the replication stream. */ - if (nodeIsSlave(myself)) return C_OK; + if (nodeIsReplica(myself)) return C_OK; /* Make sure we only have keys in DB0. */ for (j = 1; j < server.dbnum; j++) { @@ -5162,35 +5168,35 @@ int verifyClusterConfigWithData(void) { /* Remove all the shard channel related information not owned by the current shard. */ static inline void removeAllNotOwnedShardChannelSubscriptions(void) { if (!kvstoreSize(server.pubsubshard_channels)) return; - clusterNode *currmaster = clusterNodeIsMaster(myself) ? myself : myself->slaveof; + clusterNode *cur_primary = clusterNodeIsPrimary(myself) ? myself : myself->replicaof; for (int j = 0; j < CLUSTER_SLOTS; j++) { - if (server.cluster->slots[j] != currmaster) { + if (server.cluster->slots[j] != cur_primary) { removeChannelsInSlot(j); } } } /* ----------------------------------------------------------------------------- - * SLAVE nodes handling + * REPLICA nodes handling * -------------------------------------------------------------------------- */ -/* Set the specified node 'n' as master for this node. - * If this node is currently a master, it is turned into a slave. */ -void clusterSetMaster(clusterNode *n, int closeSlots) { +/* Set the specified node 'n' as primary for this node. + * If this node is currently a primary, it is turned into a replica. 
*/ +void clusterSetPrimary(clusterNode *n, int closeSlots) { serverAssert(n != myself); serverAssert(myself->numslots == 0); - if (clusterNodeIsMaster(myself)) { - myself->flags &= ~(CLUSTER_NODE_MASTER | CLUSTER_NODE_MIGRATE_TO); - myself->flags |= CLUSTER_NODE_SLAVE; + if (clusterNodeIsPrimary(myself)) { + myself->flags &= ~(CLUSTER_NODE_PRIMARY | CLUSTER_NODE_MIGRATE_TO); + myself->flags |= CLUSTER_NODE_REPLICA; } else { - if (myself->slaveof) clusterNodeRemoveSlave(myself->slaveof, myself); + if (myself->replicaof) clusterNodeRemoveReplica(myself->replicaof, myself); } if (closeSlots) clusterCloseAllSlots(); - myself->slaveof = n; + myself->replicaof = n; updateShardId(myself, n->shard_id); - clusterNodeAddSlave(n, myself); - replicationSetMaster(n->ip, getNodeDefaultReplicationPort(n)); + clusterNodeAddReplica(n, myself); + replicationSetPrimary(n->ip, getNodeDefaultReplicationPort(n)); removeAllNotOwnedShardChannelSubscriptions(); resetManualFailover(); } @@ -5205,8 +5211,8 @@ struct clusterNodeFlags { }; static struct clusterNodeFlags clusterNodeFlagsTable[] = { - {CLUSTER_NODE_MYSELF, "myself,"}, {CLUSTER_NODE_MASTER, "master,"}, - {CLUSTER_NODE_SLAVE, "slave,"}, {CLUSTER_NODE_PFAIL, "fail?,"}, + {CLUSTER_NODE_MYSELF, "myself,"}, {CLUSTER_NODE_PRIMARY, "master,"}, + {CLUSTER_NODE_REPLICA, "slave,"}, {CLUSTER_NODE_PFAIL, "fail?,"}, {CLUSTER_NODE_FAIL, "fail,"}, {CLUSTER_NODE_HANDSHAKE, "handshake,"}, {CLUSTER_NODE_NOADDR, "noaddr,"}, {CLUSTER_NODE_NOFAILOVER, "nofailover,"}}; @@ -5277,10 +5283,10 @@ sds clusterGenNodeDescription(client *c, clusterNode *node, int tls_primary) { ci = sdscatlen(ci, " ", 1); ci = representClusterNodeFlags(ci, node->flags); - /* Slave of... or just "-" */ + /* Replica of... 
or just "-" */ ci = sdscatlen(ci, " ", 1); - if (node->slaveof) - ci = sdscatlen(ci, node->slaveof->name, CLUSTER_NAMELEN); + if (node->replicaof) + ci = sdscatlen(ci, node->replicaof->name, CLUSTER_NAMELEN); else ci = sdscatlen(ci, "-", 1); @@ -5540,7 +5546,7 @@ void clusterUpdateSlots(client *c, unsigned char *slots, int del) { long long getNodeReplicationOffset(clusterNode *node) { if (node->flags & CLUSTER_NODE_MYSELF) { - return nodeIsSlave(node) ? replicationGetSlaveOffset() : server.master_repl_offset; + return nodeIsReplica(node) ? replicationGetReplicaOffset() : server.primary_repl_offset; } else { return node->repl_offset; } @@ -5583,7 +5589,7 @@ void addNodeDetailsToShardReply(client *c, clusterNode *node) { long long node_offset = getNodeReplicationOffset(node); addReplyBulkCString(c, "role"); - addReplyBulkCString(c, nodeIsSlave(node) ? "replica" : "master"); + addReplyBulkCString(c, nodeIsReplica(node) ? "replica" : "master"); reply_count++; addReplyBulkCString(c, "replication-offset"); @@ -5594,7 +5600,7 @@ void addNodeDetailsToShardReply(client *c, clusterNode *node) { const char *health_msg = NULL; if (nodeFailed(node)) { health_msg = "fail"; - } else if (nodeIsSlave(node) && node_offset == 0) { + } else if (nodeIsReplica(node) && node_offset == 0) { health_msg = "loading"; } else { health_msg = "online"; @@ -5613,7 +5619,7 @@ void addShardReplyForClusterShards(client *c, list *nodes) { addReplyBulkCString(c, "slots"); /* Use slot_info_pairs from the primary only */ - n = clusterNodeGetMaster(n); + n = clusterNodeGetPrimary(n); if (n->slot_info_pairs != NULL) { serverAssert((n->slot_info_pairs_count % 2) == 0); @@ -5766,10 +5772,10 @@ int getClusterSize(void) { } int getMyShardSlotCount(void) { - if (!nodeIsSlave(server.cluster->myself)) { + if (!nodeIsReplica(server.cluster->myself)) { return server.cluster->myself->numslots; - } else if (server.cluster->myself->slaveof) { - return server.cluster->myself->slaveof->numslots; + } else if 
(server.cluster->myself->replicaof) { + return server.cluster->myself->replicaof->numslots; } else { return 0; } @@ -5795,8 +5801,8 @@ char **getClusterNodesList(size_t *numnodes) { return ids; } -int clusterNodeIsMaster(clusterNode *n) { - return n->flags & CLUSTER_NODE_MASTER; +int clusterNodeIsPrimary(clusterNode *n) { + return n->flags & CLUSTER_NODE_PRIMARY; } int handleDebugClusterCommand(client *c) { @@ -5840,12 +5846,12 @@ char *clusterNodeIp(clusterNode *node) { return node->ip; } -int clusterNodeIsSlave(clusterNode *node) { - return node->flags & CLUSTER_NODE_SLAVE; +int clusterNodeIsReplica(clusterNode *node) { + return node->flags & CLUSTER_NODE_REPLICA; } -clusterNode *clusterNodeGetMaster(clusterNode *node) { - while (node->slaveof != NULL) node = node->slaveof; +clusterNode *clusterNodeGetPrimary(clusterNode *node) { + while (node->replicaof != NULL) node = node->replicaof; return node; } @@ -5891,7 +5897,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, int optarg_pos = 0; /* Allow primaries to replicate "CLUSTER SETSLOT" */ - if (!(c->flags & CLIENT_MASTER) && nodeIsSlave(myself)) { + if (!(c->flags & CLIENT_PRIMARY) && nodeIsReplica(myself)) { addReplyError(c, "Please use SETSLOT only with masters."); return 0; } @@ -5900,7 +5906,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, if (!strcasecmp(c->argv[3]->ptr, "migrating") && c->argc >= 5) { /* CLUSTER SETSLOT MIGRATING */ - if (nodeIsMaster(myself) && server.cluster->slots[slot] != myself) { + if (nodeIsPrimary(myself) && server.cluster->slots[slot] != myself) { addReplyErrorFormat(c, "I'm not the owner of hash slot %u", slot); return 0; } @@ -5909,7 +5915,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyErrorFormat(c, "I don't know about node %s", (char *)c->argv[4]->ptr); return 0; } - if (nodeIsSlave(n)) { + if (nodeIsReplica(n)) { addReplyError(c, "Target node is not a 
master"); return 0; } @@ -5925,7 +5931,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyErrorFormat(c, "I don't know about node %s", (char *)c->argv[4]->ptr); return 0; } - if (nodeIsSlave(n)) { + if (nodeIsReplica(n)) { addReplyError(c, "Target node is not a master"); return 0; } @@ -5940,7 +5946,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyErrorFormat(c, "Unknown node %s", (char *)c->argv[4]->ptr); return 0; } - if (nodeIsSlave(n)) { + if (nodeIsReplica(n)) { addReplyError(c, "Target node is not a master"); return 0; } @@ -6015,20 +6021,20 @@ void clusterCommandSetSlot(client *c) { * This ensures that all replicas have the latest topology information, enabling * a reliable slot ownership transfer even if the primary node went down during * the process. */ - if (nodeIsMaster(myself) && myself->numslaves != 0 && (c->flags & CLIENT_REPLICATION_DONE) == 0) { + if (nodeIsPrimary(myself) && myself->num_replicas != 0 && (c->flags & CLIENT_REPLICATION_DONE) == 0) { forceCommandPropagation(c, PROPAGATE_REPL); /* We are a primary and this is the first time we see this `SETSLOT` * command. Force-replicate the command to all of our replicas * first and only on success will we handle the command. * Note that * 1. All replicas are expected to ack the replication within the given timeout - * 2. The repl offset target is set to the master's current repl offset + 1. + * 2. The repl offset target is set to the primary's current repl offset + 1. * There is no concern of partial replication because replicas always * ack the repl offset at the command boundary. */ - blockClientForReplicaAck(c, timeout_ms, server.master_repl_offset + 1, myself->numslaves, 0); + blockClientForReplicaAck(c, timeout_ms, server.primary_repl_offset + 1, myself->num_replicas, 0); /* Mark client as pending command for execution after replication to replicas. 
*/ c->flags |= CLIENT_PENDING_COMMAND; - replicationRequestAckFromSlaves(); + replicationRequestAckFromReplicas(); return; } @@ -6061,20 +6067,20 @@ void clusterCommandSetSlot(client *c) { clusterDelSlot(slot); clusterAddSlot(n, slot); - /* If we are a master left without slots, we should turn into a - * replica of the new master. */ + /* If we are a primary left without slots, we should turn into a + * replica of the new primary. */ if (slot_was_mine && n != myself && myself->numslots == 0 && server.cluster_allow_replica_migration) { serverLog(LL_NOTICE, "Lost my last slot during slot migration. Reconfiguring myself " "as a replica of %.40s (%s) in shard %.40s", n->name, n->human_nodename, n->shard_id); - clusterSetMaster(n, 1); + clusterSetPrimary(n, 1); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } /* If this node or this node's primary was importing this slot, * assigning the slot to itself also clears the importing status. */ - if ((n == myself || n == myself->slaveof) && server.cluster->importing_slots_from[slot]) { + if ((n == myself || n == myself->replicaof) && server.cluster->importing_slots_from[slot]) { server.cluster->importing_slots_from[slot] = NULL; /* Only primary broadcasts the updates */ @@ -6235,7 +6241,7 @@ int clusterCommandSpecial(client *c) { } else if (n == myself) { addReplyError(c, "I tried hard but I can't forget myself..."); return 1; - } else if (nodeIsSlave(myself) && myself->slaveof == n) { + } else if (nodeIsReplica(myself) && myself->replicaof == n) { addReplyError(c, "Can't forget my master!"); return 1; } @@ -6258,23 +6264,23 @@ int clusterCommandSpecial(client *c) { return 1; } - /* Can't replicate a slave. */ - if (nodeIsSlave(n)) { + /* Can't replicate a replica. 
*/ + if (nodeIsReplica(n)) { addReplyError(c, "I can only replicate a master, not a replica."); return 1; } - /* If the instance is currently a master, it should have no assigned + /* If the instance is currently a primary, it should have no assigned * slots nor keys to accept to replicate some other node. - * Slaves can switch to another master without issues. */ - if (clusterNodeIsMaster(myself) && (myself->numslots != 0 || kvstoreSize(server.db[0].keys) != 0)) { + * Replicas can switch to another primary without issues. */ + if (clusterNodeIsPrimary(myself) && (myself->numslots != 0 || kvstoreSize(server.db[0].keys) != 0)) { addReplyError(c, "To set a master the node must be empty and " "without assigned slots."); return 1; } - /* Set the master. */ - clusterSetMaster(n, 1); + /* Set the primary. */ + clusterSetPrimary(n, 1); clusterBroadcastPong(CLUSTER_BROADCAST_ALL); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); addReply(c, shared.ok); @@ -6305,13 +6311,13 @@ int clusterCommandSpecial(client *c) { } /* Check preconditions. */ - if (clusterNodeIsMaster(myself)) { + if (clusterNodeIsPrimary(myself)) { addReplyError(c, "You should send CLUSTER FAILOVER to a replica"); return 1; - } else if (myself->slaveof == NULL) { + } else if (myself->replicaof == NULL) { addReplyError(c, "I'm a replica but my master is unknown to me"); return 1; - } else if (!force && (nodeFailed(myself->slaveof) || myself->slaveof->link == NULL)) { + } else if (!force && (nodeFailed(myself->replicaof) || myself->replicaof->link == NULL)) { addReplyError(c, "Master is down or failed, " "please use CLUSTER FAILOVER FORCE"); return 1; @@ -6322,20 +6328,20 @@ int clusterCommandSpecial(client *c) { if (takeover) { /* A takeover does not perform any initial check. 
It just * generates a new configuration epoch for this node without - * consensus, claims the master's slots, and broadcast the new + * consensus, claims the primary's slots, and broadcast the new * configuration. */ - serverLog(LL_NOTICE, "Taking over the master (user request)."); + serverLog(LL_NOTICE, "Taking over the primary (user request)."); clusterBumpConfigEpochWithoutConsensus(); - clusterFailoverReplaceYourMaster(); + clusterFailoverReplaceYourPrimary(); } else if (force) { /* If this is a forced failover, we don't need to talk with our - * master to agree about the offset. We just failover taking over + * primary to agree about the offset. We just failover taking over * it without coordination. */ serverLog(LL_NOTICE, "Forced failover user request accepted."); server.cluster->mf_can_start = 1; } else { serverLog(LL_NOTICE, "Manual failover user request accepted."); - clusterSendMFStart(myself->slaveof); + clusterSendMFStart(myself->replicaof); } addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "set-config-epoch") && c->argc == 3) { @@ -6385,9 +6391,9 @@ int clusterCommandSpecial(client *c) { } } - /* Slaves can be reset while containing data, but not master nodes + /* Replicas can be reset while containing data, but not primary nodes * that must be empty. 
*/ - if (clusterNodeIsMaster(myself) && kvstoreSize(c->db->keys) != 0) { + if (clusterNodeIsPrimary(myself) && kvstoreSize(c->db->keys) != 0) { addReplyError(c, "CLUSTER RESET can't be called with " "master nodes containing keys"); return 1; @@ -6444,12 +6450,12 @@ const char **clusterCommandExtendedHelp(void) { return help; } -int clusterNodeNumSlaves(clusterNode *node) { - return node->numslaves; +int clusterNodeNumReplicas(clusterNode *node) { + return node->num_replicas; } -clusterNode *clusterNodeGetSlave(clusterNode *node, int slave_idx) { - return node->slaves[slave_idx]; +clusterNode *clusterNodeGetReplica(clusterNode *node, int replica_idx) { + return node->replicas[replica_idx]; } clusterNode *getMigratingSlotDest(int slot) { @@ -6495,8 +6501,8 @@ int clusterAllowFailoverCmd(client *c) { return 0; } -void clusterPromoteSelfToMaster(void) { - replicationUnsetMaster(); +void clusterPromoteSelfToPrimary(void) { + replicationUnsetPrimary(); } int detectAndUpdateCachedNodeHealth(void) { @@ -6544,7 +6550,7 @@ void clusterReplicateOpenSlots(void) { sds name = sdsnewlen(nodes_ptr[j]->name, sizeof(nodes_ptr[j]->name)); argv[4] = createObject(OBJ_STRING, name); - replicationFeedSlaves(0, argv, argc); + replicationFeedReplicas(0, argv, argc); decrRefCount(argv[2]); decrRefCount(argv[4]); diff --git a/src/cluster_legacy.h b/src/cluster_legacy.h index cc02f30a8b..4cb715730f 100644 --- a/src/cluster_legacy.h +++ b/src/cluster_legacy.h @@ -5,13 +5,13 @@ /* The following defines are amount of time, sometimes expressed as * multiplicators of the node timeout value (when ending with MULT). */ -#define CLUSTER_FAIL_REPORT_VALIDITY_MULT 2 /* Fail report validity. */ -#define CLUSTER_FAIL_UNDO_TIME_MULT 2 /* Undo fail if master is back. */ -#define CLUSTER_MF_TIMEOUT 5000 /* Milliseconds to do a manual failover. */ -#define CLUSTER_MF_PAUSE_MULT 2 /* Master pause manual failover mult. */ -#define CLUSTER_SLAVE_MIGRATION_DELAY 5000 /* Delay for slave migration. 
*/ +#define CLUSTER_FAIL_REPORT_VALIDITY_MULT 2 /* Fail report validity. */ +#define CLUSTER_FAIL_UNDO_TIME_MULT 2 /* Undo fail if primary is back. */ +#define CLUSTER_MF_TIMEOUT 5000 /* Milliseconds to do a manual failover. */ +#define CLUSTER_MF_PAUSE_MULT 2 /* Primary pause manual failover mult. */ +#define CLUSTER_REPLICA_MIGRATION_DELAY 5000 /* Delay for replica migration. */ -/* Reasons why a slave is not able to failover. */ +/* Reasons why a replica is not able to failover. */ #define CLUSTER_CANT_FAILOVER_NONE 0 #define CLUSTER_CANT_FAILOVER_DATA_AGE 1 #define CLUSTER_CANT_FAILOVER_WAITING_DELAY 2 @@ -41,23 +41,23 @@ typedef struct clusterLink { } clusterLink; /* Cluster node flags and macros. */ -#define CLUSTER_NODE_MASTER 1 /* The node is a master */ -#define CLUSTER_NODE_SLAVE 2 /* The node is a slave */ +#define CLUSTER_NODE_PRIMARY 1 /* The node is a primary */ +#define CLUSTER_NODE_REPLICA 2 /* The node is a replica */ #define CLUSTER_NODE_PFAIL 4 /* Failure? Need acknowledge */ #define CLUSTER_NODE_FAIL 8 /* The node is believed to be malfunctioning */ #define CLUSTER_NODE_MYSELF 16 /* This node is myself */ #define CLUSTER_NODE_HANDSHAKE 32 /* We have still to exchange the first ping */ #define CLUSTER_NODE_NOADDR 64 /* We don't know the address of this node */ #define CLUSTER_NODE_MEET 128 /* Send a MEET message to this node */ -#define CLUSTER_NODE_MIGRATE_TO 256 /* Master eligible for replica migration. */ -#define CLUSTER_NODE_NOFAILOVER 512 /* Slave will not try to failover. */ +#define CLUSTER_NODE_MIGRATE_TO 256 /* Primary eligible for replica migration. */ +#define CLUSTER_NODE_NOFAILOVER 512 /* replica will not try to failover. */ #define CLUSTER_NODE_EXTENSIONS_SUPPORTED 1024 /* This node supports extensions. 
*/ #define CLUSTER_NODE_NULL_NAME \ "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" \ "\000\000\000\000\000\000\000\000\000\000\000\000" -#define nodeIsMaster(n) ((n)->flags & CLUSTER_NODE_MASTER) -#define nodeIsSlave(n) ((n)->flags & CLUSTER_NODE_SLAVE) +#define nodeIsPrimary(n) ((n)->flags & CLUSTER_NODE_PRIMARY) +#define nodeIsReplica(n) ((n)->flags & CLUSTER_NODE_REPLICA) #define nodeInHandshake(n) ((n)->flags & CLUSTER_NODE_HANDSHAKE) #define nodeHasAddr(n) (!((n)->flags & CLUSTER_NODE_NOADDR)) #define nodeTimedOut(n) ((n)->flags & CLUSTER_NODE_PFAIL) @@ -216,14 +216,14 @@ typedef struct { uint16_t type; /* Message type */ uint16_t count; /* Number of gossip sections. */ uint64_t currentEpoch; /* The epoch accordingly to the sending node. */ - uint64_t configEpoch; /* The config epoch if it's a master, or the last - epoch advertised by its master if it is a - slave. */ - uint64_t offset; /* Master replication offset if node is a master or - processed replication offset if node is a slave. */ + uint64_t configEpoch; /* The config epoch if it's a primary, or the last + epoch advertised by its primary if it is a + replica. */ + uint64_t offset; /* Primary replication offset if node is a primary or + processed replication offset if node is a replica. */ char sender[CLUSTER_NAMELEN]; /* Name of the sender node */ unsigned char myslots[CLUSTER_SLOTS / 8]; - char slaveof[CLUSTER_NAMELEN]; + char replicaof[CLUSTER_NAMELEN]; char myip[NET_IP_STR_LEN]; /* Sender IP, if not all zeroed. */ uint16_t extensions; /* Number of extensions sent along with this packet. */ char notused1[30]; /* 30 bytes reserved for future usage. 
*/ @@ -256,7 +256,7 @@ static_assert(offsetof(clusterMsg, configEpoch) == 24, "unexpected field offset" static_assert(offsetof(clusterMsg, offset) == 32, "unexpected field offset"); static_assert(offsetof(clusterMsg, sender) == 40, "unexpected field offset"); static_assert(offsetof(clusterMsg, myslots) == 80, "unexpected field offset"); -static_assert(offsetof(clusterMsg, slaveof) == 2128, "unexpected field offset"); +static_assert(offsetof(clusterMsg, replicaof) == 2128, "unexpected field offset"); static_assert(offsetof(clusterMsg, myip) == 2168, "unexpected field offset"); static_assert(offsetof(clusterMsg, extensions) == 2214, "unexpected field offset"); static_assert(offsetof(clusterMsg, notused1) == 2216, "unexpected field offset"); @@ -271,10 +271,10 @@ static_assert(offsetof(clusterMsg, data) == 2256, "unexpected field offset"); /* Message flags better specify the packet content or are used to * provide some information about the node state. */ -#define CLUSTERMSG_FLAG0_PAUSED (1 << 0) /* Master paused for manual failover. */ +#define CLUSTERMSG_FLAG0_PAUSED (1 << 0) /* Primary paused for manual failover. */ #define CLUSTERMSG_FLAG0_FORCEACK \ (1 << 1) /* Give ACK to AUTH_REQUEST even if \ - master is up. */ + primary is up. */ #define CLUSTERMSG_FLAG0_EXT_DATA (1 << 2) /* Message contains extension data */ struct _clusterNode { @@ -287,20 +287,20 @@ struct _clusterNode { uint16_t *slot_info_pairs; /* Slots info represented as (start/end) pair (consecutive index). */ int slot_info_pairs_count; /* Used number of slots in slot_info_pairs */ int numslots; /* Number of slots handled by this node */ - int numslaves; /* Number of slave nodes, if this is a master */ - clusterNode **slaves; /* pointers to slave nodes */ - clusterNode *slaveof; /* pointer to the master node. Note that it - may be NULL even if the node is a slave - if we don't have the master node in our - tables. 
*/ + int num_replicas; /* Number of replica nodes, if this is a primary */ + clusterNode **replicas; /* pointers to replica nodes */ + clusterNode *replicaof; /* pointer to the primary node. Note that it + may be NULL even if the node is a replica + if we don't have the primary node in our + tables. */ unsigned long long last_in_ping_gossip; /* The number of the last carried in the ping gossip section */ mstime_t ping_sent; /* Unix time we sent latest ping */ mstime_t pong_received; /* Unix time we received the pong */ mstime_t data_received; /* Unix time we received any data */ mstime_t fail_time; /* Unix time when FAIL flag was set */ - mstime_t voted_time; /* Last time we voted for a slave of this master */ + mstime_t voted_time; /* Last time we voted for a replica of this primary */ mstime_t repl_offset_time; /* Unix time we received offset for this node */ - mstime_t orphaned_time; /* Starting time of orphaned master condition */ + mstime_t orphaned_time; /* Starting time of orphaned primary condition */ long long repl_offset; /* Last known repl offset for this node. */ char ip[NET_IP_STR_LEN]; /* Latest known IP address of this node */ sds hostname; /* The known hostname for this node */ @@ -319,32 +319,32 @@ struct clusterState { clusterNode *myself; /* This node */ uint64_t currentEpoch; int state; /* CLUSTER_OK, CLUSTER_FAIL, ... */ - int size; /* Num of master nodes with at least one slot */ + int size; /* Num of primary nodes with at least one slot */ dict *nodes; /* Hash table of name -> clusterNode structures */ dict *shards; /* Hash table of shard_id -> list (of nodes) structures */ dict *nodes_black_list; /* Nodes we don't re-add for a few seconds. */ clusterNode *migrating_slots_to[CLUSTER_SLOTS]; clusterNode *importing_slots_from[CLUSTER_SLOTS]; clusterNode *slots[CLUSTER_SLOTS]; - /* The following fields are used to take the slave state on elections. */ + /* The following fields are used to take the replica state on elections. 
*/ mstime_t failover_auth_time; /* Time of previous or next election. */ int failover_auth_count; /* Number of votes received so far. */ int failover_auth_sent; /* True if we already asked for votes. */ - int failover_auth_rank; /* This slave rank for current auth request. */ + int failover_auth_rank; /* This replica rank for current auth request. */ uint64_t failover_auth_epoch; /* Epoch of the current election. */ - int cant_failover_reason; /* Why a slave is currently not able to + int cant_failover_reason; /* Why a replica is currently not able to failover. See the CANT_FAILOVER_* macros. */ /* Manual failover state in common. */ mstime_t mf_end; /* Manual failover time limit (ms unixtime). It is zero if there is no MF in progress. */ - /* Manual failover state of master. */ - clusterNode *mf_slave; /* Slave performing the manual failover. */ - /* Manual failover state of slave. */ - long long mf_master_offset; /* Master offset the slave needs to start MF + /* Manual failover state of primary. */ + clusterNode *mf_replica; /* replica performing the manual failover. */ + /* Manual failover state of replica. */ + long long mf_primary_offset; /* Primary offset the replica needs to start MF or -1 if still not received. */ - int mf_can_start; /* If non-zero signal that the manual failover - can start requesting masters vote. */ - /* The following fields are used by masters to take state on elections. */ + int mf_can_start; /* If non-zero signal that the manual failover + can start requesting primary vote. */ + /* The following fields are used by primaries to take state on elections. */ uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */ int todo_before_sleep; /* Things to do in clusterBeforeSleep(). 
*/ /* Stats */ diff --git a/src/config.c b/src/config.c index bd01d9156a..13b66f00b5 100644 --- a/src/config.c +++ b/src/config.c @@ -124,7 +124,7 @@ configEnum propagation_error_behavior_enum[] = {{"ignore", PROPAGATION_ERR_BEHAV /* Output buffer limits presets. */ clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = { {0, 0, 0}, /* normal */ - {1024 * 1024 * 256, 1024 * 1024 * 64, 60}, /* slave */ + {1024 * 1024 * 256, 1024 * 1024 * 64, 60}, /* replica */ {1024 * 1024 * 32, 1024 * 1024 * 8, 60} /* pubsub */ }; @@ -373,7 +373,7 @@ static int updateClientOutputBufferLimit(sds *args, int arg_len, const char **er * error in a single client class is present. */ for (j = 0; j < arg_len; j += 4) { class = getClientTypeByName(args[j]); - if (class == -1 || class == CLIENT_TYPE_MASTER) { + if (class == -1 || class == CLIENT_TYPE_PRIMARY) { if (err) *err = "Invalid client class specified in " "buffer limit configuration."; @@ -574,7 +574,7 @@ void loadServerConfigFromString(char *config) { } /* Sanity checks. */ - if (server.cluster_enabled && server.masterhost) { + if (server.cluster_enabled && server.primary_host) { err = "replicaof directive not allowed in cluster mode"; goto loaderr; } @@ -1422,19 +1422,19 @@ void rewriteConfigDirOption(standardConfig *config, const char *name, struct rew rewriteConfigStringOption(state, name, cwd, NULL); } -/* Rewrite the slaveof option. */ +/* Rewrite the replicaof option. */ void rewriteConfigReplicaOfOption(standardConfig *config, const char *name, struct rewriteConfigState *state) { UNUSED(config); sds line; - /* If this is a master, we want all the slaveof config options + /* If this is a primary, we want all the replicaof config options * in the file to be removed. Note that if this is a cluster instance - * we don't want a slaveof directive inside valkey.conf. */ - if (server.cluster_enabled || server.masterhost == NULL) { + * we don't want a replicaof directive inside valkey.conf. 
*/ + if (server.cluster_enabled || server.primary_host == NULL) { rewriteConfigMarkAsProcessed(state, name); return; } - line = sdscatprintf(sdsempty(), "%s %s %d", name, server.masterhost, server.masterport); + line = sdscatprintf(sdsempty(), "%s %s %d", name, server.primary_host, server.primary_port); rewriteConfigRewriteLine(state, name, line, 1); } @@ -2452,9 +2452,9 @@ static int updateMaxmemory(const char **err) { return 1; } -static int updateGoodSlaves(const char **err) { +static int updateGoodReplicas(const char **err) { UNUSED(err); - refreshGoodSlavesCount(); + refreshGoodReplicasCount(); return 1; } @@ -2788,7 +2788,7 @@ static int setConfigOOMScoreAdjValuesOption(standardConfig *config, sds *argv, i * keep the configuration, which may still be valid for privileged processes. */ - if (values[CONFIG_OOM_REPLICA] < values[CONFIG_OOM_MASTER] || + if (values[CONFIG_OOM_REPLICA] < values[CONFIG_OOM_PRIMARY] || values[CONFIG_OOM_BGCHILD] < values[CONFIG_OOM_REPLICA]) { serverLog(LL_WARNING, "The oom-score-adj-values configuration may not work for non-privileged processes! 
" "Please consult the documentation."); @@ -2867,18 +2867,18 @@ static int setConfigReplicaOfOption(standardConfig *config, sds *argv, int argc, return 0; } - sdsfree(server.masterhost); - server.masterhost = NULL; + sdsfree(server.primary_host); + server.primary_host = NULL; if (!strcasecmp(argv[0], "no") && !strcasecmp(argv[1], "one")) { return 1; } char *ptr; - server.masterport = strtol(argv[1], &ptr, 10); - if (server.masterport < 0 || server.masterport > 65535 || *ptr != '\0') { + server.primary_port = strtol(argv[1], &ptr, 10); + if (server.primary_port < 0 || server.primary_port > 65535 || *ptr != '\0') { *err = "Invalid master port"; return 0; } - server.masterhost = sdsnew(argv[0]); + server.primary_host = sdsnew(argv[0]); server.repl_state = REPL_STATE_CONNECT; return 1; } @@ -2891,8 +2891,8 @@ static sds getConfigBindOption(standardConfig *config) { static sds getConfigReplicaOfOption(standardConfig *config) { UNUSED(config); char buf[256]; - if (server.masterhost) - snprintf(buf, sizeof(buf), "%s %d", server.masterhost, server.masterport); + if (server.primary_host) + snprintf(buf, sizeof(buf), "%s %d", server.primary_host, server.primary_port); else buf[0] = '\0'; return sdsnew(buf); @@ -3030,11 +3030,11 @@ standardConfig static_configs[] = { createBoolConfig("aof-load-truncated", NULL, MODIFIABLE_CONFIG, server.aof_load_truncated, 1, NULL, NULL), createBoolConfig("aof-use-rdb-preamble", NULL, MODIFIABLE_CONFIG, server.aof_use_rdb_preamble, 1, NULL, NULL), createBoolConfig("aof-timestamp-enabled", NULL, MODIFIABLE_CONFIG, server.aof_timestamp_enabled, 0, NULL, NULL), - createBoolConfig("cluster-replica-no-failover", "cluster-slave-no-failover", MODIFIABLE_CONFIG, server.cluster_slave_no_failover, 0, NULL, updateClusterFlags), /* Failover by default. 
*/ - createBoolConfig("replica-lazy-flush", "slave-lazy-flush", MODIFIABLE_CONFIG, server.repl_slave_lazy_flush, 0, NULL, NULL), + createBoolConfig("cluster-replica-no-failover", "cluster-slave-no-failover", MODIFIABLE_CONFIG, server.cluster_replica_no_failover, 0, NULL, updateClusterFlags), /* Failover by default. */ + createBoolConfig("replica-lazy-flush", "slave-lazy-flush", MODIFIABLE_CONFIG, server.repl_replica_lazy_flush, 0, NULL, NULL), createBoolConfig("replica-serve-stale-data", "slave-serve-stale-data", MODIFIABLE_CONFIG, server.repl_serve_stale_data, 1, NULL, NULL), - createBoolConfig("replica-read-only", "slave-read-only", DEBUG_CONFIG | MODIFIABLE_CONFIG, server.repl_slave_ro, 1, NULL, NULL), - createBoolConfig("replica-ignore-maxmemory", "slave-ignore-maxmemory", MODIFIABLE_CONFIG, server.repl_slave_ignore_maxmemory, 1, NULL, NULL), + createBoolConfig("replica-read-only", "slave-read-only", DEBUG_CONFIG | MODIFIABLE_CONFIG, server.repl_replica_ro, 1, NULL, NULL), + createBoolConfig("replica-ignore-maxmemory", "slave-ignore-maxmemory", MODIFIABLE_CONFIG, server.repl_replica_ignore_maxmemory, 1, NULL, NULL), createBoolConfig("jemalloc-bg-thread", NULL, MODIFIABLE_CONFIG, server.jemalloc_bg_thread, 1, NULL, updateJemallocBgThread), createBoolConfig("activedefrag", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.active_defrag_enabled, 0, isValidActiveDefrag, NULL), createBoolConfig("syslog-enabled", NULL, IMMUTABLE_CONFIG, server.syslog_enabled, 0, NULL, NULL), @@ -3058,8 +3058,8 @@ standardConfig static_configs[] = { createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL), createStringConfig("unixsocket", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.unixsocket, NULL, NULL, NULL), createStringConfig("pidfile", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.pidfile, NULL, NULL, NULL), - createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, 
server.slave_announce_ip, NULL, NULL, NULL), - createStringConfig("primaryuser", "masteruser", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masteruser, NULL, NULL, NULL), + createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.replica_announce_ip, NULL, NULL, NULL), + createStringConfig("primaryuser", "masteruser", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.primary_user, NULL, NULL, NULL), createStringConfig("cluster-announce-ip", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_ip, NULL, NULL, updateClusterIp), createStringConfig("cluster-config-file", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.cluster_configfile, "nodes.conf", NULL, NULL), createStringConfig("cluster-announce-hostname", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_hostname, NULL, isValidAnnouncedHostname, updateClusterHostname), @@ -3082,7 +3082,7 @@ standardConfig static_configs[] = { createStringConfig("locale-collate", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.locale_collate, "", NULL, updateLocaleCollate), /* SDS Configs */ - createSDSConfig("primaryauth", "masterauth", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masterauth, NULL, NULL, NULL), + createSDSConfig("primaryauth", "masterauth", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.primary_auth, NULL, NULL, NULL), createSDSConfig("requirepass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.requirepass, NULL, NULL, updateRequirePass), /* Enum Configs */ @@ -3108,7 +3108,7 @@ standardConfig static_configs[] = { createIntConfig("port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.port, 6379, INTEGER_CONFIG, NULL, updatePort), /* TCP port. 
*/ createIntConfig("io-threads", NULL, DEBUG_CONFIG | IMMUTABLE_CONFIG, 1, 128, server.io_threads_num, 1, INTEGER_CONFIG, NULL, NULL), /* Single threaded by default */ createIntConfig("auto-aof-rewrite-percentage", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.aof_rewrite_perc, 100, INTEGER_CONFIG, NULL, NULL), - createIntConfig("cluster-replica-validity-factor", "cluster-slave-validity-factor", MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_slave_validity_factor, 10, INTEGER_CONFIG, NULL, NULL), /* Slave max data age factor. */ + createIntConfig("cluster-replica-validity-factor", "cluster-slave-validity-factor", MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_replica_validity_factor, 10, INTEGER_CONFIG, NULL, NULL), /* replica max data age factor. */ createIntConfig("list-max-listpack-size", "list-max-ziplist-size", MODIFIABLE_CONFIG, INT_MIN, INT_MAX, server.list_max_listpack_size, -2, INTEGER_CONFIG, NULL, NULL), createIntConfig("tcp-keepalive", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tcpkeepalive, 300, INTEGER_CONFIG, NULL, NULL), createIntConfig("cluster-migration-barrier", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_migration_barrier, 1, INTEGER_CONFIG, NULL, NULL), @@ -3118,26 +3118,26 @@ standardConfig static_configs[] = { createIntConfig("active-defrag-threshold-upper", NULL, MODIFIABLE_CONFIG, 0, 1000, server.active_defrag_threshold_upper, 100, INTEGER_CONFIG, NULL, updateDefragConfiguration), /* Default: maximum defrag force at 100% fragmentation */ createIntConfig("lfu-log-factor", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_log_factor, 10, INTEGER_CONFIG, NULL, NULL), createIntConfig("lfu-decay-time", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_decay_time, 1, INTEGER_CONFIG, NULL, NULL), - createIntConfig("replica-priority", "slave-priority", MODIFIABLE_CONFIG, 0, INT_MAX, server.slave_priority, 100, INTEGER_CONFIG, NULL, NULL), + createIntConfig("replica-priority", "slave-priority", MODIFIABLE_CONFIG, 0, INT_MAX, 
server.replica_priority, 100, INTEGER_CONFIG, NULL, NULL), createIntConfig("repl-diskless-sync-delay", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_delay, 5, INTEGER_CONFIG, NULL, NULL), createIntConfig("maxmemory-samples", NULL, MODIFIABLE_CONFIG, 1, 64, server.maxmemory_samples, 5, INTEGER_CONFIG, NULL, NULL), createIntConfig("maxmemory-eviction-tenacity", NULL, MODIFIABLE_CONFIG, 0, 100, server.maxmemory_eviction_tenacity, 10, INTEGER_CONFIG, NULL, NULL), createIntConfig("timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.maxidletime, 0, INTEGER_CONFIG, NULL, NULL), /* Default client timeout: infinite */ - createIntConfig("replica-announce-port", "slave-announce-port", MODIFIABLE_CONFIG, 0, 65535, server.slave_announce_port, 0, INTEGER_CONFIG, NULL, NULL), + createIntConfig("replica-announce-port", "slave-announce-port", MODIFIABLE_CONFIG, 0, 65535, server.replica_announce_port, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("tcp-backlog", NULL, IMMUTABLE_CONFIG, 0, INT_MAX, server.tcp_backlog, 511, INTEGER_CONFIG, NULL, NULL), /* TCP listen backlog. */ createIntConfig("cluster-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.cluster_port, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("cluster-announce-bus-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_bus_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Default: Use +10000 offset. 
*/ createIntConfig("cluster-announce-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.port */ createIntConfig("cluster-announce-tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_tls_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.tls_port */ createIntConfig("repl-timeout", NULL, MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_timeout, 60, INTEGER_CONFIG, NULL, NULL), - createIntConfig("repl-ping-replica-period", "repl-ping-slave-period", MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_ping_slave_period, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("repl-ping-replica-period", "repl-ping-slave-period", MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_ping_replica_period, 10, INTEGER_CONFIG, NULL, NULL), createIntConfig("list-compress-depth", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 0, INT_MAX, server.list_compress_depth, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("rdb-key-save-delay", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, INT_MIN, INT_MAX, server.rdb_key_save_delay, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("key-load-delay", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, INT_MIN, INT_MAX, server.key_load_delay, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("active-expire-effort", NULL, MODIFIABLE_CONFIG, 1, 10, server.active_expire_effort, 1, INTEGER_CONFIG, NULL, NULL), /* From 1 to 10. 
*/ createIntConfig("hz", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.config_hz, CONFIG_DEFAULT_HZ, INTEGER_CONFIG, NULL, updateHZ), - createIntConfig("min-replicas-to-write", "min-slaves-to-write", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_slaves_to_write, 0, INTEGER_CONFIG, NULL, updateGoodSlaves), - createIntConfig("min-replicas-max-lag", "min-slaves-max-lag", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_slaves_max_lag, 10, INTEGER_CONFIG, NULL, updateGoodSlaves), + createIntConfig("min-replicas-to-write", "min-slaves-to-write", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_replicas_to_write, 0, INTEGER_CONFIG, NULL, updateGoodReplicas), + createIntConfig("min-replicas-max-lag", "min-slaves-max-lag", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_replicas_max_lag, 10, INTEGER_CONFIG, NULL, updateGoodReplicas), createIntConfig("watchdog-period", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 0, INT_MAX, server.watchdog_period, 0, INTEGER_CONFIG, NULL, updateWatchdogPeriod), createIntConfig("shutdown-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.shutdown_timeout, 10, INTEGER_CONFIG, NULL, NULL), createIntConfig("repl-diskless-sync-max-replicas", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_max_replicas, 0, INTEGER_CONFIG, NULL, NULL), diff --git a/src/db.c b/src/db.c index 2e6d85cf4e..1843395d8c 100644 --- a/src/db.c +++ b/src/db.c @@ -88,8 +88,8 @@ void updateLFU(robj *val) { * * Note: this function also returns NULL if the key is logically expired but * still existing, in case this is a replica and the LOOKUP_WRITE is not set. - * Even if the key expiry is master-driven, we can correctly report a key is - * expired on replicas even if the master is lagging expiring our key via DELs + * Even if the key expiry is primary-driven, we can correctly report a key is + * expired on replicas even if the primary is lagging expiring our key via DELs * in the replication link. 
*/ robj *lookupKey(serverDb *db, robj *key, int flags) { dictEntry *de = dbFind(db, key->ptr); @@ -97,14 +97,14 @@ robj *lookupKey(serverDb *db, robj *key, int flags) { if (de) { val = dictGetVal(de); /* Forcing deletion of expired keys on a replica makes the replica - * inconsistent with the master. We forbid it on readonly replicas, but + * inconsistent with the primary. We forbid it on readonly replicas, but * we have to allow it on writable replicas to make write commands * behave consistently. * * It's possible that the WRITE flag is set even during a readonly * command, since the command may trigger events that cause modules to * perform additional writes. */ - int is_ro_replica = server.masterhost && server.repl_slave_ro; + int is_ro_replica = server.primary_host && server.repl_replica_ro; int expire_flags = 0; if (flags & LOOKUP_WRITE && !is_ro_replica) expire_flags |= EXPIRE_FORCE_DELETE_EXPIRED; if (flags & LOOKUP_NOEXPIRE) expire_flags |= EXPIRE_AVOID_DELETE_EXPIRED; @@ -361,10 +361,10 @@ robj *dbRandomKey(serverDb *db) { key = dictGetKey(de); keyobj = createStringObject(key, sdslen(key)); if (dbFindExpires(db, key)) { - if (allvolatile && server.masterhost && --maxtries == 0) { + if (allvolatile && server.primary_host && --maxtries == 0) { /* If the DB is composed only of keys with an expire set, * it could happen that all the keys are already logically - * expired in the slave, so the function cannot stop because + * expired in the replica, so the function cannot stop because * expireIfNeeded() is false, nor it can stop because * dictGetFairRandomKey() returns NULL (there are keys to return). * To prevent the infinite loop we do some tries, but if there @@ -540,7 +540,7 @@ long long emptyData(int dbnum, int flags, void(callback)(dict *)) { /* Empty the database structure. 
*/ removed = emptyDbStructure(server.db, dbnum, async, callback); - if (dbnum == -1) flushSlaveKeysWithExpireList(); + if (dbnum == -1) flushReplicaKeysWithExpireList(); if (with_functions) { serverAssert(dbnum == -1); @@ -673,7 +673,7 @@ void flushAllDataAndResetRDB(int flags) { if (server.saveparamslen > 0) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); - rdbSave(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE); + rdbSave(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE); } #if defined(USE_JEMALLOC) @@ -1610,7 +1610,7 @@ void swapMainDbWithTempDb(serverDb *tempDb) { } trackingInvalidateKeysOnFlush(1); - flushSlaveKeysWithExpireList(); + flushReplicaKeysWithExpireList(); } /* SWAPDB db1 db2 */ @@ -1666,8 +1666,8 @@ void setExpire(client *c, serverDb *db, robj *key, long long when) { dictSetSignedIntegerVal(de, when); } - int writable_slave = server.masterhost && server.repl_slave_ro == 0; - if (c && writable_slave && !(c->flags & CLIENT_MASTER)) rememberSlaveKeyWithExpire(db, key); + int writable_replica = server.primary_host && server.repl_replica_ro == 0; + if (c && writable_replica && !(c->flags & CLIENT_PRIMARY)) rememberReplicaKeyWithExpire(db, key); } /* Return the expire time of the specified key, or -1 if no expire @@ -1694,7 +1694,7 @@ void deleteExpiredKeyAndPropagate(serverDb *db, robj *keyobj) { } /* Propagate an implicit key deletion into replicas and the AOF file. - * When a key was deleted in the master by eviction, expiration or a similar + * When a key was deleted in the primary by eviction, expiration or a similar * mechanism a DEL/UNLINK operation for this key is sent * to all the replicas and the AOF file if enabled. * @@ -1720,7 +1720,7 @@ void propagateDeletion(serverDb *db, robj *key, int lazy) { incrRefCount(argv[0]); incrRefCount(argv[1]); - /* If the master decided to delete a key we must propagate it to replicas no matter what. 
+ /* If the primary decided to delete a key we must propagate it to replicas no matter what. * Even if module executed a command without asking for propagation. */ int prev_replication_allowed = server.replication_allowed; server.replication_allowed = 1; @@ -1755,13 +1755,13 @@ int keyIsExpired(serverDb *db, robj *key) { * * The behavior of the function depends on the replication role of the * instance, because by default replicas do not delete expired keys. They - * wait for DELs from the master for consistency matters. However even + * wait for DELs from the primary for consistency matters. However even * replicas will try to have a coherent return value for the function, * so that read commands executed in the replica side will be able to * behave like if the key is expired even if still present (because the - * master has yet to propagate the DEL). + * primary has yet to propagate the DEL). * - * In masters as a side effect of finding a key which is expired, such + * In primaries as a side effect of finding a key which is expired, such * key will be evicted from the database. Also this may trigger the * propagation of a DEL/UNLINK command in AOF / replication stream. * @@ -1769,7 +1769,7 @@ int keyIsExpired(serverDb *db, robj *key) { * it still returns KEY_EXPIRED if the key is logically expired. To force deletion * of logically expired keys even on replicas, use the EXPIRE_FORCE_DELETE_EXPIRED * flag. Note though that if the current client is executing - * replicated commands from the master, keys are never considered expired. + * replicated commands from the primary, keys are never considered expired. 
* * On the other hand, if you just want expiration check, but need to avoid * the actual key deletion and propagation of the deletion, use the @@ -1784,7 +1784,7 @@ keyStatus expireIfNeeded(serverDb *db, robj *key, int flags) { /* If we are running in the context of a replica, instead of * evicting the expired key from the database, we return ASAP: - * the replica key expiration is controlled by the master that will + * the replica key expiration is controlled by the primary that will * send us synthesized DEL operations for expired keys. The * exception is when write operations are performed on writable * replicas. @@ -1793,15 +1793,15 @@ keyStatus expireIfNeeded(serverDb *db, robj *key, int flags) { * that is, KEY_VALID if we think the key should still be valid, * KEY_EXPIRED if we think the key is expired but don't want to delete it at this time. * - * When replicating commands from the master, keys are never considered + * When replicating commands from the primary, keys are never considered * expired. */ - if (server.masterhost != NULL) { - if (server.current_client && (server.current_client->flags & CLIENT_MASTER)) return KEY_VALID; + if (server.primary_host != NULL) { + if (server.current_client && (server.current_client->flags & CLIENT_PRIMARY)) return KEY_VALID; if (!(flags & EXPIRE_FORCE_DELETE_EXPIRED)) return KEY_EXPIRED; } /* In some cases we're explicitly instructed to return an indication of a - * missing key without actually deleting it, even on masters. */ + * missing key without actually deleting it, even on primaries. */ if (flags & EXPIRE_AVOID_DELETE_EXPIRED) return KEY_EXPIRED; /* If 'expire' action is paused, for whatever reason, then don't expire any key. 
diff --git a/src/debug.c b/src/debug.c index fc2a49cca8..6394e3f0f4 100644 --- a/src/debug.c +++ b/src/debug.c @@ -552,7 +552,7 @@ void debugCommand(client *c) { if (save) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); - if (rdbSave(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) != C_OK) { + if (rdbSave(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) != C_OK) { addReplyErrorObject(c, shared.err); return; } @@ -845,7 +845,7 @@ void debugCommand(client *c) { server.aof_flush_sleep = atoi(c->argv[2]->ptr); addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "replicate") && c->argc >= 3) { - replicationFeedSlaves(-1, c->argv + 2, c->argc - 2); + replicationFeedReplicas(-1, c->argv + 2, c->argc - 2); addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "error") && c->argc == 3) { sds errstr = sdsnewlen("-", 1); diff --git a/src/evict.c b/src/evict.c index cf209ff065..fb04616871 100644 --- a/src/evict.c +++ b/src/evict.c @@ -321,7 +321,7 @@ unsigned long LFUDecrAndReturn(robj *o) { return counter; } -/* We don't want to count AOF buffers and slaves output buffers as +/* We don't want to count AOF buffers and replicas output buffers as * used memory: the eviction should use mostly data size, because * it can cause feedback-loop when we push DELs into them, putting * more and more DELs will make them bigger, if we count them, we @@ -377,7 +377,7 @@ size_t freeMemoryGetNotCountedMemory(void) { * 'total' total amount of bytes used. * (Populated both for C_ERR and C_OK) * - * 'logical' the amount of memory used minus the slaves/AOF buffers. + * 'logical' the amount of memory used minus the replicas/AOF buffers. * (Populated when C_ERR is returned) * * 'tofree' the amount of memory that should be released @@ -393,7 +393,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev size_t mem_reported, mem_used, mem_tofree; /* Check if we are over the memory usage limit. 
If we are not, no need - * to subtract the slaves output buffers. We can just return ASAP. */ + * to subtract the replicas output buffers. We can just return ASAP. */ mem_reported = zmalloc_used_memory(); if (total) *total = mem_reported; @@ -404,7 +404,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev } if (mem_reported <= server.maxmemory && !level) return C_OK; - /* Remove the size of slaves output buffers and AOF buffer from the + /* Remove the size of replicas output buffers and AOF buffer from the * count of used memory. */ mem_used = mem_reported; size_t overhead = freeMemoryGetNotCountedMemory(); @@ -477,8 +477,8 @@ static int isSafeToPerformEvictions(void) { if (isInsideYieldingLongCommand() || server.loading) return 0; /* By default replicas should ignore maxmemory - * and just be masters exact copies. */ - if (server.masterhost && server.repl_slave_ignore_maxmemory) return 0; + * and just be primaries exact copies. */ + if (server.primary_host && server.repl_replica_ignore_maxmemory) return 0; /* If 'evict' action is paused, for whatever reason, then return false */ if (isPausedActionsWithUpdate(PAUSE_ACTION_EVICT)) return 0; @@ -538,7 +538,7 @@ int performEvictions(void) { long long mem_freed = 0; /* Maybe become negative */ mstime_t latency, eviction_latency; long long delta; - int slaves = listLength(server.slaves); + int replicas = listLength(server.replicas); int result = EVICT_FAIL; if (getMaxmemoryState(&mem_reported, NULL, &mem_tofree, NULL) == C_OK) { @@ -697,7 +697,7 @@ int performEvictions(void) { * start spending so much time here that is impossible to * deliver data to the replicas fast enough, so we force the * transmission here inside the loop. */ - if (slaves) flushSlavesOutputBuffers(); + if (replicas) flushReplicasOutputBuffers(); /* Normally our stop condition is the ability to release * a fixed, pre-computed amount of memory. 
However when we diff --git a/src/expire.c b/src/expire.c index 9261fbee28..05abb9580a 100644 --- a/src/expire.c +++ b/src/expire.c @@ -368,21 +368,21 @@ void activeExpireCycle(int type) { } /*----------------------------------------------------------------------------- - * Expires of keys created in writable slaves + * Expires of keys created in writable replicas * - * Normally slaves do not process expires: they wait the masters to synthesize - * DEL operations in order to retain consistency. However writable slaves are - * an exception: if a key is created in the slave and an expire is assigned - * to it, we need a way to expire such a key, since the master does not know + * Normally replicas do not process expires: they wait the primaries to synthesize + * DEL operations in order to retain consistency. However writable replicas are + * an exception: if a key is created in the replica and an expire is assigned + * to it, we need a way to expire such a key, since the primary does not know * anything about such a key. * - * In order to do so, we track keys created in the slave side with an expire - * set, and call the expireSlaveKeys() function from time to time in order to + * In order to do so, we track keys created in the replica side with an expire + * set, and call the expireReplicaKeys() function from time to time in order to * reclaim the keys if they already expired. * * Note that the use case we are trying to cover here, is a popular one where - * slaves are put in writable mode in order to compute slow operations in - * the slave side that are mostly useful to actually read data in a more + * replicas are put in writable mode in order to compute slow operations in + * the replica side that are mostly useful to actually read data in a more * processed way. Think at sets intersections in a tmp key, with an expire so * that it is also used as a cache to avoid intersecting every time. 
* @@ -391,9 +391,9 @@ void activeExpireCycle(int type) { *----------------------------------------------------------------------------*/ /* The dictionary where we remember key names and database ID of keys we may - * want to expire from the slave. Since this function is not often used we + * want to expire from the replica. Since this function is not often used we * don't even care to initialize the database at startup. We'll do it once - * the feature is used the first time, that is, when rememberSlaveKeyWithExpire() + * the feature is used the first time, that is, when rememberReplicaKeyWithExpire() * is called. * * The dictionary has an SDS string representing the key as the hash table @@ -402,17 +402,17 @@ * with a DB id > 63 are not expired, but a trivial fix is to set the bitmap * to the max 64 bit unsigned value when we know there is a key with a DB * ID greater than 63, and check all the configured DBs in such a case. */ -dict *slaveKeysWithExpire = NULL; +dict *replicaKeysWithExpire = NULL; -/* Check the set of keys created by the master with an expire set in order to +/* Check the set of keys created by the primary with an expire set in order to * check if they should be evicted. */ -void expireSlaveKeys(void) { - if (slaveKeysWithExpire == NULL || dictSize(slaveKeysWithExpire) == 0) return; +void expireReplicaKeys(void) { + if (replicaKeysWithExpire == NULL || dictSize(replicaKeysWithExpire) == 0) return; int cycles = 0, noexpire = 0; mstime_t start = mstime(); while (1) { - dictEntry *de = dictGetRandomKey(slaveKeysWithExpire); + dictEntry *de = dictGetRandomKey(replicaKeysWithExpire); sds keyname = dictGetKey(de); uint64_t dbids = dictGetUnsignedIntegerVal(de); uint64_t new_dbids = 0; @@ -447,26 +447,26 @@ } /* Set the new bitmap as value of the key, in the dictionary - * of keys with an expire set directly in the writable slave. 
Otherwise + * of keys with an expire set directly in the writable replica. Otherwise * if the bitmap is zero, we no longer need to keep track of it. */ if (new_dbids) dictSetUnsignedIntegerVal(de, new_dbids); else - dictDelete(slaveKeysWithExpire, keyname); + dictDelete(replicaKeysWithExpire, keyname); /* Stop conditions: found 3 keys we can't expire in a row or * time limit was reached. */ cycles++; if (noexpire > 3) break; if ((cycles % 64) == 0 && mstime() - start > 1) break; - if (dictSize(slaveKeysWithExpire) == 0) break; + if (dictSize(replicaKeysWithExpire) == 0) break; } } /* Track keys that received an EXPIRE or similar command in the context - * of a writable slave. */ -void rememberSlaveKeyWithExpire(serverDb *db, robj *key) { - if (slaveKeysWithExpire == NULL) { + * of a writable replica. */ +void rememberReplicaKeyWithExpire(serverDb *db, robj *key) { + if (replicaKeysWithExpire == NULL) { static dictType dt = { dictSdsHash, /* hash function */ NULL, /* key dup */ @@ -475,17 +475,17 @@ void rememberSlaveKeyWithExpire(serverDb *db, robj *key) { NULL, /* val destructor */ NULL /* allow to expand */ }; - slaveKeysWithExpire = dictCreate(&dt); + replicaKeysWithExpire = dictCreate(&dt); } if (db->id > 63) return; - dictEntry *de = dictAddOrFind(slaveKeysWithExpire, key->ptr); + dictEntry *de = dictAddOrFind(replicaKeysWithExpire, key->ptr); /* If the entry was just created, set it to a copy of the SDS string * representing the key: we don't want to need to take those keys - * in sync with the main DB. The keys will be removed by expireSlaveKeys() + * in sync with the main DB. The keys will be removed by expireReplicaKeys() * as it scans to find keys to remove. 
*/ if (dictGetKey(de) == key->ptr) { - dictSetKey(slaveKeysWithExpire, de, sdsdup(key->ptr)); + dictSetKey(replicaKeysWithExpire, de, sdsdup(key->ptr)); dictSetUnsignedIntegerVal(de, 0); } @@ -495,34 +495,34 @@ void rememberSlaveKeyWithExpire(serverDb *db, robj *key) { } /* Return the number of keys we are tracking. */ -size_t getSlaveKeyWithExpireCount(void) { - if (slaveKeysWithExpire == NULL) return 0; - return dictSize(slaveKeysWithExpire); +size_t getReplicaKeyWithExpireCount(void) { + if (replicaKeysWithExpire == NULL) return 0; + return dictSize(replicaKeysWithExpire); } /* Remove the keys in the hash table. We need to do that when data is - * flushed from the server. We may receive new keys from the master with + * flushed from the server. We may receive new keys from the primary with * the same name/db and it is no longer a good idea to expire them. * * Note: technically we should handle the case of a single DB being flushed * but it is not worth it since anyway race conditions using the same set - * of key names in a writable slave and in its master will lead to + * of key names in a writable replica and in its primary will lead to * inconsistencies. This is just a best-effort thing we do. */ -void flushSlaveKeysWithExpireList(void) { - if (slaveKeysWithExpire) { - dictRelease(slaveKeysWithExpire); - slaveKeysWithExpire = NULL; +void flushReplicaKeysWithExpireList(void) { + if (replicaKeysWithExpire) { + dictRelease(replicaKeysWithExpire); + replicaKeysWithExpire = NULL; } } int checkAlreadyExpired(long long when) { /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past * should never be executed as a DEL when load the AOF or in the context - * of a slave instance. + * of a replica instance. * * Instead we add the already expired key to the database with expire time - * (possibly in the past) and wait for an explicit DEL from the master. 
*/ - return (when <= commandTimeSnapshot() && !server.loading && !server.masterhost); + * (possibly in the past) and wait for an explicit DEL from the primary. */ + return (when <= commandTimeSnapshot() && !server.loading && !server.primary_host); } #define EXPIRE_NX (1 << 0) diff --git a/src/hyperloglog.c b/src/hyperloglog.c index 0fb30d9bda..f9bce26634 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -903,8 +903,8 @@ int hllSparseSet(robj *o, long index, uint8_t count) { * convert from sparse to dense a register requires to be updated. * * Note that this in turn means that PFADD will make sure the command - * is propagated to slaves / AOF, so if there is a sparse -> dense - * conversion, it will be performed in all the slaves as well. */ + * is propagated to replicas / AOF, so if there is a sparse -> dense + * conversion, it will be performed in all the replicas as well. */ int dense_retval = hllDenseSet(hdr->registers, index, count); serverAssert(dense_retval == 1); return dense_retval; diff --git a/src/logreqres.c b/src/logreqres.c index af4021afb1..70b4e55f6f 100644 --- a/src/logreqres.c +++ b/src/logreqres.c @@ -78,10 +78,10 @@ static int reqresShouldLog(client *c) { if (!server.req_res_logfile) return 0; /* Ignore client with streaming non-standard response */ - if (c->flags & (CLIENT_PUBSUB | CLIENT_MONITOR | CLIENT_SLAVE)) return 0; + if (c->flags & (CLIENT_PUBSUB | CLIENT_MONITOR | CLIENT_REPLICA)) return 0; - /* We only work on masters (didn't implement reqresAppendResponse to work on shared slave buffers) */ - if (getClientType(c) == CLIENT_TYPE_MASTER) return 0; + /* We only work on primaries (didn't implement reqresAppendResponse to work on shared replica buffers) */ + if (getClientType(c) == CLIENT_TYPE_PRIMARY) return 0; return 1; } diff --git a/src/module.c b/src/module.c index ebb3d0e6c6..e7416c7926 100644 --- a/src/module.c +++ b/src/module.c @@ -1220,7 +1220,7 @@ ValkeyModuleCommand *moduleCreateCommandProxy(struct ValkeyModule *module, 
* Starting from Redis OSS 7.0 this flag has been deprecated. * Declaring a command as "random" can be done using * command tips, see https://valkey.io/topics/command-tips. - * * **"allow-stale"**: The command is allowed to run on slaves that don't + * * **"allow-stale"**: The command is allowed to run on replicas that don't * serve stale data. Don't use if you don't know what * this means. * * **"no-monitor"**: Don't propagate the command on monitor. Use this if @@ -3491,7 +3491,7 @@ int VM_ReplyWithLongDouble(ValkeyModuleCtx *ctx, long double ld) { * ## Commands replication API * -------------------------------------------------------------------------- */ -/* Replicate the specified command and arguments to slaves and AOF, as effect +/* Replicate the specified command and arguments to replicas and AOF, as effect * of execution of the calling command implementation. * * The replicated commands are always wrapped into the MULTI/EXEC that @@ -3565,7 +3565,7 @@ int VM_Replicate(ValkeyModuleCtx *ctx, const char *cmdname, const char *fmt, ... * commands. * * Basically this form of replication is useful when you want to propagate - * the command to the slaves and AOF file exactly as it was called, since + * the command to the replicas and AOF file exactly as it was called, since * the command can just be re-executed to deterministically re-create the * new state starting from the old one. * @@ -3664,12 +3664,12 @@ int modulePopulateReplicationInfoStructure(void *ri, int structver) { ValkeyModuleReplicationInfoV1 *ri1 = ri; memset(ri1, 0, sizeof(*ri1)); ri1->version = structver; - ri1->master = server.masterhost == NULL; - ri1->masterhost = server.masterhost ? server.masterhost : ""; - ri1->masterport = server.masterport; + ri1->primary = server.primary_host == NULL; + ri1->primary_host = server.primary_host ? 
server.primary_host : ""; + ri1->primary_port = server.primary_port; ri1->replid1 = server.replid; ri1->replid2 = server.replid2; - ri1->repl1_offset = server.master_repl_offset; + ri1->repl1_offset = server.primary_repl_offset; ri1->repl2_offset = server.second_replid_offset; return VALKEYMODULE_OK; } @@ -3794,7 +3794,7 @@ int VM_GetSelectedDb(ValkeyModuleCtx *ctx) { * * VALKEYMODULE_CTX_FLAGS_MULTI: The command is running inside a transaction * * * VALKEYMODULE_CTX_FLAGS_REPLICATED: The command was sent over the replication - * link by the MASTER + * link by the PRIMARY * * * VALKEYMODULE_CTX_FLAGS_PRIMARY: The instance is a primary * @@ -3821,16 +3821,16 @@ int VM_GetSelectedDb(ValkeyModuleCtx *ctx) { * * * VALKEYMODULE_CTX_FLAGS_LOADING: Server is loading RDB/AOF * - * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_STALE: No active link with the master. + * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_STALE: No active link with the primary. * * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_CONNECTING: The replica is trying to - * connect with the master. + * connect with the primary. * - * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_TRANSFERRING: Master -> Replica RDB + * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_TRANSFERRING: primary -> Replica RDB * transfer is in progress. * * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_ONLINE: The replica has an active link - * with its master. This is the + * with its primary. This is the * contrary of STALE state. * * * VALKEYMODULE_CTX_FLAGS_ACTIVE_CHILD: There is currently some background @@ -3854,8 +3854,8 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { if (ctx) { if (ctx->client) { if (ctx->client->flags & CLIENT_DENY_BLOCKING) flags |= VALKEYMODULE_CTX_FLAGS_DENY_BLOCKING; - /* Module command received from MASTER, is replicated. */ - if (ctx->client->flags & CLIENT_MASTER) flags |= VALKEYMODULE_CTX_FLAGS_REPLICATED; + /* Module command received from PRIMARY, is replicated. 
*/ + if (ctx->client->flags & CLIENT_PRIMARY) flags |= VALKEYMODULE_CTX_FLAGS_REPLICATED; if (ctx->client->resp == 3) { flags |= VALKEYMODULE_CTX_FLAGS_RESP3; } @@ -3880,7 +3880,7 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { flags |= VALKEYMODULE_CTX_FLAGS_LOADING; /* Maxmemory and eviction policy */ - if (server.maxmemory > 0 && (!server.masterhost || !server.repl_slave_ignore_maxmemory)) { + if (server.maxmemory > 0 && (!server.primary_host || !server.repl_replica_ignore_maxmemory)) { flags |= VALKEYMODULE_CTX_FLAGS_MAXMEMORY; if (server.maxmemory_policy != MAXMEMORY_NO_EVICTION) flags |= VALKEYMODULE_CTX_FLAGS_EVICT; @@ -3891,11 +3891,11 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { if (server.saveparamslen > 0) flags |= VALKEYMODULE_CTX_FLAGS_RDB; /* Replication flags */ - if (server.masterhost == NULL) { + if (server.primary_host == NULL) { flags |= VALKEYMODULE_CTX_FLAGS_PRIMARY; } else { flags |= VALKEYMODULE_CTX_FLAGS_REPLICA; - if (server.repl_slave_ro) flags |= VALKEYMODULE_CTX_FLAGS_READONLY; + if (server.repl_replica_ro) flags |= VALKEYMODULE_CTX_FLAGS_READONLY; /* Replica state flags. */ if (server.repl_state == REPL_STATE_CONNECT || server.repl_state == REPL_STATE_CONNECTING) { @@ -3927,16 +3927,16 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { /* Returns true if a client sent the CLIENT PAUSE command to the server or * if the Cluster does a manual failover, pausing the clients. - * This is needed when we have a master with replicas, and want to write, + * This is needed when we have a primary with replicas, and want to write, * without adding further data to the replication channel, that the replicas - * replication offset, match the one of the master. When this happens, it is - * safe to failover the master without data loss. + * replication offset, match the one of the primary. When this happens, it is + * safe to failover the primary without data loss. 
* * However modules may generate traffic by calling ValkeyModule_Call() with * the "!" flag, or by calling ValkeyModule_Replicate(), in a context outside * commands execution, for instance in timeout callbacks, threads safe * contexts, and so forth. When modules will generate too much traffic, it - * will be hard for the master and replicas offset to match, because there + * will be hard for the primary and replicas offset to match, because there * is more data to send in the replication channel. * * So modules may want to try to avoid very heavy background work that has @@ -6369,21 +6369,21 @@ ValkeyModuleCallReply *VM_Call(ValkeyModuleCtx *ctx, const char *cmdname, const goto cleanup; } - if (server.masterhost && server.repl_slave_ro && !obey_client) { + if (server.primary_host && server.repl_replica_ro && !obey_client) { errno = ESPIPE; if (error_as_call_replies) { - sds msg = sdsdup(shared.roslaveerr->ptr); + sds msg = sdsdup(shared.roreplicaerr->ptr); reply = callReplyCreateError(msg, ctx); } goto cleanup; } } - if (server.masterhost && server.repl_state != REPL_STATE_CONNECTED && server.repl_serve_stale_data == 0 && + if (server.primary_host && server.repl_state != REPL_STATE_CONNECTED && server.repl_serve_stale_data == 0 && !(cmd_flags & CMD_STALE)) { errno = ESPIPE; if (error_as_call_replies) { - sds msg = sdsdup(shared.masterdownerr->ptr); + sds msg = sdsdup(shared.primarydownerr->ptr); reply = callReplyCreateError(msg, ctx); } goto cleanup; @@ -6418,7 +6418,7 @@ ValkeyModuleCallReply *VM_Call(ValkeyModuleCtx *ctx, const char *cmdname, const /* If this is a Cluster node, we need to make sure the module is not * trying to access non-local keys, with the exception of commands - * received from our master. */ + * received from our primary. */ if (server.cluster_enabled && !mustObeyClient(ctx->client)) { int error_code; /* Duplicate relevant flags in the module client. 
*/ @@ -8293,7 +8293,7 @@ void moduleHandleBlockedClients(void) { /* Update the wait offset, we don't know if this blocked client propagated anything, * currently we rather not add any API for that, so we just assume it did. */ - c->woff = server.master_repl_offset; + c->woff = server.primary_repl_offset; /* Put the client in the list of clients that need to write * if there are pending replies here. This is needed since @@ -8687,7 +8687,7 @@ int VM_AddPostNotificationJob(ValkeyModuleCtx *ctx, ValkeyModulePostNotificationJobFunc callback, void *privdata, void (*free_privdata)(void *)) { - if (server.loading || (server.masterhost && server.repl_slave_ro)) { + if (server.loading || (server.primary_host && server.repl_replica_ro)) { return VALKEYMODULE_ERR; } ValkeyModulePostExecUnitJob *job = zmalloc(sizeof(*job)); @@ -8812,7 +8812,7 @@ typedef struct moduleClusterNodeInfo { int flags; char ip[NET_IP_STR_LEN]; int port; - char master_id[40]; /* Only if flags & VALKEYMODULE_NODE_PRIMARY is true. */ + char primary_id[40]; /* Only if flags & VALKEYMODULE_NODE_PRIMARY is true. */ } mdouleClusterNodeInfo; /* We have an array of message types: each bucket is a linked list of @@ -8955,11 +8955,11 @@ size_t VM_GetClusterSize(void) { * or the node ID does not exist from the POV of this local node, VALKEYMODULE_ERR * is returned. * - * The arguments `ip`, `master_id`, `port` and `flags` can be NULL in case we don't - * need to populate back certain info. If an `ip` and `master_id` (only populated - * if the instance is a slave) are specified, they point to buffers holding + * The arguments `ip`, `primary_id`, `port` and `flags` can be NULL in case we don't + * need to populate back certain info. If an `ip` and `primary_id` (only populated + * if the instance is a replica) are specified, they point to buffers holding * at least VALKEYMODULE_NODE_ID_LEN bytes. The strings written back as `ip` - * and `master_id` are not null terminated. 
+ * and `primary_id` are not null terminated. * * The list of flags reported is the following: * @@ -8968,9 +8968,9 @@ size_t VM_GetClusterSize(void) { * * VALKEYMODULE_NODE_REPLICA: The node is a replica * * VALKEYMODULE_NODE_PFAIL: We see the node as failing * * VALKEYMODULE_NODE_FAIL: The cluster agrees the node is failing - * * VALKEYMODULE_NODE_NOFAILOVER: The slave is configured to never failover + * * VALKEYMODULE_NODE_NOFAILOVER: The replica is configured to never failover */ -int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char *master_id, int *port, int *flags) { +int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char *primary_id, int *port, int *flags) { UNUSED(ctx); clusterNode *node = clusterLookupNode(id, strlen(id)); @@ -8980,14 +8980,14 @@ int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char * if (ip) valkey_strlcpy(ip, clusterNodeIp(node), NET_IP_STR_LEN); - if (master_id) { + if (primary_id) { /* If the information is not available, the function will set the * field to zero bytes, so that when the field can't be populated the * function kinda remains predictable. 
*/ - if (clusterNodeIsSlave(node) && clusterNodeGetMaster(node)) - memcpy(master_id, clusterNodeGetName(clusterNodeGetMaster(node)), VALKEYMODULE_NODE_ID_LEN); + if (clusterNodeIsReplica(node) && clusterNodeGetPrimary(node)) + memcpy(primary_id, clusterNodeGetName(clusterNodeGetPrimary(node)), VALKEYMODULE_NODE_ID_LEN); else - memset(master_id, 0, VALKEYMODULE_NODE_ID_LEN); + memset(primary_id, 0, VALKEYMODULE_NODE_ID_LEN); } if (port) *port = getNodeDefaultClientPort(node); @@ -8996,8 +8996,8 @@ int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char * if (flags) { *flags = 0; if (clusterNodeIsMyself(node)) *flags |= VALKEYMODULE_NODE_MYSELF; - if (clusterNodeIsMaster(node)) *flags |= VALKEYMODULE_NODE_PRIMARY; - if (clusterNodeIsSlave(node)) *flags |= VALKEYMODULE_NODE_REPLICA; + if (clusterNodeIsPrimary(node)) *flags |= VALKEYMODULE_NODE_PRIMARY; + if (clusterNodeIsReplica(node)) *flags |= VALKEYMODULE_NODE_REPLICA; if (clusterNodeTimedOut(node)) *flags |= VALKEYMODULE_NODE_PFAIL; if (clusterNodeIsFailing(node)) *flags |= VALKEYMODULE_NODE_FAIL; if (clusterNodeIsNoFailover(node)) *flags |= VALKEYMODULE_NODE_NOFAILOVER; @@ -9016,7 +9016,7 @@ int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char * * * With the following effects: * - * * NO_FAILOVER: prevent Cluster slaves from failing over a dead master. + * * NO_FAILOVER: prevent Cluster replicas from failing over a dead primary. * Also disables the replica migration feature. * * * NO_REDIRECTION: Every node will accept any key, without trying to perform @@ -10594,7 +10594,7 @@ int moduleUnregisterFilters(ValkeyModule *module) { * 1. Invocation by a client. * 2. Invocation through `ValkeyModule_Call()` by any module. * 3. Invocation through Lua `redis.call()`. - * 4. Replication of a command from a master. + * 4. Replication of a command from a primary. * * The filter executes in a special filter context, which is different and more * limited than a ValkeyModuleCtx. 
Because the filter affects any command, it @@ -11243,10 +11243,10 @@ static uint64_t moduleEventVersions[] = { * * * ValkeyModuleEvent_ReplicationRoleChanged: * - * This event is called when the instance switches from master + * This event is called when the instance switches from primary * to replica or the other way around, however the event is * also called when the replica remains a replica but starts to - * replicate with a different master. + * replicate with a different primary. * * The following sub events are available: * @@ -11256,9 +11256,9 @@ static uint64_t moduleEventVersions[] = { * The 'data' field can be casted by the callback to a * `ValkeyModuleReplicationInfo` structure with the following fields: * - * int master; // true if master, false if replica - * char *masterhost; // master instance hostname for NOW_REPLICA - * int masterport; // master instance port for NOW_REPLICA + * int primary; // true if primary, false if replica + * char *primary_host; // primary instance hostname for NOW_REPLICA + * int primary_port; // primary instance port for NOW_REPLICA * char *replid1; // Main replication ID * char *replid2; // Secondary replication ID * uint64_t repl1_offset; // Main replication offset @@ -11315,7 +11315,7 @@ static uint64_t moduleEventVersions[] = { * * Called on loading operations: at startup when the server is * started, but also after a first synchronization when the - * replica is loading the RDB file from the master. + * replica is loading the RDB file from the primary. * The following sub events are available: * * * `VALKEYMODULE_SUBEVENT_LOADING_RDB_START` @@ -11344,7 +11344,7 @@ static uint64_t moduleEventVersions[] = { * * ValkeyModuleEvent_ReplicaChange * * This event is called when the instance (that can be both a - * master or a replica) get a new online replica, or lose a + * primary or a replica) get a new online replica, or lose a * replica since it gets disconnected. 
* The following sub events are available: * @@ -11372,9 +11372,9 @@ static uint64_t moduleEventVersions[] = { * * ValkeyModuleEvent_PrimaryLinkChange * * This is called for replicas in order to notify when the - * replication link becomes functional (up) with our master, + * replication link becomes functional (up) with our primary, * or when it goes down. Note that the link is not considered - * up when we just connected to the master, but only if the + * up when we just connected to the primary, but only if the * replication is happening correctly. * The following sub events are available: * @@ -11442,7 +11442,7 @@ static uint64_t moduleEventVersions[] = { * * * ValkeyModuleEvent_ReplAsyncLoad * - * Called when repl-diskless-load config is set to swapdb and a replication with a master of same + * Called when repl-diskless-load config is set to swapdb and a replication with a primary of same * data set history (matching replication ID) occurs. * In which case the server serves current data set while loading new database in memory from socket. * Modules must have declared they support this mechanism in order to activate it, through @@ -11924,7 +11924,7 @@ void moduleRemoveCateogires(ValkeyModule *module) { * The function aborts the server on errors, since to start with missing * modules is not considered sane: clients may rely on the existence of * given commands, loading AOF also may need some modules to exist, and - * if this instance is a slave, it must understand commands from master. */ + * if this instance is a replica, it must understand commands from primary. */ void moduleLoadFromQueue(void) { listIter li; listNode *ln; @@ -12909,13 +12909,13 @@ int VM_RdbLoad(ValkeyModuleCtx *ctx, ValkeyModuleRdbStream *stream, int flags) { } /* Not allowed on replicas. */ - if (server.masterhost != NULL) { + if (server.primary_host != NULL) { errno = ENOTSUP; return VALKEYMODULE_ERR; } /* Drop replicas if exist. 
*/ - disconnectSlaves(); + disconnectReplicas(); freeReplicationBacklog(); if (server.aof_state != AOF_OFF) stopAppendOnly(); diff --git a/src/modules/helloworld.c b/src/modules/helloworld.c index 43f28a14d4..f74e4e9b66 100644 --- a/src/modules/helloworld.c +++ b/src/modules/helloworld.c @@ -251,7 +251,7 @@ int HelloRepl1_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in /* Another command to show replication. In this case, we call * ValkeyModule_ReplicateVerbatim() to mean we want just the command to be - * propagated to slaves / AOF exactly as it was called by the user. + * propagated to replicas / AOF exactly as it was called by the user. * * This command also shows how to work with string objects. * It takes a list, and increments all the elements (that must have diff --git a/src/networking.c b/src/networking.c index cfe98a2c0e..78f36b8170 100644 --- a/src/networking.c +++ b/src/networking.c @@ -164,10 +164,10 @@ client *createClient(connection *conn) { c->sentlen = 0; c->flags = 0; c->slot = -1; - c->ctime = c->lastinteraction = server.unixtime; + c->ctime = c->last_interaction = server.unixtime; c->duration = 0; clientSetDefaultAuth(c); - c->replstate = REPL_STATE_NONE; + c->repl_state = REPL_STATE_NONE; c->repl_start_cmd_stream_on_ack = 0; c->reploff = 0; c->read_reploff = 0; @@ -176,11 +176,11 @@ client *createClient(connection *conn) { c->repl_ack_time = 0; c->repl_aof_off = 0; c->repl_last_partial_write = 0; - c->slave_listening_port = 0; - c->slave_addr = NULL; + c->replica_listening_port = 0; + c->replica_addr = NULL; c->replica_version = 0; - c->slave_capa = SLAVE_CAPA_NONE; - c->slave_req = SLAVE_REQ_NONE; + c->replica_capa = REPLICA_CAPA_NONE; + c->replica_req = REPLICA_REQ_NONE; c->reply = listCreate(); c->deferred_reply_errors = NULL; c->reply_bytes = 0; @@ -242,10 +242,11 @@ void installClientWriteHandler(client *c) { * buffers can hold, then we'll really install the handler. 
*/ void putClientInPendingWriteQueue(client *c) { /* Schedule the client to write the output buffers to the socket only - * if not already done and, for slaves, if the slave can actually receive + * if not already done and, for replicas, if the replica can actually receive * writes at this stage. */ if (!(c->flags & CLIENT_PENDING_WRITE) && - (c->replstate == REPL_STATE_NONE || (c->replstate == SLAVE_STATE_ONLINE && !c->repl_start_cmd_stream_on_ack))) { + (c->repl_state == REPL_STATE_NONE || + (c->repl_state == REPLICA_STATE_ONLINE && !c->repl_start_cmd_stream_on_ack))) { /* Here instead of installing the write handler, we just flag the * client and put it into a list of clients that have something * to write to the socket. This way before re-entering the event @@ -265,7 +266,7 @@ void putClientInPendingWriteQueue(client *c) { * loop so that when the socket is writable new data gets written. * * If the client should not receive new data, because it is a fake client - * (used to load AOF in memory), a master or because the setup of the write + * (used to load AOF in memory), a primary or because the setup of the write * handler failed, the function returns C_ERR. * * The function may return C_OK without actually installing the write @@ -273,7 +274,7 @@ void putClientInPendingWriteQueue(client *c) { * * 1) The event handler should already be installed since the output buffer * already contains something. - * 2) The client is a slave but not yet online, so we want to just accumulate + * 2) The client is a replica but not yet online, so we want to just accumulate * writes in the buffer but not actually sending them yet. * * Typically gets called every time a reply is built, before adding more @@ -291,9 +292,9 @@ int prepareClientToWrite(client *c) { * CLIENT_PUSHING handling: disables the reply silencing flags. 
*/ if ((c->flags & (CLIENT_REPLY_OFF | CLIENT_REPLY_SKIP)) && !(c->flags & CLIENT_PUSHING)) return C_ERR; - /* Masters don't receive replies, unless CLIENT_MASTER_FORCE_REPLY flag + /* Primaries don't receive replies, unless CLIENT_PRIMARY_FORCE_REPLY flag * is set. */ - if ((c->flags & CLIENT_MASTER) && !(c->flags & CLIENT_MASTER_FORCE_REPLY)) return C_ERR; + if ((c->flags & CLIENT_PRIMARY) && !(c->flags & CLIENT_PRIMARY_FORCE_REPLY)) return C_ERR; if (!c->conn) return C_ERR; /* Fake client for AOF loading. */ @@ -428,7 +429,7 @@ void _addReplyToBufferOrList(client *c, const char *s, size_t len) { * replication link that caused a reply to be generated we'll simply disconnect it. * Note this is the simplest way to check a command added a response. Replication links are used to write data but * not for responses, so we should normally never get here on a replica client. */ - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { sds cmdname = c->lastcmd ? c->lastcmd->fullname : NULL; logInvalidUseAndFreeClientAsync(c, "Replica generated a reply to command '%s'", cmdname ? cmdname : ""); @@ -563,24 +564,24 @@ void afterErrorReply(client *c, const char *s, size_t len, int flags) { c->realcmd->failed_calls++; } - /* Sometimes it could be normal that a slave replies to a master with + /* Sometimes it could be normal that a replica replies to a primary with * an error and this function gets called. Actually the error will never - * be sent because addReply*() against master clients has no effect... + * be sent because addReply*() against primary clients has no effect... * A notable example is: * * EVAL 'redis.call("incr",KEYS[1]); redis.call("nonexisting")' 1 x * - * Where the master must propagate the first change even if the second + * Where the primary must propagate the first change even if the second * will produce an error. 
However it is useful to log such events since * they are rare and may hint at errors in a script or a bug in the server. */ int ctype = getClientType(c); - if (ctype == CLIENT_TYPE_MASTER || ctype == CLIENT_TYPE_SLAVE || c->id == CLIENT_ID_AOF) { + if (ctype == CLIENT_TYPE_PRIMARY || ctype == CLIENT_TYPE_REPLICA || c->id == CLIENT_ID_AOF) { char *to, *from; if (c->id == CLIENT_ID_AOF) { to = "AOF-loading-client"; from = "server"; - } else if (ctype == CLIENT_TYPE_MASTER) { + } else if (ctype == CLIENT_TYPE_PRIMARY) { to = "master"; from = "replica"; } else { @@ -595,16 +596,16 @@ void afterErrorReply(client *c, const char *s, size_t len, int flags) { "to its %s: '%.*s' after processing the command " "'%s'", from, to, (int)len, s, cmdname ? cmdname : ""); - if (ctype == CLIENT_TYPE_MASTER && server.repl_backlog && server.repl_backlog->histlen > 0) { + if (ctype == CLIENT_TYPE_PRIMARY && server.repl_backlog && server.repl_backlog->histlen > 0) { showLatestBacklog(); } server.stat_unexpected_error_replies++; /* Based off the propagation error behavior, check if we need to panic here. There * are currently two checked cases: - * * If this command was from our master and we are not a writable replica. + * * If this command was from our primary and we are not a writable replica. * * We are reading from an AOF file. */ - int panic_in_replicas = (ctype == CLIENT_TYPE_MASTER && server.repl_slave_ro) && + int panic_in_replicas = (ctype == CLIENT_TYPE_PRIMARY && server.repl_replica_ro) && (server.propagation_error_behavior == PROPAGATION_ERR_BEHAVIOR_PANIC || server.propagation_error_behavior == PROPAGATION_ERR_BEHAVIOR_PANIC_ON_REPLICAS); int panic_in_aof = @@ -766,7 +767,7 @@ void *addReplyDeferredLen(client *c) { * replication link that caused a reply to be generated we'll simply disconnect it. * Note this is the simplest way to check a command added a response. 
Replication links are used to write data but * not for responses, so we should normally never get here on a replica client. */ - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { sds cmdname = c->lastcmd ? c->lastcmd->fullname : NULL; logInvalidUseAndFreeClientAsync(c, "Replica generated a reply to command '%s'", cmdname ? cmdname : ""); @@ -1257,7 +1258,7 @@ void copyReplicaOutputBuffer(client *dst, client *src) { /* Return true if the specified client has pending reply buffers to write to * the socket. */ int clientHasPendingReplies(client *c) { - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { /* Replicas use global shared replication buffer instead of * private output buffer. */ serverAssert(c->bufpos == 0 && listLength(c->reply) == 0); @@ -1415,29 +1416,29 @@ void freeClientArgv(client *c) { c->argv = NULL; } -/* Close all the slaves connections. This is useful in chained replication - * when we resync with our own master and want to force all our slaves to +/* Close all the replicas connections. This is useful in chained replication + * when we resync with our own primary and want to force all our replicas to * resync with us as well. */ -void disconnectSlaves(void) { +void disconnectReplicas(void) { listIter li; listNode *ln; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { freeClient((client *)ln->value); } } -/* Check if there is any other slave waiting dumping RDB finished expect me. +/* Check if there is any other replica waiting dumping RDB finished expect me. * This function is useful to judge current dumping RDB can be used for full * synchronization or not. 
*/ -int anyOtherSlaveWaitRdb(client *except_me) { +int anyOtherReplicaWaitRdb(client *except_me) { listIter li; listNode *ln; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (slave != except_me && slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) { + client *replica = ln->value; + if (replica != except_me && replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_END) { return 1; } } @@ -1446,7 +1447,7 @@ int anyOtherSlaveWaitRdb(client *except_me) { /* Remove the specified client from global lists where the client could * be referenced, not including the Pub/Sub channels. - * This is used by freeClient() and replicationCacheMaster(). */ + * This is used by freeClient() and replicationCachePrimary(). */ void unlinkClient(client *c) { listNode *ln; @@ -1467,7 +1468,7 @@ void unlinkClient(client *c) { /* Check if this is a replica waiting for diskless replication (rdb pipe), * in which case it needs to be cleaned from that list */ - if (c->flags & CLIENT_SLAVE && c->replstate == SLAVE_STATE_WAIT_BGSAVE_END && server.rdb_pipe_conns) { + if (c->flags & CLIENT_REPLICA && c->repl_state == REPLICA_STATE_WAIT_BGSAVE_END && server.rdb_pipe_conns) { int i; for (i = 0; i < server.rdb_pipe_numconns; i++) { if (server.rdb_pipe_conns[i] == c->conn) { @@ -1515,7 +1516,7 @@ void unlinkClient(client *c) { void clearClientConnectionState(client *c) { listNode *ln; - /* MONITOR clients are also marked with CLIENT_SLAVE, we need to + /* MONITOR clients are also marked with CLIENT_REPLICA, we need to * distinguish between the two. 
*/ if (c->flags & CLIENT_MONITOR) { @@ -1523,10 +1524,10 @@ void clearClientConnectionState(client *c) { serverAssert(ln != NULL); listDelNode(server.monitors, ln); - c->flags &= ~(CLIENT_MONITOR | CLIENT_SLAVE); + c->flags &= ~(CLIENT_MONITOR | CLIENT_REPLICA); } - serverAssert(!(c->flags & (CLIENT_SLAVE | CLIENT_MASTER))); + serverAssert(!(c->flags & (CLIENT_REPLICA | CLIENT_PRIMARY))); if (c->flags & CLIENT_TRACKING) disableTracking(c); selectDb(c, 0); @@ -1581,7 +1582,7 @@ void freeClient(client *c) { /* If this client was scheduled for async freeing we need to remove it * from the queue. Note that we need to do this here, because later - * we may call replicationCacheMaster() and the client should already + * we may call replicationCachePrimary() and the client should already * be removed from the list of clients to free. */ if (c->flags & CLIENT_CLOSE_ASAP) { ln = listSearchKey(server.clients_to_close, c); @@ -1589,23 +1590,23 @@ void freeClient(client *c) { listDelNode(server.clients_to_close, ln); } - /* If it is our master that's being disconnected we should make sure + /* If it is our primary that's being disconnected we should make sure * to cache the state to try a partial resynchronization later. * * Note that before doing this we make sure that the client is not in * some unexpected state, by checking its flags. 
*/ - if (server.master && c->flags & CLIENT_MASTER) { - serverLog(LL_NOTICE, "Connection with master lost."); + if (server.primary && c->flags & CLIENT_PRIMARY) { + serverLog(LL_NOTICE, "Connection with primary lost."); if (!(c->flags & (CLIENT_PROTOCOL_ERROR | CLIENT_BLOCKED))) { c->flags &= ~(CLIENT_CLOSE_ASAP | CLIENT_CLOSE_AFTER_REPLY); - replicationCacheMaster(c); + replicationCachePrimary(c); return; } } - /* Log link disconnection with slave */ - if (getClientType(c) == CLIENT_TYPE_SLAVE) { - serverLog(LL_NOTICE, "Connection with replica %s lost.", replicationGetSlaveName(c)); + /* Log link disconnection with replica */ + if (getClientType(c) == CLIENT_TYPE_REPLICA) { + serverLog(LL_NOTICE, "Connection with replica %s lost.", replicationGetReplicaName(c)); } /* Free the query buffer */ @@ -1655,44 +1656,44 @@ void freeClient(client *c) { * places where active clients may be referenced. */ unlinkClient(c); - /* Master/slave cleanup Case 1: - * we lost the connection with a slave. */ - if (c->flags & CLIENT_SLAVE) { - /* If there is no any other slave waiting dumping RDB finished, the + /* Primary/replica cleanup Case 1: + * we lost the connection with a replica. */ + if (c->flags & CLIENT_REPLICA) { + /* If there is no any other replica waiting dumping RDB finished, the * current child process need not continue to dump RDB, then we kill it. * So child process won't use more memory, and we also can fork a new * child process asap to dump rdb for next full synchronization or bgsave. * But we also need to check if users enable 'save' RDB, if enable, we * should not remove directly since that means RDB is important for users * to keep data safe and we may delay configured 'save' for full sync. 
*/ - if (server.saveparamslen == 0 && c->replstate == SLAVE_STATE_WAIT_BGSAVE_END && + if (server.saveparamslen == 0 && c->repl_state == REPLICA_STATE_WAIT_BGSAVE_END && server.child_type == CHILD_TYPE_RDB && server.rdb_child_type == RDB_CHILD_TYPE_DISK && - anyOtherSlaveWaitRdb(c) == 0) { + anyOtherReplicaWaitRdb(c) == 0) { killRDBChild(); } - if (c->replstate == SLAVE_STATE_SEND_BULK) { + if (c->repl_state == REPLICA_STATE_SEND_BULK) { if (c->repldbfd != -1) close(c->repldbfd); if (c->replpreamble) sdsfree(c->replpreamble); } - list *l = (c->flags & CLIENT_MONITOR) ? server.monitors : server.slaves; + list *l = (c->flags & CLIENT_MONITOR) ? server.monitors : server.replicas; ln = listSearchKey(l, c); serverAssert(ln != NULL); listDelNode(l, ln); /* We need to remember the time when we started to have zero - * attached slaves, as after some time we'll free the replication + * attached replicas, as after some time we'll free the replication * backlog. */ - if (getClientType(c) == CLIENT_TYPE_SLAVE && listLength(server.slaves) == 0) - server.repl_no_slaves_since = server.unixtime; - refreshGoodSlavesCount(); + if (getClientType(c) == CLIENT_TYPE_REPLICA && listLength(server.replicas) == 0) + server.repl_no_replicas_since = server.unixtime; + refreshGoodReplicasCount(); /* Fire the replica change modules event. */ - if (c->replstate == SLAVE_STATE_ONLINE) + if (c->repl_state == REPLICA_STATE_ONLINE) moduleFireServerEvent(VALKEYMODULE_EVENT_REPLICA_CHANGE, VALKEYMODULE_SUBEVENT_REPLICA_CHANGE_OFFLINE, NULL); } - /* Master/slave cleanup Case 2: - * we lost the connection with the master. */ - if (c->flags & CLIENT_MASTER) replicationHandleMasterDisconnection(); + /* Primary/replica cleanup Case 2: + * we lost the connection with the primary. 
*/ + if (c->flags & CLIENT_PRIMARY) replicationHandlePrimaryDisconnection(); /* Remove client from memory usage buckets */ if (c->mem_usage_bucket) { @@ -1708,7 +1709,7 @@ void freeClient(client *c) { freeClientMultiState(c); sdsfree(c->peerid); sdsfree(c->sockname); - sdsfree(c->slave_addr); + sdsfree(c->replica_addr); zfree(c); } @@ -1889,7 +1890,7 @@ static int _writevToClient(client *c, ssize_t *nwritten) { * to client. */ int _writeToClient(client *c, ssize_t *nwritten) { *nwritten = 0; - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { serverAssert(c->bufpos == 0 && listLength(c->reply) == 0); replBufBlock *o = listNodeValue(c->ref_repl_buf_node); @@ -1966,14 +1967,14 @@ int writeToClient(client *c, int handler_installed) { * just deliver as much data as it is possible to deliver. * * Moreover, we also send as much as possible if the client is - * a slave or a monitor (otherwise, on high-speed traffic, the + * a replica or a monitor (otherwise, on high-speed traffic, the * replication/output buffer will grow indefinitely) */ if (totwritten > NET_MAX_WRITES_PER_EVENT && - (server.maxmemory == 0 || zmalloc_used_memory() < server.maxmemory) && !(c->flags & CLIENT_SLAVE)) + (server.maxmemory == 0 || zmalloc_used_memory() < server.maxmemory) && !(c->flags & CLIENT_REPLICA)) break; } - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes, totwritten, memory_order_relaxed); } else { atomic_fetch_add_explicit(&server.stat_net_output_bytes, totwritten, memory_order_relaxed); @@ -1988,11 +1989,11 @@ int writeToClient(client *c, int handler_installed) { } } if (totwritten > 0) { - /* For clients representing masters we don't count sending data + /* For clients representing primaries we don't count sending data * as an interaction, since we always send REPLCONF ACK commands * that take some time to just fill the socket output 
buffer. * We just rely on data / pings received for timeout detection. */ - if (!(c->flags & CLIENT_MASTER)) c->lastinteraction = server.unixtime; + if (!(c->flags & CLIENT_PRIMARY)) c->last_interaction = server.unixtime; } if (!clientHasPendingReplies(c)) { c->sentlen = 0; @@ -2211,22 +2212,22 @@ int processInlineBuffer(client *c) { return C_ERR; } - /* Newline from slaves can be used to refresh the last ACK time. - * This is useful for a slave to ping back while loading a big + /* Newline from replicas can be used to refresh the last ACK time. + * This is useful for a replica to ping back while loading a big * RDB file. */ - if (querylen == 0 && getClientType(c) == CLIENT_TYPE_SLAVE) c->repl_ack_time = server.unixtime; + if (querylen == 0 && getClientType(c) == CLIENT_TYPE_REPLICA) c->repl_ack_time = server.unixtime; - /* Masters should never send us inline protocol to run actual + /* Primaries should never send us inline protocol to run actual * commands. If this happens, it is likely due to a bug in the server where * we got some desynchronization in the protocol, for example * because of a PSYNC gone bad. * - * However there is an exception: masters may send us just a newline + * However there is an exception: primaries may send us just a newline * to keep the connection active. */ - if (querylen != 0 && c->flags & CLIENT_MASTER) { + if (querylen != 0 && c->flags & CLIENT_PRIMARY) { sdsfreesplitres(argv, argc); - serverLog(LL_WARNING, "WARNING: Receiving inline protocol from master, master stream corruption? Closing the " - "master connection and discarding the cached master."); + serverLog(LL_WARNING, "WARNING: Receiving inline protocol from primary, primary stream corruption? Closing the " + "primary connection and discarding the cached primary."); setProtocolError("Master using the inline protocol. Desync?", c); return C_ERR; } @@ -2257,7 +2258,7 @@ int processInlineBuffer(client *c) { * CLIENT_PROTOCOL_ERROR. 
*/ #define PROTO_DUMP_LEN 128 static void setProtocolError(const char *errstr, client *c) { - if (server.verbosity <= LL_VERBOSE || c->flags & CLIENT_MASTER) { + if (server.verbosity <= LL_VERBOSE || c->flags & CLIENT_PRIMARY) { sds client = catClientInfoString(sdsempty(), c); /* Sample some protocol to given an idea about what was inside. */ @@ -2278,7 +2279,7 @@ static void setProtocolError(const char *errstr, client *c) { } /* Log all the client and protocol info. */ - int loglevel = (c->flags & CLIENT_MASTER) ? LL_WARNING : LL_VERBOSE; + int loglevel = (c->flags & CLIENT_PRIMARY) ? LL_WARNING : LL_VERBOSE; serverLog(loglevel, "Protocol error (%s) from client: %s. %s", errstr, client, buf); sdsfree(client); } @@ -2369,7 +2370,7 @@ int processMultibulkBuffer(client *c) { } ok = string2ll(c->querybuf + c->qb_pos + 1, newline - (c->querybuf + c->qb_pos + 1), &ll); - if (!ok || ll < 0 || (!(c->flags & CLIENT_MASTER) && ll > server.proto_max_bulk_len)) { + if (!ok || ll < 0 || (!(c->flags & CLIENT_PRIMARY) && ll > server.proto_max_bulk_len)) { addReplyError(c, "Protocol error: invalid bulk length"); setProtocolError("invalid bulk length", c); return C_ERR; @@ -2380,8 +2381,8 @@ int processMultibulkBuffer(client *c) { } c->qb_pos = newline - c->querybuf + 2; - if (!(c->flags & CLIENT_MASTER) && ll >= PROTO_MBULK_BIG_ARG) { - /* When the client is not a master client (because master + if (!(c->flags & CLIENT_PRIMARY) && ll >= PROTO_MBULK_BIG_ARG) { + /* When the client is not a primary client (because primary * client's querybuf can only be trimmed after data applied * and sent to replicas). 
* @@ -2423,10 +2424,10 @@ int processMultibulkBuffer(client *c) { c->argv = zrealloc(c->argv, sizeof(robj *) * c->argv_len); } - /* Optimization: if a non-master client's buffer contains JUST our bulk element + /* Optimization: if a non-primary client's buffer contains JUST our bulk element * instead of creating a new object by *copying* the sds we * just use the current sds string. */ - if (!(c->flags & CLIENT_MASTER) && c->qb_pos == 0 && c->bulklen >= PROTO_MBULK_BIG_ARG && + if (!(c->flags & CLIENT_PRIMARY) && c->qb_pos == 0 && c->bulklen >= PROTO_MBULK_BIG_ARG && sdslen(c->querybuf) == (size_t)(c->bulklen + 2)) { c->argv[c->argc++] = createObject(OBJ_STRING, c->querybuf); c->argv_len_sum += c->bulklen; @@ -2455,8 +2456,8 @@ int processMultibulkBuffer(client *c) { /* Perform necessary tasks after a command was executed: * * 1. The client is reset unless there are reasons to avoid doing it. - * 2. In the case of master clients, the replication offset is updated. - * 3. Propagate commands we got from our master to replicas down the line. */ + * 2. In the case of primary clients, the replication offset is updated. + * 3. Propagate commands we got from our primary to replicas down the line. */ void commandProcessed(client *c) { /* If client is blocked(including paused), just return avoid reset and replicate. * @@ -2471,21 +2472,21 @@ void commandProcessed(client *c) { resetClient(c); long long prev_offset = c->reploff; - if (c->flags & CLIENT_MASTER && !(c->flags & CLIENT_MULTI)) { - /* Update the applied replication offset of our master. */ + if (c->flags & CLIENT_PRIMARY && !(c->flags & CLIENT_MULTI)) { + /* Update the applied replication offset of our primary. 
*/ c->reploff = c->read_reploff - sdslen(c->querybuf) + c->qb_pos; } - /* If the client is a master we need to compute the difference + /* If the client is a primary we need to compute the difference * between the applied offset before and after processing the buffer, * to understand how much of the replication stream was actually - * applied to the master state: this quantity, and its corresponding + * applied to the primary state: this quantity, and its corresponding * part of the replication stream, will be propagated to the * sub-replicas and to the replication backlog. */ - if (c->flags & CLIENT_MASTER) { + if (c->flags & CLIENT_PRIMARY) { long long applied = c->reploff - prev_offset; if (applied) { - replicationFeedStreamFromMasterStream(c->querybuf + c->repl_applied, applied); + replicationFeedStreamFromPrimaryStream(c->querybuf + c->repl_applied, applied); c->repl_applied += applied; } } @@ -2519,8 +2520,8 @@ int processCommandAndResetClient(client *c) { * is dead and will stop reading from its buffer. */ server.current_client = old_client; - /* performEvictions may flush slave output buffers. This may - * result in a slave, that may be the active client, to be + /* performEvictions may flush replica output buffers. This may + * result in a replica, that may be the active client, to be * freed. */ return deadclient ? C_ERR : C_OK; } @@ -2543,7 +2544,7 @@ int processPendingCommandAndInputBuffer(client *c) { /* Now process client if it has more data in it's buffer. * - * Note: when a master client steps into this function, + * Note: when a primary client steps into this function, * it can always satisfy this condition, because its querybuf * contains data not applied. */ if (c->querybuf && sdslen(c->querybuf) > 0) { @@ -2567,11 +2568,11 @@ int processInputBuffer(client *c) { * commands to execute in c->argv. */ if (c->flags & CLIENT_PENDING_COMMAND) break; - /* Don't process input from the master while there is a busy script - * condition on the slave. 
We want just to accumulate the replication + /* Don't process input from the primary while there is a busy script + * condition on the replica. We want just to accumulate the replication * stream (instead of replying -BUSY like we do with other clients) and * later resume the processing. */ - if (isInsideYieldingLongCommand() && c->flags & CLIENT_MASTER) break; + if (isInsideYieldingLongCommand() && c->flags & CLIENT_PRIMARY) break; /* CLIENT_CLOSE_AFTER_REPLY closes the connection once the reply is * written to the client. Make sure to not let the reply grow after @@ -2627,15 +2628,15 @@ int processInputBuffer(client *c) { } } - if (c->flags & CLIENT_MASTER) { - /* If the client is a master, trim the querybuf to repl_applied, - * since master client is very special, its querybuf not only + if (c->flags & CLIENT_PRIMARY) { + /* If the client is a primary, trim the querybuf to repl_applied, + * since primary client is very special, its querybuf not only * used to parse command, but also proxy to sub-replicas. * * Here are some scenarios we cannot trim to qb_pos: - * 1. we don't receive complete command from master - * 2. master client blocked cause of client pause - * 3. io threads operate read, master client flagged with CLIENT_PENDING_COMMAND + * 1. we don't receive complete command from primary + * 2. primary client blocked cause of client pause + * 3. io threads operate read, primary client flagged with CLIENT_PENDING_COMMAND * * In these scenarios, qb_pos points to the part of the current command * or the beginning of next command, and the current command is not applied yet, @@ -2686,9 +2687,9 @@ void readQueryFromClient(connection *conn) { * for example once we resume a blocked client after CLIENT PAUSE. */ if (remaining > 0) readlen = remaining; - /* Master client needs expand the readlen when meet BIG_ARG(see #9100), + /* Primary client needs expand the readlen when meet BIG_ARG(see #9100), * but doesn't need align to the next arg, we can read more data. 
*/ - if (c->flags & CLIENT_MASTER && readlen < PROTO_IOBUF_LEN) readlen = PROTO_IOBUF_LEN; + if (c->flags & CLIENT_PRIMARY && readlen < PROTO_IOBUF_LEN) readlen = PROTO_IOBUF_LEN; } if (c->querybuf == NULL) { @@ -2697,7 +2698,7 @@ void readQueryFromClient(connection *conn) { qblen = sdslen(c->querybuf); } - if (!(c->flags & CLIENT_MASTER) && // master client's querybuf can grow greedy. + if (!(c->flags & CLIENT_PRIMARY) && // primary client's querybuf can grow greedy. (big_arg || sdsalloc(c->querybuf) < PROTO_IOBUF_LEN)) { /* When reading a BIG_ARG we won't be reading more than that one arg * into the query buffer, so we don't need to pre-allocate more than we @@ -2737,8 +2738,8 @@ void readQueryFromClient(connection *conn) { qblen = sdslen(c->querybuf); if (c->querybuf_peak < qblen) c->querybuf_peak = qblen; - c->lastinteraction = server.unixtime; - if (c->flags & CLIENT_MASTER) { + c->last_interaction = server.unixtime; + if (c->flags & CLIENT_PRIMARY) { c->read_reploff += nread; atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes, nread, memory_order_relaxed); } else { @@ -2746,7 +2747,7 @@ void readQueryFromClient(connection *conn) { } c->net_input_bytes += nread; - if (!(c->flags & CLIENT_MASTER) && + if (!(c->flags & CLIENT_PRIMARY) && /* The commands cached in the MULTI/EXEC queue have not been executed yet, * so they are also considered a part of the query buffer in a broader sense. 
* @@ -2832,14 +2833,14 @@ sds catClientInfoString(sds s, client *client) { char flags[17], events[3], conninfo[CONN_INFO_LEN], *p; p = flags; - if (client->flags & CLIENT_SLAVE) { + if (client->flags & CLIENT_REPLICA) { if (client->flags & CLIENT_MONITOR) *p++ = 'O'; else *p++ = 'S'; } /* clang-format off */ - if (client->flags & CLIENT_MASTER) *p++ = 'M'; + if (client->flags & CLIENT_PRIMARY) *p++ = 'M'; if (client->flags & CLIENT_PUBSUB) *p++ = 'P'; if (client->flags & CLIENT_MULTI) *p++ = 'x'; if (client->flags & CLIENT_BLOCKED) *p++ = 'b'; @@ -2883,7 +2884,7 @@ sds catClientInfoString(sds s, client *client) { " %s", connGetInfo(client->conn, conninfo, sizeof(conninfo)), " name=%s", client->name ? (char*)client->name->ptr : "", " age=%I", (long long)(commandTimeSnapshot() / 1000 - client->ctime), - " idle=%I", (long long)(server.unixtime - client->lastinteraction), + " idle=%I", (long long)(server.unixtime - client->last_interaction), " flags=%s", flags, " db=%i", client->db->id, " sub=%i", (int) dictSize(client->pubsub_channels), @@ -3026,13 +3027,13 @@ void clientSetinfoCommand(client *c) { /* Reset the client state to resemble a newly connected client. */ void resetCommand(client *c) { - /* MONITOR clients are also marked with CLIENT_SLAVE, we need to + /* MONITOR clients are also marked with CLIENT_REPLICA, we need to * distinguish between the two. */ uint64_t flags = c->flags; - if (flags & CLIENT_MONITOR) flags &= ~(CLIENT_MONITOR | CLIENT_SLAVE); + if (flags & CLIENT_MONITOR) flags &= ~(CLIENT_MONITOR | CLIENT_REPLICA); - if (flags & (CLIENT_SLAVE | CLIENT_MASTER | CLIENT_MODULE)) { + if (flags & (CLIENT_REPLICA | CLIENT_PRIMARY | CLIENT_MODULE)) { addReplyError(c, "can only reset normal client connections"); return; } @@ -3678,7 +3679,7 @@ void helloCommand(client *c) { if (!server.sentinel_mode) { addReplyBulkCString(c, "role"); - addReplyBulkCString(c, server.masterhost ? "replica" : "master"); + addReplyBulkCString(c, server.primary_host ? 
"replica" : "master"); } addReplyBulkCString(c, "modules"); @@ -3825,7 +3826,7 @@ void rewriteClientCommandArgument(client *c, int i, robj *newval) { * the caller wishes. The main usage of this function currently is * enforcing the client output length limits. */ size_t getClientOutputBufferMemoryUsage(client *c) { - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { size_t repl_buf_size = 0; size_t repl_node_num = 0; size_t repl_node_size = sizeof(listNode) + sizeof(replBufBlock); @@ -3875,15 +3876,15 @@ size_t getClientMemoryUsage(client *c, size_t *output_buffer_mem_usage) { * * The function will return one of the following: * CLIENT_TYPE_NORMAL -> Normal client, including MONITOR - * CLIENT_TYPE_SLAVE -> Slave + * CLIENT_TYPE_REPLICA -> replica * CLIENT_TYPE_PUBSUB -> Client subscribed to Pub/Sub channels - * CLIENT_TYPE_MASTER -> The client representing our replication master. + * CLIENT_TYPE_PRIMARY -> The client representing our replication primary. */ int getClientType(client *c) { - if (c->flags & CLIENT_MASTER) return CLIENT_TYPE_MASTER; + if (c->flags & CLIENT_PRIMARY) return CLIENT_TYPE_PRIMARY; /* Even though MONITOR clients are marked as replicas, we * want the expose them as normal clients. 
*/ - if ((c->flags & CLIENT_SLAVE) && !(c->flags & CLIENT_MONITOR)) return CLIENT_TYPE_SLAVE; + if ((c->flags & CLIENT_REPLICA) && !(c->flags & CLIENT_MONITOR)) return CLIENT_TYPE_REPLICA; if (c->flags & CLIENT_PUBSUB) return CLIENT_TYPE_PUBSUB; return CLIENT_TYPE_NORMAL; } @@ -3892,13 +3893,13 @@ int getClientTypeByName(char *name) { if (!strcasecmp(name, "normal")) return CLIENT_TYPE_NORMAL; else if (!strcasecmp(name, "slave")) - return CLIENT_TYPE_SLAVE; + return CLIENT_TYPE_REPLICA; else if (!strcasecmp(name, "replica")) - return CLIENT_TYPE_SLAVE; + return CLIENT_TYPE_REPLICA; else if (!strcasecmp(name, "pubsub")) return CLIENT_TYPE_PUBSUB; else if (!strcasecmp(name, "master")) - return CLIENT_TYPE_MASTER; + return CLIENT_TYPE_PRIMARY; else return -1; } @@ -3906,9 +3907,9 @@ int getClientTypeByName(char *name) { char *getClientTypeName(int class) { switch (class) { case CLIENT_TYPE_NORMAL: return "normal"; - case CLIENT_TYPE_SLAVE: return "slave"; + case CLIENT_TYPE_REPLICA: return "slave"; case CLIENT_TYPE_PUBSUB: return "pubsub"; - case CLIENT_TYPE_MASTER: return "master"; + case CLIENT_TYPE_PRIMARY: return "master"; default: return NULL; } } @@ -3924,9 +3925,9 @@ int checkClientOutputBufferLimits(client *c) { unsigned long used_mem = getClientOutputBufferMemoryUsage(c); class = getClientType(c); - /* For the purpose of output buffer limiting, masters are handled + /* For the purpose of output buffer limiting, primaries are handled * like normal clients. */ - if (class == CLIENT_TYPE_MASTER) class = CLIENT_TYPE_NORMAL; + if (class == CLIENT_TYPE_PRIMARY) class = CLIENT_TYPE_NORMAL; /* Note that it doesn't make sense to set the replica clients output buffer * limit lower than the repl-backlog-size config (partial sync will succeed @@ -3935,7 +3936,7 @@ int checkClientOutputBufferLimits(client *c) { * This doesn't have memory consumption implications since the replica client * will share the backlog buffers memory. 
*/ size_t hard_limit_bytes = server.client_obuf_limits[class].hard_limit_bytes; - if (class == CLIENT_TYPE_SLAVE && hard_limit_bytes && (long long)hard_limit_bytes < server.repl_backlog_size) + if (class == CLIENT_TYPE_REPLICA && hard_limit_bytes && (long long)hard_limit_bytes < server.repl_backlog_size) hard_limit_bytes = server.repl_backlog_size; if (server.client_obuf_limits[class].hard_limit_bytes && used_mem >= hard_limit_bytes) hard = 1; if (server.client_obuf_limits[class].soft_limit_bytes && @@ -3979,7 +3980,7 @@ int closeClientOnOutputBufferLimitReached(client *c, int async) { serverAssert(c->reply_bytes < SIZE_MAX - (1024 * 64)); /* Note that c->reply_bytes is irrelevant for replica clients * (they use the global repl buffers). */ - if ((c->reply_bytes == 0 && getClientType(c) != CLIENT_TYPE_SLAVE) || c->flags & CLIENT_CLOSE_ASAP) return 0; + if ((c->reply_bytes == 0 && getClientType(c) != CLIENT_TYPE_REPLICA) || c->flags & CLIENT_CLOSE_ASAP) return 0; if (checkClientOutputBufferLimits(c)) { sds client = catClientInfoString(sdsempty(), c); @@ -3998,18 +3999,18 @@ int closeClientOnOutputBufferLimitReached(client *c, int async) { return 0; } -/* Helper function used by performEvictions() in order to flush slaves +/* Helper function used by performEvictions() in order to flush replicas * output buffers without returning control to the event loop. * This is also called by SHUTDOWN for a best-effort attempt to send - * slaves the latest writes. */ -void flushSlavesOutputBuffers(void) { + * replicas the latest writes. 
*/ +void flushReplicasOutputBuffers(void) { listIter li; listNode *ln; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = listNodeValue(ln); - int can_receive_writes = connHasWriteHandler(slave->conn) || (slave->flags & CLIENT_PENDING_WRITE); + client *replica = listNodeValue(ln); + int can_receive_writes = connHasWriteHandler(replica->conn) || (replica->flags & CLIENT_PENDING_WRITE); /* We don't want to send the pending data to the replica in a few * cases: @@ -4023,11 +4024,11 @@ void flushSlavesOutputBuffers(void) { * to send data to the replica in this case, please grep for the * flag for this flag. * - * 3. Obviously if the slave is not ONLINE. + * 3. Obviously if the replica is not ONLINE. */ - if (slave->replstate == SLAVE_STATE_ONLINE && !(slave->flags & CLIENT_CLOSE_ASAP) && can_receive_writes && - !slave->repl_start_cmd_stream_on_ack && clientHasPendingReplies(slave)) { - writeToClient(slave, 0); + if (replica->repl_state == REPLICA_STATE_ONLINE && !(replica->flags & CLIENT_CLOSE_ASAP) && + can_receive_writes && !replica->repl_start_cmd_stream_on_ack && clientHasPendingReplies(replica)) { + writeToClient(replica, 0); } } } @@ -4139,7 +4140,7 @@ uint32_t isPausedActionsWithUpdate(uint32_t actions_bitmask) { /* This function is called by the server in order to process a few events from * time to time while blocked into some not interruptible operation. * This allows to reply to clients with the -LOADING error while loading the - * data set at startup or after a full resynchronization with the master + * data set at startup or after a full resynchronization with the primary * and so forth. * * It calls the event loop in order to process a few events. Specifically we @@ -4403,7 +4404,7 @@ int handleClientsWithPendingWritesUsingThreads(void) { * buffer, to guarantee data accessing thread safe, we must put all * replicas client into io_threads_list[0] i.e. 
main thread handles * sending the output buffer of all replicas. */ - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { listAddNodeTail(io_threads_list[0], c); continue; } @@ -4469,7 +4470,7 @@ int handleClientsWithPendingWritesUsingThreads(void) { * pending read clients and flagged as such. */ int postponeClientRead(client *c) { if (server.io_threads_active && server.io_threads_do_reads && !ProcessingEventsWhileBlocked && - !(c->flags & (CLIENT_MASTER | CLIENT_SLAVE | CLIENT_BLOCKED)) && io_threads_op == IO_THREADS_OP_IDLE) { + !(c->flags & (CLIENT_PRIMARY | CLIENT_REPLICA | CLIENT_BLOCKED)) && io_threads_op == IO_THREADS_OP_IDLE) { listAddNodeHead(server.clients_pending_read, c); c->pending_read_list_node = listFirst(server.clients_pending_read); return 1; diff --git a/src/object.c b/src/object.c index 814b8de3a9..4312524c87 100644 --- a/src/object.c +++ b/src/object.c @@ -1173,11 +1173,11 @@ struct serverMemOverhead *getMemoryOverheadData(void) { * only if replication buffer memory is more than the repl backlog setting, * we consider the excess as replicas' memory. Otherwise, replication buffer * memory is the consumption of repl backlog. 
*/ - if (listLength(server.slaves) && (long long)server.repl_buffer_mem > server.repl_backlog_size) { - mh->clients_slaves = server.repl_buffer_mem - server.repl_backlog_size; + if (listLength(server.replicas) && (long long)server.repl_buffer_mem > server.repl_backlog_size) { + mh->clients_replicas = server.repl_buffer_mem - server.repl_backlog_size; mh->repl_backlog = server.repl_backlog_size; } else { - mh->clients_slaves = 0; + mh->clients_replicas = 0; mh->repl_backlog = server.repl_buffer_mem; } if (server.repl_backlog) { @@ -1186,12 +1186,12 @@ struct serverMemOverhead *getMemoryOverheadData(void) { raxSize(server.repl_backlog->blocks_index) * sizeof(void *); } mem_total += mh->repl_backlog; - mem_total += mh->clients_slaves; + mem_total += mh->clients_replicas; /* Computing the memory used by the clients would be O(N) if done * here online. We use our values computed incrementally by * updateClientMemoryUsage(). */ - mh->clients_normal = server.stat_clients_type_memory[CLIENT_TYPE_MASTER] + + mh->clients_normal = server.stat_clients_type_memory[CLIENT_TYPE_PRIMARY] + server.stat_clients_type_memory[CLIENT_TYPE_PUBSUB] + server.stat_clients_type_memory[CLIENT_TYPE_NORMAL]; mem_total += mh->clients_normal; @@ -1271,7 +1271,7 @@ sds getMemoryDoctorReport(void) { int high_alloc_frag = 0; /* High allocator fragmentation. */ int high_proc_rss = 0; /* High process rss overhead. */ int high_alloc_rss = 0; /* High rss overhead. */ - int big_slave_buf = 0; /* Slave buffers are too big. */ + int big_replica_buf = 0; /* Replica buffers are too big. */ int big_client_buf = 0; /* Client buffers are too big. */ int many_scripts = 0; /* Script cache has too many scripts. */ int num_reports = 0; @@ -1312,16 +1312,16 @@ sds getMemoryDoctorReport(void) { } /* Clients using more than 200k each average? 
*/ - long numslaves = listLength(server.slaves); - long numclients = listLength(server.clients) - numslaves; + long num_replicas = listLength(server.replicas); + long numclients = listLength(server.clients) - num_replicas; if (mh->clients_normal / numclients > (1024 * 200)) { big_client_buf = 1; num_reports++; } - /* Slaves using more than 10 MB each? */ - if (numslaves > 0 && mh->clients_slaves > (1024 * 1024 * 10)) { - big_slave_buf = 1; + /* Replicas using more than 10 MB each? */ + if (num_replicas > 0 && mh->clients_replicas > (1024 * 1024 * 10)) { + big_replica_buf = 1; num_reports++; } @@ -1386,7 +1386,7 @@ sds getMemoryDoctorReport(void) { "1.1 (this means that the Resident Set Size of the Valkey process is much larger than the RSS the " "allocator holds). This problem may be due to Lua scripts or Modules.\n\n"); } - if (big_slave_buf) { + if (big_replica_buf) { s = sdscat(s, " * Big replica buffers: The replica output buffers in this instance are greater than 10MB for " "each replica (on average). 
This likely means that there is some replica instance that is " @@ -1579,7 +1579,7 @@ NULL addReplyLongLong(c, mh->repl_backlog); addReplyBulkCString(c, "clients.slaves"); - addReplyLongLong(c, mh->clients_slaves); + addReplyLongLong(c, mh->clients_replicas); addReplyBulkCString(c, "clients.normal"); addReplyLongLong(c, mh->clients_normal); diff --git a/src/rdb.c b/src/rdb.c index b57cf44e7a..c7bb832fdc 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1183,7 +1183,7 @@ int rdbSaveInfoAuxFields(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { if (rsi) { if (rdbSaveAuxFieldStrInt(rdb, "repl-stream-db", rsi->repl_stream_db) == -1) return -1; if (rdbSaveAuxFieldStrStr(rdb, "repl-id", server.replid) == -1) return -1; - if (rdbSaveAuxFieldStrInt(rdb, "repl-offset", server.master_repl_offset) == -1) return -1; + if (rdbSaveAuxFieldStrInt(rdb, "repl-offset", server.primary_repl_offset) == -1) return -1; } if (rdbSaveAuxFieldStrInt(rdb, "aof-base", aof_base) == -1) return -1; return 1; @@ -1369,19 +1369,19 @@ int rdbSaveRio(int req, rio *rdb, int *error, int rdbflags, rdbSaveInfo *rsi) { snprintf(magic, sizeof(magic), "REDIS%04d", RDB_VERSION); if (rdbWriteRaw(rdb, magic, 9) == -1) goto werr; if (rdbSaveInfoAuxFields(rdb, rdbflags, rsi) == -1) goto werr; - if (!(req & SLAVE_REQ_RDB_EXCLUDE_DATA) && rdbSaveModulesAux(rdb, VALKEYMODULE_AUX_BEFORE_RDB) == -1) goto werr; + if (!(req & REPLICA_REQ_RDB_EXCLUDE_DATA) && rdbSaveModulesAux(rdb, VALKEYMODULE_AUX_BEFORE_RDB) == -1) goto werr; /* save functions */ - if (!(req & SLAVE_REQ_RDB_EXCLUDE_FUNCTIONS) && rdbSaveFunctions(rdb) == -1) goto werr; + if (!(req & REPLICA_REQ_RDB_EXCLUDE_FUNCTIONS) && rdbSaveFunctions(rdb) == -1) goto werr; /* save all databases, skip this if we're in functions-only mode */ - if (!(req & SLAVE_REQ_RDB_EXCLUDE_DATA)) { + if (!(req & REPLICA_REQ_RDB_EXCLUDE_DATA)) { for (j = 0; j < server.dbnum; j++) { if (rdbSaveDb(rdb, j, rdbflags, &key_counter) == -1) goto werr; } } - if (!(req & SLAVE_REQ_RDB_EXCLUDE_DATA) && 
rdbSaveModulesAux(rdb, VALKEYMODULE_AUX_AFTER_RDB) == -1) goto werr; + if (!(req & REPLICA_REQ_RDB_EXCLUDE_DATA) && rdbSaveModulesAux(rdb, VALKEYMODULE_AUX_AFTER_RDB) == -1) goto werr; /* EOF opcode */ if (rdbSaveType(rdb, RDB_OPCODE_EOF) == -1) goto werr; @@ -1495,7 +1495,7 @@ static int rdbSaveInternal(int req, const char *filename, rdbSaveInfo *rsi, int int rdbSaveToFile(const char *filename) { startSaving(RDBFLAGS_NONE); - if (rdbSaveInternal(SLAVE_REQ_NONE, filename, NULL, RDBFLAGS_NONE) != C_OK) { + if (rdbSaveInternal(REPLICA_REQ_NONE, filename, NULL, RDBFLAGS_NONE) != C_OK) { int saved_errno = errno; stopSaving(0); errno = saved_errno; @@ -1816,8 +1816,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { int deep_integrity_validation = server.sanitize_dump_payload == SANITIZE_DUMP_YES; if (server.sanitize_dump_payload == SANITIZE_DUMP_CLIENTS) { /* Skip sanitization when loading (an RDB), or getting a RESTORE command - * from either the master or a client using an ACL user with the skip-sanitize-payload flag. */ - int skip = server.loading || (server.current_client && (server.current_client->flags & CLIENT_MASTER)); + * from either the primary or a client using an ACL user with the skip-sanitize-payload flag. */ + int skip = server.loading || (server.current_client && (server.current_client->flags & CLIENT_PRIMARY)); if (!skip && server.current_client && server.current_client->user) skip = !!(server.current_client->user->flags & USER_FLAG_SANITIZE_PAYLOAD_SKIP); deep_integrity_validation = !skip; @@ -2434,12 +2434,12 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { } while (listpacks--) { - /* Get the master ID, the one we'll use as key of the radix tree + /* Get the primary ID, the one we'll use as key of the radix tree * node: the entries inside the listpack itself are delta-encoded * relatively to this ID. 
*/ sds nodekey = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL); if (nodekey == NULL) { - rdbReportReadError("Stream master ID loading failed: invalid encoding or I/O error."); + rdbReportReadError("Stream primary ID loading failed: invalid encoding or I/O error."); decrRefCount(o); return NULL; } @@ -2883,7 +2883,7 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) { if (server.loading_process_events_interval_bytes && (r->processed_bytes + len) / server.loading_process_events_interval_bytes > r->processed_bytes / server.loading_process_events_interval_bytes) { - if (server.masterhost && server.repl_state == REPL_STATE_TRANSFER) replicationSendNewlineToMaster(); + if (server.primary_host && server.repl_state == REPL_STATE_TRANSFER) replicationSendNewlineToPrimary(); loadingAbsProgress(r->processed_bytes); processEventsWhileBlocked(); processModuleLoadingProgressEvent(0); @@ -3197,9 +3197,9 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin /* Check if the key already expired. This function is used when loading * an RDB file from disk, either at startup, or when an RDB was - * received from the master. In the latter case, the master is + * received from the primary. In the latter case, the primary is * responsible for key expiry. If we would expire keys here, the - * snapshot taken by the master may not be reflected on the slave. + * snapshot taken by the primary may not be reflected on the replica. * Similarly, if the base AOF is RDB format, we want to load all * the keys they are, since the log of operations in the incr AOF * is assumed to work in the exact keyspace state. 
*/ @@ -3215,18 +3215,18 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin sdsfree(key); goto eoferr; } - } else if (iAmMaster() && !(rdbflags & RDBFLAGS_AOF_PREAMBLE) && expiretime != -1 && expiretime < now) { + } else if (iAmPrimary() && !(rdbflags & RDBFLAGS_AOF_PREAMBLE) && expiretime != -1 && expiretime < now) { if (rdbflags & RDBFLAGS_FEED_REPL) { /* Caller should have created replication backlog, * and now this path only works when rebooting, * so we don't have replicas yet. */ - serverAssert(server.repl_backlog != NULL && listLength(server.slaves) == 0); + serverAssert(server.repl_backlog != NULL && listLength(server.replicas) == 0); robj keyobj; initStaticStringObject(keyobj, key); robj *argv[2]; argv[0] = server.lazyfree_lazy_expire ? shared.unlink : shared.del; argv[1] = &keyobj; - replicationFeedSlaves(dbid, argv, 2); + replicationFeedReplicas(dbid, argv, 2); } sdsfree(key); decrRefCount(val); @@ -3378,7 +3378,7 @@ static void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal, time_t sav } /* A background saving child (BGSAVE) terminated its work. Handle this. - * This function covers the case of RDB -> Slaves socket transfers for + * This function covers the case of RDB -> Replicas socket transfers for * diskless replication. */ static void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { if (!bysignal && exitcode == 0) { @@ -3416,9 +3416,9 @@ void backgroundSaveDoneHandler(int exitcode, int bysignal) { server.rdb_child_type = RDB_CHILD_TYPE_NONE; server.rdb_save_time_last = save_end - server.rdb_save_time_start; server.rdb_save_time_start = -1; - /* Possibly there are slaves waiting for a BGSAVE in order to be served + /* Possibly there are replicas waiting for a BGSAVE in order to be served * (the first stage of SYNC is a bulk transfer of dump.rdb) */ - updateSlavesWaitingBgsave((!bysignal && exitcode == 0) ? C_OK : C_ERR, type); + updateReplicasWaitingBgsave((!bysignal && exitcode == 0) ? 
C_OK : C_ERR, type); } /* Kill the RDB saving child using SIGUSR1 (so that the parent will know @@ -3434,9 +3434,9 @@ void killRDBChild(void) { * - rdbRemoveTempFile */ } -/* Spawn an RDB child that writes the RDB to the sockets of the slaves - * that are currently in SLAVE_STATE_WAIT_BGSAVE_START state. */ -int rdbSaveToSlavesSockets(int req, rdbSaveInfo *rsi) { +/* Spawn an RDB child that writes the RDB to the sockets of the replicas + * that are currently in REPLICA_STATE_WAIT_BGSAVE_START state. */ +int rdbSaveToReplicasSockets(int req, rdbSaveInfo *rsi) { listNode *ln; listIter li; pid_t childpid; @@ -3468,17 +3468,17 @@ int rdbSaveToSlavesSockets(int req, rdbSaveInfo *rsi) { /* Collect the connections of the replicas we want to transfer * the RDB to, which are i WAIT_BGSAVE_START state. */ - server.rdb_pipe_conns = zmalloc(sizeof(connection *) * listLength(server.slaves)); + server.rdb_pipe_conns = zmalloc(sizeof(connection *) * listLength(server.replicas)); server.rdb_pipe_numconns = 0; server.rdb_pipe_numconns_writing = 0; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - /* Check slave has the exact requirements */ - if (slave->slave_req != req) continue; - server.rdb_pipe_conns[server.rdb_pipe_numconns++] = slave->conn; - replicationSetupSlaveForFullResync(slave, getPsyncInitialOffset()); + client *replica = ln->value; + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_START) { + /* Check replica has the exact requirements */ + if (replica->replica_req != req) continue; + server.rdb_pipe_conns[server.rdb_pipe_numconns++] = replica->conn; + replicationSetupReplicaForFullResync(replica, getPsyncInitialOffset()); } } @@ -3522,13 +3522,13 @@ int rdbSaveToSlavesSockets(int req, rdbSaveInfo *rsi) { serverLog(LL_WARNING, "Can't save in background: fork: %s", strerror(errno)); /* Undo the state change. 
The caller will perform cleanup on - * all the slaves in BGSAVE_START state, but an early call to - * replicationSetupSlaveForFullResync() turned it into BGSAVE_END */ - listRewind(server.slaves, &li); + * all the replicas in BGSAVE_START state, but an early call to + * replicationSetupReplicaForFullResync() turned it into BGSAVE_END */ + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) { - slave->replstate = SLAVE_STATE_WAIT_BGSAVE_START; + client *replica = ln->value; + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_END) { + replica->repl_state = REPLICA_STATE_WAIT_BGSAVE_START; } } close(rdb_pipe_write); @@ -3563,7 +3563,7 @@ void saveCommand(client *c) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); - if (rdbSave(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) == C_OK) { + if (rdbSave(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) == C_OK) { addReply(c, shared.ok); } else { addReplyErrorObject(c, shared.err); @@ -3599,7 +3599,7 @@ void bgsaveCommand(client *c) { "Use BGSAVE SCHEDULE in order to schedule a BGSAVE whenever " "possible."); } - } else if (rdbSaveBackground(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) == C_OK) { + } else if (rdbSaveBackground(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) == C_OK) { addReplyStatus(c, "Background saving started"); } else { addReplyErrorObject(c, shared.err); @@ -3608,48 +3608,48 @@ void bgsaveCommand(client *c) { /* Populate the rdbSaveInfo structure used to persist the replication * information inside the RDB file. Currently the structure explicitly - * contains just the currently selected DB from the master stream, however + * contains just the currently selected DB from the primary stream, however * if the rdbSave*() family functions receive a NULL rsi structure also * the Replication ID/offset is not saved. 
The function populates 'rsi' * that is normally stack-allocated in the caller, returns the populated - * pointer if the instance has a valid master client, otherwise NULL + * pointer if the instance has a valid primary client, otherwise NULL * is returned, and the RDB saving will not persist any replication related * information. */ rdbSaveInfo *rdbPopulateSaveInfo(rdbSaveInfo *rsi) { rdbSaveInfo rsi_init = RDB_SAVE_INFO_INIT; *rsi = rsi_init; - /* If the instance is a master, we can populate the replication info + /* If the instance is a primary, we can populate the replication info * only when repl_backlog is not NULL. If the repl_backlog is NULL, * it means that the instance isn't in any replication chains. In this - * scenario the replication info is useless, because when a slave + * scenario the replication info is useless, because when a replica * connects to us, the NULL repl_backlog will trigger a full * synchronization, at the same time we will use a new replid and clear * replid2. */ - if (!server.masterhost && server.repl_backlog) { - /* Note that when server.slaveseldb is -1, it means that this master + if (!server.primary_host && server.repl_backlog) { + /* Note that when server.replicas_eldb is -1, it means that this primary * didn't apply any write commands after a full synchronization. - * So we can let repl_stream_db be 0, this allows a restarted slave + * So we can let repl_stream_db be 0, this allows a restarted replica * to reload replication ID/offset, it's safe because the next write * command must generate a SELECT statement. */ - rsi->repl_stream_db = server.slaveseldb == -1 ? 0 : server.slaveseldb; + rsi->repl_stream_db = server.replicas_eldb == -1 ? 0 : server.replicas_eldb; return rsi; } - /* If the instance is a slave we need a connected master + /* If the instance is a replica we need a connected primary * in order to fetch the currently selected DB. 
*/ - if (server.master) { - rsi->repl_stream_db = server.master->db->id; + if (server.primary) { + rsi->repl_stream_db = server.primary->db->id; return rsi; } - /* If we have a cached master we can use it in order to populate the - * replication selected DB info inside the RDB file: the slave can - * increment the master_repl_offset only from data arriving from the - * master, so if we are disconnected the offset in the cached master + /* If we have a cached primary we can use it in order to populate the + * replication selected DB info inside the RDB file: the replica can + * increment the primary_repl_offset only from data arriving from the + * primary, so if we are disconnected the offset in the cached primary * is valid. */ - if (server.cached_master) { - rsi->repl_stream_db = server.cached_master->db->id; + if (server.cached_primary) { + rsi->repl_stream_db = server.cached_primary->db->id; return rsi; } return NULL; diff --git a/src/rdb.h b/src/rdb.h index 48762e10e1..393d2f658a 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -154,7 +154,7 @@ int rdbSaveObjectType(rio *rdb, robj *o); int rdbLoadObjectType(rio *rdb); int rdbLoad(char *filename, rdbSaveInfo *rsi, int rdbflags); int rdbSaveBackground(int req, char *filename, rdbSaveInfo *rsi, int rdbflags); -int rdbSaveToSlavesSockets(int req, rdbSaveInfo *rsi); +int rdbSaveToReplicasSockets(int req, rdbSaveInfo *rsi); void rdbRemoveTempFile(pid_t childpid, int from_signal); int rdbSaveToFile(const char *filename); int rdbSave(int req, char *filename, rdbSaveInfo *rsi, int rdbflags); diff --git a/src/replication.c b/src/replication.c index 375b637f61..d7af92b395 100644 --- a/src/replication.c +++ b/src/replication.c @@ -46,8 +46,8 @@ void replicationDiscardCachedMaster(void); void replicationResurrectCachedMaster(connection *conn); void replicationSendAck(void); -int replicaPutOnline(client *slave); -void replicaStartCommandStream(client *slave); +int replicaPutOnline(client *replica); +void 
replicaStartCommandStream(client *replica); int cancelReplicationHandshake(int reconnect); /* We take a global flag to remember if this instance generated an RDB @@ -64,20 +64,20 @@ static ConnectionType *connTypeOfReplication(void) { return connectionTypeTcp(); } -/* Return the pointer to a string representing the slave ip:listening_port - * pair. Mostly useful for logging, since we want to log a slave using its +/* Return the pointer to a string representing the replica ip:listening_port + * pair. Mostly useful for logging, since we want to log a replica using its * IP address and its listening port which is more clear for the user, for * example: "Closing connection with replica 10.1.2.3:6380". */ -char *replicationGetSlaveName(client *c) { +char *replicationGetReplicaName(client *c) { static char buf[NET_HOST_PORT_STR_LEN]; char ip[NET_IP_STR_LEN]; ip[0] = '\0'; buf[0] = '\0'; - if (c->slave_addr || connAddrPeerName(c->conn, ip, sizeof(ip), NULL) != -1) { - char *addr = c->slave_addr ? c->slave_addr : ip; - if (c->slave_listening_port) - formatAddr(buf, sizeof(buf), addr, c->slave_listening_port); + if (c->replica_addr || connAddrPeerName(c->conn, ip, sizeof(ip), NULL) != -1) { + char *addr = c->replica_addr ? c->replica_addr : ip; + if (c->replica_listening_port) + formatAddr(buf, sizeof(buf), addr, c->replica_listening_port); else snprintf(buf, sizeof(buf), "%s:", addr); } else { @@ -126,7 +126,7 @@ void createReplicationBacklog(void) { /* We don't have any data inside our buffer, but virtually the first * byte we have is the next byte that will be generated for the * replication stream. 
*/ - server.repl_backlog->offset = server.master_repl_offset + 1; + server.repl_backlog->offset = server.primary_repl_offset + 1; } /* This function is called when the user modifies the replication backlog @@ -141,7 +141,7 @@ void resizeReplicationBacklog(void) { } void freeReplicationBacklog(void) { - serverAssert(listLength(server.slaves) == 0); + serverAssert(listLength(server.replicas) == 0); if (server.repl_backlog == NULL) return; /* Decrease the start buffer node reference count. */ @@ -174,7 +174,7 @@ void createReplicationBacklogIndex(listNode *ln) { } /* Rebase replication buffer blocks' offset since the initial - * setting offset starts from 0 when master restart. */ + * setting offset starts from 0 when primary restart. */ void rebaseReplicationBuffer(long long base_repl_offset) { raxFree(server.repl_backlog->blocks_index); server.repl_backlog->blocks_index = raxNew(); @@ -201,7 +201,7 @@ int canFeedReplicaReplBuffer(client *replica) { if (replica->flags & CLIENT_REPL_RDBONLY) return 0; /* Don't feed replicas that are still waiting for BGSAVE to start. */ - if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) return 0; + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_START) return 0; return 1; } @@ -215,11 +215,11 @@ int prepareReplicasToWrite(void) { listNode *ln; int prepared = 0; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (!canFeedReplicaReplBuffer(slave)) continue; - if (prepareClientToWrite(slave) == C_ERR) continue; + client *replica = ln->value; + if (!canFeedReplicaReplBuffer(replica)) continue; + if (prepareClientToWrite(replica) == C_ERR) continue; prepared++; } @@ -259,7 +259,7 @@ void incrementalTrimReplicationBacklog(size_t max_blocks) { /* Replicas increment the refcount of the first replication buffer block * they refer to, in that case, we don't trim the backlog even if * backlog_histlen exceeds backlog_size. 
This implicitly makes backlog - * bigger than our setting, but makes the master accept partial resync as + * bigger than our setting, but makes the primary accept partial resync as * much as possible. So that backlog must be the last reference of * replication buffer blocks. */ listNode *first = listFirst(server.repl_buffer_blocks); @@ -294,7 +294,7 @@ void incrementalTrimReplicationBacklog(size_t max_blocks) { } /* Set the offset of the first byte we have in the backlog. */ - server.repl_backlog->offset = server.master_repl_offset - server.repl_backlog->histlen + 1; + server.repl_backlog->offset = server.primary_repl_offset - server.repl_backlog->histlen + 1; } /* Free replication buffer blocks that are referenced by this client. */ @@ -339,7 +339,7 @@ void feedReplicationBuffer(char *s, size_t len) { tail->used += copy; s += copy; len -= copy; - server.master_repl_offset += copy; + server.primary_repl_offset += copy; server.repl_backlog->histlen += copy; } if (len) { @@ -357,7 +357,7 @@ void feedReplicationBuffer(char *s, size_t len) { size_t copy = (tail->size >= len) ? len : tail->size; tail->used = copy; tail->refcount = 0; - tail->repl_offset = server.master_repl_offset + 1; + tail->repl_offset = server.primary_repl_offset + 1; tail->id = repl_block_id++; memcpy(tail->buf, s, copy); listAddNodeTail(server.repl_buffer_blocks, tail); @@ -370,27 +370,27 @@ void feedReplicationBuffer(char *s, size_t len) { } s += copy; len -= copy; - server.master_repl_offset += copy; + server.primary_repl_offset += copy; server.repl_backlog->histlen += copy; } /* For output buffer of replicas. */ listIter li; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (!canFeedReplicaReplBuffer(slave)) continue; + client *replica = ln->value; + if (!canFeedReplicaReplBuffer(replica)) continue; /* Update shared replication buffer start position. 
*/ - if (slave->ref_repl_buf_node == NULL) { - slave->ref_repl_buf_node = start_node; - slave->ref_block_pos = start_pos; + if (replica->ref_repl_buf_node == NULL) { + replica->ref_repl_buf_node = start_node; + replica->ref_block_pos = start_pos; /* Only increase the start block reference count. */ ((replBufBlock *)listNodeValue(start_node))->refcount++; } /* Check output buffer limit only when add new block. */ - if (add_new_block) closeClientOnOutputBufferLimitReached(slave, 1); + if (add_new_block) closeClientOnOutputBufferLimitReached(replica, 1); } /* For replication backlog */ @@ -417,11 +417,11 @@ void feedReplicationBuffer(char *s, size_t len) { /* Propagate write commands to replication stream. * - * This function is used if the instance is a master: we use the commands + * This function is used if the instance is a primary: we use the commands * received by our clients in order to create the replication stream. * Instead if the instance is a replica and has sub-replicas attached, we use * replicationFeedStreamFromMasterStream() */ -void replicationFeedSlaves(int dictid, robj **argv, int argc) { +void replicationFeedReplicas(int dictid, robj **argv, int argc) { int j, len; char llstr[LONG_STR_SIZE]; @@ -429,32 +429,32 @@ void replicationFeedSlaves(int dictid, robj **argv, int argc) { * pass dbid=-1 that indicate there is no need to replicate `select` command. */ serverAssert(dictid == -1 || (dictid >= 0 && dictid < server.dbnum)); - /* If the instance is not a top level master, return ASAP: we'll just proxy - * the stream of data we receive from our master instead, in order to - * propagate *identical* replication stream. In this way this slave can - * advertise the same replication ID as the master (since it shares the - * master replication history and has the same backlog and offsets). 
*/ - if (server.masterhost != NULL) return; + /* If the instance is not a top level primary, return ASAP: we'll just proxy + * the stream of data we receive from our primary instead, in order to + * propagate *identical* replication stream. In this way this replica can + * advertise the same replication ID as the primary (since it shares the + * primary replication history and has the same backlog and offsets). */ + if (server.primary_host != NULL) return; - /* If there aren't slaves, and there is no backlog buffer to populate, + /* If there aren't replicas, and there is no backlog buffer to populate, * we can return ASAP. */ - if (server.repl_backlog == NULL && listLength(server.slaves) == 0) { + if (server.repl_backlog == NULL && listLength(server.replicas) == 0) { /* We increment the repl_offset anyway, since we use that for tracking AOF fsyncs * even when there's no replication active. This code will not be reached if AOF * is also disabled. */ - server.master_repl_offset += 1; + server.primary_repl_offset += 1; return; } - /* We can't have slaves attached and no backlog. */ - serverAssert(!(listLength(server.slaves) != 0 && server.repl_backlog == NULL)); + /* We can't have replicas attached and no backlog. */ + serverAssert(!(listLength(server.replicas) != 0 && server.repl_backlog == NULL)); /* Must install write handler for all replicas first before feeding * replication stream. */ prepareReplicasToWrite(); - /* Send SELECT command to every slave if needed. */ - if (dictid != -1 && server.slaveseldb != dictid) { + /* Send SELECT command to every replica if needed. */ + if (dictid != -1 && server.replicas_eldb != dictid) { robj *selectcmd; /* For a few DBs we have pre-computed SELECT command. 
*/ @@ -472,7 +472,7 @@ void replicationFeedSlaves(int dictid, robj **argv, int argc) { if (dictid < 0 || dictid >= PROTO_SHARED_SELECT_CMDS) decrRefCount(selectcmd); - server.slaveseldb = dictid; + server.replicas_eldb = dictid; } /* Write the command to the replication buffer if any. */ @@ -532,12 +532,12 @@ void showLatestBacklog(void) { sdsfree(dump); } -/* This function is used in order to proxy what we receive from our master - * to our sub-slaves. */ +/* This function is used in order to proxy what we receive from our primary + * to our sub-replicas. */ #include -void replicationFeedStreamFromMasterStream(char *buf, size_t buflen) { - /* Debugging: this is handy to see the stream sent from master - * to slaves. Disabled with if(0). */ +void replicationFeedStreamFromPrimaryStream(char *buf, size_t buflen) { + /* Debugging: this is handy to see the stream sent from primary + * to replicas. Disabled with if(0). */ if (0) { printf("%zu:", buflen); for (size_t j = 0; j < buflen; j++) { @@ -546,8 +546,8 @@ void replicationFeedStreamFromMasterStream(char *buf, size_t buflen) { printf("\n"); } - /* There must be replication backlog if having attached slaves. */ - if (listLength(server.slaves)) serverAssert(server.repl_backlog != NULL); + /* There must be replication backlog if having attached replicas. */ + if (listLength(server.replicas)) serverAssert(server.repl_backlog != NULL); if (server.repl_backlog) { /* Must install write handler for all replicas first before feeding * replication stream. */ @@ -596,7 +596,7 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, decrRefCount(cmdobj); } -/* Feed the slave 'c' with the replication backlog starting from the +/* Feed the replica 'c' with the replication backlog starting from the * specified 'offset' up to the end of the backlog. 
*/ long long addReplyReplicationBacklog(client *c, long long offset) { long long skip; @@ -663,46 +663,46 @@ long long addReplyReplicationBacklog(client *c, long long offset) { } /* Return the offset to provide as reply to the PSYNC command received - * from the slave. The returned value is only valid immediately after + * from the replica. The returned value is only valid immediately after * the BGSAVE process started and before executing any other command * from clients. */ long long getPsyncInitialOffset(void) { - return server.master_repl_offset; + return server.primary_repl_offset; } /* Send a FULLRESYNC reply in the specific case of a full resynchronization, - * as a side effect setup the slave for a full sync in different ways: + * as a side effect setup the replica for a full sync in different ways: * - * 1) Remember, into the slave client structure, the replication offset - * we sent here, so that if new slaves will later attach to the same + * 1) Remember, into the replica client structure, the replication offset + * we sent here, so that if new replicas will later attach to the same * background RDB saving process (by duplicating this client output - * buffer), we can get the right offset from this slave. - * 2) Set the replication state of the slave to WAIT_BGSAVE_END so that + * buffer), we can get the right offset from this replica. + * 2) Set the replication state of the replica to WAIT_BGSAVE_END so that * we start accumulating differences from this point. * 3) Force the replication stream to re-emit a SELECT statement so - * the new slave incremental differences will start selecting the + * the new replica incremental differences will start selecting the * right database number. * * Normally this function should be called immediately after a successful * BGSAVE for replication was started, or when there is one already in - * progress that we attached our slave to. 
*/ -int replicationSetupSlaveForFullResync(client *slave, long long offset) { + * progress that we attached our replica to. */ +int replicationSetupReplicaForFullResync(client *replica, long long offset) { char buf[128]; int buflen; - slave->psync_initial_offset = offset; - slave->replstate = SLAVE_STATE_WAIT_BGSAVE_END; + replica->psync_initial_offset = offset; + replica->repl_state = REPLICA_STATE_WAIT_BGSAVE_END; /* We are going to accumulate the incremental changes for this - * slave as well. Set slaveseldb to -1 in order to force to re-emit + * replica as well. Set replicas_eldb to -1 in order to force to re-emit * a SELECT statement in the replication stream. */ - server.slaveseldb = -1; + server.replicas_eldb = -1; - /* Don't send this reply to slaves that approached us with + /* Don't send this reply to replicas that approached us with * the old SYNC command. */ - if (!(slave->flags & CLIENT_PRE_PSYNC)) { + if (!(replica->flags & CLIENT_PRE_PSYNC)) { buflen = snprintf(buf, sizeof(buf), "+FULLRESYNC %s %lld\r\n", server.replid, offset); - if (connWrite(slave->conn, buf, buflen) != buflen) { - freeClientAsync(slave); + if (connWrite(replica->conn, buf, buflen) != buflen) { + freeClientAsync(replica); return C_ERR; } } @@ -710,32 +710,32 @@ int replicationSetupSlaveForFullResync(client *slave, long long offset) { } /* This function handles the PSYNC command from the point of view of a - * master receiving a request for partial resynchronization. + * primary receiving a request for partial resynchronization. * * On success return C_OK, otherwise C_ERR is returned and we proceed * with the usual full resync. 
*/ -int masterTryPartialResynchronization(client *c, long long psync_offset) { +int primaryTryPartialResynchronization(client *c, long long psync_offset) { long long psync_len; - char *master_replid = c->argv[1]->ptr; + char *primary_replid = c->argv[1]->ptr; char buf[128]; int buflen; - /* Is the replication ID of this master the same advertised by the wannabe - * slave via PSYNC? If the replication ID changed this master has a + /* Is the replication ID of this primary the same advertised by the wannabe + * replica via PSYNC? If the replication ID changed this primary has a * different replication history, and there is no way to continue. * * Note that there are two potentially valid replication IDs: the ID1 * and the ID2. The ID2 however is only valid up to a specific offset. */ - if (strcasecmp(master_replid, server.replid) && - (strcasecmp(master_replid, server.replid2) || psync_offset > server.second_replid_offset)) { - /* Replid "?" is used by slaves that want to force a full resync. */ - if (master_replid[0] != '?') { - if (strcasecmp(master_replid, server.replid) && strcasecmp(master_replid, server.replid2)) { + if (strcasecmp(primary_replid, server.replid) && + (strcasecmp(primary_replid, server.replid2) || psync_offset > server.second_replid_offset)) { + /* Replid "?" is used by replicas that want to force a full resync. 
*/ + if (primary_replid[0] != '?') { + if (strcasecmp(primary_replid, server.replid) && strcasecmp(primary_replid, server.replid2)) { serverLog(LL_NOTICE, "Partial resynchronization not accepted: " "Replication ID mismatch (Replica asked for '%s', my " "replication IDs are '%s' and '%s')", - master_replid, server.replid, server.replid2); + primary_replid, server.replid, server.replid2); } else { serverLog(LL_NOTICE, "Partial resynchronization not accepted: " @@ -744,39 +744,39 @@ int masterTryPartialResynchronization(client *c, long long psync_offset) { psync_offset, server.second_replid_offset); } } else { - serverLog(LL_NOTICE, "Full resync requested by replica %s", replicationGetSlaveName(c)); + serverLog(LL_NOTICE, "Full resync requested by replica %s", replicationGetReplicaName(c)); } goto need_full_resync; } - /* We still have the data our slave is asking for? */ + /* We still have the data our replica is asking for? */ if (!server.repl_backlog || psync_offset < server.repl_backlog->offset || psync_offset > (server.repl_backlog->offset + server.repl_backlog->histlen)) { serverLog(LL_NOTICE, "Unable to partial resync with replica %s for lack of backlog (Replica request was: %lld).", - replicationGetSlaveName(c), psync_offset); - if (psync_offset > server.master_repl_offset) { - serverLog( - LL_WARNING, - "Warning: replica %s tried to PSYNC with an offset that is greater than the master replication offset.", - replicationGetSlaveName(c)); + replicationGetReplicaName(c), psync_offset); + if (psync_offset > server.primary_repl_offset) { + serverLog(LL_WARNING, + "Warning: replica %s tried to PSYNC with an offset that is greater than the primary replication " + "offset.", + replicationGetReplicaName(c)); } goto need_full_resync; } /* If we reached this point, we are able to perform a partial resync: - * 1) Set client state to make it a slave. + * 1) Set client state to make it a replica. 
* 2) Inform the client we can continue with +CONTINUE - * 3) Send the backlog data (from the offset to the end) to the slave. */ - c->flags |= CLIENT_SLAVE; - c->replstate = SLAVE_STATE_ONLINE; + * 3) Send the backlog data (from the offset to the end) to the replica. */ + c->flags |= CLIENT_REPLICA; + c->repl_state = REPLICA_STATE_ONLINE; c->repl_ack_time = server.unixtime; c->repl_start_cmd_stream_on_ack = 0; - listAddNodeTail(server.slaves, c); + listAddNodeTail(server.replicas, c); /* We can't use the connection buffers since they are used to accumulate * new commands at this stage. But we are sure the socket send buffer is * empty so this write will never fail actually. */ - if (c->slave_capa & SLAVE_CAPA_PSYNC2) { + if (c->replica_capa & REPLICA_CAPA_PSYNC2) { buflen = snprintf(buf, sizeof(buf), "+CONTINUE %s\r\n", server.replid); } else { buflen = snprintf(buf, sizeof(buf), "+CONTINUE\r\n"); @@ -789,12 +789,12 @@ int masterTryPartialResynchronization(client *c, long long psync_offset) { serverLog( LL_NOTICE, "Partial resynchronization request from %s accepted. Sending %lld bytes of backlog starting from offset %lld.", - replicationGetSlaveName(c), psync_len, psync_offset); - /* Note that we don't need to set the selected DB at server.slaveseldb - * to -1 to force the master to emit SELECT, since the slave already - * has this state from the previous connection with the master. */ + replicationGetReplicaName(c), psync_len, psync_offset); + /* Note that we don't need to set the selected DB at server.replicas_eldb + * to -1 to force the primary to emit SELECT, since the replica already + * has this state from the previous connection with the primary. */ - refreshGoodSlavesCount(); + refreshGoodReplicasCount(); /* Fire the replica change modules event. 
*/ moduleFireServerEvent(VALKEYMODULE_EVENT_REPLICA_CHANGE, VALKEYMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE, NULL); @@ -804,7 +804,7 @@ int masterTryPartialResynchronization(client *c, long long psync_offset) { need_full_resync: /* We need a full resync for some reason... Note that we can't * reply to PSYNC right now if a full SYNC is needed. The reply - * must include the master offset at the time the RDB file we transfer + * must include the primary offset at the time the RDB file we transfer * is generated, so we need to delay the reply to that moment. */ return C_ERR; } @@ -813,15 +813,15 @@ int masterTryPartialResynchronization(client *c, long long psync_offset) { * socket target depending on the configuration, and making sure that * the script cache is flushed before to start. * - * The mincapa argument is the bitwise AND among all the slaves capabilities - * of the slaves waiting for this BGSAVE, so represents the slave capabilities - * all the slaves support. Can be tested via SLAVE_CAPA_* macros. + * The mincapa argument is the bitwise AND among all the replicas capabilities + * of the replicas waiting for this BGSAVE, so represents the replica capabilities + * all the replicas support. Can be tested via REPLICA_CAPA_* macros. * * Side effects, other than starting a BGSAVE: * - * 1) Handle the slaves in WAIT_START state, by preparing them for a full + * 1) Handle the replicas in WAIT_START state, by preparing them for a full * sync if the BGSAVE was successfully started, or sending them an error - * and dropping them from the list of slaves. + * and dropping them from the list of replicas. * * 2) Flush the Lua scripting script cache if the BGSAVE was actually * started. @@ -833,22 +833,22 @@ int startBgsaveForReplication(int mincapa, int req) { listIter li; listNode *ln; - /* We use a socket target if slave can handle the EOF marker and we're configured to do diskless syncs. 
+ /* We use a socket target if replica can handle the EOF marker and we're configured to do diskless syncs. * Note that in case we're creating a "filtered" RDB (functions-only, for example) we also force socket replication * to avoid overwriting the snapshot RDB file with filtered data. */ - socket_target = (server.repl_diskless_sync || req & SLAVE_REQ_RDB_MASK) && (mincapa & SLAVE_CAPA_EOF); + socket_target = (server.repl_diskless_sync || req & REPLICA_REQ_RDB_MASK) && (mincapa & REPLICA_CAPA_EOF); /* `SYNC` should have failed with error if we don't support socket and require a filter, assert this here */ - serverAssert(socket_target || !(req & SLAVE_REQ_RDB_MASK)); + serverAssert(socket_target || !(req & REPLICA_REQ_RDB_MASK)); serverLog(LL_NOTICE, "Starting BGSAVE for SYNC with target: %s", socket_target ? "replicas sockets" : "disk"); rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); /* Only do rdbSave* when rsiptr is not NULL, - * otherwise slave will miss repl-stream-db. */ + * otherwise replica will miss repl-stream-db. */ if (rsiptr) { if (socket_target) - retval = rdbSaveToSlavesSockets(req, rsiptr); + retval = rdbSaveToReplicasSockets(req, rsiptr); else { /* Keep the page cache since it'll get used soon */ retval = rdbSaveBackground(req, server.rdb_filename, rsiptr, RDBFLAGS_REPLICATION | RDBFLAGS_KEEP_CACHE); @@ -866,37 +866,37 @@ int startBgsaveForReplication(int mincapa, int req) { * the user enables it later with CONFIG SET, we are fine. */ if (retval == C_OK && !socket_target && server.rdb_del_sync_files) RDBGeneratedByReplication = 1; - /* If we failed to BGSAVE, remove the slaves waiting for a full - * resynchronization from the list of slaves, inform them with + /* If we failed to BGSAVE, remove the replicas waiting for a full + * resynchronization from the list of replicas, inform them with * an error about what happened, close the connection ASAP. 
*/ if (retval == C_ERR) { serverLog(LL_WARNING, "BGSAVE for replication failed"); - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - slave->replstate = REPL_STATE_NONE; - slave->flags &= ~CLIENT_SLAVE; - listDelNode(server.slaves, ln); - addReplyError(slave, "BGSAVE failed, replication can't continue"); - slave->flags |= CLIENT_CLOSE_AFTER_REPLY; + client *replica = ln->value; + + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_START) { + replica->repl_state = REPL_STATE_NONE; + replica->flags &= ~CLIENT_REPLICA; + listDelNode(server.replicas, ln); + addReplyError(replica, "BGSAVE failed, replication can't continue"); + replica->flags |= CLIENT_CLOSE_AFTER_REPLY; } } return retval; } - /* If the target is socket, rdbSaveToSlavesSockets() already setup - * the slaves for a full resync. Otherwise for disk target do it now.*/ + /* If the target is socket, rdbSaveToReplicasSockets() already setup + * the replicas for a full resync. Otherwise for disk target do it now.*/ if (!socket_target) { - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; + client *replica = ln->value; - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - /* Check slave has the exact requirements */ - if (slave->slave_req != req) continue; - replicationSetupSlaveForFullResync(slave, getPsyncInitialOffset()); + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_START) { + /* Check replica has the exact requirements */ + if (replica->replica_req != req) continue; + replicationSetupReplicaForFullResync(replica, getPsyncInitialOffset()); } } } @@ -906,23 +906,23 @@ int startBgsaveForReplication(int mincapa, int req) { /* SYNC and PSYNC command implementation. 
*/ void syncCommand(client *c) { - /* ignore SYNC if already slave or in monitor mode */ - if (c->flags & CLIENT_SLAVE) return; + /* ignore SYNC if already replica or in monitor mode */ + if (c->flags & CLIENT_REPLICA) return; /* Check if this is a failover request to a replica with the same replid and - * become a master if so. */ + * become a primary if so. */ if (c->argc > 3 && !strcasecmp(c->argv[0]->ptr, "psync") && !strcasecmp(c->argv[3]->ptr, "failover")) { serverLog(LL_NOTICE, "Failover request received for replid %s.", (unsigned char *)c->argv[1]->ptr); - if (!server.masterhost) { + if (!server.primary_host) { addReplyError(c, "PSYNC FAILOVER can't be sent to a master."); return; } if (!strcasecmp(c->argv[1]->ptr, server.replid)) { if (server.cluster_enabled) { - clusterPromoteSelfToMaster(); + clusterPromoteSelfToPrimary(); } else { - replicationUnsetMaster(); + replicationUnsetPrimary(); } sds client = catClientInfoString(sdsempty(), c); serverLog(LL_NOTICE, "MASTER MODE enabled (failover request from '%s')", client); @@ -939,9 +939,9 @@ void syncCommand(client *c) { return; } - /* Refuse SYNC requests if we are a slave but the link with our master + /* Refuse SYNC requests if we are a replica but the link with our primary * is not ok... */ - if (server.masterhost && server.repl_state != REPL_STATE_CONNECTED) { + if (server.primary_host && server.repl_state != REPL_STATE_CONNECTED) { addReplyError(c, "-NOMASTERLINK Can't SYNC while not connected with my master"); return; } @@ -949,54 +949,54 @@ void syncCommand(client *c) { /* SYNC can't be issued when the server has pending data to send to * the client about already issued commands. We need a fresh reply * buffer registering the differences between the BGSAVE and the current - * dataset, so that we can copy to other slaves if needed. */ + * dataset, so that we can copy to other replicas if needed. 
*/ if (clientHasPendingReplies(c)) { addReplyError(c, "SYNC and PSYNC are invalid with pending output"); return; } - /* Fail sync if slave doesn't support EOF capability but wants a filtered RDB. This is because we force filtered + /* Fail sync if replica doesn't support EOF capability but wants a filtered RDB. This is because we force filtered * RDB's to be generated over a socket and not through a file to avoid conflicts with the snapshot files. Forcing * use of a socket is handled, if needed, in `startBgsaveForReplication`. */ - if (c->slave_req & SLAVE_REQ_RDB_MASK && !(c->slave_capa & SLAVE_CAPA_EOF)) { + if (c->replica_req & REPLICA_REQ_RDB_MASK && !(c->replica_capa & REPLICA_CAPA_EOF)) { addReplyError(c, "Filtered replica requires EOF capability"); return; } - serverLog(LL_NOTICE, "Replica %s asks for synchronization", replicationGetSlaveName(c)); + serverLog(LL_NOTICE, "Replica %s asks for synchronization", replicationGetReplicaName(c)); /* Try a partial resynchronization if this is a PSYNC command. * If it fails, we continue with usual full resynchronization, however - * when this happens replicationSetupSlaveForFullResync will replied + * when this happens replicationSetupReplicaForFullResync will replied * with: * * +FULLRESYNC * - * So the slave knows the new replid and offset to try a PSYNC later - * if the connection with the master is lost. */ + * So the replica knows the new replid and offset to try a PSYNC later + * if the connection with the primary is lost. 
*/ if (!strcasecmp(c->argv[0]->ptr, "psync")) { long long psync_offset; if (getLongLongFromObjectOrReply(c, c->argv[2], &psync_offset, NULL) != C_OK) { serverLog(LL_WARNING, "Replica %s asks for synchronization but with a wrong offset", - replicationGetSlaveName(c)); + replicationGetReplicaName(c)); return; } - if (masterTryPartialResynchronization(c, psync_offset) == C_OK) { + if (primaryTryPartialResynchronization(c, psync_offset) == C_OK) { server.stat_sync_partial_ok++; return; /* No full resync needed, return. */ } else { - char *master_replid = c->argv[1]->ptr; + char *primary_replid = c->argv[1]->ptr; /* Increment stats for failed PSYNCs, but only if the - * replid is not "?", as this is used by slaves to force a full + * replid is not "?", as this is used by replicas to force a full * resync on purpose when they are not able to partially * resync. */ - if (master_replid[0] != '?') server.stat_sync_partial_err++; + if (primary_replid[0] != '?') server.stat_sync_partial_err++; } } else { - /* If a slave uses SYNC, we are dealing with an old implementation - * of the replication protocol (like valkey-cli --slave). Flag the client + /* If a replica uses SYNC, we are dealing with an old implementation + * of the replication protocol (like valkey-cli --replica). Flag the client * so that we don't expect to receive REPLCONF ACK feedbacks. */ c->flags |= CLIENT_PRE_PSYNC; } @@ -1004,16 +1004,16 @@ void syncCommand(client *c) { /* Full resynchronization. */ server.stat_sync_full++; - /* Setup the slave as one waiting for BGSAVE to start. The following code - * paths will change the state if we handle the slave differently. */ - c->replstate = SLAVE_STATE_WAIT_BGSAVE_START; + /* Setup the replica as one waiting for BGSAVE to start. The following code + * paths will change the state if we handle the replica differently. 
*/ + c->repl_state = REPLICA_STATE_WAIT_BGSAVE_START; if (server.repl_disable_tcp_nodelay) connDisableTcpNoDelay(c->conn); /* Non critical if it fails. */ c->repldbfd = -1; - c->flags |= CLIENT_SLAVE; - listAddNodeTail(server.slaves, c); + c->flags |= CLIENT_REPLICA; + listAddNodeTail(server.replicas, c); /* Create the replication backlog if needed. */ - if (listLength(server.slaves) == 1 && server.repl_backlog == NULL) { + if (listLength(server.replicas) == 1 && server.repl_backlog == NULL) { /* When we create the backlog from scratch, we always use a new * replication ID and clear the ID2, since there is no valid * past history. */ @@ -1029,30 +1029,31 @@ void syncCommand(client *c) { /* CASE 1: BGSAVE is in progress, with disk target. */ if (server.child_type == CHILD_TYPE_RDB && server.rdb_child_type == RDB_CHILD_TYPE_DISK) { /* Ok a background save is in progress. Let's check if it is a good - * one for replication, i.e. if there is another slave that is + * one for replication, i.e. if there is another replica that is * registering differences since the server forked to save. */ - client *slave; + client *replica; listNode *ln; listIter li; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - slave = ln->value; + replica = ln->value; /* If the client needs a buffer of commands, we can't use * a replica without replication buffer. */ - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END && - (!(slave->flags & CLIENT_REPL_RDBONLY) || (c->flags & CLIENT_REPL_RDBONLY))) + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_END && + (!(replica->flags & CLIENT_REPL_RDBONLY) || (c->flags & CLIENT_REPL_RDBONLY))) break; } - /* To attach this slave, we check that it has at least all the - * capabilities of the slave that triggered the current BGSAVE + /* To attach this replica, we check that it has at least all the + * capabilities of the replica that triggered the current BGSAVE * and its exact requirements. 
*/ - if (ln && ((c->slave_capa & slave->slave_capa) == slave->slave_capa) && c->slave_req == slave->slave_req) { + if (ln && ((c->replica_capa & replica->replica_capa) == replica->replica_capa) && + c->replica_req == replica->replica_req) { /* Perfect, the server is already registering differences for - * another slave. Set the right state, and copy the buffer. + * another replica. Set the right state, and copy the buffer. * We don't copy buffer if clients don't want. */ - if (!(c->flags & CLIENT_REPL_RDBONLY)) copyReplicaOutputBuffer(c, slave); - replicationSetupSlaveForFullResync(c, slave->psync_initial_offset); + if (!(c->flags & CLIENT_REPL_RDBONLY)) copyReplicaOutputBuffer(c, replica); + replicationSetupReplicaForFullResync(c, replica->psync_initial_offset); serverLog(LL_NOTICE, "Waiting for end of BGSAVE for SYNC"); } else { /* No way, we need to wait for the next BGSAVE in order to @@ -1069,16 +1070,16 @@ void syncCommand(client *c) { /* CASE 3: There is no BGSAVE is in progress. */ } else { - if (server.repl_diskless_sync && (c->slave_capa & SLAVE_CAPA_EOF) && server.repl_diskless_sync_delay) { + if (server.repl_diskless_sync && (c->replica_capa & REPLICA_CAPA_EOF) && server.repl_diskless_sync_delay) { /* Diskless replication RDB child is created inside * replicationCron() since we want to delay its start a - * few seconds to wait for more slaves to arrive. */ + * few seconds to wait for more replicas to arrive. */ serverLog(LL_NOTICE, "Delay next BGSAVE for diskless SYNC"); } else { /* We don't have a BGSAVE in progress, let's start one. Diskless * or disk-based mode is determined by replica's capacity. */ if (!hasActiveChildProcess()) { - startBgsaveForReplication(c->slave_capa, c->slave_req); + startBgsaveForReplication(c->replica_capa, c->replica_req); } else { serverLog(LL_NOTICE, "No BGSAVE in progress, but another BG operation is active. " "BGSAVE for replication delayed"); @@ -1091,7 +1092,7 @@ void syncCommand(client *c) { /* REPLCONF