diff --git a/.ci/setup_dnsmasq.sh b/.ci/setup_dnsmasq.sh new file mode 100644 index 000000000000..0eddf6b93ded --- /dev/null +++ b/.ci/setup_dnsmasq.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +if [ "$TEST_SUITE" == "unit" ]; then + echo "Exiting, no integration tests" + exit +fi + +mkdir -p $DNSMASQ_DIR + +if [ ! "$(ls -A $DNSMASQ_DIR)" ]; then + pushd $DNSMASQ_DIR + wget http://www.thekelleys.org.uk/dnsmasq/dnsmasq-${DNSMASQ_VERSION}.tar.gz + tar xzf dnsmasq-${DNSMASQ_VERSION}.tar.gz + + pushd dnsmasq-${DNSMASQ_VERSION} + make install DESTDIR=$DNSMASQ_DIR + popd + + popd +fi diff --git a/.ci/setup_lua.sh b/.ci/setup_lua.sh index 0f455c448053..2a2b2b2255dd 100644 --- a/.ci/setup_lua.sh +++ b/.ci/setup_lua.sh @@ -16,12 +16,12 @@ source .ci/platform.sh # Lua/LuaJIT ############ -if [ "$LUA" == "luajit" ]; then +if [ "$LUA_VERSION" == "luajit" ]; then LUAJIT="yes" - LUA="luajit-2.0" -elif [ "$LUA" == "luajit-2.0" ]; then + LUA_VERSION="luajit-2.0" +elif [ "$LUA_VERSION" == "luajit-2.0" ]; then LUAJIT="yes" -elif [ "$LUA" == "luajit-2.1" ]; then +elif [ "$LUA_VERSION" == "luajit-2.1" ]; then LUAJIT="yes" fi @@ -33,9 +33,9 @@ if [ "$LUAJIT" == "yes" ]; then git clone https://github.com/luajit/luajit $LUAJIT_DIR pushd $LUAJIT_DIR - if [ "$LUA" == "luajit-2.0" ]; then + if [ "$LUA_VERSION" == "luajit-2.0" ]; then git checkout v2.0.4 - elif [ "$LUA" == "luajit-2.1" ]; then + elif [ "$LUA_VERSION" == "luajit-2.1" ]; then git checkout v2.1 fi @@ -43,22 +43,22 @@ if [ "$LUAJIT" == "yes" ]; then make install PREFIX=$LUAJIT_DIR popd - if [ "$LUA" == "luajit-2.1" ]; then + if [ "$LUA_VERSION" == "luajit-2.1" ]; then ln -sf $LUAJIT_DIR/bin/luajit-2.1.0-beta1 $LUAJIT_DIR/bin/luajit fi ln -sf $LUAJIT_DIR/bin/luajit $LUAJIT_DIR/bin/lua fi - LUA_INCLUDE="$LUAJIT_DIR/include/$LUA" + LUA_INCLUDE="$LUAJIT_DIR/include/$LUA_VERSION" else - if [ "$LUA" == "lua5.1" ]; then + if [ "$LUA_VERSION" == "lua5.1" ]; then curl http://www.lua.org/ftp/lua-5.1.5.tar.gz | tar xz pushd lua-5.1.5 - elif [ "$LUA" == "lua5.2" ]; then + elif [ "$LUA_VERSION" == "lua5.2" ]; then curl http://www.lua.org/ftp/lua-5.2.3.tar.gz | tar xz pushd lua-5.2.3 - elif [ "$LUA" == "lua5.3" ]; then + elif [ "$LUA_VERSION" == "lua5.3" ]; then curl http://www.lua.org/ftp/lua-5.3.0.tar.gz | tar xz pushd lua-5.3.0 fi @@ -84,11 +84,11 @@ git checkout v$LUAROCKS_VERSION if [ "$LUAJIT" == "yes" ]; then LUA_DIR=$LUAJIT_DIR -elif [ "$LUA" == "lua5.1" ]; then +elif [ "$LUA_VERSION" == "lua5.1" ]; then CONFIGURE_FLAGS=$CONFIGURE_FLAGS" --lua-version=5.1" -elif [ "$LUA" == "lua5.2" ]; then +elif [ "$LUA_VERSION" == "lua5.2" ]; then CONFIGURE_FLAGS=$CONFIGURE_FLAGS" --lua-version=5.2" -elif [ "$LUA" == "lua5.3" ]; then +elif [ "$LUA_VERSION" == "lua5.3" ]; then CONFIGURE_FLAGS=$CONFIGURE_FLAGS" --lua-version=5.3" fi diff --git a/.ci/setup_serf.sh b/.ci/setup_serf.sh new file mode 100755 index 000000000000..7d0032dbd99e --- /dev/null +++ b/.ci/setup_serf.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -e + +if [ "$TEST_SUITE" == "unit" ]; then + echo "Exiting, no integration tests" + exit +fi + +mkdir -p $SERF_DIR + +if [ ! 
"$(ls -A $SERF_DIR)" ]; then + pushd $SERF_DIR + wget https://releases.hashicorp.com/serf/${SERF_VERSION}/serf_${SERF_VERSION}_linux_amd64.zip + unzip serf_${SERF_VERSION}_linux_amd64.zip + popd +fi diff --git a/kong-0.5.4-1.rockspec b/kong-0.6.0rc3-1.rockspec similarity index 99% rename from kong-0.5.4-1.rockspec rename to kong-0.6.0rc3-1.rockspec index d40d66f7f6b9..1d46a54e83d6 100644 --- a/kong-0.5.4-1.rockspec +++ b/kong-0.6.0rc3-1.rockspec @@ -1,9 +1,9 @@ package = "kong" -version = "0.5.4-1" +version = "0.6.0rc3-1" supported_platforms = {"linux", "macosx"} source = { url = "git://github.com/Mashape/kong", - tag = "0.5.4" + tag = "0.6.0rc3" } description = { summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.", diff --git a/kong/api/routes/cluster.lua b/kong/api/routes/cluster.lua index a7cd3f08349d..49ae121a186b 100644 --- a/kong/api/routes/cluster.lua +++ b/kong/api/routes/cluster.lua @@ -11,19 +11,22 @@ return { GET = function(self, dao_factory, helpers) local res, err = Serf(configuration):invoke_signal("members", {["-format"] = "json"}) if err then - return helpers.yield_error(err) + return responses.send_HTTP_INTERNAL_SERVER_ERROR(err) end local members = cjson.decode(res).members - local result = {data = {}, total = #members} + local result = {data = {}} for _, v in pairs(members) do - table_insert(result.data, { - name = v.name, - address = v.addr, - status = v.status - }) + if not self.params.status or (self.params.status and v.status == self.params.status) then + table_insert(result.data, { + name = v.name, + address = v.addr, + status = v.status + }) + end end + result.total = #result.data return responses.send_HTTP_OK(result) end, diff --git a/kong/cli/cmds/cluster.lua b/kong/cli/cmds/cluster.lua index 558607b1b468..a3b2961fedaa 100644 --- a/kong/cli/cmds/cluster.lua +++ b/kong/cli/cmds/cluster.lua @@ -13,17 +13,16 @@ Usage: kong cluster [options] Commands: (string) where is one of: - join, members, force-leave, reachability, keygen + members, force-leave, reachability, keygen Options: -c,--config (default %s) path to configuration file ]], constants.CLI.GLOBAL_KONG_CONF)) -local JOIN = "join" local KEYGEN = "keygen" local FORCE_LEAVE = "force-leave" -local SUPPORTED_COMMANDS = {JOIN, "members", KEYGEN, "reachability", FORCE_LEAVE} +local SUPPORTED_COMMANDS = {"members", KEYGEN, "reachability", FORCE_LEAVE} if not utils.table_contains(SUPPORTED_COMMANDS, args.command) then lapp.quit("Invalid cluster command. 
Supported commands are: "..table.concat(SUPPORTED_COMMANDS, ", ")) @@ -37,10 +36,7 @@ args.config = nil local skip_running_check -if signal == JOIN and utils.table_size(args) ~= 1 then - logger:error("You must specify one address") - os.exit(1) -elseif signal == FORCE_LEAVE and utils.table_size(args) ~= 1 then +if signal == FORCE_LEAVE and utils.table_size(args) ~= 1 then logger:error("You must specify a node name") os.exit(1) elseif signal == KEYGEN then diff --git a/kong/cli/services/base_service.lua b/kong/cli/services/base_service.lua index 5d661a5c1e0d..b03ae8c8cf42 100644 --- a/kong/cli/services/base_service.lua +++ b/kong/cli/services/base_service.lua @@ -53,11 +53,11 @@ function BaseService:new(name, nginx_working_dir) end function BaseService:is_running() - local result = nil + local result = false local pid = IO.read_file(self._pid_file_path) if pid then - local _, code = IO.os_execute("ps -p "..stringy.strip(pid)) + local _, code = IO.os_execute("kill -0 "..stringy.strip(pid)) if code and code == 0 then result = pid end diff --git a/kong/cli/services/dnsmasq.lua b/kong/cli/services/dnsmasq.lua index d714ac72dcc1..0067ae37d5a4 100644 --- a/kong/cli/services/dnsmasq.lua +++ b/kong/cli/services/dnsmasq.lua @@ -26,19 +26,21 @@ function Dnsmasq:start() return nil, err end - local res, code = IO.os_execute(cmd.." -p "..self._configuration.dns_resolver.port.." --pid-file="..self._pid_file_path.." -N -o") + -- dnsmasq always listens on the local 127.0.0.1 address + local res, code = IO.os_execute(cmd.." -p "..self._configuration.dns_resolver.port.." --pid-file="..self._pid_file_path.." -N -o --listen-address=127.0.0.1") if code == 0 then while not self:is_running() do -- Wait for PID file to be created end setmetatable(self._configuration.dns_resolver, require "kong.tools.printable") - logger:info(string.format([[dnsmasq ...........%s]], tostring(self._configuration.dns_resolver))) + logger:info(string.format([[dnsmasq............%s]], tostring(self._configuration.dns_resolver))) return true else return nil, res end end + return true end function Dnsmasq:stop() diff --git a/kong/cli/services/nginx.lua b/kong/cli/services/nginx.lua index 58c4516f9486..d2a4be861c2f 100644 --- a/kong/cli/services/nginx.lua +++ b/kong/cli/services/nginx.lua @@ -52,13 +52,18 @@ local function prepare_ssl_certificates(configuration) trusted_ssl_cert_path = trusted_ssl_cert_path } end +local function get_current_user() + return IO.os_execute("whoami") +end + local function prepare_nginx_configuration(configuration, ssl_config) -- Extract nginx config from kong config, replace any needed value local nginx_config = configuration.nginx local nginx_inject = { - proxy_port = configuration.listen_address..":"..configuration.proxy_port, - proxy_ssl_port = configuration.listen_address..":"..configuration.proxy_ssl_port, - admin_api_port = configuration.listen_address..":"..configuration.admin_api_port, + user = get_current_user(), + proxy_listen = configuration.proxy_listen, + proxy_listen_ssl = configuration.proxy_listen_ssl, + admin_api_listen = configuration.admin_api_listen, dns_resolver = configuration.dns_resolver.address, memory_cache_size = configuration.memory_cache_size, ssl_cert = ssl_config.ssl_cert_path, @@ -194,13 +199,13 @@ function Nginx:start() local ok, err = self:_invoke_signal(cmd, START) if ok then - local ports = { - proxy_port = self._configuration.proxy_port, - proxy_ssl_port = self._configuration.proxy_ssl_port, - admin_api_port = self._configuration.admin_api_port + local listen_addresses = { 
+ proxy_listen = self._configuration.proxy_listen, + proxy_listen_ssl = self._configuration.proxy_listen_ssl, + admin_api_listen = self._configuration.admin_api_listen } - setmetatable(ports, require "kong.tools.printable") - logger:info(string.format([[nginx .............%s]], tostring(ports))) + setmetatable(listen_addresses, require "kong.tools.printable") + logger:info(string.format([[nginx .............%s]], tostring(listen_addresses))) end return ok, err diff --git a/kong/cli/services/serf.lua b/kong/cli/services/serf.lua index a28593d4211a..864ae9dc78d5 100644 --- a/kong/cli/services/serf.lua +++ b/kong/cli/services/serf.lua @@ -25,7 +25,7 @@ function Serf:new(configuration) end function Serf:_get_cmd() - local cmd, err = Serf.super._get_cmd(self, {}, function(path) + local cmd, err = Serf.super._get_cmd(self, {}, function(path) local res, code = IO.os_execute(path.." version") if code == 0 then return res:match("^Serf v0.7.0") @@ -57,7 +57,7 @@ fi echo $PAYLOAD > /tmp/payload -COMMAND='require("kong.tools.http_client").post("http://127.0.0.1:]]..self._configuration.admin_api_port..[[/cluster/events/", ]].."[['${PAYLOAD}']]"..[[, {["content-type"] = "application/json"})' +COMMAND='require("kong.tools.http_client").post("http://]]..self._configuration.admin_api_listen..[[/cluster/events/", ]].."[['${PAYLOAD}']]"..[[, {["content-type"] = "application/json"})' echo $COMMAND | ]]..luajit_path..[[ ]] @@ -75,12 +75,20 @@ echo $COMMAND | ]]..luajit_path..[[ return true end + +function Serf:_join_node(address) + local _, err = self:invoke_signal("join", {address}) + if err then + return false + end + return true +end + function Serf:_autojoin(current_node_name) if self._configuration.cluster["auto-join"] then + logger:info("Trying to auto-join Kong nodes, please wait..") - logger:info("Auto-joining cluster, please wait..") - - -- Delete current node just in case it was there + -- Delete current node just in case it was there (due to an inconsistency caused by a crash) local _, err = self._dao_factory.nodes:delete({ name = current_node_name }) @@ -95,25 +103,22 @@ function Serf:_autojoin(current_node_name) if #nodes == 0 then logger:warn("Cannot auto-join the cluster because no nodes were found") else - - -- Sort by newest to oldest + -- Sort by newest to oldest (although by TTL would be a better sort) table.sort(nodes, function(a, b) return a.created_at > b.created_at end) local joined for _, v in ipairs(nodes) do - local _, err = self:invoke_signal("join", {v.cluster_listening_address}) - if err then - logger:warn("Cannot join "..v.cluster_listening_address..". If the node does not exist anymore it will be automatically purged.") - else + if self:_join_node(v.cluster_listening_address) then logger:info("Successfully auto-joined "..v.cluster_listening_address) joined = true break + else + logger:warn("Cannot join "..v.cluster_listening_address..". 
If the node does not exist anymore it will be automatically purged.") end end if not joined then - --return false, "Could not join the existing cluster" logger:warn("Could not join the existing cluster") end end @@ -136,8 +141,8 @@ function Serf:start() -- Prepare arguments local cmd_args = { - ["-bind"] = self._configuration.listen_address..":"..self._configuration.cluster_listening_port, - ["-rpc-addr"] = "127.0.0.1:"..self._configuration.cluster_rpc_listening_port, + ["-bind"] = self._configuration.cluster_listen, + ["-rpc-addr"] = self._configuration.cluster_listen_rpc, ["-advertise"] = self._configuration.cluster.advertise, ["-encrypt"] = self._configuration.cluster.encrypt, ["-log-level"] = "err", @@ -184,7 +189,7 @@ function Serf:invoke_signal(signal, args, no_rpc, skip_running_check) if not args then args = {} end setmetatable(args, require "kong.tools.printable") - local res, code = IO.os_execute(cmd.." "..signal.." "..(no_rpc and "" or "-rpc-addr=127.0.0.1:"..self._configuration.cluster_rpc_listening_port).." "..tostring(args), true) + local res, code = IO.os_execute(cmd.." "..signal.." "..(no_rpc and "" or "-rpc-addr="..self._configuration.cluster_listen_rpc).." "..tostring(args), true) if code == 0 then return res else @@ -195,7 +200,7 @@ end function Serf:event(t_payload) local args = { ["-coalesce"] = false, - ["-rpc-addr"] = "127.0.0.1:"..self._configuration.cluster_rpc_listening_port + ["-rpc-addr"] = self._configuration.cluster_listen_rpc } setmetatable(args, require "kong.tools.printable") diff --git a/kong/cli/utils/services.lua b/kong/cli/utils/services.lua index 5e51fa523e67..119e2d71a194 100644 --- a/kong/cli/utils/services.lua +++ b/kong/cli/utils/services.lua @@ -76,10 +76,9 @@ function _M.check_status(configuration, configuration_path) end function _M.stop_all(configuration, configuration_path) - -- Stop in reverse order to keep dependencies running - for index = #services,1,-1 do - services[index](configuration, configuration_path):stop() - end + for _, service in ipairs(services) do + service(configuration, configuration_path):stop() + end end function _M.start_all(configuration, configuration_path) diff --git a/kong/constants.lua b/kong/constants.lua index 650db934b655..043141f0067b 100644 --- a/kong/constants.lua +++ b/kong/constants.lua @@ -1,4 +1,4 @@ -local VERSION = "0.5.4" +local VERSION = "0.6.0rc3" return { NAME = "kong", diff --git a/kong/core/cluster.lua b/kong/core/cluster.lua index cafacb5a2c21..ac78f2aca7ba 100644 --- a/kong/core/cluster.lua +++ b/kong/core/cluster.lua @@ -1,4 +1,7 @@ local cluster_utils = require "kong.tools.cluster" +local Serf = require "kong.cli.services.serf" +local cache = require "kong.tools.database_cache" +local cjson = require "cjson" local resty_lock local status, res = pcall(require, "resty.lock") @@ -6,7 +9,9 @@ if status then resty_lock = res end -local INTERVAL = 30 +local KEEPALIVE_INTERVAL = 30 +local ASYNC_AUTOJOIN_INTERVAL = 3 +local ASYNC_AUTOJOIN_RETRIES = 20 -- Try for max a minute (3s * 20) local function create_timer(at, cb) local ok, err = ngx.timer.at(at, cb) @@ -15,11 +20,60 @@ end end +local function async_autojoin(premature) + if premature then return end + + -- If this node is the only node in the cluster, but other nodes are present, then try to join them + -- This usually happens when two nodes are started very fast, and the first node didn't write its + -- information into the datastore yet. When the second node starts up, there is nothing to join yet. + if not configuration.cluster["auto-join"] then return end + + local lock = resty_lock:new("cluster_autojoin_locks", { + exptime = ASYNC_AUTOJOIN_INTERVAL - 0.001 + }) + local elapsed = lock:lock("async_autojoin") + if elapsed and elapsed == 0 then + -- If the current member count on this node's cluster is 1, but there is more than one active node in + -- the DAO, then try to join them + local count, err = dao.nodes:count_by_keys() + if err then + ngx.log(ngx.ERR, tostring(err)) + elseif count > 1 then + local serf = Serf(configuration) + local res, err = serf:invoke_signal("members", {["-format"] = "json"}) + if err then + ngx.log(ngx.ERR, tostring(err)) + else -- only decode the members list when the query succeeded (res is nil on error) + local members = cjson.decode(res).members + if #members < 2 then + -- Trigger auto-join + local _, err = serf:_autojoin(cluster_utils.get_node_name(configuration)) + if err then + ngx.log(ngx.ERR, tostring(err)) + end + else + return -- The node is already in the cluster and no need to continue + end + end + end + + -- Create retries counter key if it doesn't exist + if not cache.get(cache.autojoin_retries_key()) then + cache.rawset(cache.autojoin_retries_key(), 0) + end + + local autojoin_retries = cache.incr(cache.autojoin_retries_key(), 1) -- Increment retries counter + if (autojoin_retries < ASYNC_AUTOJOIN_RETRIES) then + create_timer(ASYNC_AUTOJOIN_INTERVAL, async_autojoin) + end + end +end + local function send_keepalive(premature) if premature then return end local lock = resty_lock:new("cluster_locks", { - exptime = INTERVAL - 0.001 + exptime = KEEPALIVE_INTERVAL - 0.001 }) local elapsed = lock:lock("keepalive") if elapsed and elapsed == 0 then @@ -37,11 +91,12 @@ end end - create_timer(INTERVAL, send_keepalive) + create_timer(KEEPALIVE_INTERVAL, send_keepalive) end return { init_worker = function() - create_timer(INTERVAL, send_keepalive) + create_timer(KEEPALIVE_INTERVAL, send_keepalive) + create_timer(ASYNC_AUTOJOIN_INTERVAL, async_autojoin) -- Executed only once; async_autojoin re-schedules itself end }
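The async_autojoin added above combines three moving parts: an ngx.timer for scheduling, a resty.lock held over a shared dict so that only one worker runs the check per interval, and a retry budget so the check eventually stops. A minimal standalone sketch of that pattern, assuming a hypothetical "locks" lua_shared_dict and keeping the retry counter worker-local for brevity (the patch itself stores it in the database cache so all workers share it):

local resty_lock = require "resty.lock"

local INTERVAL = 3      -- seconds between attempts
local MAX_RETRIES = 20  -- give up after roughly a minute
local retries = 0

local function try_job(premature)
  if premature then return end -- the worker is shutting down
  -- "locks" is a hypothetical lua_shared_dict declared in the nginx config
  local lock = resty_lock:new("locks", {exptime = INTERVAL - 0.001})
  local elapsed = lock:lock("my_job")
  if elapsed and elapsed == 0 then -- this worker won the lock for this interval
    -- ...do the actual work here...
    retries = retries + 1
    if retries < MAX_RETRIES then
      local ok, err = ngx.timer.at(INTERVAL, try_job) -- re-schedule ourselves
      if not ok then ngx.log(ngx.ERR, "could not create timer: ", err) end
    end
  end
end

ngx.timer.at(INTERVAL, try_job) -- kicked off once per worker, e.g. from init_worker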
diff --git a/kong/core/hooks.lua b/kong/core/hooks.lua index 580242548df2..f183c772de7e 100644 --- a/kong/core/hooks.lua +++ b/kong/core/hooks.lua @@ -22,17 +22,21 @@ local function invalidate(message_t) end end -local function retrieve_member_address(name) +local function get_cluster_members() local serf = require("kong.cli.services.serf")(configuration) local res, err = serf:invoke_signal("members", { ["-format"] = "json" }) if err then ngx.log(ngx.ERR, err) else - local members = cjson.decode(res).members - for _, member in ipairs(members) do - if member.name == name then - return member.addr - end + return cjson.decode(res).members + end +end + +local function retrieve_member_address(name) + local members = get_cluster_members() + for _, member in ipairs(members or {}) do -- members is nil when the serf query failed + if member.name == name then + return member.addr end end end @@ -64,7 +68,7 @@ local function member_leave(message_t) end end -local function member_update(message_t) +local function member_update(message_t, is_reap) local member = parse_member(message_t.entity) local nodes, err = dao.nodes:find_by_keys({ @@ -84,6 +88,11 @@ return end end + + if is_reap and dao.nodes:count_by_keys({}) > 1 then + -- Purge the cache when a failed node re-appears + cache.delete_all() + end end local function member_join(message_t) @@ -107,10 +116,15 @@ return end elseif #nodes == 1 then -- Update - member_update(message_t, "alive") + member_update(message_t) else error("Inconsistency error. 
More than one node found with name "..member.name) end + + -- Purge the cache when a new node joins + if dao.nodes:count_by_keys({}) > 1 then -- If it's only one node, no need to delete the cache + cache.delete_all() + end end return { @@ -143,6 +157,6 @@ return { member_update(message_t) end, [events.TYPES["MEMBER-REAP"]] = function(message_t) - member_update(message_t) + member_update(message_t, true) end } \ No newline at end of file diff --git a/kong/dao/cassandra/base_dao.lua b/kong/dao/cassandra/base_dao.lua index 24a18d7fe9ba..72dd6c0b2b52 100644 --- a/kong/dao/cassandra/base_dao.lua +++ b/kong/dao/cassandra/base_dao.lua @@ -667,4 +667,4 @@ function BaseDao:event(type, data_t) end end -return BaseDao +return BaseDao \ No newline at end of file diff --git a/kong/tools/cluster.lua b/kong/tools/cluster.lua index 927a6a3ecd78..613067ca7db1 100644 --- a/kong/tools/cluster.lua +++ b/kong/tools/cluster.lua @@ -3,7 +3,7 @@ local utils = require "kong.tools.utils" local _M = {} function _M.get_node_name(conf) - return utils.get_hostname().."_"..conf.cluster_listening_port + return utils.get_hostname().."_"..conf.cluster_listen end return _M \ No newline at end of file diff --git a/kong/tools/config_loader.lua b/kong/tools/config_loader.lua index 1a412613bad3..25be65090651 100644 --- a/kong/tools/config_loader.lua +++ b/kong/tools/config_loader.lua @@ -15,6 +15,37 @@ local function get_type(value, val_type) end end +local function is_valid_IPv4(ip) + if not ip or stringy.strip(ip) == "" then return false end + + local a, b, c, d = ip:match("^(%d%d?%d?)%.(%d%d?%d?)%.(%d%d?%d?)%.(%d%d?%d?)$") + a = tonumber(a) + b = tonumber(b) + c = tonumber(c) + d = tonumber(d) + if not a or not b or not c or not d then return false end + if a < 0 or 255 < a then return false end + if b < 0 or 255 < b then return false end + if c < 0 or 255 < c then return false end + if d < 0 or 255 < d then return false end + + return true +end + +local function is_valid_address(value, only_IPv4) + if not value or stringy.strip(value) == "" then return false end + + local parts = stringy.split(value, ":") + if #parts ~= 2 then return false end + if stringy.strip(parts[1]) == "" then return false end + if only_IPv4 and not is_valid_IPv4(parts[1]) then return false end + local port = tonumber(parts[2]) + if not port then return false end + if not (port > 0 and port <= 65535) then return false end + + return true +end + local checks = { type = function(value, key_infos, value_type) if value_type ~= key_infos.type then @@ -85,7 +116,27 @@ function _M.validate(config) return false, errors end - -- Perform complex validations here if needed + -- Check listen addresses + if config.proxy_listen and not is_valid_address(config.proxy_listen) then + return false, {proxy_listen = config.proxy_listen.." is not a valid \"host:port\" value"} + end + if config.proxy_listen_ssl and not is_valid_address(config.proxy_listen_ssl) then + return false, {proxy_listen_ssl = config.proxy_listen_ssl.." is not a valid \"host:port\" value"} + end + if config.admin_api_listen and not is_valid_address(config.admin_api_listen) then + return false, {admin_api_listen = config.admin_api_listen.." is not a valid \"host:port\" value"} + end + -- Cluster listen addresses must have an IPv4 host (no hostnames) + if config.cluster_listen and not is_valid_address(config.cluster_listen, true) then + return false, {cluster_listen = config.cluster_listen.." 
is not a valid \"ip:port\" value"} + end + if config.cluster_listen_rpc and not is_valid_address(config.cluster_listen_rpc, true) then + return false, {cluster_listen_rpc = config.cluster_listen_rpc.." is not a valid \"ip:port\" value"} + end + -- Same for the cluster.advertise value + if config.cluster and config.cluster.advertise and stringy.strip(config.cluster.advertise) ~= "" and not is_valid_address(config.cluster.advertise, true) then + return false, {["cluster.advertise"] = config.cluster.advertise.." is not a valid \"ip:port\" value"} + end return true end diff --git a/kong/tools/database_cache.lua b/kong/tools/database_cache.lua index 939d1326b3da..546791dfaa84 100644 --- a/kong/tools/database_cache.lua +++ b/kong/tools/database_cache.lua @@ -14,6 +14,7 @@ local CACHE_KEYS = { ACLS = "acls", SSL = "ssl", REQUESTS = "requests", + AUTOJOIN_RETRIES = "autojoin_retries", TIMERS = "timers", ALL_APIS_BY_DIC = "ALL_APIS_BY_DIC" } @@ -60,6 +61,10 @@ function _M.requests_key() return CACHE_KEYS.REQUESTS end +function _M.autojoin_retries_key() + return CACHE_KEYS.AUTOJOIN_RETRIES +end + function _M.api_key(host) return CACHE_KEYS.APIS..":"..host end diff --git a/spec/integration/admin_api/cluster_routes_spec.lua b/spec/integration/admin_api/cluster_routes_spec.lua index ceffee028426..d4e479d78a1a 100644 --- a/spec/integration/admin_api/cluster_routes_spec.lua +++ b/spec/integration/admin_api/cluster_routes_spec.lua @@ -6,7 +6,7 @@ local utils = require "kong.tools.utils" describe("Admin API", function() setup(function() - spec_helper.drop_db() + spec_helper.prepare_db() spec_helper.start_kong() end) @@ -14,11 +14,24 @@ describe("Admin API", function() spec_helper.stop_kong() end) + describe("/cluster/events/", function() + local BASE_URL = spec_helper.API_URL.."/cluster/events" + + describe("POST", function() + + it("[SUCCESS] should post a new event", function() + local _, status = http_client.post(BASE_URL, {}, {}) + assert.equal(200, status) + end) + + end) + + end) + describe("/cluster/", function() local BASE_URL = spec_helper.API_URL.."/cluster/" describe("GET", function() - it("[SUCCESS] should get the list of members", function() os.execute("sleep 2") -- Let's wait for serf to register the node @@ -37,22 +50,15 @@ describe("Admin API", function() assert.equal("alive", member.status) end) + it("[FAILURE] should fail when serf is not running anymore", function() + os.execute("pkill -9 serf") - end) - - end) - - describe("/cluster/events/", function() - local BASE_URL = spec_helper.API_URL.."/cluster/events" - - describe("POST", function() - - it("[SUCCESS] should post a new event", function() - local _, status = http_client.post(BASE_URL, {}, {}) - assert.equal(200, status) + local _, status = http_client.get(BASE_URL, {}, {}) + assert.equal(500, status) end) - end) end) + + end) \ No newline at end of file diff --git a/spec/integration/cli/cmds/start_spec.lua b/spec/integration/cli/cmds/start_spec.lua index cf7a1c8c2592..beb4fd55b2e1 100644 --- a/spec/integration/cli/cmds/start_spec.lua +++ b/spec/integration/cli/cmds/start_spec.lua @@ -11,16 +11,23 @@ local API_URL = spec_helper.API_URL local function replace_conf_property(key, value) local yaml_value = yaml.load(IO.read_file(TEST_CONF)) yaml_value[key] = value - local ok = IO.write_to_file(SERVER_CONF, yaml.dump(yaml_value)) + local new_config_content = yaml.dump(yaml_value) + + -- Workaround for https://github.com/lubyk/yaml/issues/2 + -- This workaround is in two places. 
To remove it "Find and replace" in the code + new_config_content = string.gsub(new_config_content, "(%w+:%s*)([%w%.]+:%d+)", "%1\"%2\"") + + local ok = IO.write_to_file(SERVER_CONF, new_config_content) assert.truthy(ok) end describe("CLI", function() setup(function() + spec_helper.prepare_db() + os.execute("cp "..TEST_CONF.." "..SERVER_CONF) spec_helper.add_env(SERVER_CONF) - spec_helper.prepare_db(SERVER_CONF) end) teardown(function() @@ -64,7 +71,8 @@ describe("CLI", function() local nodes = {} local err - while(#nodes < 1) do + local start = os.time() + while(#nodes < 1 and (os.time() - start < 10)) do -- 10 seconds timeout nodes, err = dao_factory.nodes:find_all() assert.falsy(err) assert.truthy(nodes) @@ -78,7 +86,8 @@ describe("CLI", function() nodes = {} - while(#nodes > 0) do + start = os.time() + while(#nodes > 0 and (os.time() - start < 10)) do -- 10 seconds timeout nodes, err = dao_factory.nodes:find_all() assert.falsy(err) assert.truthy(nodes) @@ -185,9 +194,8 @@ describe("CLI", function() assert.error_matches(function() spec_helper.start_kong(SERVER_CONF, true) end, "You are using a plugin that has not been enabled in the configuration: custom-rate-limiting", nil, true) - end) end) -end) +end) \ No newline at end of file diff --git a/spec/integration/cli/services/nginx_spec.lua b/spec/integration/cli/services/nginx_spec.lua index c6c585ccdd67..22fdd851ede0 100644 --- a/spec/integration/cli/services/nginx_spec.lua +++ b/spec/integration/cli/services/nginx_spec.lua @@ -9,6 +9,7 @@ local TIMEOUT = 10 describe("Nginx", function() setup(function() + spec_helper.prepare_db() nginx:prepare() end) @@ -25,7 +26,7 @@ describe("Nginx", function() -- Wait end end) - + it("should prepare", function() local ok, err = nginx:prepare() assert.falsy(err) @@ -46,6 +47,12 @@ describe("Nginx", function() assert.truthy(ok) assert.falsy(err) + -- Wait for process to start, with a timeout + local start = os.time() + while (not nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + assert.truthy(nginx:is_running()) -- Trying again will fail @@ -55,6 +62,13 @@ describe("Nginx", function() assert.equal("nginx is already running", err) nginx:stop() + + -- Wait for process to quit, with a timeout + local start = os.time() + while (nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + assert.falsy(nginx:is_running()) end) @@ -65,6 +79,13 @@ describe("Nginx", function() assert.falsy(nginx:is_running()) nginx:stop() + + -- Wait for process to quit, with a timeout + local start = os.time() + while (nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + assert.falsy(nginx:is_running()) end) @@ -78,6 +99,12 @@ describe("Nginx", function() local ok, err = nginx:start() assert.truthy(ok) assert.falsy(err) + + -- Wait for process to start, with a timeout + local start = os.time() + while (not nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end assert.truthy(nginx:is_running()) local ok, err = nginx:quit() @@ -121,6 +148,12 @@ describe("Nginx", function() assert.truthy(ok) assert.falsy(err) + -- Wait for process to start, with a timeout + local start = os.time() + while (not nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + local pid = nginx:is_running() assert.truthy(pid) diff --git a/spec/integration/cluster/cluster_spec.lua b/spec/integration/cluster/cluster_spec.lua new file mode 100644 index 000000000000..e6a7785ddc8d --- /dev/null +++ b/spec/integration/cluster/cluster_spec.lua @@ -0,0 +1,347 @@ +local 
spec_helper = require "spec.spec_helpers" +local yaml = require "yaml" +local IO = require "kong.tools.io" +local http_client = require "kong.tools.http_client" +local cjson = require "cjson" + +local TEST_CONF = spec_helper.get_env().conf_file +local SERVER_CONF = "kong_TEST_SERVER.yml" + +local API_URL = spec_helper.API_URL +local PROXY_URL = spec_helper.PROXY_URL + +local SECOND_API_PORT = 9001 +local SECOND_API_URL = "http://127.0.0.1:"..SECOND_API_PORT + +local SECOND_PROXY_PORT = 9000 +local SECOND_PROXY_URL = "http://127.0.0.1:"..SECOND_PROXY_PORT + +local SECOND_SERVER_PROPERTIES = { + nginx_working_dir = "nginx_tmp_2", + proxy_listen = "0.0.0.0:"..SECOND_PROXY_PORT, + proxy_listen_ssl = "0.0.0.0:9443", + admin_api_listen = "0.0.0.0:"..SECOND_API_PORT, + cluster_listen = "0.0.0.0:9946", + cluster_listen_rpc = "0.0.0.0:9373", + dns_resolvers_available = { + dnsmasq = {port = 8054} + } +} + +local function replace_property(configuration, new_key, new_value) + if type(new_value) == "table" then + for k, v in pairs(new_value) do + if not configuration[new_key] then configuration[new_key] = {} end + configuration[new_key][k] = v + end + else + configuration[new_key] = new_value + end + return configuration +end + +local function replace_conf_property(t, output_file) + if not output_file then output_file = SERVER_CONF end + + local yaml_value = yaml.load(IO.read_file(TEST_CONF)) + for k, v in pairs(t) do + yaml_value = replace_property(yaml_value, k, v) + end + local new_config_content = yaml.dump(yaml_value) + + -- Workaround for https://github.com/lubyk/yaml/issues/2 + -- This workaround is in two places. To remove it "Find and replace" in the code + new_config_content = string.gsub(new_config_content, "(%w+:%s*)([%w%.]+:%d+)", "%1\"%2\"") + + local ok = IO.write_to_file(output_file, new_config_content) + assert.truthy(ok) +end + +describe("Cluster", function() + + local SECOND_WORKING_DIR = "nginx_tmp_2" + + setup(function() + pcall(spec_helper.stop_kong, TEST_CONF) + + spec_helper.prepare_db() + + os.execute("cp "..TEST_CONF.." 
"..SERVER_CONF) + os.execute("mkdir -p "..SECOND_WORKING_DIR) + spec_helper.add_env(SERVER_CONF) + spec_helper.prepare_db(SERVER_CONF) + replace_conf_property(SECOND_SERVER_PROPERTIES) + end) + + teardown(function() + os.remove(SERVER_CONF) + os.execute("rm -rf "..SECOND_WORKING_DIR) + spec_helper.remove_env(SERVER_CONF) + end) + + after_each(function() + pcall(spec_helper.stop_kong, TEST_CONF) + pcall(spec_helper.stop_kong, SERVER_CONF) + end) + + it("should register the node on startup", function() + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + + local _, status = http_client.get(API_URL) + assert.equal(200, status) -- is running + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 1) do + -- Wait + end + + local res, err = spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(1, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + assert.equal(1, cjson.decode(res).total) + end) + + it("should register the node on startup with the advertised address", function() + SECOND_SERVER_PROPERTIES.cluster = {advertise = "5.5.5.5:1234"} + replace_conf_property(SECOND_SERVER_PROPERTIES) + + local _, exit_code = spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + local _, status = http_client.get(SECOND_API_URL) + assert.equal(200, status) -- is running + + while(#spec_helper.envs[SERVER_CONF].dao_factory.nodes:find_by_keys({}) ~= 1) do + -- Wait + end + + local res, err = spec_helper.envs[SERVER_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(1, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + assert.equal("5.5.5.5:1234", res[1].cluster_listening_address) + + local res, status = http_client.get(SECOND_API_URL.."/cluster") + assert.equal(200, status) + assert.equal(1, cjson.decode(res).total) + assert.equal("5.5.5.5:1234", cjson.decode(res).data[1].address) + + SECOND_SERVER_PROPERTIES.cluster = {advertise = ""} + replace_conf_property(SECOND_SERVER_PROPERTIES) + end) + + it("should register the second node on startup and auto-join sequentially", function() + SECOND_SERVER_PROPERTIES.cluster = {["auto-join"] = true} + replace_conf_property(SECOND_SERVER_PROPERTIES) + + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + + local _, status = http_client.get(API_URL) + assert.equal(200, status) -- is running + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 1) do + -- Wait + end + + local _, exit_code = spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 2) do + -- Wait + end + + local res, err = spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(2, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + assert.truthy(res[2].created_at) + assert.truthy(res[2].name) + assert.truthy(res[2].cluster_listening_address) + + local total + repeat + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + total = cjson.decode(res).total + until(total == 2) + + local res, status = 
http_client.get(API_URL.."/cluster") + assert.equal(200, status) + assert.equal(2, cjson.decode(res).total) + + local res, status = http_client.get(SECOND_API_URL.."/cluster") + assert.equal(200, status) + assert.equal(2, cjson.decode(res).total) + end) + + it("should register the second node on startup and auto-join asynchronously", function() + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + + local _, exit_code = spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 2) do + -- Wait + end + + os.execute("sleep 5") + + local res, err = spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(2, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + assert.truthy(res[2].created_at) + assert.truthy(res[2].name) + assert.truthy(res[2].cluster_listening_address) + + local total + repeat + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + total = cjson.decode(res).total + until(total == 2) + + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + assert.equal(2, cjson.decode(res).total) + + local res, status = http_client.get(SECOND_API_URL.."/cluster") + assert.equal(200, status) + assert.equal(2, cjson.decode(res).total) + end) + + it("should not join the second node on startup when auto-join is false", function() + SECOND_SERVER_PROPERTIES.cluster = {["auto-join"] = false} + replace_conf_property(SECOND_SERVER_PROPERTIES) + + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 1) do + -- Wait + end + + local _, exit_code = spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 2) do + -- Wait + end + + local res, err = spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(2, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + assert.truthy(res[2].created_at) + assert.truthy(res[2].name) + assert.truthy(res[2].cluster_listening_address) + + local total + repeat + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + total = cjson.decode(res).total + until(total == 1) + + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + assert.equal(1, cjson.decode(res).total) + + local res, status = http_client.get(SECOND_API_URL.."/cluster") + assert.equal(200, status) + assert.equal(1, cjson.decode(res).total) + end) +
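+  -- The scenario below: both nodes share one datastore but start un-clustered,
+  -- so each caches API resolution on its own. An update made through the first
+  -- node's Admin API is therefore invisible to the second node until the nodes
+  -- join and the member-join hook (kong/core/hooks.lua above) purges the cache.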
+ it("cache should be purged on the node that joins", function() + replace_conf_property({cluster = {["auto-join"] = false}}, TEST_CONF) + SECOND_SERVER_PROPERTIES.cluster = {["auto-join"] = false} + replace_conf_property(SECOND_SERVER_PROPERTIES) + + -- Start the nodes + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + local _, exit_code = spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 2) do + -- Wait + end + + -- The nodes are sharing the same datastore, but not the same cluster + + -- Adding an API + local res, status = http_client.post(API_URL.."/apis", {request_host="test.com", upstream_url="http://mockbin.org"}) + assert.equal(201, status) + local api = cjson.decode(res) + + -- Populating the cache on both nodes + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(200, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(200, status) + + -- Updating API on first node + local _, status = http_client.patch(API_URL.."/apis/"..api.id, {request_host="test2.com"}) + assert.equal(200, status) + + -- Making the request again on both nodes (the second node still processes the request correctly) + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(404, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(200, status) + + -- Making the request again with the updated property (only the first node processes this correctly) + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test2.com"}) + assert.equal(200, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test2.com"}) + assert.equal(404, status) + + -- Joining the nodes in the same cluster + local _, exit_code = IO.os_execute("serf join -rpc-addr=127.0.0.1:9101 127.0.0.1:9946") + assert.are.same(0, exit_code) + -- Wait for join to complete + local total + repeat + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + total = cjson.decode(res).total + until(total == 2) + + -- Wait for cache purge to be executed by the hooks + os.execute("sleep 5") + + -- Making the request again on the new property, and now both nodes should work + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test2.com"}) + assert.equal(200, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test2.com"}) + assert.equal(200, status) + + -- And it should no longer work on either node with the old host + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(404, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(404, status) + + -------------------------------------------------------- + -- Bring back the auto-join for the default test file -- + -------------------------------------------------------- + replace_conf_property({cluster = {["auto-join"] = true}}, TEST_CONF) + end) + +end) \ No newline at end of file
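The lubyk/yaml workaround used by replace_conf_property in the spec above is worth unpacking: yaml.dump writes listen values such as proxy_listen: 0.0.0.0:9000 as bare scalars, which the same library then fails to read back correctly (the linked issue), so the gsub re-quotes anything shaped like host:port. A quick illustration with made-up values:

local dumped = 'proxy_listen: 0.0.0.0:9000\ncluster_listen: 0.0.0.0:9946'
-- re-quote every "key: host:port" occurrence so it round-trips as a string
local quoted = dumped:gsub("(%w+:%s*)([%w%.]+:%d+)", "%1\"%2\"")
print(quoted)
-- proxy_listen: "0.0.0.0:9000"
-- cluster_listen: "0.0.0.0:9946"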
diff --git a/spec/unit/tools/config_loader_spec.lua b/spec/unit/tools/config_loader_spec.lua index 00582eec367b..dd7dc9b8e0f7 --- a/spec/unit/tools/config_loader_spec.lua +++ b/spec/unit/tools/config_loader_spec.lua @@ -19,8 +19,11 @@ describe("Configuration validation", function() assert.falsy(errors) assert.truthy(conf.custom_plugins) - assert.truthy(conf.admin_api_port) - assert.truthy(conf.proxy_port) + assert.truthy(conf.admin_api_listen) + assert.truthy(conf.proxy_listen) + assert.truthy(conf.proxy_listen_ssl) + assert.truthy(conf.cluster_listen) + assert.truthy(conf.cluster_listen_rpc) assert.truthy(conf.database) assert.truthy(conf.cassandra) @@ -45,8 +48,8 @@ end) it("should validate various types", function() local ok, errors = config.validate({ - proxy_port = "string", - database = "cassandra", + proxy_listen = 123, + database = 777, + cassandra = { contact_points = "127.0.0.1", ssl = { @@ -56,7 +59,9 @@ }) assert.False(ok) assert.truthy(errors) - assert.equal("must be a number", errors.proxy_port) + assert.equal("must be a string", errors.proxy_listen) + assert.equal("must be a string", errors.database[1]) + assert.equal("must be one of: 'cassandra'", errors.database[2]) assert.equal("must be a array", errors["cassandra.contact_points"]) assert.equal("must be a boolean", errors["cassandra.ssl.enabled"]) assert.falsy(errors.ssl_cert_path) @@ -81,5 +86,71 @@ assert.False(ok) assert.equal("must be one of: 'cassandra'", errors.database) end) -end) + it("should validate the selected dns_resolver property", function() + local ok, errors = config.validate({dns_resolver = "foo"}) + assert.False(ok) + assert.equal("must be one of: 'server, dnsmasq'", errors.dns_resolver) + end) + it("should validate the host:port listen addresses", function() + -- Missing port + local ok, errors = config.validate({proxy_listen = "foo"}) + assert.False(ok) + assert.equal("foo is not a valid \"host:port\" value", errors.proxy_listen) + + -- Port invalid + ok, errors = config.validate({proxy_listen = "foo:asd"}) + assert.False(ok) + assert.equal("foo:asd is not a valid \"host:port\" value", errors.proxy_listen) + + -- Port too large + ok, errors = config.validate({proxy_listen = "foo:8000000"}) + assert.False(ok) + assert.equal("foo:8000000 is not a valid \"host:port\" value", errors.proxy_listen) + -- Only port + ok, errors = config.validate({proxy_listen = "1231"}) + assert.False(ok) + assert.equal("1231 is not a valid \"host:port\" value", errors.proxy_listen) + + -- Only colon and port + ok, errors = config.validate({proxy_listen = ":1231"}) + assert.False(ok) + assert.equal(":1231 is not a valid \"host:port\" value", errors.proxy_listen) + + -- Valid with hostname + ok, errors = config.validate({proxy_listen = "hello:1231"}) + assert.True(ok) + assert.falsy(errors) + + -- Valid with IP + ok, errors = config.validate({proxy_listen = "1.1.1.1:1231"}) + assert.True(ok) + assert.falsy(errors) + end) + it("should validate the ip:port listen addresses", function() + -- Hostname instead of IP + local ok, errors = config.validate({cluster_listen = "hello.com:1231"}) + assert.False(ok) + assert.equal("hello.com:1231 is not a valid \"ip:port\" value", errors.cluster_listen) + + -- Invalid IP + ok, errors = config.validate({cluster_listen = "777.1.1.1:1231"}) + assert.False(ok) + assert.equal("777.1.1.1:1231 is not a valid \"ip:port\" value", errors.cluster_listen) + + -- Valid + ok, errors = config.validate({cluster_listen = "1.1.1.1:1231"}) + assert.True(ok) + assert.falsy(errors) + + -- Invalid cluster.advertise + ok, errors = config.validate({cluster={advertise = "1"}}) + assert.False(ok) + assert.equal("1 is not a valid \"ip:port\" value", errors["cluster.advertise"]) + + -- Valid cluster.advertise + ok, errors = config.validate({cluster={advertise = "1.1.1.1:1231"}}) + assert.True(ok) + assert.falsy(errors) + end) +end)
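One change above that the specs do not exercise directly is the new status filter on the cluster Admin API (kong/api/routes/cluster.lua): GET /cluster now accepts a status parameter and returns only matching members, with total reflecting the filtered set. A hedged usage sketch, reusing the http_client helper from the specs and assuming a running node on the default Admin API address (the URL is illustrative):

local http_client = require "kong.tools.http_client"
local cjson = require "cjson"

-- the second argument is encoded as a querystring, as in the specs above
local res, status = http_client.get("http://127.0.0.1:8001/cluster", {status = "alive"})
assert(status == 200)

local body = cjson.decode(res)
assert(body.total == #body.data) -- total counts only the filtered members
for _, member in ipairs(body.data) do
  assert(member.status == "alive") -- members not matching ?status= are omitted
end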