Skip to content

Commit

Permalink
Merge pull request #69 from thibaultcha/feat/silent-logging
Browse files Browse the repository at this point in the history
feat(cluster) 'silent' option to disable logging
  • Loading branch information
thibaultcha authored Oct 17, 2016
2 parents bf965cb + c1eb436 commit 2144fb3
Show file tree
Hide file tree
Showing 2 changed files with 99 additions and 10 deletions.
39 changes: 29 additions & 10 deletions lib/resty/cassandra/cluster.lua
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,9 @@ local function get_peers(self)
end

local function set_peer_down(self, host)
log(WARN, _log_prefix, 'setting host at ', host, ' DOWN')
if self.logging then
log(WARN, _log_prefix, 'setting host at ', host, ' DOWN')
end

local peer = get_peer(self, host, false)
peer = peer or empty_t -- this can be called from refresh() so no host in shm yet
Expand All @@ -139,7 +141,9 @@ local function set_peer_down(self, host)
end

local function set_peer_up(self, host)
log(NOTICE, _log_prefix, 'setting host at ', host, ' UP')
if self.logging then
log(NOTICE, _log_prefix, 'setting host at ', host, ' UP')
end
self.reconn_policy:reset(host)

local peer = get_peer(self, host, true)
Expand Down Expand Up @@ -336,6 +340,10 @@ function _Cluster.new(opts)
if type(v) ~= 'boolean' then
return nil, 'retry_on_timeout must be a boolean'
end
elseif k == 'silent' then
if type(v) ~= 'boolean' then
return nil, 'silent must be a boolean'
end
end
end

Expand All @@ -351,6 +359,7 @@ function _Cluster.new(opts)
timeout_connect = opts.timeout_connect or 1000,
retry_on_timeout = opts.retry_on_timeout == nil and true or opts.retry_on_timeout,
max_schema_consensus_wait = opts.max_schema_consensus_wait or 10000,
logging = not opts.silent,

lb_policy = opts.lb_policy
or require('resty.cassandra.policies.lb.rr').new(),
Expand Down Expand Up @@ -395,7 +404,9 @@ local function next_coordinator(self, coordinator_options)
if ok then
local peer, err = check_peer_health(self, peer_rec.host, coordinator_options, retry)
if peer then
log(DEBUG, _log_prefix, 'load balancing policy chose host at ', peer.host)
if self.logging then
log(DEBUG, _log_prefix, 'load balancing policy chose host at ', peer.host)
end
return peer
else
errors[peer_rec.host] = err
Expand Down Expand Up @@ -554,7 +565,9 @@ local function wait_schema_consensus(self, coordinator)
end

local function prepare(self, coordinator, query)
log(DEBUG, _log_prefix, 'preparing ', query, ' on host ', coordinator.host)
if self.logging then
log(DEBUG, _log_prefix, 'preparing ', query, ' on host ', coordinator.host)
end
-- we are the ones preparing the query
local res, err = coordinator:prepare(query)
if not res then return nil, 'could not prepare query: '..err end
Expand Down Expand Up @@ -607,7 +620,9 @@ function _Cluster:send_retry(request)
local coordinator, err = next_coordinator(self)
if not coordinator then return nil, err end

log(NOTICE, _log_prefix, 'retrying request on host at ', coordinator.host)
if self.logging then
log(NOTICE, _log_prefix, 'retrying request on host at ', coordinator.host)
end

request.retries = request.retries + 1

Expand All @@ -617,17 +632,21 @@ end
local function prepare_and_retry(self, coordinator, request)
if request.queries then
-- prepared batch
log(NOTICE, _log_prefix, 'some requests from this batch were not prepared on host ',
coordinator.host, ', preparing and retrying')
if self.logging then
log(NOTICE, _log_prefix, 'some requests from this batch were not prepared on host ',
coordinator.host, ', preparing and retrying')
end
for i = 1, #request.queries do
local query_id, err = prepare(self, coordinator, request.queries[i][1])
if not query_id then return nil, err end
request.queries[i][3] = query_id
end
else
-- prepared query
log(NOTICE, _log_prefix, request.query, ' was not prepared on host ',
coordinator.host, ', preparing and retrying')
if self.logging then
log(NOTICE, _log_prefix, request.query, ' was not prepared on host ',
coordinator.host, ', preparing and retrying')
end
local query_id, err = prepare(self, coordinator, request.query)
if not query_id then return nil, err end
request.query_id = query_id
Expand Down Expand Up @@ -680,7 +699,7 @@ send_request = function(self, coordinator, request)
local res, err, cql_code = coordinator:send(request)
if not res then
return handle_error(self, err, cql_code, coordinator, request)
elseif res.warnings then
elseif res.warnings and self.logging then
-- protocol v4 can return warnings to the client
for i = 1, #res.warnings do
log(WARN, _log_prefix, res.warnings[i])
Expand Down
70 changes: 70 additions & 0 deletions t/13-logging.t
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# vim:set ts=4 sw=4 et fdm=marker:
#
# Tests for the lua-cassandra cluster-level 'silent' option:
#   TEST 1: logging is enabled by default — set_peer_down() emits a
#           "[warn] ... setting host at <host> DOWN" entry.
#   TEST 2: passing { silent = true } to Cluster.new suppresses all
#           cluster log output ([warn]/[info]/[debug] all absent).
use Test::Nginx::Socket::Lua;
use t::Util;

# Shared http{} configuration snippet supplied by the repository's
# t::Util helper (contents not shown here — presumably lua_package_path
# and shm setup; confirm against t/Util.pm).
our $HttpConfig = $t::Util::HttpConfig;

# Declares 6 checks per data block per repetition.
plan tests => repeat_each() * blocks() * 6;

# Run nginx at 'debug' level so that even debug-level entries reach the
# error log and can be asserted present (TEST 1) or absent (TEST 2).
log_level('debug');

run_tests();

__DATA__

=== TEST 1: logging enabled by default
--- log_level: debug
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua_block {
            local Cluster = require 'resty.cassandra.cluster'
            local cluster, err = Cluster.new()
            if not cluster then
                ngx.log(ngx.ERR, err)
                return
            end

            cluster:set_peer_down('127.0.0.1')
        }
    }
--- request
GET /t
--- response_body

--- error_log eval
qr{\[warn\] .*? setting host at 127\.0\.0\.1 DOWN}
--- no_error_log
[error]
[info]
[debug]



=== TEST 2: opts.silent disables all logging
--- log_level: debug
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua_block {
            local Cluster = require 'resty.cassandra.cluster'
            local cluster, err = Cluster.new {
                silent = true
            }
            if not cluster then
                ngx.log(ngx.ERR, err)
                return
            end

            cluster:set_peer_down('127.0.0.1')
        }
    }
--- request
GET /t
--- response_body

--- no_error_log
[error]
[warn]
[info]
[debug]

0 comments on commit 2144fb3

Please sign in to comment.