From 04c2d53d6a751f427c16f2d3b3790d7e3e30a66e Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Sun, 17 Dec 2017 23:23:54 -0800 Subject: [PATCH 01/74] chore(deps) bump lua-resty-jit-uuid to 0.0.7 There are no changes for UUID v4 generation between 0.0.5 and 0.0.7, but the 0.0.7 bump allows anyone to generate v3/v5 UUIDs. See: https://github.com/thibaultcha/lua-resty-jit-uuid/compare/0.0.5...0.0.7 From #3102 --- kong-0.11.2-0.rockspec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong-0.11.2-0.rockspec b/kong-0.11.2-0.rockspec index 8ff6414d6c12..392e7517dbb6 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.11.2-0.rockspec @@ -15,7 +15,7 @@ dependencies = { "luasocket == 3.0-rc1", "penlight == 1.5.4", "lua-resty-http == 0.08", - "lua-resty-jit-uuid == 0.0.5", + "lua-resty-jit-uuid == 0.0.7", "multipart == 0.5.1", "version == 0.2", "kong-lapis == 1.6.0.1", From dfcea3826e16a83ac7f7d5daf4c933e891c2f79b Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Sun, 17 Dec 2017 23:25:05 -0800 Subject: [PATCH 02/74] chore(travis) bump OpenSSL and LuaRocks versions From #3103 --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index ae92f76f6ae6..00e6e46c7315 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,8 +19,8 @@ addons: env: global: - - LUAROCKS=2.4.2 - - OPENSSL=1.0.2l + - LUAROCKS=2.4.3 + - OPENSSL=1.0.2n - CASSANDRA=2.2.8 - OPENRESTY_BASE=1.11.2.4 - OPENRESTY_LATEST=1.11.2.4 From 1577661f748744506c6394bd1d8986baa8b9178d Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Fri, 15 Dec 2017 15:46:43 -0800 Subject: [PATCH 03/74] docs(changelog) add formal 0.12.0rc1 changelog --- CHANGELOG.md | 191 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 185 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dce52f84ba99..6f0d73ae8dce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,8 @@ - [Planned](#planned) - [Scheduled](#scheduled) - - [0.12.0](#0120) - [Released](#released) + - [0.12.0rc1](#0120rc1) - [0.11.2](#0112---20171129) - [0.11.1](#0111---20171024) - [0.10.4](#0104---20171024) @@ -28,14 +28,193 @@ Those releases do not have a fixed release date yet. This section describes upcoming releases that have a release date, along with a detailed changeset of their content. -## [0.12.0] +## [0.12.0rc1] * **Release Candidate**: 2017/12/20 * **Stable**: January 2018 -This release will focus on the introduction of health checks -[#112](https://github.com/Kong/kong/issues/112) and include a few major fixes. -Changelog upcoming. +Our third major release of 2017 focuses on two new features we are very +excited about: **health checks** and **hash based load balancing**! + +We also took this as an opportunity to fix a few prominent issues, sometimes +at the expense of breaking changes but overall improving the flexibility and +usability of Kong! Do keep in mind that this is a major release, and as such, +that we require of you to run the **migrations step**, via the +`kong migrations up` command. + +Please take a few minutes to thoroughly read the [0.12 Upgrade +Path](https://github.com/Kong/kong/blob/master/UPGRADE.md#upgrade-to-012x) +for more details regarding breaking changes and migrations before planning to +upgrade your Kong cluster. + +### Breaking changes + +##### Core + +- :warning: The required OpenResty version has been bumped to 1.11.2.5. 
If you + are installing Kong from one of our distribution packages, you are not + affected by this change. + [#3097](https://github.com/Kong/kong/pull/3097) +- :warning: As Kong now executes subsequent plugins when a request is being + short-circuited (e.g. HTTP 401 responses from auth plugins), plugins that + run in the header or body filter phases will be run upon such responses + from the access phase. We consider this change a big improvement in the + Kong run-loop as it allows for more flexibility for plugins. However, it is + unlikely, but possible that some of these plugins (e.g. your custom plugins) + now run in scenarios where they were not previously expected to run. + [#3079](https://github.com/Kong/kong/pull/3079) + +##### Admin API + +- :warning: By default, the Admin API now only listens on the local interface. + We consider this change to be an improvement in the default security policy + of Kong. If you are already using Kong, and your Admin API still binds to all + interfaces, consider updating it as well. You can do so by updating the + `admin_listen` configuration value, like so: `admin_listen = 127.0.0.1:8001`. + Thanks [@pduldig-at-tw](https://github.com/pduldig-at-tw) for the suggestion + and the patch. + [#3016](https://github.com/Kong/kong/pull/3016) + + :red_circle: **Note to Docker users**: Beware of this change as you may have + to ensure that your Admin API is reachable via the host's interface. + You can use the `-e KONG_ADMIN_LISTEN` argument when provisioning your + container(s) to update this value; for example, + `-e KONG_ADMIN_LISTEN=0.0.0.0:8001`. + +- :warning: To reduce confusion, the `/upstreams/:upstream_name_or_id/targets/` + has been updated to not show the full list of Targets anymore, but only + the ones that are currently active in the load balancer. To retrieve the full + history of Targets, you can now query + `/upstreams/:upstream_name_or_id/targets/all`. The + `/upstreams/:upstream_name_or_id/targets/active` endpoint has been removed. + Thanks [@hbagdi](https://github.com/hbagdi) for tackling this backlog item! + [#3049](https://github.com/Kong/kong/pull/3049) +- :warning: The `orderlist` property of Upstreams has been removed, along with + any confusion it may have brought. The balancer is now able to fully function + without it, yet with the same level of entropy in its load distribution. + [#2748](https://github.com/Kong/kong/pull/2748) + +##### CLI + +- :warning: The `$ kong compile` command which was deprecated in 0.11.0 has + been removed. + [#3069](https://github.com/Kong/kong/pull/3069) + +##### Plugins + +- :warning: In logging plugins, the `request.request_uri` field has been + renamed to `request.url`. + [#2445](https://github.com/Kong/kong/pull/2445) + [#3098](https://github.com/Kong/kong/pull/3098) + +### Added + +##### Core + +- :fireworks: Support for **health checks**! Kong can now short-circuit some + of your upstream Targets (replicas) from its load balancer when it encounters + too many TCP or HTTP errors. You can configure the number of failures, or the + HTTP status codes that should be considered invalid, and Kong will monitor + the failures and successes of proxied requests to each upstream Target. We + call this feature **passive health checks**. 
+ Additionally, you can configure **active health checks**, which will make + Kong perform periodic HTTP test requests to actively monitor the health of + your upstream services, and pre-emptively short-circuit them. + Upstream Targets can be manually taken up or down via two new Admin API + endpoints: `/healthy` and `/unhealthy`. + [#3096](https://github.com/Kong/kong/pull/3096) +- :fireworks: Support for **hash based load balancing**! Kong now offers + consistent hashing/sticky sessions load balancing capabilities via the new + `hash_*` attributes of the Upstream entity. Hashes can be based off client + IPs, request headers, or Consumers! + [#2875](https://github.com/Kong/kong/pull/2875) +- :fireworks: Logging plugins now log requests that were short-circuited by + Kong! (e.g. HTTP 401 responses from auth plugins or HTTP 429 responses from + rate limiting plugins, etc.) Kong now executes any subsequent plugins once a + request has been short-circuited. Your plugin must be using the + `kong.tools.responses` module for this behavior to be respected. + [#3079](https://github.com/Kong/kong/pull/3079) +- Kong is now compatible with OpenResty up to version 1.13.6.1. Be aware that + the recommended (and default) version shipped with this release is still + 1.11.2.5. + [#3070](https://github.com/Kong/kong/pull/3070) + +##### CLI + +- `$ kong start` now considers the commonly used `/opt/openresty` prefix when + searching for the `nginx` executable. + [#3074](https://github.com/Kong/kong/pull/3074) + +##### Admin API + +- Two new endpoints, `/healthy` and `/unhealthy` can be used to manually bring + upstream Targets up or down, as part of the new health checks feature of the + load balancer. + [#3096](https://github.com/Kong/kong/pull/3096) + +##### Plugins + +- logging plugins: A new field `upstream_uri` now logs the value of the + upstream request's path. This is useful to help debugging plugins or setups + that aim at rewriting a request's URL during proxying. + Thanks [@shiprabehera](https://github.com/shiprabehera) for the patch! + [#2445](https://github.com/Kong/kong/pull/2445) +- tcp-log: Support for TLS handshake with the logs recipients for secure + transmissions of logging data. + [#3091](https://github.com/Kong/kong/pull/3091) +- jwt: Support for JWTs passed in cookies. Use the new `config.cookie_names` + property to configure the behavior to your liking. + Thanks [@mvanholsteijn](https://github.com/mvanholsteijn) for the patch! + [#2974](https://github.com/Kong/kong/pull/2974) +- oauth2 + - New `config.auth_header_name` property to customize the authorization + header's name. + Thanks [@supraja93](https://github.com/supraja93) + [#2928](https://github.com/Kong/kong/pull/2928) + - New `config.refresh_ttl` property to customize the TTL of refresh tokens, + previously hard-coded to 14 days. + Thanks [@bob983](https://github.com/bob983) for the patch! + [#2942](https://github.com/Kong/kong/pull/2942) + - Avoid an error in the logs when trying to retrieve an access token from + a request without a body. + Thanks [@WALL-E](https://github.com/WALL-E) for the patch. + [#3063](https://github.com/Kong/kong/pull/3063) +- ldap: New `config.header_type` property to customize the authorization method + in the `Authorization` header. + Thanks [@francois-maillard](https://github.com/francois-maillard) for the + patch! 
+ [#2963](https://github.com/Kong/kong/pull/2963) + +### Fixed + +##### CLI + +- Fix a potential vulnerability in which an attacker could read the Kong + configuration file with insufficient permissions for a short window of time + while Kong is being started. + [#3057](https://github.com/Kong/kong/pull/3057) +- Proper log message upon timeout in `$ kong quit`. + [#3061](https://github.com/Kong/kong/pull/3061) + +##### Admin API + +- The `/certificates` endpoint now properly supports the `snis` parameter + in PUT and PATCH requests. + Thanks [@hbagdi](https://github.com/hbagdi) for the contribution! + [#3040](https://github.com/Kong/kong/pull/3040) +- Avoid sending the `HTTP/1.1 415 Unsupported Content Type` response when + receiving a request with a valid `Content-Type`, but with an empty payload. + [#3077](https://github.com/Kong/kong/pull/3077) + +##### Plugins + +- basic-auth: + - Accept passwords containing `:`. + Thanks [@nico-acidtango](https://github.com/nico-acidtango) for the patch! + [#3014](https://github.com/Kong/kong/pull/3014) + - Performance improvements, courtesy of + [@nico-acidtango](https://github.com/nico-acidtango) + [#3014](https://github.com/Kong/kong/pull/3014) [Back to TOC](#table-of-contents) @@ -1972,7 +2151,7 @@ First version running with Cassandra. [Back to TOC](#table-of-contents) -[0.12.0]: https://github.com/Kong/kong/compare/0.11.2...next +[0.12.0rc1]: https://github.com/Kong/kong/compare/0.11.2...0.12.0rc1 [0.11.2]: https://github.com/Kong/kong/compare/0.11.1...0.11.2 [0.11.1]: https://github.com/Kong/kong/compare/0.11.0...0.11.1 [0.10.4]: https://github.com/Kong/kong/compare/0.10.3...0.10.4 From 692c1736da5342226a4c7f2338ca9266fc97c381 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Fri, 15 Dec 2017 16:42:15 -0800 Subject: [PATCH 04/74] docs(upgrade) add 0.12 upgrade path and notices --- UPGRADE.md | 138 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 138 insertions(+) diff --git a/UPGRADE.md b/UPGRADE.md index cf3f10e41e1d..33ed0c3ca71d 100644 --- a/UPGRADE.md +++ b/UPGRADE.md @@ -36,6 +36,144 @@ starts new workers, which take over from old workers before those old workers are terminated. In this way, Kong will serve new requests via the new configuration, without dropping existing in-flight connections. +## Upgrade to `0.12.x` + +As it is the case most of the time, this new major version of Kong comes with +a few **database migrations**, some breaking changes, databases deprecation +notices, and minor updates to the NGINX configuration template. + +This document will only highlight the breaking changes that you need to be +aware of, and describe a recommended upgrade path. We recommend that you +consult the full [0.12.0 +Changelog](https://github.com/Kong/kong/blob/master/CHANGELOG.md) for a +complete list of changes and new features. + +See below the breaking changes section for a detailed list of steps recommended +to **run migrations** and upgrade from a previous version of Kong. + +#### Breaking changes + +#### Configuration + +- Several updates were made to the NGINX configuration template. If you are + using a custom template, you **must** apply those modifications. See below + for a list of changes to apply. + +##### Core + +- The required OpenResty version has been bumped to 1.11.2.5. 
If you + are installing Kong from one of our distribution packages, you are not + affected by this change. +- As Kong now executes subsequent plugins when a request is being + short-circuited (e.g. HTTP 401 responses from auth plugins), plugins that + run in the header or body filter phases will be run upon such responses + from the access phase. It is possible that some of these plugins (e.g. your + custom plugins) now run in scenarios where they were not previously expected + to run. + +##### Admin API + +- By default, the Admin API now only listens on the local interface. + We consider this change to be an improvement in the default security policy + of Kong. If you are already using Kong, and your Admin API still binds to all + interfaces, consider updating it as well. You can do so by updating the + `admin_listen` configuration value, like so: `admin_listen = 127.0.0.1:8001`. + + :red_circle: **Note to Docker users**: Beware of this change as you may have + to ensure that your Admin API is reachable via the host's interface. + You can use the `-e KONG_ADMIN_LISTEN` argument when provisioning your + container(s) to update this value; for example, + `-e KONG_ADMIN_LISTEN=0.0.0.0:8001`. + +- The `/upstreams/:upstream_name_or_id/targets/` has been updated to not show + the full list of Targets anymore, but only the ones that are currently + active in the load balancer. To retrieve the full history of Targets, you can + now query `/upstreams/:upstream_name_or_id/targets/all`. The + `/upstreams/:upstream_name_or_id/targets/active` endpoint has been removed. +- The `orderlist` property of Upstreams has been removed. + +##### CLI + +- The `$ kong compile` command which was deprecated in 0.11.0 has been removed. + +##### Plugins + +- In logging plugins, the `request.request_uri` field has been renamed to + `request.url`. + +#### Deprecations + +##### Databases + +- Starting with Kong 0.12.0, we have updated our databases support policy. + - Support for PostgreSQL 9.4 has been deprecated. We recommend using + PostgreSQL 9.5 or above. + - Support for Cassandra 2.0 has been deprecated. We recommend using + Cassandra 2.1 or above. + - Support for Redis versions 3.1 or below has been deprecated. We + recommend using Redis 3.2 or above. + +--- + +If you use a custom NGINX configuration template from Kong 0.11, before +attempting to run any 0.12 node, make sure to apply the following change to +your template: + +```diff +diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua +index 5ab65ca3..8a6abd64 100644 +--- a/kong/templates/nginx_kong.lua ++++ b/kong/templates/nginx_kong.lua +@@ -32,6 +32,7 @@ lua_shared_dict kong 5m; + lua_shared_dict kong_cache ${{MEM_CACHE_SIZE}}; + lua_shared_dict kong_process_events 5m; + lua_shared_dict kong_cluster_events 5m; ++lua_shared_dict kong_healthchecks 5m; + > if database == "cassandra" then + lua_shared_dict kong_cassandra 5m; + > end +``` + +--- + +You can now start migrating your cluster from `0.11.x` to `0.12`. If you are +doing this upgrade "in-place", against the datastore of a running 0.11 cluster, +then for a short period of time, your database schema won't be fully compatible +with your 0.11 nodes anymore. This is why we suggest either performing this +upgrade when your 0.11 cluster is warm and most entities are cached, or against +a new database, if you can migrate your data. If you wish to temporarily make +your APIs unavailable, you can leverage the +[request-termination](https://getkong.org/plugins/request-termination/) plugin. 
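For illustration only, such a temporary block could be applied through the
Admin API (a sketch: the API name, Admin API address, and configuration
values below are placeholders to adapt to your setup):

```
# hypothetical API named "my-api"; answer 503 while the upgrade is running
$ curl -X POST http://localhost:8001/apis/my-api/plugins \
    --data "name=request-termination" \
    --data "config.status_code=503" \
    --data "config.message=Temporarily unavailable, upgrade in progress"

# remove the plugin (by its id) once the upgrade is complete
$ curl -X DELETE http://localhost:8001/apis/my-api/plugins/{plugin-id}
```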
+ +The path to upgrade a 0.11 datastore is identical to the one of previous major +releases: + +1. If you are planning on upgrading Kong while 0.11 nodes are running against + the same datastore, make sure those nodes are warm enough (they should have + most of your entities cached already), or temporarily disable your APIs. +2. Provision a 0.12 node and configure it as you wish (environment variables/ + configuration file). Make sure to point this new 0.12 node to your current + datastore. +3. **Without starting the 0.12 node**, run the 0.12 migrations against your + current datastore: + +``` +$ kong migrations up [-c kong.conf] +``` + +As usual, this step should be executed from a **single node**. + +4. You can now provision a fresh 0.12 cluster pointing to your migrated + datastore and start your 0.12 nodes. +5. Gradually switch your traffic from the 0.11 cluster to the new 0.12 cluster. + Remember, once your database is migrated, your 0.11 nodes will rely on + their cache and not on the underlying database. Your traffic should switch + to the new cluster as quickly as possible. +6. Once your traffic is fully migrated to the 0.12 cluster, decommission + your 0.11 cluster. + +You have now successfully upgraded your cluster to run 0.12 nodes exclusively. + ## Upgrade to `0.11.x` Along with the usual database migrations shipped with our major releases, this From 8aa35c3e7cfbc5223a91411fdd384dca41e88cf7 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Tue, 2 Jan 2018 20:29:27 -0800 Subject: [PATCH 05/74] chore(*) the year is 2018 --- LICENSE | 2 +- README.md | 2 +- kong/dao/dao.lua | 2 +- kong/tools/responses.lua | 2 +- kong/tools/timestamp.lua | 2 +- kong/tools/utils.lua | 2 +- spec/helpers.lua | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/LICENSE b/LICENSE index 6f59129a0189..7f21d9a0772e 100644 --- a/LICENSE +++ b/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2016-2017 Kong Inc. + Copyright 2016-2018 Kong Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index f45d3bb76504..0a26b8d2c152 100644 --- a/README.md +++ b/README.md @@ -232,7 +232,7 @@ Enterprise](https://konghq.com/kong-enterprise-edition/). ## License ``` -Copyright 2016-2017 Kong Inc. +Copyright 2016-2018 Kong Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/kong/dao/dao.lua b/kong/dao/dao.lua index c2e3f07d16ca..24dfdf49f949 100644 --- a/kong/dao/dao.lua +++ b/kong/dao/dao.lua @@ -8,7 +8,7 @@ -- and is responsible for propagating clustering events related to data invalidation, -- as well as foreign constraints when the underlying database does not support them -- (as with Cassandra). --- @copyright Copyright 2016-2017 Kong Inc. All rights reserved. +-- @copyright Copyright 2016-2018 Kong Inc. All rights reserved. -- @license [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- @module kong.dao diff --git a/kong/tools/responses.lua b/kong/tools/responses.lua index 51867805c76f..581ef2b63bca 100644 --- a/kong/tools/responses.lua +++ b/kong/tools/responses.lua @@ -1,7 +1,7 @@ --- Kong helper methods to send HTTP responses to clients. -- Can be used in the proxy (core/resolver), plugins or Admin API. -- Most used HTTP status codes and responses are implemented as helper methods. 
--- @copyright Copyright 2016-2017 Kong Inc. All rights reserved. +-- @copyright Copyright 2016-2018 Kong Inc. All rights reserved. -- @license [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- @module kong.tools.responses -- @usage diff --git a/kong/tools/timestamp.lua b/kong/tools/timestamp.lua index a2d401f0b377..76fb38fff53c 100644 --- a/kong/tools/timestamp.lua +++ b/kong/tools/timestamp.lua @@ -1,6 +1,6 @@ --- Module for timestamp support. -- Based on the LuaTZ module. --- @copyright Copyright 2016-2017 Kong Inc. All rights reserved. +-- @copyright Copyright 2016-2018 Kong Inc. All rights reserved. -- @license [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- @module kong.tools.timestamp diff --git a/kong/tools/utils.lua b/kong/tools/utils.lua index f51f3e2b4caf..cd42df2d1f3d 100644 --- a/kong/tools/utils.lua +++ b/kong/tools/utils.lua @@ -4,7 +4,7 @@ -- NOTE: Before implementing a function here, consider if it will be used in many places -- across Kong. If not, a local function in the appropriate module is prefered. -- --- @copyright Copyright 2016-2017 Kong Inc. All rights reserved. +-- @copyright Copyright 2016-2018 Kong Inc. All rights reserved. -- @license [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- @module kong.tools.utils diff --git a/spec/helpers.lua b/spec/helpers.lua index c749fd7d24d6..209a6325282a 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -1,7 +1,7 @@ ------------------------------------------------------------------ -- Collection of utilities to help testing Kong features and plugins. -- --- @copyright Copyright 2016-2017 Kong Inc. All rights reserved. +-- @copyright Copyright 2016-2018 Kong Inc. All rights reserved. -- @license [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- @module spec.helpers From 91d2051b0096db61924f4151242dbce2d0fce5f5 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Fri, 5 Jan 2018 11:10:42 -0800 Subject: [PATCH 06/74] docs(changelog) add 0.12.0rc2 changes * add 0.12.0rc2 changes * move 0.12.0rc1/2 sections under "Released" section --- CHANGELOG.md | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f0d73ae8dce..67eec7918e53 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,9 @@ - [Planned](#planned) - [Scheduled](#scheduled) + - [0.12.0](#0120) - [Released](#released) + - [0.12.0rc2](#0120rc2) - [0.12.0rc1](#0120rc1) - [0.11.2](#0112---20171129) - [0.11.1](#0111---20171024) @@ -28,6 +30,28 @@ Those releases do not have a fixed release date yet. This section describes upcoming releases that have a release date, along with a detailed changeset of their content. +## 0.12.0 + +Stable release planned for January 2018. See [0.12.0rc1](#0120rc1) +and [0.12.0rc2](#0120rc2). + +[Back to TOC](#table-of-contents) + +# Released + +This section describes publicly available releases and a detailed changeset of +their content. + +## [0.12.0rc2] + +* **Release Candidate**: 2018/01/05 +* **Stable**: January 2018 + +This release candidate fixes an issue from 0.12.0rc1 regarding database cache +invalidation upon Upstream creation and modification. + +[Back to TOC](#table-of-contents) + ## [0.12.0rc1] * **Release Candidate**: 2017/12/20 @@ -218,11 +242,6 @@ upgrade your Kong cluster. [Back to TOC](#table-of-contents) -# Released - -This section describes publicly available releases and a detailed changeset of -their content. 
- ## [0.11.2] - 2017/11/29 ### Added @@ -2151,6 +2170,7 @@ First version running with Cassandra. [Back to TOC](#table-of-contents) +[0.12.0rc2]: https://github.com/Kong/kong/compare/0.12.0rc1...0.12.0rc2 [0.12.0rc1]: https://github.com/Kong/kong/compare/0.11.2...0.12.0rc1 [0.11.2]: https://github.com/Kong/kong/compare/0.11.1...0.11.2 [0.11.1]: https://github.com/Kong/kong/compare/0.11.0...0.11.1 From cee785fdfcd96a72fa01fa9ff8c213c6f9c2a1e1 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Thu, 11 Jan 2018 12:24:11 -0200 Subject: [PATCH 07/74] fix(schema) do not run further tests when type is invalid Fix #3144 From #3145 Signed-off-by: Thibault Charbonnier --- kong/dao/schemas_validation.lua | 3 +++ spec/01-unit/006-schema_validation_spec.lua | 11 +++++++++++ 2 files changed, 14 insertions(+) diff --git a/kong/dao/schemas_validation.lua b/kong/dao/schemas_validation.lua index 23d1a77b181f..09376644033d 100644 --- a/kong/dao/schemas_validation.lua +++ b/kong/dao/schemas_validation.lua @@ -147,6 +147,7 @@ function _M.validate_entity(tbl, schema, options) if not is_valid_type and POSSIBLE_TYPES[v.type] then errors = utils.add_error(errors, error_prefix .. column, string.format("%s is not %s %s", column, v.type == "array" and "an" or "a", v.type)) + goto continue end end @@ -258,6 +259,8 @@ function _M.validate_entity(tbl, schema, options) end end end + + ::continue:: end -- Check for unexpected fields in the entity diff --git a/spec/01-unit/006-schema_validation_spec.lua b/spec/01-unit/006-schema_validation_spec.lua index 04d83b097a50..5b861110f403 100644 --- a/spec/01-unit/006-schema_validation_spec.lua +++ b/spec/01-unit/006-schema_validation_spec.lua @@ -157,6 +157,17 @@ describe("Schemas", function() assert.falsy(err) end) + it("should not crash when an array has invalid contents (regression for #3144)", function() + local values = { enum_array = 5 } + + assert.has_no_errors(function() + local valid, err = validate_entity(values, schema) + assert.falsy(valid) + assert.truthy(err) + assert.are.same("enum_array is not an array", err.enum_array) + end) + end) + it("should return error when an invalid boolean value is passed", function() local values = {string = "test", boolean_val = "ciao"} From 5c8b3c6917b9a69a007f30b23c1868551bd05e99 Mon Sep 17 00:00:00 2001 From: Thijs Schreijer Date: Mon, 11 Sep 2017 13:52:00 +0200 Subject: [PATCH 08/74] refactor(balancer) drop the orderlist property (#2748) This removes the `orderlist` property from the balancer entity. Due to a different implementation in the dns library, it is no longer required. 
from #2748 --- kong/core/balancer.lua | 2 - kong/dao/migrations/cassandra.lua | 7 + kong/dao/migrations/postgres.lua | 7 + kong/dao/schemas/upstreams.lua | 60 ------- kong/init.lua | 26 +-- spec/01-unit/007-entities_schemas_spec.lua | 89 --------- .../04-admin_api/07-upstreams_routes_spec.lua | 170 ++++-------------- .../04-admin_api/08-targets_routes_spec.lua | 2 - .../05-proxy/09-balancer_spec.lua | 24 ++- 9 files changed, 90 insertions(+), 297 deletions(-) diff --git a/kong/core/balancer.lua b/kong/core/balancer.lua index ae57205ed824..47edf076bdf7 100644 --- a/kong/core/balancer.lua +++ b/kong/core/balancer.lua @@ -176,7 +176,6 @@ local get_balancer = function(target) -- no balancer yet (or invalidated) so create a new one balancer, err = ring_balancer.new({ wheelSize = upstream.slots, - order = upstream.orderlist, dns = dns_client, }) @@ -222,7 +221,6 @@ local get_balancer = function(target) -- for now; create a new balancer from scratch balancer, err = ring_balancer.new({ wheelSize = upstream.slots, - order = upstream.orderlist, dns = dns_client, }) if not balancer then diff --git a/kong/dao/migrations/cassandra.lua b/kong/dao/migrations/cassandra.lua index d79ab0b3e093..ee5858e2247d 100644 --- a/kong/dao/migrations/cassandra.lua +++ b/kong/dao/migrations/cassandra.lua @@ -474,4 +474,11 @@ return { DROP TABLE nodes; ]], }, + { + name = "2017-07-28-225000_balancer_orderlist_remove", + up = [[ + ALTER TABLE upstreams DROP orderlist; + ]], + down = function(_, _, dao) end -- not implemented + }, } diff --git a/kong/dao/migrations/postgres.lua b/kong/dao/migrations/postgres.lua index a1bcb34dee73..fe6cc8d2901f 100644 --- a/kong/dao/migrations/postgres.lua +++ b/kong/dao/migrations/postgres.lua @@ -526,4 +526,11 @@ return { DROP INDEX ttls_primary_uuid_value_idx; ]] }, + { + name = "2017-07-28-225000_balancer_orderlist_remove", + up = [[ + ALTER TABLE upstreams DROP COLUMN IF EXISTS orderlist; + ]], + down = function(_, _, dao) end -- not implemented + }, } diff --git a/kong/dao/schemas/upstreams.lua b/kong/dao/schemas/upstreams.lua index d915d2c25641..3c027193a08e 100644 --- a/kong/dao/schemas/upstreams.lua +++ b/kong/dao/schemas/upstreams.lua @@ -31,13 +31,6 @@ return { type = "number", default = DEFAULT_SLOTS, }, - orderlist = { - -- a list of sequential, but randomly ordered, integer numbers. In the datastore - -- because all Kong nodes need the exact-same 'randomness'. If changed, consistency is lost. - -- must have exactly `slots` number of entries. 
- type = "array", - default = {}, - } }, self_check = function(schema, config, dao, is_updating) @@ -58,59 +51,6 @@ return { return false, Errors.schema(SLOTS_MSG) end - -- check the order array - local order = config.orderlist - if #order == config.slots then - -- array size unchanged, check consistency - - local t = utils.shallow_copy(order) - table.sort(t) - local count, max = 0, 0 - for i, v in pairs(t) do - if i ~= v then - return false, Errors.schema("invalid orderlist") - end - - count = count + 1 - if i > max then - max = i - end - end - - if count ~= config.slots or max ~= config.slots then - return false, Errors.schema("invalid orderlist") - end - - else - -- size mismatch - if #order > 0 then - -- size given, but doesn't match the size of the also given orderlist - return false, Errors.schema("size mismatch between 'slots' and 'orderlist'") - end - - -- No list given, generate order array - local t = {} - for i = 1, config.slots do - t[i] = { - id = i, - order = math.random(1, config.slots), - } - end - - -- sort the array (we don't check for -accidental- duplicates as the - -- id field is used for the order and that one is always unique) - table.sort(t, function(a,b) - return a.order < b.order - end) - - -- replace the created 'record' with only the id - for i, v in ipairs(t) do - t[i] = v.id - end - - config.orderlist = t - end - return true end, } diff --git a/kong/init.lua b/kong/init.lua index 80cba6211834..91e6349397e4 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -62,6 +62,10 @@ local kong_error_handlers = require "kong.core.error_handlers" local ngx = ngx local header = ngx.header +local ngx_log = ngx.log +local ngx_ERR = ngx.ERR +local ngx_CRIT = ngx.CRIT +local ngx_DEBUG = ngx.DEBUG local ipairs = ipairs local assert = assert local tostring = tostring @@ -73,7 +77,7 @@ local set_more_tries = ngx_balancer.set_more_tries local function load_plugins(kong_conf, dao) local in_db_plugins, sorted_plugins = {}, {} - ngx.log(ngx.DEBUG, "Discovering used plugins") + ngx_log(ngx_DEBUG, "Discovering used plugins") local rows, err_t = dao.plugins:find_all() if not rows then @@ -101,7 +105,7 @@ local function load_plugins(kong_conf, dao) return nil, "no configuration schema found for plugin: " .. plugin end - ngx.log(ngx.DEBUG, "Loading plugin: " .. plugin) + ngx_log(ngx_DEBUG, "Loading plugin: " .. 
plugin) sorted_plugins[#sorted_plugins+1] = { name = plugin, @@ -177,7 +181,7 @@ function Kong.init_worker() local ok, err = singletons.dao:init_worker() if not ok then - ngx.log(ngx.CRIT, "could not init DB: ", err) + ngx_log(ngx_CRIT, "could not init DB: ", err) return end @@ -197,7 +201,7 @@ function Kong.init_worker() wait_max = 0.5, -- max wait time before discarding event } if not ok then - ngx.log(ngx.CRIT, "could not start inter-worker events: ", err) + ngx_log(ngx_CRIT, "could not start inter-worker events: ", err) return end @@ -215,7 +219,7 @@ function Kong.init_worker() poll_offset = configuration.db_update_propagation, } if not cluster_events then - ngx.log(ngx.CRIT, "could not create cluster_events: ", err) + ngx_log(ngx_CRIT, "could not create cluster_events: ", err) return end @@ -235,7 +239,7 @@ function Kong.init_worker() }, } if not cache then - ngx.log(ngx.CRIT, "could not create kong cache: ", err) + ngx_log(ngx_CRIT, "could not create kong cache: ", err) return end @@ -243,7 +247,7 @@ function Kong.init_worker() return "init" end) if not ok then - ngx.log(ngx.CRIT, "could not set router version in cache: ", err) + ngx_log(ngx_CRIT, "could not set router version in cache: ", err) return end @@ -297,7 +301,7 @@ function Kong.balancer() local ok, err = balancer_execute(addr) if not ok then - ngx.log(ngx.ERR, "failed to retry the dns/balancer resolver for ", + ngx_log(ngx_ERR, "failed to retry the dns/balancer resolver for ", tostring(addr.host), "' with: ", tostring(err)) return responses.send(500) @@ -315,9 +319,11 @@ function Kong.balancer() current_try.port = addr.port -- set the targets as resolved + ngx_log(ngx_DEBUG, "setting address (try ", addr.try_count, "): ", + addr.ip, ":", addr.port) local ok, err = set_current_peer(addr.ip, addr.port) if not ok then - ngx.log(ngx.ERR, "failed to set the current peer (address: ", + ngx_log(ngx_ERR, "failed to set the current peer (address: ", tostring(addr.ip), " port: ", tostring(addr.port),"): ", tostring(err)) @@ -328,7 +334,7 @@ function Kong.balancer() addr.send_timeout / 1000, addr.read_timeout / 1000) if not ok then - ngx.log(ngx.ERR, "could not set upstream timeouts: ", err) + ngx_log(ngx_ERR, "could not set upstream timeouts: ", err) end core.balancer.after() diff --git a/spec/01-unit/007-entities_schemas_spec.lua b/spec/01-unit/007-entities_schemas_spec.lua index b27a17f4dbf1..3babbb5c1313 100644 --- a/spec/01-unit/007-entities_schemas_spec.lua +++ b/spec/01-unit/007-entities_schemas_spec.lua @@ -5,7 +5,6 @@ local targets_schema = require "kong.dao.schemas.targets" local upstreams_schema = require "kong.dao.schemas.upstreams" local validations = require "kong.dao.schemas_validation" local validate_entity = validations.validate_entity -local utils = require "kong.tools.utils" describe("Entities Schemas", function() @@ -781,94 +780,6 @@ describe("Entities Schemas", function() end end) - it("should require (optional) orderlist to be a proper list", function() - local data, valid, errors, check - local function validate_order(list, size) - assert(type(list) == "table", "expected list table, got " .. type(list)) - assert(next(list), "table is empty") - assert(type(size) == "number", "expected size number, got " .. type(size)) - assert(size > 0, "expected size to be > 0") - local c = {} - local max = 0 - for i,v in pairs(list) do --> note: pairs, not ipairs!! 
- if i > max then max = i end - c[i] = v - end - assert(max == size, "highest key is not equal to the size") - table.sort(c) - max = 0 - for i, v in ipairs(c) do - assert(i == v, "expected sorted table to have equal keys and values") - if i>max then max = i end - end - assert(max == size, "expected array, but got list with holes") - end - - for _ = 1, 20 do -- have Kong generate 20 random sized arrays and verify them - data = { - name = "valid.host.name", - slots = math.random(slots_min, slots_max) - } - valid, errors, check = validate_entity(data, upstreams_schema) - assert.is_true(valid) - assert.is_nil(errors) - assert.is_nil(check) - validate_order(data.orderlist, data.slots) - end - - local lst = { 9,7,5,3,1,2,4,6,8,10 } -- a valid list - data = { - name = "valid.host.name", - slots = 10, - orderlist = utils.shallow_copy(lst) - } - valid, errors, check = validate_entity(data, upstreams_schema) - assert.is_true(valid) - assert.is_nil(errors) - assert.is_nil(check) - assert.same(lst, data.orderlist) - - data = { - name = "valid.host.name", - slots = 10, - orderlist = { 9,7,5,3,1,2,4,6,8 } -- too short (9) - } - valid, errors, check = validate_entity(data, upstreams_schema) - assert.is_false(valid) - assert.is_nil(errors) - assert.are.equal("size mismatch between 'slots' and 'orderlist'",check.message) - - data = { - name = "valid.host.name", - slots = 10, - orderlist = { 9,7,5,3,1,2,4,6,8,10,11 } -- too long (11) - } - valid, errors, check = validate_entity(data, upstreams_schema) - assert.is_false(valid) - assert.is_nil(errors) - assert.are.equal("size mismatch between 'slots' and 'orderlist'",check.message) - - data = { - name = "valid.host.name", - slots = 10, - orderlist = { 9,7,5,3,1,2,4,6,8,8 } -- a double value (2x 8, no 10) - } - valid, errors, check = validate_entity(data, upstreams_schema) - assert.is_false(valid) - assert.is_nil(errors) - assert.are.equal("invalid orderlist",check.message) - - data = { - name = "valid.host.name", - slots = 10, - orderlist = { 9,7,5,3,1,2,4,6,8,11 } -- a hole (10 missing) - } - valid, errors, check = validate_entity(data, upstreams_schema) - assert.is_false(valid) - assert.is_nil(errors) - assert.are.equal("invalid orderlist",check.message) - end) - end) -- diff --git a/spec/02-integration/04-admin_api/07-upstreams_routes_spec.lua b/spec/02-integration/04-admin_api/07-upstreams_routes_spec.lua index 5704372317cf..37ac76557d1f 100644 --- a/spec/02-integration/04-admin_api/07-upstreams_routes_spec.lua +++ b/spec/02-integration/04-admin_api/07-upstreams_routes_spec.lua @@ -12,27 +12,6 @@ local function it_content_types(title, fn) it(title .. " with application/json", test_json) end -local function validate_order(list, size) - assert(type(list) == "table", "expected list table, got " .. type(list)) - assert(next(list), "table is empty") - assert(type(size) == "number", "expected size number, got " .. type(size)) - assert(size > 0, "expected size to be > 0") - local c = {} - local max = 0 - for i,v in pairs(list) do --> note: pairs, not ipairs!! - if i > max then max = i end - c[i] = v - end - assert(max == size, "highest key is not equal to the size") - table.sort(c) - max = 0 - for i, v in ipairs(c) do - assert(i == v, "expected sorted table to have equal keys and values") - if i>max then max = i end - end - assert(max == size, "expected array, but got list with holes") -end - dao_helpers.for_each_dao(function(kong_config) describe("Admin API: #" .. kong_config.database, function() @@ -41,6 +20,7 @@ describe("Admin API: #" .. 
kong_config.database, function() setup(function() dao = assert(DAOFactory.new(kong_config)) + helpers.run_migrations(dao) helpers.run_migrations(dao) assert(helpers.start_kong{ @@ -75,67 +55,45 @@ describe("Admin API: #" .. kong_config.database, function() assert.is_number(json.created_at) assert.is_string(json.id) assert.are.equal(slots_default, json.slots) - validate_order(json.orderlist, json.slots) end end) - it("creates an upstream without defaults with application/json", function() - local res = assert(client:send { - method = "POST", - path = "/upstreams", - body = { - name = "my.upstream", - slots = 10, - orderlist = { 10,9,8,7,6,5,4,3,2,1 }, - }, - headers = {["Content-Type"] = "application/json"} - }) - assert.response(res).has.status(201) - local json = assert.response(res).has.jsonbody() - assert.equal("my.upstream", json.name) - assert.is_number(json.created_at) - assert.is_string(json.id) - assert.are.equal(10, json.slots) - validate_order(json.orderlist, json.slots) - assert.are.same({ 10,9,8,7,6,5,4,3,2,1 }, json.orderlist) - end) - pending("creates an upstream without defaults with application/www-form-urlencoded", function() --- pending due to inability to pass array --- see also the todo's below - local res = assert(client:send { - method = "POST", - path = "/upstreams", - body = "name=my.upstream&slots=10&" .. - "orderlist[]=10&orderlist[]=9&orderlist[]=8&orderlist[]=7&" .. - "orderlist[]=6&orderlist[]=5&orderlist[]=4&orderlist[]=3&" .. - "orderlist[]=2&orderlist[]=1", - headers = {["Content-Type"] = "application/www-form-urlencoded"} - }) - assert.response(res).has.status(201) - local json = assert.response(res).has.jsonbody() - assert.equal("my.upstream", json.name) - assert.is_number(json.created_at) - assert.is_string(json.id) - assert.are.equal(10, json.slots) - validate_order(json.orderlist, json.slots) - assert.are.same({ 10,9,8,7,6,5,4,3,2,1 }, json.orderlist) + it_content_types("creates an upstream without defaults with application/json", function(content_type) + return function() + local res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + slots = 10, + }, + headers = {["Content-Type"] = content_type} + }) + assert.response(res).has.status(201) + local json = assert.response(res).has.jsonbody() + assert.equal("my.upstream", json.name) + assert.is_number(json.created_at) + assert.is_string(json.id) + assert.are.equal(10, json.slots) + end end) - it("creates an upstream with " .. slots_max .. " slots", function(content_type) - local res = assert(client:send { - method = "POST", - path = "/upstreams", - body = { - name = "my.upstream", - slots = slots_max, - }, - headers = {["Content-Type"] = "application/json"} - }) - assert.response(res).has.status(201) - local json = assert.response(res).has.jsonbody() - assert.equal("my.upstream", json.name) - assert.is_number(json.created_at) - assert.is_string(json.id) - assert.are.equal(slots_max, json.slots) - validate_order(json.orderlist, json.slots) + it_content_types("creates an upstream with " .. slots_max .. 
" slots", function(content_type) + return function() + local res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + slots = slots_max, + }, + headers = {["Content-Type"] = content_type} + }) + assert.response(res).has.status(201) + local json = assert.response(res).has.jsonbody() + assert.equal("my.upstream", json.name) + assert.is_number(json.created_at) + assert.is_string(json.id) + assert.are.equal(slots_max, json.slots) + end end) describe("errors", function() it("handles malformed JSON body", function() @@ -190,54 +148,6 @@ describe("Admin API: #" .. kong_config.database, function() assert.same({ message = "number of slots must be between 10 and 65536" }, json) end end) - it_content_types("handles invalid input - orderlist", function(content_type) - return function() ---TODO: line below disables the test for urlencoded, because the orderlist array isn't passed/received properly -if content_type == "application/x-www-form-urlencoded" then return end - -- non-integers - local res = assert(client:send { - method = "POST", - path = "/upstreams", - body = { - name = "my.upstream", - slots = 10, - orderlist = { "one","two","three","four","five","six","seven","eight","nine","ten" }, - }, - headers = {["Content-Type"] = content_type} - }) - local body = assert.res_status(400, res) - local json = cjson.decode(body) - assert.same({ message = "invalid orderlist" }, json) - -- non-consecutive - res = assert(client:send { - method = "POST", - path = "/upstreams", - body = { - name = "my.upstream", - slots = 10, - orderlist = { 1,2,3,4,5,6,7,8,9,11 }, -- 10 is missing - }, - headers = {["Content-Type"] = content_type} - }) - body = assert.res_status(400, res) - local json = cjson.decode(body) - assert.same({ message = "invalid orderlist" }, json) - -- doubles - res = assert(client:send { - method = "POST", - path = "/upstreams", - body = { - name = "my.upstream", - slots = 10, - orderlist = { 1,2,3,4,5,1,2,3,4,5 }, - }, - headers = {["Content-Type"] = content_type} - }) - body = assert.res_status(400, res) - local json = cjson.decode(body) - assert.same({ message = "invalid orderlist" }, json) - end - end) it_content_types("returns 409 on conflict", function(content_type) return function() local res = assert(client:send { @@ -288,12 +198,9 @@ if content_type == "application/x-www-form-urlencoded" then return end assert.is_number(json.created_at) assert.is_string(json.id) assert.is_number(json.slots) - assert.is_table(json.orderlist) end end) - --it_content_types("replaces if exists", function(content_type) - pending("replaces if exists", function(content_type) ---TODO: no idea why this fails in an odd manner... 
+ it_content_types("replaces if exists", function(content_type) return function() local res = assert(client:send { method = "POST", @@ -323,7 +230,6 @@ if content_type == "application/x-www-form-urlencoded" then return end assert.equal("my-new-upstream", updated_json.name) assert.equal(123, updated_json.slots) assert.equal(json.id, updated_json.id) - assert.equal(json.created_at, updated_json.created_at) end end) describe("errors", function() diff --git a/spec/02-integration/04-admin_api/08-targets_routes_spec.lua b/spec/02-integration/04-admin_api/08-targets_routes_spec.lua index 192ec466dec4..59fab906a974 100644 --- a/spec/02-integration/04-admin_api/08-targets_routes_spec.lua +++ b/spec/02-integration/04-admin_api/08-targets_routes_spec.lua @@ -24,7 +24,6 @@ describe("Admin API", function() upstream = assert(helpers.dao.upstreams:insert { name = upstream_name, slots = 10, - orderlist = { 1,2,3,4,5,6,7,8,9,10 } }) end) @@ -250,7 +249,6 @@ describe("Admin API", function() assert(helpers.dao.upstreams:insert { name = upstream_name2, slots = 10, - orderlist = { 1,2,3,4,5,6,7,8,9,10 } }) end) diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index 6bc94631f175..c38a4f083e08 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -5,13 +5,28 @@ local helpers = require "spec.helpers" local dao_helpers = require "spec.02-integration.03-dao.helpers" local PORT = 21000 +local TEST_LOG = false -- extra verbose logging of test server + -- modified http-server. Accepts (sequentially) a number of incoming -- connections, and returns the number of succesful ones. -- Also features a timeout setting. local function http_server(timeout, count, port, ...) local threads = require "llthreads2.ex" local thread = threads.new({ - function(timeout, count, port) + function(timeout, count, port, TEST_LOG) + + local function test_log(...) + if not TEST_LOG then + return + end + + local t = { n = select( "#", ...), ...} + for i, v in ipairs(t) do + t[i] = tostring(v) + end + print(table.concat(t)) + end + local socket = require "socket" local server = assert(socket.tcp()) assert(server:setoption('reuseaddr', true)) @@ -20,6 +35,7 @@ local function http_server(timeout, count, port, ...) local expire = socket.gettime() + timeout assert(server:settimeout(0.1)) + test_log("test http server on port ", port, " started") local success = 0 while count > 0 do @@ -58,13 +74,16 @@ local function http_server(timeout, count, port, ...) if s then success = success + 1 end + test_log("test http server on port ", port, ": ", success, "/", + (success + count)," requests handled") end end server:close() + test_log("test http server on port ", port, " closed") return success end - }, timeout, count, port) + }, timeout, count, port, TEST_LOG) local server = thread:start(...) 
ngx.sleep(0.2) -- attempt to make sure server is started for failing CI tests @@ -80,6 +99,7 @@ dao_helpers.for_each_dao(function(kong_config) helpers.run_migrations() config_db = helpers.test_conf.database helpers.test_conf.database = kong_config.database + helpers.run_migrations() end) teardown(function() helpers.test_conf.database = config_db From 0d82647af14492adc5468ff6a2d98e6b94775c8b Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Wed, 4 Oct 2017 18:39:12 -0700 Subject: [PATCH 09/74] fix(postgres) migration for APIs created_at default precision Follow-up commit to 5cb196cd497823142cc804dbae6690d6c8930917 This migration is targeted for landing in 0.12.0, and will ensure that newly created APIs will have a `created_at` field with ms precision. From #2925 --- kong/dao/migrations/postgres.lua | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/kong/dao/migrations/postgres.lua b/kong/dao/migrations/postgres.lua index fe6cc8d2901f..2d721b970bfd 100644 --- a/kong/dao/migrations/postgres.lua +++ b/kong/dao/migrations/postgres.lua @@ -533,4 +533,13 @@ return { ]], down = function(_, _, dao) end -- not implemented }, + { + name = "2017-10-02-173400_apis_created_at_ms_precision", + up = [[ + ALTER TABLE apis ALTER COLUMN created_at SET DEFAULT CURRENT_TIMESTAMP(3); + ]], + down = [[ + ALTER TABLE apis ALTER COLUMN created_at SET DEFAULT CURRENT_TIMESTAMP(0); + ]] + }, } From f475eebbb881177176c70c6b445de5ea681e9843 Mon Sep 17 00:00:00 2001 From: Thijs Schreijer Date: Thu, 5 Oct 2017 20:49:52 +0200 Subject: [PATCH 10/74] feat(*) add run_on_preflight defaults migrations for jwt and key-auth PRs #2744 and #2857 implemented the preflight options (for a minor release). This adds the migrations including defaults (for a major release). From #2883 See #2643 #1292 #1535 --- kong-0.11.2-0.rockspec | 1 + kong/dao/db/cassandra.lua | 4 + kong/dao/migrations/helpers.lua | 99 +++++++++++++++++++ kong/plugins/jwt/handler.lua | 6 +- kong/plugins/jwt/migrations/cassandra.lua | 22 ++++- kong/plugins/jwt/migrations/postgres.lua | 22 ++++- kong/plugins/key-auth/handler.lua | 6 +- .../plugins/key-auth/migrations/cassandra.lua | 22 ++++- kong/plugins/key-auth/migrations/postgres.lua | 22 ++++- 9 files changed, 190 insertions(+), 14 deletions(-) create mode 100644 kong/dao/migrations/helpers.lua diff --git a/kong-0.11.2-0.rockspec b/kong-0.11.2-0.rockspec index 392e7517dbb6..ecc457d36ca7 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.11.2-0.rockspec @@ -117,6 +117,7 @@ build = { ["kong.dao.dao"] = "kong/dao/dao.lua", ["kong.dao.factory"] = "kong/dao/factory.lua", ["kong.dao.model_factory"] = "kong/dao/model_factory.lua", + ["kong.dao.migrations.helpers"] = "kong/dao/migrations/helpers.lua", ["kong.dao.migrations.cassandra"] = "kong/dao/migrations/cassandra.lua", ["kong.dao.migrations.postgres"] = "kong/dao/migrations/postgres.lua", diff --git a/kong/dao/db/cassandra.lua b/kong/dao/db/cassandra.lua index 3f30c4f0aa1f..c4ba7dd2cf6c 100644 --- a/kong/dao/db/cassandra.lua +++ b/kong/dao/db/cassandra.lua @@ -212,6 +212,10 @@ function _M:first_coordinator() return true end +function _M:get_coordinator() + return coordinator, coordinator and nil or "no coordinator has been set" +end + function _M:coordinator_change_keyspace(keyspace) if not coordinator then return nil, "no coordinator" diff --git a/kong/dao/migrations/helpers.lua b/kong/dao/migrations/helpers.lua new file mode 100644 index 000000000000..de3dd6f87e07 --- /dev/null +++ b/kong/dao/migrations/helpers.lua @@ -0,0 +1,99 @@ +local json_decode = 
require("cjson.safe").decode + + +local _M = {} + + +-- Iterator to update plugin configurations. +-- It works indepedent of the underlying datastore. +-- @param dao the dao to use +-- @param plugin_name the name of the plugin whos configurations +-- to iterate over +-- @return `ok+config+update` where `ok` is a boolean, `config` is the plugin configuration +-- table (or the error if not ok), and `update` is an update function to call with +-- the updated configuration table +-- @usage +-- up = function(_, _, dao) +-- for ok, config, update in plugin_config_iterator(dao, "jwt") do +-- if not ok then +-- return config +-- end +-- if config.run_on_preflight == nil then +-- config.run_on_preflight = true +-- local _, err = update(config) +-- if err then +-- return err +-- end +-- end +-- end +-- end +function _M.plugin_config_iterator(dao, plugin_name) + + -- iterates over rows + local run_rows = function(t) + for _, row in ipairs(t) do + if type(row.config) == "string" then + -- de-serialize in case of Cassandra + local json, err = json_decode(row.config) + if not json then + return nil, ("json decoding error '%s' while decoding '%s'"):format( + tostring(err), tostring(row.config)) + end + row.config = json + end + coroutine.yield(row.config, function(updated_config) + if type(updated_config) ~= "table" then + return nil, "expected table, got " .. type(updated_config) + end + row.created_at = nil + row.config = updated_config + return dao.plugins:update(row, {id = row.id}) + end) + end + return true + end + + local coro + if dao.db_type == "cassandra" then + coro = coroutine.create(function() + local coordinator = dao.db:get_coordinator() + for rows, err in coordinator:iterate([[ + SELECT * FROM plugins WHERE name = ']] .. plugin_name .. [['; + ]]) do + if err then + return nil, nil, err + end + + assert(run_rows(rows)) + end + end) + + elseif dao.db_type == "postgres" then + coro = coroutine.create(function() + local rows, err = dao.db:query([[ + SELECT * FROM plugins WHERE name = ']] .. plugin_name .. [['; + ]]) + if err then + return nil, nil, err + end + + assert(run_rows(rows)) + end) + + else + coro = coroutine.create(function() + return nil, nil, "unknown database type: " .. 
tostring(dao.db_type) + end) + end + + return function() + local coro_ok, config, update, err = coroutine.resume(coro) + if not coro_ok then return false, config end -- coroutine errored out + if err then return false, err end -- dao soft error + if not config then return nil end -- iterator done + return true, config, update + end +end + + +return _M diff --git a/kong/plugins/jwt/handler.lua b/kong/plugins/jwt/handler.lua index a30ae2aed98d..36f539b2cb1b 100644 --- a/kong/plugins/jwt/handler.lua +++ b/kong/plugins/jwt/handler.lua @@ -177,11 +177,7 @@ function JwtHandler:access(conf) JwtHandler.super.access(self) -- check if preflight request and whether it should be authenticated - if conf.run_on_preflight == false and get_method() == "OPTIONS" then - -- FIXME: the above `== false` test is because existing entries in the db will - -- have `nil` and hence will by default start passing the preflight request - -- This should be fixed by a migration to update the actual entries - -- in the datastore + if not conf.run_on_preflight and get_method() == "OPTIONS" then return end diff --git a/kong/plugins/jwt/migrations/cassandra.lua b/kong/plugins/jwt/migrations/cassandra.lua index 78af9bd130e1..8216916a2975 100644 --- a/kong/plugins/jwt/migrations/cassandra.lua +++ b/kong/plugins/jwt/migrations/cassandra.lua @@ -1,3 +1,5 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator + return { { name = "2015-06-09-jwt-auth", @@ -29,5 +31,23 @@ return { ALTER TABLE jwt_secrets DROP algorithm; ALTER TABLE jwt_secrets DROP rsa_public_key; ]] - } + }, + { + name = "2017-07-31-120200_jwt-auth_preflight_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "jwt") do + if not ok then + return config + end + if config.run_on_preflight == nil then + config.run_on_preflight = true + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, } diff --git a/kong/plugins/jwt/migrations/postgres.lua b/kong/plugins/jwt/migrations/postgres.lua index 6c075e1e8434..63dedbd6f946 100644 --- a/kong/plugins/jwt/migrations/postgres.lua +++ b/kong/plugins/jwt/migrations/postgres.lua @@ -1,3 +1,5 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator + return { { name = "2015-06-09-jwt-auth", @@ -47,5 +49,23 @@ return { down = [[ ALTER TABLE jwt_secrets ADD CONSTRAINT jwt_secrets_secret_key UNIQUE(secret); ]], - } + }, + { + name = "2017-07-31-120200_jwt-auth_preflight_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "jwt") do + if not ok then + return config + end + if config.run_on_preflight == nil then + config.run_on_preflight = true + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, } diff --git a/kong/plugins/key-auth/handler.lua b/kong/plugins/key-auth/handler.lua index 1cc33d802bc7..d6090f32fa90 100644 --- a/kong/plugins/key-auth/handler.lua +++ b/kong/plugins/key-auth/handler.lua @@ -158,11 +158,7 @@ function KeyAuthHandler:access(conf) KeyAuthHandler.super.access(self) -- check if preflight request and whether it should be authenticated - if conf.run_on_preflight == false and get_method() == "OPTIONS" then - -- FIXME: the above `== false` test is because existing entries in the db will - -- have `nil` and hence will by default start passing the preflight request - -- This should be fixed 
by a migration to update the actual entries - -- in the datastore + if not conf.run_on_preflight and get_method() == "OPTIONS" then return end diff --git a/kong/plugins/key-auth/migrations/cassandra.lua b/kong/plugins/key-auth/migrations/cassandra.lua index 33c5247207d0..d26b6f3f14a5 100644 --- a/kong/plugins/key-auth/migrations/cassandra.lua +++ b/kong/plugins/key-auth/migrations/cassandra.lua @@ -1,3 +1,5 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator + return { { name = "2015-07-31-172400_init_keyauth", @@ -16,5 +18,23 @@ return { down = [[ DROP TABLE keyauth_credentials; ]] - } + }, + { + name = "2017-07-31-120200_key-auth_preflight_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "key-auth") do + if not ok then + return config + end + if config.run_on_preflight == nil then + config.run_on_preflight = true + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, } diff --git a/kong/plugins/key-auth/migrations/postgres.lua b/kong/plugins/key-auth/migrations/postgres.lua index ad56bc5d4be6..13b26f3dddad 100644 --- a/kong/plugins/key-auth/migrations/postgres.lua +++ b/kong/plugins/key-auth/migrations/postgres.lua @@ -1,3 +1,5 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator + return { { name = "2015-07-31-172400_init_keyauth", @@ -23,5 +25,23 @@ return { down = [[ DROP TABLE keyauth_credentials; ]] - } + }, + { + name = "2017-07-31-120200_key-auth_preflight_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "key-auth") do + if not ok then + return config + end + if config.run_on_preflight == nil then + config.run_on_preflight = true + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, } From ea5aad6b01cccae40409898d5072da5b7a2ce086 Mon Sep 17 00:00:00 2001 From: Francois Maillard Date: Mon, 23 Oct 2017 16:43:00 +0200 Subject: [PATCH 11/74] feat(ldap) configurable name for Authorization header Makes the `Authorization` header type configurable. The default remains `LDAP`, but strings such as `Basic` can be used for ease of testing via the browser. Incidentally, this fixes the case-insensitive parsing of the `LDAP` string, which was broken. PR: #2963. --- kong/plugins/ldap-auth/access.lua | 13 ++++++++----- kong/plugins/ldap-auth/schema.lua | 1 + 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/kong/plugins/ldap-auth/access.lua b/kong/plugins/ldap-auth/access.lua index 2ff31c20db1f..802342a99205 100644 --- a/kong/plugins/ldap-auth/access.lua +++ b/kong/plugins/ldap-auth/access.lua @@ -4,6 +4,9 @@ local singletons = require "kong.singletons" local ldap = require "kong.plugins.ldap-auth.ldap" local match = string.match +local lower = string.lower +local find = string.find +local sub = string.sub local ngx_log = ngx.log local request = ngx.req local ngx_error = ngx.ERR @@ -18,12 +21,12 @@ local PROXY_AUTHORIZATION = "proxy-authorization" local _M = {} -local function retrieve_credentials(authorization_header_value) +local function retrieve_credentials(authorization_header_value, conf) local username, password if authorization_header_value then - local cred = match(authorization_header_value, "%s*[ldap|LDAP]%s+(.*)") - - if cred ~= nil then + local s, e = find(lower(authorization_header_value), "%s*" .. lower(conf.header_type) .. 
"%s+") + if s == 1 then + local cred = sub(authorization_header_value, e + 1) local decoded_cred = decode_base64(cred) username, password = match(decoded_cred, "(.+):(.+)") end @@ -82,7 +85,7 @@ local function load_credential(given_username, given_password, conf) end local function authenticate(conf, given_credentials) - local given_username, given_password = retrieve_credentials(given_credentials) + local given_username, given_password = retrieve_credentials(given_credentials, conf) if given_username == nil then return false end diff --git a/kong/plugins/ldap-auth/schema.lua b/kong/plugins/ldap-auth/schema.lua index ad93940e6961..0eb2bac7f14f 100644 --- a/kong/plugins/ldap-auth/schema.lua +++ b/kong/plugins/ldap-auth/schema.lua @@ -22,5 +22,6 @@ return { timeout = {type = "number", default = 10000}, keepalive = {type = "number", default = 60000}, anonymous = {type = "string", default = "", func = check_user}, + header_type = {type = "string", default = "ldap"}, } } From 1bea03c9db028d4f0ef90cfceb381e589ace4c31 Mon Sep 17 00:00:00 2001 From: supraja01 Date: Thu, 19 Oct 2017 17:05:44 -0700 Subject: [PATCH 12/74] feat(oauth2) new parameter auth_header_name Adds to the `oauth2` plugin a new parameter `auth_header_name` to define the header name to use. By default its value is `"authorization"`. From #2928 Signed-off-by: Hisham Muhammad --- kong/plugins/oauth2/access.lua | 10 +- kong/plugins/oauth2/migrations/cassandra.lua | 22 +++- kong/plugins/oauth2/migrations/postgres.lua | 22 +++- kong/plugins/oauth2/schema.lua | 1 + spec/03-plugins/26-oauth2/01-schema_spec.lua | 20 ++- spec/03-plugins/26-oauth2/03-access_spec.lua | 130 ++++++++++++++++++- 6 files changed, 196 insertions(+), 9 deletions(-) diff --git a/kong/plugins/oauth2/access.lua b/kong/plugins/oauth2/access.lua index 82cc9c1b821f..777cfd5f3a49 100644 --- a/kong/plugins/oauth2/access.lua +++ b/kong/plugins/oauth2/access.lua @@ -228,9 +228,9 @@ local function authorize(conf) }) end -local function retrieve_client_credentials(parameters) +local function retrieve_client_credentials(parameters, conf) local client_id, client_secret, from_authorization_header - local authorization_header = ngx.req.get_headers()["authorization"] + local authorization_header = ngx.req.get_headers()[conf.auth_header_name] if parameters[CLIENT_ID] and parameters[CLIENT_SECRET] then client_id = parameters[CLIENT_ID] client_secret = parameters[CLIENT_SECRET] @@ -281,7 +281,7 @@ local function issue_token(conf) response_params = {[ERROR] = "unsupported_grant_type", error_description = "Invalid " .. 
GRANT_TYPE} end - local client_id, client_secret, from_authorization_header = retrieve_client_credentials(parameters) + local client_id, client_secret, from_authorization_header = retrieve_client_credentials(parameters, conf) -- Check client_id and redirect_uri local allowed_redirect_uris, client = get_redirect_uri(client_id) @@ -415,7 +415,7 @@ local function parse_access_token(conf) local found_in = {} local result = retrieve_parameters()["access_token"] if not result then - local authorization = ngx.req.get_headers()["authorization"] + local authorization = ngx.req.get_headers()[conf.auth_header_name] if authorization then local parts = {} for v in authorization:gmatch("%S+") do -- Split by space @@ -430,7 +430,7 @@ local function parse_access_token(conf) if conf.hide_credentials then if found_in.authorization_header then - ngx.req.clear_header("authorization") + ngx.req.clear_header(conf.auth_header_name) else -- Remove from querystring local parameters = ngx.req.get_uri_args() diff --git a/kong/plugins/oauth2/migrations/cassandra.lua b/kong/plugins/oauth2/migrations/cassandra.lua index 1f81bf82337c..4aa11ec9f222 100644 --- a/kong/plugins/oauth2/migrations/cassandra.lua +++ b/kong/plugins/oauth2/migrations/cassandra.lua @@ -1,3 +1,5 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator + return { { name = "2015-08-03-132400_init_oauth2", @@ -151,5 +153,23 @@ return { end end end - } + }, + { + name = "2017-10-19-set_auth_header_name_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "oauth2") do + if not ok then + return config + end + if config.auth_header_name == nil then + config.auth_header_name = "authorization" + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, } diff --git a/kong/plugins/oauth2/migrations/postgres.lua b/kong/plugins/oauth2/migrations/postgres.lua index cd4aa25cc107..220b37bf97b1 100644 --- a/kong/plugins/oauth2/migrations/postgres.lua +++ b/kong/plugins/oauth2/migrations/postgres.lua @@ -1,3 +1,5 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator + return { { name = "2015-08-03-132400_init_oauth2", @@ -164,5 +166,23 @@ return { down = [[ ALTER TABLE oauth2_credentials ADD CONSTRAINT oauth2_credentials_client_secret_key UNIQUE(client_secret); ]], - } + }, + { + name = "2017-10-19-set_auth_header_name_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "oauth2") do + if not ok then + return config + end + if config.auth_header_name == nil then + config.auth_header_name = "authorization" + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, } diff --git a/kong/plugins/oauth2/schema.lua b/kong/plugins/oauth2/schema.lua index b0baaa71d8d6..51f317c550c6 100644 --- a/kong/plugins/oauth2/schema.lua +++ b/kong/plugins/oauth2/schema.lua @@ -31,6 +31,7 @@ return { accept_http_if_already_terminated = { required = false, type = "boolean", default = false }, anonymous = {type = "string", default = "", func = check_user}, global_credentials = {type = "boolean", default = false}, + auth_header_name = {required = false, type = "string", default = "authorization"}, }, self_check = function(schema, plugin_t, dao, is_update) if not plugin_t.enable_authorization_code and not plugin_t.enable_implicit_grant diff --git 
a/spec/03-plugins/26-oauth2/01-schema_spec.lua b/spec/03-plugins/26-oauth2/01-schema_spec.lua index 867b4b253a8c..41eeb9e604f4 100644 --- a/spec/03-plugins/26-oauth2/01-schema_spec.lua +++ b/spec/03-plugins/26-oauth2/01-schema_spec.lua @@ -28,7 +28,25 @@ describe("Plugin: oauth2 (schema)", function() assert.truthy(t.provision_key) assert.equal("hello", t.provision_key) end) - + it("sets default `auth_header_name` when not given", function() + local t = {enable_authorization_code = true, mandatory_scope = true, scopes = {"email", "info"}} + local ok, errors = validate_entity(t, oauth2_schema) + assert.True(ok) + assert.is_nil(errors) + assert.truthy(t.provision_key) + assert.equal(32, t.provision_key:len()) + assert.equal("authorization", t.auth_header_name) + end) + it("does not set default value for `auth_header_name` when it is given", function() + local t = {enable_authorization_code = true, mandatory_scope = true, scopes = {"email", "info"}, provision_key = "hello", + auth_header_name="custom_header_name"} + local ok, errors = validate_entity(t, oauth2_schema) + assert.True(ok) + assert.is_nil(errors) + assert.truthy(t.provision_key) + assert.equal("hello", t.provision_key) + assert.equal("custom_header_name", t.auth_header_name) + end) describe("errors", function() it("requires at least one flow", function() local ok, _, err = validate_entity({}, oauth2_schema) diff --git a/spec/03-plugins/26-oauth2/03-access_spec.lua b/spec/03-plugins/26-oauth2/03-access_spec.lua index 9b75d6e71ee4..262dcd65d945 100644 --- a/spec/03-plugins/26-oauth2/03-access_spec.lua +++ b/spec/03-plugins/26-oauth2/03-access_spec.lua @@ -96,6 +96,13 @@ describe("Plugin: oauth2 (access)", function() name = "testapp3", consumer_id = consumer.id }) + assert(helpers.dao.oauth2_credentials:insert { + client_id = "clientid1011", + client_secret = "secret1011", + redirect_uri = "http://google.com/kong", + name = "testapp31", + consumer_id = consumer.id + }) local api1 = assert(helpers.dao.apis:insert { name = "api-1", @@ -302,7 +309,45 @@ describe("Plugin: oauth2 (access)", function() anonymous = utils.uuid(), -- a non existing consumer }, }) - + local api11 = assert(helpers.dao.apis:insert { + name = "oauth2_11.com", + hosts = { "oauth2_11.com" }, + upstream_url = helpers.mock_upstream_url, + }) + assert(helpers.dao.plugins:insert { + name = "oauth2", + api_id = api11.id, + config = { + scopes = { "email", "profile", "user.email" }, + enable_authorization_code = true, + mandatory_scope = true, + provision_key = "provision123", + token_expiration = 7, + enable_implicit_grant = true, + global_credentials = true, + auth_header_name = "custom_header_name", + }, + }) + local api12 = assert(helpers.dao.apis:insert { + name = "oauth2_12.com", + hosts = { "oauth2_12.com" }, + upstream_url = helpers.mock_upstream_url, + }) + assert(helpers.dao.plugins:insert { + name = "oauth2", + api_id = api12.id, + config = { + scopes = { "email", "profile", "user.email" }, + enable_authorization_code = true, + mandatory_scope = true, + provision_key = "provision123", + token_expiration = 7, + enable_implicit_grant = true, + global_credentials = true, + auth_header_name = "custom_header_name", + hide_credentials = true, + }, + }) assert(helpers.start_kong({ trusted_ips = "127.0.0.1", nginx_conf = "spec/fixtures/custom_nginx.template", @@ -805,6 +850,35 @@ describe("Plugin: oauth2 (access)", function() assert.are.equal(5, data[1].expires_in) assert.falsy(data[1].refresh_token) end) + it("returns success and the token should have the right 
expiration when a custom header is passed", function() + local res = assert(proxy_ssl_client:send { + method = "POST", + path = "/oauth2/authorize", + body = { + provision_key = "provision123", + authenticated_userid = "id123", + client_id = "clientid1011", + scope = "email", + response_type = "token" + }, + headers = { + ["Host"] = "oauth2_11.com", + ["Content-Type"] = "application/json" + } + }) + local body = cjson.decode(assert.res_status(200, res)) + assert.is_table(ngx.re.match(body.redirect_uri, "^http://google\\.com/kong\\#access_token=[\\w]{32,32}&expires_in=[\\d]+&token_type=bearer$")) + + local iterator, err = ngx.re.gmatch(body.redirect_uri, "^http://google\\.com/kong\\#access_token=([\\w]{32,32})&expires_in=[\\d]+&token_type=bearer$") + assert.is_nil(err) + local m, err = iterator() + assert.is_nil(err) + local data = helpers.dao.oauth2_tokens:find_all {access_token = m[1]} + assert.are.equal(1, #data) + assert.are.equal(m[1], data[1].access_token) + assert.are.equal(7, data[1].expires_in) + assert.falsy(data[1].refresh_token) + end) it("returns success and store authenticated user properties", function() local res = assert(proxy_ssl_client:send { method = "POST", @@ -1695,6 +1769,32 @@ describe("Plugin: oauth2 (access)", function() }) assert.res_status(200, res) end) + it("work when a correct access_token is being sent in the custom header", function() + local token = provision_token("oauth2_11.com",nil,"clientid1011","secret1011") + + local res = assert(proxy_ssl_client:send { + method = "GET", + path = "/request", + headers = { + ["Host"] = "oauth2_11.com", + ["custom_header_name"] = "bearer " .. token.access_token, + } + }) + assert.res_status(200, res) + end) + it("fail when a correct access_token is being sent in the wrong header", function() + local token = provision_token("oauth2_11.com",nil,"clientid1011","secret1011") + + local res = assert(proxy_ssl_client:send { + method = "GET", + path = "/request", + headers = { + ["Host"] = "oauth2_11.com", + ["authorization"] = "bearer " .. token.access_token, + } + }) + assert.res_status(401, res) + end) it("does not work when requesting a different API", function() local token = provision_token() @@ -2161,6 +2261,19 @@ describe("Plugin: oauth2 (access)", function() local body = cjson.decode(assert.res_status(200, res)) assert.is_nil(body.uri_args.access_token) end) + it("hides credentials in the querystring for api with custom header", function() + local token = provision_token("oauth2_12.com",nil,"clientid1011","secret1011") + + local res = assert(proxy_client:send { + method = "GET", + path = "/request?access_token=" .. token.access_token, + headers = { + ["Host"] = "oauth2_12.com" + } + }) + local body = cjson.decode(assert.res_status(200, res)) + assert.is_nil(body.uri_args.access_token) + end) it("does not hide credentials in the header", function() local token = provision_token() @@ -2189,6 +2302,21 @@ describe("Plugin: oauth2 (access)", function() local body = cjson.decode(assert.res_status(200, res)) assert.is_nil(body.headers.authorization) end) + it("hides credentials in the custom header", function() + local token = provision_token("oauth2_12.com",nil,"clientid1011","secret1011") + + local res = assert(proxy_client:send { + method = "GET", + path = "/request", + headers = { + ["Host"] = "oauth2_12.com", + ["custom_header_name"] = "bearer " .. 
token.access_token + } + }) + local body = cjson.decode(assert.res_status(200, res)) + assert.is_nil(body.headers.authorization) + assert.is_nil(body.headers.custom_header_name) + end) it("does not abort when the request body is a multipart form upload", function() local token = provision_token("oauth2_3.com") From f63be18bf7349e2c0ccf083cccb825f5969b0d91 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Mon, 23 Oct 2017 15:23:54 -0400 Subject: [PATCH 13/74] hotfix(ldap) add migration adding field header_type Adds a migration using the plugin_config_iterator helper. Signed-off-by: Thibault Charbonnier --- kong-0.11.2-0.rockspec | 2 ++ .../ldap-auth/migrations/cassandra.lua | 23 +++++++++++++++++++ .../plugins/ldap-auth/migrations/postgres.lua | 23 +++++++++++++++++++ 3 files changed, 48 insertions(+) create mode 100644 kong/plugins/ldap-auth/migrations/cassandra.lua create mode 100644 kong/plugins/ldap-auth/migrations/postgres.lua diff --git a/kong-0.11.2-0.rockspec b/kong-0.11.2-0.rockspec index ecc457d36ca7..838fb0c8d49d 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.11.2-0.rockspec @@ -245,6 +245,8 @@ build = { ["kong.plugins.hmac-auth.api"] = "kong/plugins/hmac-auth/api.lua", ["kong.plugins.hmac-auth.daos"] = "kong/plugins/hmac-auth/daos.lua", + ["kong.plugins.ldap-auth.migrations.cassandra"] = "kong/plugins/ldap-auth/migrations/cassandra.lua", + ["kong.plugins.ldap-auth.migrations.postgres"] = "kong/plugins/ldap-auth/migrations/postgres.lua", ["kong.plugins.ldap-auth.handler"] = "kong/plugins/ldap-auth/handler.lua", ["kong.plugins.ldap-auth.access"] = "kong/plugins/ldap-auth/access.lua", ["kong.plugins.ldap-auth.schema"] = "kong/plugins/ldap-auth/schema.lua", diff --git a/kong/plugins/ldap-auth/migrations/cassandra.lua b/kong/plugins/ldap-auth/migrations/cassandra.lua new file mode 100644 index 000000000000..88440364e9df --- /dev/null +++ b/kong/plugins/ldap-auth/migrations/cassandra.lua @@ -0,0 +1,23 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator +local schema = require("kong.plugins.ldap-auth.schema") + +return { + { + name = "2017-10-23-150900_header_type_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "ldap-auth") do + if not ok then + return config + end + if config.header_type == nil then + config.header_type = schema.fields.header_type.default + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, +} diff --git a/kong/plugins/ldap-auth/migrations/postgres.lua b/kong/plugins/ldap-auth/migrations/postgres.lua new file mode 100644 index 000000000000..88440364e9df --- /dev/null +++ b/kong/plugins/ldap-auth/migrations/postgres.lua @@ -0,0 +1,23 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator +local schema = require("kong.plugins.ldap-auth.schema") + +return { + { + name = "2017-10-23-150900_header_type_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "ldap-auth") do + if not ok then + return config + end + if config.header_type == nil then + config.header_type = schema.fields.header_type.default + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, +} From 3867784333b1e5cc1418fce2d802eff5700057e4 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Mon, 23 Oct 2017 14:01:03 -0400 Subject: [PATCH 14/74] hotfix(ldap) anchor 
search to avoid bad substring match

Make sure we don't match "invalidldap" when searching for "ldap".

Prior to #2963, this plugin performed an incorrect pattern match,
leading it to accept invalid type strings as long as they ended with
the letters in "LDAP".

This adds a regression test for that situation.

Signed-off-by: Thibault Charbonnier
---
 kong/plugins/ldap-auth/access.lua               |  2 +-
 spec/03-plugins/21-ldap-auth/01-access_spec.lua | 13 +++++++++++++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/kong/plugins/ldap-auth/access.lua b/kong/plugins/ldap-auth/access.lua
index 802342a99205..79e652116400 100644
--- a/kong/plugins/ldap-auth/access.lua
+++ b/kong/plugins/ldap-auth/access.lua
@@ -24,7 +24,7 @@ local _M = {}
 local function retrieve_credentials(authorization_header_value, conf)
   local username, password
   if authorization_header_value then
-    local s, e = find(lower(authorization_header_value), "%s*" .. lower(conf.header_type) .. "%s+")
+    local s, e = find(lower(authorization_header_value), "^%s*" .. lower(conf.header_type) .. "%s+")
     if s == 1 then
       local cred = sub(authorization_header_value, e + 1)
       local decoded_cred = decode_base64(cred)
diff --git a/spec/03-plugins/21-ldap-auth/01-access_spec.lua b/spec/03-plugins/21-ldap-auth/01-access_spec.lua
index b669361ab1dd..dbc1b0b7319c 100644
--- a/spec/03-plugins/21-ldap-auth/01-access_spec.lua
+++ b/spec/03-plugins/21-ldap-auth/01-access_spec.lua
@@ -172,6 +172,19 @@ describe("Plugin: ldap-auth (access)", function()
       })
       assert.response(r).has.status(200)
     end)
+    it("fails if credential type is invalid in post request", function()
+      local r = assert(client:send {
+        method = "POST",
+        path = "/request",
+        body = {},
+        headers = {
+          host = "ldap.com",
+          authorization = "invalidldap " .. ngx.encode_base64("einstein:password"),
+          ["content-type"] = "application/x-www-form-urlencoded",
+        }
+      })
+      assert.response(r).has.status(403)
+    end)
     it("passes if credential is valid and starts with space in post request", function()
       local r = assert(client:send {
         method = "POST",

From a84faeef50562edbb9bbeb474c683ddb93d95678 Mon Sep 17 00:00:00 2001
From: Hisham Muhammad
Date: Mon, 23 Oct 2017 14:04:01 -0400
Subject: [PATCH 15/74] tests(ldap) adds tests for header_type functionality

Adds tests for the functionality introduced in PR #2963.
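For illustration, a minimal standalone sketch of the matching behavior
these tests pin down (it mirrors the anchored pattern from the previous
commit; the sample header values are hypothetical):

```lua
-- The "^" anchor ensures the credential type must appear at the very
-- start of the header value, so "invalidldap ..." does not match.
local find, lower = string.find, string.lower

local function type_matches(header_value, header_type)
  local s = find(lower(header_value), "^%s*" .. lower(header_type) .. "%s+")
  return s == 1
end

-- "ZWluc3RlaW46cGFzc3dvcmQ=" is base64 for "einstein:password"
print(type_matches("LDAP ZWluc3RlaW46cGFzc3dvcmQ=", "ldap"))        --> true
print(type_matches("invalidldap ZWluc3RlaW46cGFzc3dvcmQ=", "ldap")) --> false
```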
Signed-off-by: Thibault Charbonnier --- .../21-ldap-auth/01-access_spec.lua | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/spec/03-plugins/21-ldap-auth/01-access_spec.lua b/spec/03-plugins/21-ldap-auth/01-access_spec.lua index dbc1b0b7319c..294d9cf1be48 100644 --- a/spec/03-plugins/21-ldap-auth/01-access_spec.lua +++ b/spec/03-plugins/21-ldap-auth/01-access_spec.lua @@ -32,6 +32,11 @@ describe("Plugin: ldap-auth (access)", function() hosts = { "ldap4.com" }, upstream_url = helpers.mock_upstream_url, }) + local api5 = assert(helpers.dao.apis:insert { + name = "test-ldap5", + hosts = { "ldap5.com" }, + upstream_url = helpers.mock_upstream_url, + }) local anonymous_user = assert(helpers.dao.consumers:insert { username = "no-body" @@ -87,6 +92,18 @@ describe("Plugin: ldap-auth (access)", function() anonymous = utils.uuid(), -- non existing consumer } }) + assert(helpers.dao.plugins:insert { + api_id = api5.id, + name = "ldap-auth", + config = { + ldap_host = ldap_host_aws, + ldap_port = "389", + start_tls = false, + base_dn = "ou=scientists,dc=ldap,dc=mashape,dc=com", + attribute = "uid", + header_type = "basic", + } + }) assert(helpers.start_kong({ nginx_conf = "spec/fixtures/custom_nginx.template", @@ -279,6 +296,32 @@ describe("Plugin: ldap-auth (access)", function() assert.response(r).has.status(200) assert.request(r).has.no.header("authorization") end) + it("passes if custom credential type is given in post request", function() + local r = assert(client:send { + method = "POST", + path = "/request", + body = {}, + headers = { + host = "ldap5.com", + authorization = "basic " .. ngx.encode_base64("einstein:password"), + ["content-type"] = "application/x-www-form-urlencoded", + } + }) + assert.response(r).has.status(200) + end) + it("fails if custom credential type is invalid in post request", function() + local r = assert(client:send { + method = "POST", + path = "/request", + body = {}, + headers = { + host = "ldap5.com", + authorization = "invalidldap " .. ngx.encode_base64("einstein:password"), + ["content-type"] = "application/x-www-form-urlencoded", + } + }) + assert.response(r).has.status(403) + end) it("caches LDAP Auth Credential", function() local r = assert(client:send { method = "GET", From 9be362177ba4354d357aee57347983e32e1b8e54 Mon Sep 17 00:00:00 2001 From: Mark van Holsteijn Date: Thu, 19 Oct 2017 12:56:20 +0000 Subject: [PATCH 16/74] feat(jwt) support passing JWT in cookies When the JWT plugin is configured with the property cookie_names, the plugin will get the JWT token from one of the named cookies. 
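As a usage illustration (a hypothetical sketch in the style of this
patch's test suite; the cookie name and the `api` record are
assumptions, not part of the patch):

```lua
-- Hypothetical sketch: a jwt plugin that also accepts tokens from a
-- cookie named "jwt_token", via the new cookie_names option.
assert(helpers.dao.plugins:insert {
  name   = "jwt",
  api_id = api.id,  -- assumes an existing API record
  config = { cookie_names = { "jwt_token" } },
})
```

In summary: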
* add `config.cookie_names` configuration option
* add migration for previous records of this plugin
* add integration test suite

From #2974
Fix #2911 #2894

Signed-off-by: Thibault Charbonnier
---
 kong/plugins/jwt/handler.lua              | 18 +++--
 kong/plugins/jwt/migrations/cassandra.lua | 18 +++++
 kong/plugins/jwt/migrations/postgres.lua  | 18 +++++
 kong/plugins/jwt/schema.lua               |  1 +
 spec/03-plugins/17-jwt/03-access_spec.lua | 84 ++++++++++++++++++++++-
 5 files changed, 134 insertions(+), 5 deletions(-)

diff --git a/kong/plugins/jwt/handler.lua b/kong/plugins/jwt/handler.lua
index 36f539b2cb1b..886c6089c1b8 100644
--- a/kong/plugins/jwt/handler.lua
+++ b/kong/plugins/jwt/handler.lua
@@ -3,11 +3,12 @@ local BasePlugin = require "kong.plugins.base_plugin"
 local responses = require "kong.tools.responses"
 local constants = require "kong.constants"
 local jwt_decoder = require "kong.plugins.jwt.jwt_parser"

-local string_format = string.format
-local ngx_re_gmatch = ngx.re.gmatch
+local ipairs = ipairs
+local string_format = string.format
+local ngx_re_gmatch = ngx.re.gmatch
 local ngx_set_header = ngx.req.set_header
-local get_method = ngx.req.get_method
+local get_method = ngx.req.get_method


 local JwtHandler = BasePlugin:extend()

@@ -15,7 +16,8 @@ JwtHandler.PRIORITY = 1005
 JwtHandler.VERSION = "0.1.0"

 --- Retrieve a JWT in a request.
--- Checks for the JWT in URI parameters, then in the `Authorization` header.
+-- Checks for the JWT in URI parameters, then in cookies, and finally
+-- in the `Authorization` header.
 -- @param request ngx request object
 -- @param conf Plugin configuration
 -- @return token JWT token contained in request (can be a table) or nil
@@ -29,6 +31,14 @@ local function retrieve_token(request, conf)
     end
   end

+  local ngx_var = ngx.var
+  for _, v in ipairs(conf.cookie_names) do
+    local jwt_cookie = ngx_var["cookie_" .. 
v] + if jwt_cookie and jwt_cookie ~= "" then + return jwt_cookie + end + end + local authorization_header = request.get_headers()["authorization"] if authorization_header then local iterator, iter_err = ngx_re_gmatch(authorization_header, "\\s*[Bb]earer\\s+(.+)") diff --git a/kong/plugins/jwt/migrations/cassandra.lua b/kong/plugins/jwt/migrations/cassandra.lua index 8216916a2975..e2aefc7703ab 100644 --- a/kong/plugins/jwt/migrations/cassandra.lua +++ b/kong/plugins/jwt/migrations/cassandra.lua @@ -50,4 +50,22 @@ return { end, down = function(_, _, dao) end -- not implemented }, + { + name = "2017-10-25-211200_jwt_cookie_names_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "jwt") do + if not ok then + return config + end + if config.cookie_names == nil then + config.cookie_names = {} + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, } diff --git a/kong/plugins/jwt/migrations/postgres.lua b/kong/plugins/jwt/migrations/postgres.lua index 63dedbd6f946..822db9fbc8bb 100644 --- a/kong/plugins/jwt/migrations/postgres.lua +++ b/kong/plugins/jwt/migrations/postgres.lua @@ -68,4 +68,22 @@ return { end, down = function(_, _, dao) end -- not implemented }, + { + name = "2017-10-25-211200_jwt_cookie_names_default", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "jwt") do + if not ok then + return config + end + if config.cookie_names == nil then + config.cookie_names = {} + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + }, } diff --git a/kong/plugins/jwt/schema.lua b/kong/plugins/jwt/schema.lua index c2facbc895ec..89611b78a2d6 100644 --- a/kong/plugins/jwt/schema.lua +++ b/kong/plugins/jwt/schema.lua @@ -12,6 +12,7 @@ return { no_consumer = true, fields = { uri_param_names = {type = "array", default = {"jwt"}}, + cookie_names = {type = "array", default = {}}, key_claim_name = {type = "string", default = "iss"}, secret_is_base64 = {type = "boolean", default = false}, claims_to_verify = {type = "array", enum = {"exp", "nbf"}}, diff --git a/spec/03-plugins/17-jwt/03-access_spec.lua b/spec/03-plugins/17-jwt/03-access_spec.lua index 646ab5582357..7b8c93ad3f9b 100644 --- a/spec/03-plugins/17-jwt/03-access_spec.lua +++ b/spec/03-plugins/17-jwt/03-access_spec.lua @@ -20,7 +20,7 @@ describe("Plugin: jwt (access)", function() local apis = {} - for i = 1, 8 do + for i = 1, 9 do apis[i] = assert(helpers.dao.apis:insert({ name = "tests-jwt" .. i, hosts = { "jwt" .. i .. ".com" }, @@ -69,6 +69,10 @@ describe("Plugin: jwt (access)", function() api_id = apis[8].id, config = { run_on_preflight = false }, })) + assert(pdao:insert({ name = "jwt", + api_id = apis[9].id, + config = { cookie_names = { "silly", "crumble" } }, + })) jwt_secret = assert(helpers.dao.jwt_secrets:insert {consumer_id = consumer1.id}) base64_jwt_secret = assert(helpers.dao.jwt_secrets:insert {consumer_id = consumer2.id}) @@ -277,6 +281,84 @@ describe("Plugin: jwt (access)", function() }) assert.res_status(200, res) end) + it("returns 200 the JWT is found in the cookie crumble", function() + PAYLOAD.iss = jwt_secret.key + local jwt = jwt_encoder.encode(PAYLOAD, jwt_secret.secret) + local res = assert(proxy_client:send { + method = "GET", + path = "/request", + headers = { + ["Host"] = "jwt9.com", + ["Cookie"] = "crumble=" .. jwt .. 
"; path=/;domain=.jwt9.com", + } + }) + assert.res_status(200, res) + end) + it("returns 200 if the JWT is found in the cookie silly", function() + PAYLOAD.iss = jwt_secret.key + local jwt = jwt_encoder.encode(PAYLOAD, jwt_secret.secret) + local res = assert(proxy_client:send { + method = "GET", + path = "/request", + headers = { + ["Host"] = "jwt9.com", + ["Cookie"] = "silly=" .. jwt .. "; path=/;domain=.jwt9.com", + } + }) + assert.res_status(200, res) + end) + it("returns 403 if the JWT found in the cookie does not match a credential", function() + PAYLOAD.iss = "incorrect-issuer" + local jwt = jwt_encoder.encode(PAYLOAD, jwt_secret.secret) + local res = assert(proxy_client:send { + method = "GET", + path = "/request", + headers = { + ["Host"] = "jwt9.com", + ["Cookie"] = "silly=" .. jwt .. "; path=/;domain=.jwt9.com", + } + }) + local body = assert.res_status(403, res) + local json = cjson.decode(body) + assert.same({ message = "No credentials found for given 'iss'" }, json) + end) + it("returns a 401 if the JWT in the cookie is corrupted", function() + PAYLOAD.iss = jwt_secret.key + local jwt = "no-way-this-works" .. jwt_encoder.encode(PAYLOAD, jwt_secret.secret) + local res = assert(proxy_client:send { + method = "GET", + path = "/request", + headers = { + ["Host"] = "jwt9.com", + ["Cookie"] = "silly=" .. jwt .. "; path=/;domain=.jwt9.com", + } + }) + local body = assert.res_status(401, res) + assert.equal([[{"message":"Bad token; invalid JSON"}]], body) + end) + it("reports a 200 without cookies but with a JWT token in the Authorization header", function() + PAYLOAD.iss = jwt_secret.key + local jwt = jwt_encoder.encode(PAYLOAD, jwt_secret.secret) + local res = assert(proxy_client:send { + method = "GET", + path = "/request", + headers = { + ["Host"] = "jwt9.com", + ["Authorization"] = "Bearer " .. jwt, + } + }) + assert.res_status(200, res) + end) + it("returns 401 if no JWT tokens are found in cookies or Authorization header", function() + local res = assert(proxy_client:send { + method = "GET", + path = "/request", + headers = { + ["Host"] = "jwt9.com", + } + }) + assert.res_status(401, res) + end) it("finds the JWT if given in a custom URL parameter", function() PAYLOAD.iss = jwt_secret.key local jwt = jwt_encoder.encode(PAYLOAD, jwt_secret.secret) From 50c02531c8c4196fe6d88addaba70d645766fd19 Mon Sep 17 00:00:00 2001 From: Thijs Schreijer Date: Mon, 30 Oct 2017 17:21:49 +0100 Subject: [PATCH 17/74] feat(balancer) implement consistent hashing One can define 2 keys to hash on, if the first key returns nil, the fallback is used (for example if a header is not present). The options for the keys are: 1. `none`: do not use a hash, just do (weighted) round-robin. This is the default. 2. `consumer`: use the consumer_id, or if not found the credential_id. (later is for external auth like oauth2 or ldap) 3. `ip`: use the originating ip address 4. `header`: use the header specified in the additional header option. If there are multiple headers by this name, then all entries are used (concatenated) The hash value is created from the resulting string-value, and is calculated as a numerical crc32. 
PR #2875 --- kong/core/balancer.lua | 82 +++++++-- kong/core/handler.lua | 1 + kong/dao/migrations/cassandra.lua | 39 ++++ kong/dao/migrations/postgres.lua | 39 ++++ kong/dao/schemas/upstreams.lua | 75 ++++++++ spec/01-unit/011-balancer_spec.lua | 116 +++++++++++- .../04-admin_api/07-upstreams_routes_spec.lua | 168 +++++++++++++++++- .../05-proxy/09-balancer_spec.lua | 95 ++++++++-- 8 files changed, 583 insertions(+), 32 deletions(-) diff --git a/kong/core/balancer.lua b/kong/core/balancer.lua index 47edf076bdf7..18f2dcb86f28 100644 --- a/kong/core/balancer.lua +++ b/kong/core/balancer.lua @@ -4,6 +4,8 @@ local singletons = require "kong.singletons" local dns_client = require "resty.dns.client" -- due to startup/require order, cannot use the one from 'singletons' here local ring_balancer = require "resty.dns.balancer" +local table_concat = table.concat +local crc32 = ngx.crc32_short local toip = dns_client.toip local log = ngx.log @@ -146,7 +148,7 @@ end -- looks up a balancer for the target. -- @param target the table with the target details --- @return balancer if found, or `false` if not found, or nil+error on error +-- @return balancer+upstream if found, `false` if not found, or nil+error on error local get_balancer = function(target) -- NOTE: only called upon first lookup, so `cache_only` limitations do not apply here local hostname = target.host @@ -233,10 +235,55 @@ local get_balancer = function(target) end end - return balancer + return balancer, upstream end +-- Calculates hash-value. +-- Will only be called once per request, on first try. +-- @param upstream the upstream enity +-- @return integer value or nil if there is no hash to calculate +local create_hash = function(upstream) + local hash_on = upstream.hash_on + if hash_on == "none" then + return -- not hashing, exit fast + end + + local ctx = ngx.ctx + local identifier + local header_field_name = "hash_on_header" + + for _ = 1,2 do + + if hash_on == "consumer" then + -- consumer, fallback to credential + identifier = (ctx.authenticated_consumer or EMPTY_T).id or + (ctx.authenticated_credential or EMPTY_T).id + + elseif hash_on == "ip" then + identifier = ngx.var.remote_addr + + elseif hash_on == "header" then + identifier = ngx.req.get_headers()[upstream[header_field_name]] + if type(identifier) == "table" then + identifier = table_concat(identifier) + end + end + + if identifier then + return crc32(identifier) + end + + -- we missed the first, so now try the fallback + hash_on = upstream.hash_fallback + header_field_name = "hash_fallback_header" + if hash_on == "none" then + return nil + end + end + -- nothing found, leave without a hash +end + --=========================================================== -- Main entry point when resolving --=========================================================== @@ -260,29 +307,38 @@ local function execute(target) -- when tries == 0 it runs before the `balancer` context (in the `access` context), -- when tries >= 2 then it performs a retry in the `balancer` context local dns_cache_only = target.try_count ~= 0 - local balancer + local balancer, upstream, hash_value if dns_cache_only then -- retry, so balancer is already set if there was one balancer = target.balancer else - local err -- first try, so try and find a matching balancer/upstream object - balancer, err = get_balancer(target) - if err then -- check on err, `nil` without `err` means we do dns resolution - return nil, err + balancer, upstream = get_balancer(target) + if balancer == nil then -- `false` means no balancer, `nil` 
is error + return nil, upstream end - -- store for retries - target.balancer = balancer + if balancer then + -- store for retries + target.balancer = balancer + + -- calculate hash-value + -- only add it if it doesn't exist, in case a plugin inserted one + hash_value = target.hash_value + if not hash_value then + hash_value = create_hash(upstream) + target.hash_value = hash_value + end + end end if balancer then -- have to invoke the ring-balancer - local hashValue = nil -- TODO: implement, nil does simple round-robin - - local ip, port, hostname = balancer:getPeer(hashValue, nil, dns_cache_only) + local ip, port, hostname = balancer:getPeer(hash_value, + target.try_count, + dns_cache_only) if not ip then if port == "No peers are available" then -- in this case a "503 service unavailable", others will be a 500. @@ -297,6 +353,7 @@ local function execute(target) target.ip = ip target.port = port target.hostname = hostname + target.hash_value = hash_value return true end @@ -326,4 +383,5 @@ return { _load_upstreams_dict_into_memory = load_upstreams_dict_into_memory, _load_upstream_into_memory = load_upstream_into_memory, _load_targets_into_memory = load_targets_into_memory, + _create_hash = create_hash, } diff --git a/kong/core/handler.lua b/kong/core/handler.lua index 1b932cd9afd1..b2658f3c59ad 100644 --- a/kong/core/handler.lua +++ b/kong/core/handler.lua @@ -312,6 +312,7 @@ return { -- ip = nil, -- final target IP address -- balancer = nil, -- the balancer object, in case of a balancer -- hostname = nil, -- the hostname belonging to the final target IP + -- hash_value = nil, -- balancer hash (integer) } ctx.api = api diff --git a/kong/dao/migrations/cassandra.lua b/kong/dao/migrations/cassandra.lua index ee5858e2247d..25d3ff2c0eab 100644 --- a/kong/dao/migrations/cassandra.lua +++ b/kong/dao/migrations/cassandra.lua @@ -481,4 +481,43 @@ return { ]], down = function(_, _, dao) end -- not implemented }, + { + name = "2017-10-27-134100_consistent_hashing_1", + up = [[ + ALTER TABLE upstreams ADD hash_on text; + ALTER TABLE upstreams ADD hash_fallback text; + ALTER TABLE upstreams ADD hash_on_header text; + ALTER TABLE upstreams ADD hash_fallback_header text; + ]], + down = [[ + ALTER TABLE upstreams DROP hash_on; + ALTER TABLE upstreams DROP hash_fallback; + ALTER TABLE upstreams DROP hash_on_header; + ALTER TABLE upstreams DROP hash_fallback_header; + ]] + }, + { + name = "2017-10-27-134100_consistent_hashing_2", + up = function(_, _, dao) + local rows, err = dao.db:query([[ + SELECT * FROM upstreams; + ]]) + if err then + return err + end + + for _, row in ipairs(rows) do + if not row.hash_on or not row.hash_fallback then + row.hash_on = "none" + row.hash_fallback = "none" +-- row.created_at = nil + local _, err = dao.upstreams:update(row, { id = row.id }) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- n.a. 
since the columns will be dropped + }, } diff --git a/kong/dao/migrations/postgres.lua b/kong/dao/migrations/postgres.lua index 2d721b970bfd..8537c4bd42d7 100644 --- a/kong/dao/migrations/postgres.lua +++ b/kong/dao/migrations/postgres.lua @@ -542,4 +542,43 @@ return { ALTER TABLE apis ALTER COLUMN created_at SET DEFAULT CURRENT_TIMESTAMP(0); ]] }, + { + name = "2017-10-27-134100_consistent_hashing_1", + up = [[ + ALTER TABLE upstreams ADD hash_on text; + ALTER TABLE upstreams ADD hash_fallback text; + ALTER TABLE upstreams ADD hash_on_header text; + ALTER TABLE upstreams ADD hash_fallback_header text; + ]], + down = [[ + ALTER TABLE upstreams DROP COLUMN IF EXISTS hash_on; + ALTER TABLE upstreams DROP COLUMN IF EXISTS hash_fallback; + ALTER TABLE upstreams DROP COLUMN IF EXISTS hash_on_header; + ALTER TABLE upstreams DROP COLUMN IF EXISTS hash_fallback_header; + ]] + }, + { + name = "2017-10-27-134100_consistent_hashing_2", + up = function(_, _, dao) + local rows, err = dao.db:query([[ + SELECT * FROM upstreams; + ]]) + if err then + return err + end + + for _, row in ipairs(rows) do + if not row.hash_on or not row.hash_fallback then + row.hash_on = "none" + row.hash_fallback = "none" + row.created_at = nil + local _, err = dao.upstreams:update(row, { id = row.id }) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- n.a. since the columns will be dropped + }, } diff --git a/kong/dao/schemas/upstreams.lua b/kong/dao/schemas/upstreams.lua index 3c027193a08e..84d9a5c790a9 100644 --- a/kong/dao/schemas/upstreams.lua +++ b/kong/dao/schemas/upstreams.lua @@ -26,6 +26,36 @@ return { unique = true, required = true, }, + hash_on = { + -- primary hash-key + type = "string", + default = "none", + enum = { + "none", + "consumer", + "ip", + "header", + }, + }, + hash_fallback = { + -- secondary key, if primary fails + type = "string", + default = "none", + enum = { + "none", + "consumer", + "ip", + "header", + }, + }, + hash_on_header = { + -- header name, if `hash_on == "header"` + type = "string", + }, + hash_fallback_header = { + -- header name, if `hash_fallback == "header"` + type = "string", + }, slots = { -- the number of slots in the loadbalancer algorithm type = "number", @@ -46,6 +76,51 @@ return { return false, Errors.schema("Invalid name; no port allowed") end + if config.hash_on_header then + local ok, err = utils.validate_header_name(config.hash_on_header) + if not ok then + return false, Errors.schema("Header: " .. err) + end + end + + if config.hash_fallback_header then + local ok, err = utils.validate_header_name(config.hash_fallback_header) + if not ok then + return false, Errors.schema("Header: " .. err) + end + end + + if (config.hash_on == "header" + and not config.hash_on_header) or + (config.hash_fallback == "header" + and not config.hash_fallback_header) then + return false, Errors.schema("Hashing on 'header', " .. + "but no header name provided") + end + + if config.hash_on == "none" then + if config.hash_fallback ~= "none" then + return false, Errors.schema("Cannot set fallback if primary " .. + "'hash_on' is not set") + end + + else + if config.hash_on == config.hash_fallback then + if config.hash_on ~= "header" then + return false, Errors.schema("Cannot set fallback and primary " .. 
+ "hashes to the same value") + + else + local upper_hash_on = config.hash_on_header:upper() + local upper_hash_fallback = config.hash_fallback_header:upper() + if upper_hash_on == upper_hash_fallback then + return false, Errors.schema("Cannot set fallback and primary ".. + "hashes to the same value") + end + end + end + end + -- check the slots number if config.slots < SLOTS_MIN or config.slots > SLOTS_MAX then return false, Errors.schema(SLOTS_MSG) diff --git a/spec/01-unit/011-balancer_spec.lua b/spec/01-unit/011-balancer_spec.lua index 4cd6aa7e98c0..b39eca0d6622 100644 --- a/spec/01-unit/011-balancer_spec.lua +++ b/spec/01-unit/011-balancer_spec.lua @@ -2,7 +2,8 @@ describe("Balancer", function() local singletons, balancer local UPSTREAMS_FIXTURES local TARGETS_FIXTURES - --local uuid = require("kong.tools.utils").uuid + local crc32 = ngx.crc32_short + local uuid = require("kong.tools.utils").uuid setup(function() @@ -111,4 +112,117 @@ describe("Balancer", function() assert(targets[4].id == "a1") end) end) + + describe("creating hash values", function() + local headers + local backup + before_each(function() + headers = setmetatable({}, { + __newindex = function(self, key, value) + rawset(self, key:upper(), value) + end, + __index = function(self, key) + return rawget(self, key:upper()) + end, + }) + backup = { ngx.req, ngx.var, ngx.ctx } + ngx.req = { get_headers = function() return headers end } + ngx.var = {} + ngx.ctx = {} + end) + after_each(function() + ngx.req = backup[1] + ngx.var = backup[2] + ngx.ctx = backup[3] + end) + it("none", function() + local hash = balancer._create_hash({ + hash_on = "none", + }) + assert.is_nil(hash) + end) + it("consumer", function() + local value = uuid() + ngx.ctx.authenticated_consumer = { id = value } + local hash = balancer._create_hash({ + hash_on = "consumer", + }) + assert.are.same(crc32(value), hash) + end) + it("ip", function() + local value = "1.2.3.4" + ngx.var.remote_addr = value + local hash = balancer._create_hash({ + hash_on = "ip", + }) + assert.are.same(crc32(value), hash) + end) + it("header", function() + local value = "some header value" + headers.HeaderName = value + local hash = balancer._create_hash({ + hash_on = "header", + hash_on_header = "HeaderName", + }) + assert.are.same(crc32(value), hash) + end) + it("multi-header", function() + local value = { "some header value", "another value" } + headers.HeaderName = value + local hash = balancer._create_hash({ + hash_on = "header", + hash_on_header = "HeaderName", + }) + assert.are.same(crc32(table.concat(value)), hash) + end) + describe("fallback", function() + it("none", function() + local hash = balancer._create_hash({ + hash_on = "consumer", + hash_fallback = "none", + }) + assert.is_nil(hash) + end) + it("consumer", function() + local value = uuid() + ngx.ctx.authenticated_consumer = { id = value } + local hash = balancer._create_hash({ + hash_on = "header", + hash_on_header = "non-existing", + hash_fallback = "consumer", + }) + assert.are.same(crc32(value), hash) + end) + it("ip", function() + local value = "1.2.3.4" + ngx.var.remote_addr = value + local hash = balancer._create_hash({ + hash_on = "consumer", + hash_fallback = "ip", + }) + assert.are.same(crc32(value), hash) + end) + it("header", function() + local value = "some header value" + headers.HeaderName = value + local hash = balancer._create_hash({ + hash_on = "consumer", + hash_fallback = "header", + hash_fallback_header = "HeaderName", + }) + assert.are.same(crc32(value), hash) + end) + it("multi-header", 
function() + local value = { "some header value", "another value" } + headers.HeaderName = value + local hash = balancer._create_hash({ + hash_on = "consumer", + hash_fallback = "header", + hash_fallback_header = "HeaderName", + }) + assert.are.same(crc32(table.concat(value)), hash) + end) + end) + end) + end) diff --git a/spec/02-integration/04-admin_api/07-upstreams_routes_spec.lua b/spec/02-integration/04-admin_api/07-upstreams_routes_spec.lua index 37ac76557d1f..87326f6e7187 100644 --- a/spec/02-integration/04-admin_api/07-upstreams_routes_spec.lua +++ b/spec/02-integration/04-admin_api/07-upstreams_routes_spec.lua @@ -55,9 +55,13 @@ describe("Admin API: #" .. kong_config.database, function() assert.is_number(json.created_at) assert.is_string(json.id) assert.are.equal(slots_default, json.slots) + assert.are.equal("none", json.hash_on) + assert.are.equal("none", json.hash_fallback) + assert.is_nil(json.hash_on_header) + assert.is_nil(json.hash_fallback_header) end end) - it_content_types("creates an upstream without defaults with application/json", function(content_type) + it_content_types("creates an upstream without defaults", function(content_type) return function() local res = assert(client:send { method = "POST", @@ -65,6 +69,10 @@ describe("Admin API: #" .. kong_config.database, function() body = { name = "my.upstream", slots = 10, + hash_on = "consumer", + hash_fallback = "ip", + hash_on_header = "HeaderName", + hash_fallback_header = "HeaderFallback", }, headers = {["Content-Type"] = content_type} }) @@ -74,6 +82,37 @@ describe("Admin API: #" .. kong_config.database, function() assert.is_number(json.created_at) assert.is_string(json.id) assert.are.equal(10, json.slots) + assert.are.equal("consumer", json.hash_on) + assert.are.equal("ip", json.hash_fallback) + assert.are.equal("HeaderName", json.hash_on_header) + assert.are.equal("HeaderFallback", json.hash_fallback_header) + end + end) + it_content_types("creates an upstream with 2 header hashes", function(content_type) + return function() + local res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + slots = 10, + hash_on = "header", + hash_fallback = "header", + hash_on_header = "HeaderName1", + hash_fallback_header = "HeaderName2", + }, + headers = {["Content-Type"] = content_type} + }) + assert.response(res).has.status(201) + local json = assert.response(res).has.jsonbody() + assert.equal("my.upstream", json.name) + assert.is_number(json.created_at) + assert.is_string(json.id) + assert.are.equal(10, json.slots) + assert.are.equal("header", json.hash_on) + assert.are.equal("header", json.hash_fallback) + assert.are.equal("HeaderName1", json.hash_on_header) + assert.are.equal("HeaderName2", json.hash_fallback_header) end end) it_content_types("creates an upstream with " .. slots_max .. " slots", function(content_type) @@ -133,6 +172,7 @@ describe("Admin API: #" .. kong_config.database, function() body = assert.res_status(400, res) local json = cjson.decode(body) assert.same({ message = "Invalid name; must be a valid hostname" }, json) + -- Invalid slots parameter res = assert(client:send { method = "POST", @@ -146,6 +186,132 @@ describe("Admin API: #" .. 
kong_config.database, function() body = assert.res_status(400, res) local json = cjson.decode(body) assert.same({ message = "number of slots must be between 10 and 65536" }, json) + + -- Invalid hash_on entries + res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + hash_on = "something that is invalid", + }, + headers = {["Content-Type"] = content_type} + }) + body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ hash_on = '"something that is invalid" is not allowed. Allowed values are: "none", "consumer", "ip", "header"' }, json) + + -- Invalid hash_fallback entries + res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + hash_on = "consumer", + hash_fallback = "something that is invalid", + }, + headers = {["Content-Type"] = content_type} + }) + body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ hash_fallback = '"something that is invalid" is not allowed. Allowed values are: "none", "consumer", "ip", "header"' }, json) + + -- same hash entries + res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + hash_on = "consumer", + hash_fallback = "consumer", + }, + headers = {["Content-Type"] = content_type} + }) + body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "Cannot set fallback and primary hashes to the same value" }, json) + + -- Invalid header + res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + hash_on = "header", + hash_fallback = "consumer", + hash_on_header = "not a <> valid <> header name", + }, + headers = {["Content-Type"] = content_type} + }) + body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "Header: bad header name 'not a <> valid <> header name', allowed characters are A-Z, a-z, 0-9, '_', and '-'" }, json) + + -- Invalid header + res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + hash_on = "consumer", + hash_fallback = "header", + hash_fallback_header = "not a <> valid <> header name", + }, + headers = {["Content-Type"] = content_type} + }) + body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "Header: bad header name 'not a <> valid <> header name', allowed characters are A-Z, a-z, 0-9, '_', and '-'" }, json) + + -- Same headers + res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + hash_on = "header", + hash_fallback = "header", + hash_on_header = "headername", + hash_fallback_header = "HeaderName", --> validate case insensitivity + }, + headers = {["Content-Type"] = content_type} + }) + body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "Cannot set fallback and primary hashes to the same value" }, json) + + -- No headername provided + res = assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + hash_on = "header", + hash_fallback = "header", + hash_on_header = nil, -- not given + hash_fallback_header = "HeaderName", + }, + headers = {["Content-Type"] = content_type} + }) + body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "Hashing on 'header', but no header name provided" }, json) + + -- No fallback headername provided + res = 
assert(client:send { + method = "POST", + path = "/upstreams", + body = { + name = "my.upstream", + hash_on = "consumer", + hash_fallback = "header", + }, + headers = {["Content-Type"] = content_type} + }) + body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "Hashing on 'header', but no header name provided" }, json) + end end) it_content_types("returns 409 on conflict", function(content_type) diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index c38a4f083e08..d1ff571ec38a 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -10,10 +10,10 @@ local TEST_LOG = false -- extra verbose logging of test server -- modified http-server. Accepts (sequentially) a number of incoming -- connections, and returns the number of succesful ones. -- Also features a timeout setting. -local function http_server(timeout, count, port, ...) +local function http_server(timeout, count, port, no_timeout) local threads = require "llthreads2.ex" local thread = threads.new({ - function(timeout, count, port, TEST_LOG) + function(timeout, count, port, no_timeout, TEST_LOG) local function test_log(...) if not TEST_LOG then @@ -44,7 +44,11 @@ local function http_server(timeout, count, port, ...) if err == "timeout" then if socket.gettime() > expire then server:close() - error("timeout") + if no_timeout then + return success + else + error("timeout") + end end elseif not client then server:close() @@ -83,9 +87,9 @@ local function http_server(timeout, count, port, ...) test_log("test http server on port ", port, " closed") return success end - }, timeout, count, port, TEST_LOG) + }, timeout, count, port, no_timeout, TEST_LOG) - local server = thread:start(...) + local server = thread:start() ngx.sleep(0.2) -- attempt to make sure server is started for failing CI tests return server end @@ -112,28 +116,52 @@ dao_helpers.for_each_dao(function(kong_config) end) describe("Balancing", function() - local client, api_client, upstream, target1, target2 + local client, api_client, upstream1, upstream2, target1, target2 before_each(function() helpers.run_migrations() + -- insert an api with round-robin balancer assert(helpers.dao.apis:insert { name = "balancer.test", hosts = { "balancer.test" }, upstream_url = "http://service.xyz.v1/path", }) - upstream = assert(helpers.dao.upstreams:insert { + upstream1 = assert(helpers.dao.upstreams:insert { name = "service.xyz.v1", slots = 10, }) target1 = assert(helpers.dao.targets:insert { target = "127.0.0.1:" .. PORT, weight = 10, - upstream_id = upstream.id, + upstream_id = upstream1.id, }) target2 = assert(helpers.dao.targets:insert { target = "127.0.0.1:" .. (PORT+1), weight = 10, - upstream_id = upstream.id, + upstream_id = upstream1.id, + }) + + -- insert an api with consistent-hashing balancer + assert(helpers.dao.apis:insert { + name = "hashing.test", + hosts = { "hashing.test" }, + upstream_url = "http://service.hashing.v1/path", + }) + upstream2 = assert(helpers.dao.upstreams:insert { + name = "service.hashing.v1", + slots = 10, + hash_on = "header", + hash_on_header = "hashme", + }) + assert(helpers.dao.targets:insert { + target = "127.0.0.1:" .. PORT+2, + weight = 10, + upstream_id = upstream2.id, + }) + assert(helpers.dao.targets:insert { + target = "127.0.0.1:" .. 
(PORT+3), + weight = 10, + upstream_id = upstream2.id, }) -- insert additional api + upstream with no targets @@ -162,7 +190,7 @@ dao_helpers.for_each_dao(function(kong_config) it("over multiple targets", function() local timeout = 10 - local requests = upstream.slots * 2 -- go round the balancer twice + local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers local server1 = http_server(timeout, requests/2, PORT) @@ -188,9 +216,40 @@ dao_helpers.for_each_dao(function(kong_config) assert.are.equal(requests/2, count1) assert.are.equal(requests/2, count2) end) + it("over multiple targets, with hashing", function() + local timeout = 5 + local requests = upstream2.slots * 2 -- go round the balancer twice + + -- setup target servers + local server1 = http_server(timeout, requests, PORT+2, true) + local server2 = http_server(timeout, requests, PORT+3, true) + + -- Go hit them with our test requests + for _ = 1, requests do + local res = assert(client:send { + method = "GET", + path = "/", + headers = { + ["Host"] = "hashing.test", + ["hashme"] = "just a value", + } + }) + assert.response(res).has.status(200) + end + + -- collect server results; hitcount + -- one should get all the hits, the other 0, and hence a timeout + local _, count1 = server1:join() + local _, count2 = server2:join() + + -- verify, print a warning about the timeout error + assert(count1 == 0 or count1 == requests, "counts should either get a timeout-error or ALL hits") + assert(count2 == 0 or count2 == requests, "counts should either get a timeout-error or ALL hits") + assert(count1 + count2 == requests) + end) it("adding a target", function() local timeout = 10 - local requests = upstream.slots * 2 -- go round the balancer twice + local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers local server1 = http_server(timeout, requests/2, PORT) @@ -219,7 +278,7 @@ dao_helpers.for_each_dao(function(kong_config) -- add a new target 3 local res = assert(api_client:send { method = "POST", - path = "/upstreams/" .. upstream.name .. "/targets", + path = "/upstreams/" .. upstream1.name .. "/targets", headers = { ["Content-Type"] = "application/json" }, @@ -262,7 +321,7 @@ dao_helpers.for_each_dao(function(kong_config) end) it("removing a target", function() local timeout = 10 - local requests = upstream.slots * 2 -- go round the balancer twice + local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers local server1 = http_server(timeout, requests/2, PORT) @@ -291,7 +350,7 @@ dao_helpers.for_each_dao(function(kong_config) -- modify weight for target 2, set to 0 local res = assert(api_client:send { method = "POST", - path = "/upstreams/" .. upstream.name .. "/targets", + path = "/upstreams/" .. upstream1.name .. 
"/targets", headers = { ["Content-Type"] = "application/json" }, @@ -328,7 +387,7 @@ dao_helpers.for_each_dao(function(kong_config) end) it("modifying target weight", function() local timeout = 10 - local requests = upstream.slots * 2 -- go round the balancer twice + local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers local server1 = http_server(timeout, requests/2, PORT) @@ -397,7 +456,7 @@ dao_helpers.for_each_dao(function(kong_config) end) it("failure due to targets all 0 weight", function() local timeout = 10 - local requests = upstream.slots * 2 -- go round the balancer twice + local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers local server1 = http_server(timeout, requests/2, PORT) @@ -426,7 +485,7 @@ dao_helpers.for_each_dao(function(kong_config) -- modify weight for both targets, set to 0 local res = assert(api_client:send { method = "POST", - path = "/upstreams/" .. upstream.name .. "/targets", + path = "/upstreams/" .. upstream1.name .. "/targets", headers = { ["Content-Type"] = "application/json" }, @@ -439,7 +498,7 @@ dao_helpers.for_each_dao(function(kong_config) res = assert(api_client:send { method = "POST", - path = "/upstreams/" .. upstream.name .. "/targets", + path = "/upstreams/" .. upstream1.name .. "/targets", headers = { ["Content-Type"] = "application/json" }, From d24b76f854385c9668e9d1b4d160d634cf7eddfa Mon Sep 17 00:00:00 2001 From: shiprabehera Date: Tue, 29 Aug 2017 11:58:01 +0900 Subject: [PATCH 18/74] feat(log) improve uri fields in basic log serializer * add `request.upstream_uri` * rename `request.uri` to `request.request_uri` * rename `request.request_uri` to `request.request_url` From #2445 --- kong/plugins/log-serializers/basic.lua | 7 ++++--- spec/01-unit/012-log_serializer_spec.lua | 8 +++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/kong/plugins/log-serializers/basic.lua b/kong/plugins/log-serializers/basic.lua index 4409f9a670d1..811ab0ba9e93 100644 --- a/kong/plugins/log-serializers/basic.lua +++ b/kong/plugins/log-serializers/basic.lua @@ -15,8 +15,9 @@ function _M.serialize(ngx) return { request = { - uri = ngx.var.request_uri, - request_uri = ngx.var.scheme .. "://" .. ngx.var.host .. ":" .. ngx.var.server_port .. ngx.var.request_uri, + request_uri = ngx.var.request_uri, + upstream_uri = ngx.var.upstream_uri, + request_url = ngx.var.scheme .. "://" .. ngx.var.host .. ":" .. ngx.var.server_port .. 
ngx.var.request_uri,
       querystring = ngx.req.get_uri_args(), -- parameters, as a table
       method = ngx.req.get_method(), -- http method
       headers = ngx.req.get_headers(),
@@ -44,4 +45,4 @@ function _M.serialize(ngx)
   }
 end
 
-return _M
+return _M
\ No newline at end of file
diff --git a/spec/01-unit/012-log_serializer_spec.lua b/spec/01-unit/012-log_serializer_spec.lua
index a51c32867f02..534f83cb6e3f 100644
--- a/spec/01-unit/012-log_serializer_spec.lua
+++ b/spec/01-unit/012-log_serializer_spec.lua
@@ -17,6 +17,7 @@ describe("Log Serializer", function()
         },
         var = {
           request_uri = "/request_uri",
+          upstream_uri = "/upstream_uri",
           scheme = "http",
           host = "test.com",
           server_port = 80,
@@ -57,9 +58,10 @@ describe("Log Serializer", function()
       assert.same({"header1", "header2"}, res.request.headers)
       assert.equal("POST", res.request.method)
      assert.same({"arg1", "arg2"}, res.request.querystring)
-      assert.equal("http://test.com:80/request_uri", res.request.request_uri)
+      assert.equal("http://test.com:80/request_uri", res.request.request_url)
+      assert.equal("/upstream_uri", res.request.upstream_uri)
       assert.equal(200, res.request.size)
-      assert.equal("/request_uri", res.request.uri)
+      assert.equal("/request_uri", res.request.request_uri)
 
       -- Response
       assert.is_table(res.response)
@@ -145,4 +147,4 @@ describe("Log Serializer", function()
       assert.is_nil(res.tries)
     end)
   end)
-end)
+end)
\ No newline at end of file

From 769cc280b509d61f691f9c78f6eeb77f5fb9617a Mon Sep 17 00:00:00 2001
From: Bob Stalmach
Date: Thu, 2 Nov 2017 00:48:38 +0100
Subject: [PATCH 19/74] feat(oauth2) implement 'refresh_ttl' attribute for
 refresh_tokens

A `refresh_token` TTL used to be hard-coded to 14 days. This poses a
problem in scenarios where the token/refresh_token is only used
sporadically. This change adds a new config option `refresh_token_ttl`
that specifies a refresh token's TTL. If the value is `nil` or 0, the
token is kept forever.
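Purely as an illustration (not part of this patch), a plugin entry using
the new option could look like the following minimal sketch, modeled on
the test fixtures added further down; the `api` entity and the `helpers`
test module are assumed to already exist:

    -- hypothetical example: purge tokens (and their refresh tokens)
    -- one hour after creation instead of the former 14-day default
    assert(helpers.dao.plugins:insert {
      name   = "oauth2",
      api_id = api.id,
      config = {
        enable_authorization_code = true,
        mandatory_scope           = false,
        refresh_token_ttl         = 3600, -- in seconds; 0 keeps tokens forever
      }
    })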
Fix #2024 From #2942 --- kong/plugins/oauth2/access.lua | 9 +- kong/plugins/oauth2/migrations/cassandra.lua | 18 +++ kong/plugins/oauth2/migrations/postgres.lua | 18 +++ kong/plugins/oauth2/schema.lua | 1 + spec/03-plugins/26-oauth2/01-schema_spec.lua | 7 ++ spec/03-plugins/26-oauth2/03-access_spec.lua | 111 +++++++++++++++++++ 6 files changed, 162 insertions(+), 2 deletions(-) diff --git a/kong/plugins/oauth2/access.lua b/kong/plugins/oauth2/access.lua index 777cfd5f3a49..9a4b521d637a 100644 --- a/kong/plugins/oauth2/access.lua +++ b/kong/plugins/oauth2/access.lua @@ -44,6 +44,11 @@ local function generate_token(conf, api, credential, authenticated_userid, scope refresh_token = utils.random_string() end + local refresh_token_ttl + if conf.refresh_token_ttl and conf.refresh_token_ttl > 0 then + refresh_token_ttl = conf.refresh_token_ttl + end + local api_id if not conf.global_credentials then api_id = api.id @@ -55,8 +60,8 @@ local function generate_token(conf, api, credential, authenticated_userid, scope expires_in = token_expiration, refresh_token = refresh_token, scope = scope - }, {ttl = token_expiration > 0 and 1209600 or nil}) -- Access tokens (and their associated refresh token) are being - -- permanently deleted after 14 days (1209600 seconds) + }, {ttl = token_expiration > 0 and refresh_token_ttl or nil}) -- Access tokens (and their associated refresh token) are being + -- permanently deleted after 'refresh_token_ttl' seconds if err then return responses.send_HTTP_INTERNAL_SERVER_ERROR(err) diff --git a/kong/plugins/oauth2/migrations/cassandra.lua b/kong/plugins/oauth2/migrations/cassandra.lua index 4aa11ec9f222..1f3794158711 100644 --- a/kong/plugins/oauth2/migrations/cassandra.lua +++ b/kong/plugins/oauth2/migrations/cassandra.lua @@ -172,4 +172,22 @@ return { end, down = function(_, _, dao) end -- not implemented }, + { + name = "2017-10-11-oauth2_new_refresh_token_ttl_config_value", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "oauth2") do + if not ok then + return config + end + if config.refresh_token_ttl == nil then + config.refresh_token_ttl = 1209600 + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + } } diff --git a/kong/plugins/oauth2/migrations/postgres.lua b/kong/plugins/oauth2/migrations/postgres.lua index 220b37bf97b1..885e7d93130a 100644 --- a/kong/plugins/oauth2/migrations/postgres.lua +++ b/kong/plugins/oauth2/migrations/postgres.lua @@ -185,4 +185,22 @@ return { end, down = function(_, _, dao) end -- not implemented }, + { + name = "2017-10-11-oauth2_new_refresh_token_ttl_config_value", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "oauth2") do + if not ok then + return config + end + if config.refresh_token_ttl == nil then + config.refresh_token_ttl = 1209600 + local _, err = update(config) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end -- not implemented + } } diff --git a/kong/plugins/oauth2/schema.lua b/kong/plugins/oauth2/schema.lua index 51f317c550c6..7e35eba332a9 100644 --- a/kong/plugins/oauth2/schema.lua +++ b/kong/plugins/oauth2/schema.lua @@ -32,6 +32,7 @@ return { anonymous = {type = "string", default = "", func = check_user}, global_credentials = {type = "boolean", default = false}, auth_header_name = {required = false, type = "string", default = "authorization"}, + refresh_token_ttl = {required = true, type = "number", default = 1209600} -- original 
hardcoded value - 14 days }, self_check = function(schema, plugin_t, dao, is_update) if not plugin_t.enable_authorization_code and not plugin_t.enable_implicit_grant diff --git a/spec/03-plugins/26-oauth2/01-schema_spec.lua b/spec/03-plugins/26-oauth2/01-schema_spec.lua index 41eeb9e604f4..9ee294be5036 100644 --- a/spec/03-plugins/26-oauth2/01-schema_spec.lua +++ b/spec/03-plugins/26-oauth2/01-schema_spec.lua @@ -47,6 +47,13 @@ describe("Plugin: oauth2 (schema)", function() assert.equal("hello", t.provision_key) assert.equal("custom_header_name", t.auth_header_name) end) + it("sets refresh_token_ttl to default value if not set", function() + local t = {enable_authorization_code = true, mandatory_scope = false} + local ok, errors = validate_entity(t, oauth2_schema) + assert.True(ok) + assert.is_nil(errors) + assert.equal(1209600, t.refresh_token_ttl) + end) describe("errors", function() it("requires at least one flow", function() local ok, _, err = validate_entity({}, oauth2_schema) diff --git a/spec/03-plugins/26-oauth2/03-access_spec.lua b/spec/03-plugins/26-oauth2/03-access_spec.lua index 262dcd65d945..2fca131b0f86 100644 --- a/spec/03-plugins/26-oauth2/03-access_spec.lua +++ b/spec/03-plugins/26-oauth2/03-access_spec.lua @@ -1,6 +1,8 @@ local cjson = require "cjson" local helpers = require "spec.helpers" local utils = require "kong.tools.utils" +local fmt = string.format +local dao_helpers = require "spec.02-integration.03-dao.helpers" local function provision_code(host, extra_headers, client_id) local request_client = helpers.proxy_ssl_client() @@ -2559,3 +2561,112 @@ describe("Plugin: oauth2 (access)", function() end) end) + +dao_helpers.for_each_dao(function(kong_config) + describe("Plugin: oauth2 (ttl) with #"..kong_config.database, function() + + local client + + setup(function() + + local api11 = assert(helpers.dao.apis:insert { + name = "api-11", + hosts = { "oauth2_11.com" }, + upstream_url = "http://mockbin.com" + }) + + assert(helpers.dao.plugins:insert { + name = "oauth2", + api_id = api11.id, + config = { + enable_authorization_code = true, + mandatory_scope = false, + provision_key = "provision123", + anonymous = "", + global_credentials = false, + refresh_token_ttl = 2 + } + }) + + local api12 = assert(helpers.dao.apis:insert { + name = "api-12", + hosts = { "oauth2_12.com" }, + upstream_url = "http://mockbin.com" + }) + + assert(helpers.dao.plugins:insert { + name = "oauth2", + api_id = api12.id, + config = { + enable_authorization_code = true, + mandatory_scope = false, + provision_key = "provision123", + anonymous = "", + global_credentials = false, + refresh_token_ttl = 0 + } + }) + + local consumer = assert(helpers.dao.consumers:insert { + username = "bob" + }) + assert(helpers.dao.oauth2_credentials:insert { + client_id = "clientid123", + client_secret = "secret123", + redirect_uri = "http://google.com/kong", + name = "testapp", + consumer_id = consumer.id + }) + assert(helpers.start_kong()) + client = helpers.proxy_client() + end) + + teardown(function() + if client then client:close() end + helpers.stop_kong() + end) + + local function assert_ttls_records_for_token(uuid, count) + local DB = require "kong.dao.db.postgres" + local _db = DB.new(kong_config) + local query = fmt("SELECT COUNT(*) FROM ttls where table_name='oauth2_tokens' AND primary_uuid_value = '%s'", tostring(uuid)) + local result, error = _db:query(query) + assert.falsy(error) + assert.truthy(result[1].count == count) + end + + describe("refresh token", function() + it("is deleted after defined TTL", 
function()
+        local token = provision_token("oauth2_11.com")
+        local token_entity = helpers.dao.oauth2_tokens:find_all { access_token = token.access_token }
+        assert.equal(1, #token_entity)
+
+        if kong_config.database == "postgres" then
+          assert_ttls_records_for_token(token_entity[1].id, 1)
+        end
+
+        ngx.sleep(3)
+
+        token_entity = helpers.dao.oauth2_tokens:find_all { access_token = token.access_token }
+        assert.equal(0, #token_entity)
+      end)
+
+      it("is not deleted when TTL is 0 == never", function()
+        local token = provision_token("oauth2_12.com")
+        local token_entity = helpers.dao.oauth2_tokens:find_all { access_token = token.access_token }
+        assert.equal(1, #token_entity)
+
+        if kong_config.database == "postgres" then
+          assert_ttls_records_for_token(token_entity[1].id, 0)
+        end
+
+        ngx.sleep(3)
+
+        token_entity = helpers.dao.oauth2_tokens:find_all { access_token = token.access_token }
+        assert.equal(1, #token_entity)
+      end)
+    end)
+
+  end)
+
+end)
\ No newline at end of file

From 382bb41a4b860c6f03213662a61589d5359755db Mon Sep 17 00:00:00 2001
From: Robert Paprocki
Date: Fri, 1 Dec 2017 17:56:56 -0800
Subject: [PATCH 20/74] chore(deps) bump OpenResty compat up to 1.13.6.1

---
 .travis.yml   | 2 +-
 kong/meta.lua | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 00e6e46c7315..bc894a803f4a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -23,7 +23,7 @@ env:
   - OPENSSL=1.0.2n
   - CASSANDRA=2.2.8
   - OPENRESTY_BASE=1.11.2.4
-  - OPENRESTY_LATEST=1.11.2.4
+  - OPENRESTY_LATEST=1.13.6.1
   - OPENRESTY=$OPENRESTY_BASE
   - DOWNLOAD_CACHE=$HOME/download-cache
   - INSTALL_CACHE=$HOME/install-cache
diff --git a/kong/meta.lua b/kong/meta.lua
index 494e854f5c10..870e5a6c9c84 100644
--- a/kong/meta.lua
+++ b/kong/meta.lua
@@ -18,6 +18,6 @@ return {
   -- third-party dependencies' required version, as they would be specified
   -- to lua-version's `set()` in the form {from, to}
   _DEPENDENCIES = {
-    nginx = {"1.11.2.4"},
+    nginx = {"1.11.2.4", "1.13.6.1"},
   }
 }

From 71e298e93b3819aa62491bc2de800b9940a31537 Mon Sep 17 00:00:00 2001
From: Philip Duldig
Date: Mon, 6 Nov 2017 14:15:44 -0500
Subject: [PATCH 21/74] fix(*) do not bind the Admin API to all interfaces by
 default

Signed-off-by: Thibault Charbonnier

To follow security best practices by default, this change updates the
default `admin_listen` property so that it no longer binds to all
interfaces, but to the local one only. As the Admin API exposes
configuration data, secrets, SSL certificates and the like, we want to
prevent users from deploying Kong instances that negligently expose this
data to the public.

Fix #3012
From #3016
---
 kong.conf.default                              | 8 ++++----
 kong/templates/kong_defaults.lua               | 4 ++--
 spec/01-unit/002-conf_loader_spec.lua          | 10 +++++-----
 spec/01-unit/003-prefix_handler_spec.lua       | 14 +++++++-------
 spec/02-integration/02-cmd/03-compile_spec.lua | 4 ++--
 spec/kong_tests.conf                           | 2 +-
 6 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/kong.conf.default b/kong.conf.default
index be833c24ab18..0f828da9d840 100644
--- a/kong.conf.default
+++ b/kong.conf.default
@@ -83,14 +83,14 @@
 #proxy_listen_ssl = 0.0.0.0:8443 # Address and port on which Kong will accept
                                  # HTTPS requests if `ssl` is enabled.
 
-#admin_listen = 0.0.0.0:8001 # Address and port on which Kong will expose
+#admin_listen = 127.0.0.1:8001 # Address and port on which Kong will expose
                              # an entrypoint to the Admin API.
                              # This API lets you configure and manage Kong,
                              # and should be kept private and secured.
-#admin_listen_ssl = 0.0.0.0:8444 # Address and port on which Kong will accept - # HTTPS requests to the admin API, if - # `admin_ssl` is enabled. +#admin_listen_ssl = 127.0.0.1:8444 # Address and port on which Kong will + # accept HTTPS requests to the admin API, + # if `admin_ssl` is enabled. #nginx_user = nobody nobody # Defines user and group credentials used by # worker processes. If group is omitted, a diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index b9f82b41c805..8510077d6792 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -10,8 +10,8 @@ anonymous_reports = on proxy_listen = 0.0.0.0:8000 proxy_listen_ssl = 0.0.0.0:8443 -admin_listen = 0.0.0.0:8001 -admin_listen_ssl = 0.0.0.0:8444 +admin_listen = 127.0.0.1:8001 +admin_listen_ssl = 127.0.0.1:8444 nginx_user = nobody nobody nginx_worker_processes = auto nginx_optimizations = on diff --git a/spec/01-unit/002-conf_loader_spec.lua b/spec/01-unit/002-conf_loader_spec.lua index 7fec772e2999..bce139b72fc4 100644 --- a/spec/01-unit/002-conf_loader_spec.lua +++ b/spec/01-unit/002-conf_loader_spec.lua @@ -7,10 +7,10 @@ describe("Configuration loader", function() assert.is_string(conf.lua_package_path) assert.is_nil(conf.nginx_user) assert.equal("auto", conf.nginx_worker_processes) - assert.equal("0.0.0.0:8001", conf.admin_listen) + assert.equal("127.0.0.1:8001", conf.admin_listen) assert.equal("0.0.0.0:8000", conf.proxy_listen) assert.equal("0.0.0.0:8443", conf.proxy_listen_ssl) - assert.equal("0.0.0.0:8444", conf.admin_listen_ssl) + assert.equal("127.0.0.1:8444", conf.admin_listen_ssl) assert.is_nil(conf.ssl_cert) -- check placeholder value assert.is_nil(conf.ssl_cert_key) assert.is_nil(conf.admin_ssl_cert) @@ -24,7 +24,7 @@ describe("Configuration loader", function() -- overrides assert.is_nil(conf.nginx_user) assert.equal("1", conf.nginx_worker_processes) - assert.equal("0.0.0.0:9001", conf.admin_listen) + assert.equal("127.0.0.1:9001", conf.admin_listen) assert.equal("0.0.0.0:9000", conf.proxy_listen) assert.equal("0.0.0.0:9443", conf.proxy_listen_ssl) assert.is_nil(getmetatable(conf)) @@ -75,9 +75,9 @@ describe("Configuration loader", function() end) it("extracts ports and listen ips from proxy_listen/admin_listen", function() local conf = assert(conf_loader()) - assert.equal("0.0.0.0", conf.admin_ip) + assert.equal("127.0.0.1", conf.admin_ip) assert.equal(8001, conf.admin_port) - assert.equal("0.0.0.0", conf.admin_ssl_ip) + assert.equal("127.0.0.1", conf.admin_ssl_ip) assert.equal(8444, conf.admin_ssl_port) assert.equal("0.0.0.0", conf.proxy_ip) assert.equal(8000, conf.proxy_port) diff --git a/spec/01-unit/003-prefix_handler_spec.lua b/spec/01-unit/003-prefix_handler_spec.lua index b59b157cd25d..f1b60ca6e01f 100644 --- a/spec/01-unit/003-prefix_handler_spec.lua +++ b/spec/01-unit/003-prefix_handler_spec.lua @@ -59,7 +59,7 @@ describe("NGINX conf compiler", function() local kong_nginx_conf = prefix_handler.compile_kong_conf(helpers.test_conf) assert.matches("lua_package_path './?.lua;./?/init.lua;;;'", kong_nginx_conf, nil, true) assert.matches("listen 0.0.0.0:9000;", kong_nginx_conf, nil, true) - assert.matches("listen 0.0.0.0:9001;", kong_nginx_conf, nil, true) + assert.matches("listen 127.0.0.1:9001;", kong_nginx_conf, nil, true) assert.matches("server_name kong;", kong_nginx_conf, nil, true) assert.matches("server_name kong_admin;", kong_nginx_conf, nil, true) assert.not_matches("lua_ssl_trusted_certificate", kong_nginx_conf, nil, true) @@ -83,8 +83,8 @@ 
describe("NGINX conf compiler", function() local kong_nginx_conf = prefix_handler.compile_kong_conf(conf) assert.matches("listen 0.0.0.0:9000;", kong_nginx_conf, nil, true) assert.matches("listen 0.0.0.0:9443 ssl http2;", kong_nginx_conf, nil, true) - assert.matches("listen 0.0.0.0:9001;", kong_nginx_conf, nil, true) - assert.matches("listen 0.0.0.0:8444 ssl http2;", kong_nginx_conf, nil, true) + assert.matches("listen 127.0.0.1:9001;", kong_nginx_conf, nil, true) + assert.matches("listen 127.0.0.1:8444 ssl http2;", kong_nginx_conf, nil, true) conf = assert(conf_loader(helpers.test_conf_path, { http2 = true, @@ -92,8 +92,8 @@ describe("NGINX conf compiler", function() kong_nginx_conf = prefix_handler.compile_kong_conf(conf) assert.matches("listen 0.0.0.0:9000;", kong_nginx_conf, nil, true) assert.matches("listen 0.0.0.0:9443 ssl http2;", kong_nginx_conf, nil, true) - assert.matches("listen 0.0.0.0:9001;", kong_nginx_conf, nil, true) - assert.matches("listen 0.0.0.0:8444 ssl;", kong_nginx_conf, nil, true) + assert.matches("listen 127.0.0.1:9001;", kong_nginx_conf, nil, true) + assert.matches("listen 127.0.0.1:8444 ssl;", kong_nginx_conf, nil, true) conf = assert(conf_loader(helpers.test_conf_path, { admin_http2 = true @@ -101,8 +101,8 @@ describe("NGINX conf compiler", function() kong_nginx_conf = prefix_handler.compile_kong_conf(conf) assert.matches("listen 0.0.0.0:9000;", kong_nginx_conf, nil, true) assert.matches("listen 0.0.0.0:9443 ssl;", kong_nginx_conf, nil, true) - assert.matches("listen 0.0.0.0:9001;", kong_nginx_conf, nil, true) - assert.matches("listen 0.0.0.0:8444 ssl http2;", kong_nginx_conf, nil, true) + assert.matches("listen 127.0.0.1:9001;", kong_nginx_conf, nil, true) + assert.matches("listen 127.0.0.1:8444 ssl http2;", kong_nginx_conf, nil, true) end) it("enables proxy_protocol", function() local conf = assert(conf_loader(helpers.test_conf_path, { diff --git a/spec/02-integration/02-cmd/03-compile_spec.lua b/spec/02-integration/02-cmd/03-compile_spec.lua index eb521206338a..9154c758f162 100644 --- a/spec/02-integration/02-cmd/03-compile_spec.lua +++ b/spec/02-integration/02-cmd/03-compile_spec.lua @@ -8,13 +8,13 @@ describe("kong compile", function() assert.matches("server_name kong", stdout) assert.matches("server_name kong_admin", stdout) assert.matches("listen 0.0.0.0:8000", stdout, nil, true) - assert.matches("listen 0.0.0.0:8001", stdout, nil, true) + assert.matches("listen 127.0.0.1:8001", stdout, nil, true) assert.matches("listen 0.0.0.0:8443 ssl", stdout, nil, true) end) it("accepts a custom Kong conf", function() local _, _, stdout = assert(helpers.kong_exec("compile --conf " .. helpers.test_conf_path)) assert.matches("listen 0.0.0.0:9000", stdout, nil, true) - assert.matches("listen 0.0.0.0:9001", stdout, nil, true) + assert.matches("listen 127.0.0.1:9001", stdout, nil, true) assert.matches("listen 0.0.0.0:9443 ssl", stdout, nil, true) end) end) diff --git a/spec/kong_tests.conf b/spec/kong_tests.conf index 6039471c5e3a..e00231c5f5db 100644 --- a/spec/kong_tests.conf +++ b/spec/kong_tests.conf @@ -1,5 +1,5 @@ # 1st digit is 9 for our test instances -admin_listen = 0.0.0.0:9001 +admin_listen = 127.0.0.1:9001 proxy_listen = 0.0.0.0:9000 proxy_listen_ssl = 0.0.0.0:9443 From 586b93ce1ed81986ff3ce3fbc0186badf2e393ca Mon Sep 17 00:00:00 2001 From: Robert Date: Tue, 12 Dec 2017 17:30:54 -0800 Subject: [PATCH 22/74] fix(*) invert tcp-log and request-termination priority Work done in #3079 highlighted that tcp-log executed with a lower priority than request-termination. 
While this functionally has not presented a significant problem, this
commit cleans up the prioritization of these plugins to a more sane
definition. This has the added benefit of not requiring that the
request-termination plugin define its own delayed callback function once
#3079 is merged.

From #3089
---
 kong/plugins/request-termination/handler.lua | 2 +-
 kong/plugins/tcp-log/handler.lua             | 2 +-
 spec/01-unit/014-plugins_order_spec.lua      | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/kong/plugins/request-termination/handler.lua b/kong/plugins/request-termination/handler.lua
index e2fba7b74341..bb9d3e71e66d 100644
--- a/kong/plugins/request-termination/handler.lua
+++ b/kong/plugins/request-termination/handler.lua
@@ -6,7 +6,7 @@ local server_header = meta._NAME .. "/" .. meta._VERSION
 
 local RequestTerminationHandler = BasePlugin:extend()
 
-RequestTerminationHandler.PRIORITY = 7
+RequestTerminationHandler.PRIORITY = 2
 RequestTerminationHandler.VERSION = "0.1.0"
 
 function RequestTerminationHandler:new()
diff --git a/kong/plugins/tcp-log/handler.lua b/kong/plugins/tcp-log/handler.lua
index 594110b18aa9..fa6d0c3c1452 100644
--- a/kong/plugins/tcp-log/handler.lua
+++ b/kong/plugins/tcp-log/handler.lua
@@ -4,7 +4,7 @@ local cjson = require "cjson"
 
 local TcpLogHandler = BasePlugin:extend()
 
-TcpLogHandler.PRIORITY = 2
+TcpLogHandler.PRIORITY = 7
 TcpLogHandler.VERSION = "0.1.0"
 
 local function log(premature, conf, message)
diff --git a/spec/01-unit/014-plugins_order_spec.lua b/spec/01-unit/014-plugins_order_spec.lua
index 719b5cbc2b03..33e96093dcc6 100644
--- a/spec/01-unit/014-plugins_order_spec.lua
+++ b/spec/01-unit/014-plugins_order_spec.lua
@@ -66,12 +66,12 @@ describe("Plugins", function()
       "datadog",
       "file-log",
       "udp-log",
-      "request-termination",
+      "tcp-log",
       "loggly",
       "runscope",
       "syslog",
       "galileo",
-      "tcp-log",
+      "request-termination",
       "correlation-id",
     }

From 8f5d9696f6218a7960b06d7467e909f1aef260ed Mon Sep 17 00:00:00 2001
From: Thibault Charbonnier
Date: Tue, 12 Dec 2017 20:19:26 -0800
Subject: [PATCH 23/74] feat(core) execute subsequent phases upon
 short-circuited requests

Background context
------------------

Prior to this change, if a plugin short-circuits a request (e.g. an auth
plugin with HTTP 401, or rate limiting with HTTP 429), then subsequent
plugins with a lower priority would not be executed by Kong. This is even
more of an issue for logging plugins, as they would be blind to requests
short-circuited by Kong itself.

The purpose of this patch is to allow other plugins in general to run
after a plugin has short-circuited a request in the access phase.

Underlying issue
----------------

Our current plugins' run-loop is implemented in such a way that it both
constructs the list of plugins to execute, and yields to its caller (for
loop) at the same time. Once yielded, a plugin is instantly executed by
the caller (for loop). If the plugin uses the `ngx.exit()` API, then the
execution of the access phase is interrupted, and our run-loop never has
the chance to add logging plugins to the list of plugins to execute for
the current request (that is because logging plugins have a lower
priority than our authentication plugins, which must run first).

Possible solutions
------------------

One could think of several solutions to this issue:

1. Increase the priority of the logging plugins, so that they run earlier
   than auth plugins, and will be added to the list of plugins to execute
   for this request before the access phase is short-circuited.
2. Re-implement our run-loop (`plugins_iterator.lua` and `kong/init.lua`)
   so that it first builds the list of plugins to execute for a request,
   then executes said plugins _after_.
3. Force the run-loop to rebuild the entire list of plugins inside of the
   logging phase.

However, none of these solutions comes without a trade-off.

1. By updating the priority order of each plugin, we run the risk of
   unnecessarily breaking logic depending on the current order of
   execution. We also risk not fixing this issue for custom plugins
   without those plugins also bumping their priority order, which could
   cause cascading issues if other plugins depend on those plugins being
   executed at later phases.
2. While this is definitely a long-term goal, the breaking change nature
   of this solution should tell us that we would rather postpone it until
   a better case study is made against it. Re-implementing our run-loop
   will benefit Kong in many ways (easier to write plugins, more
   fine-grained hooks, more sandboxing...), but doing so now would be
   disruptive for current plugins. One of the reasons behind this is that
   such a new run-loop should probably not follow the same paradigm of
   building itself and yielding at the same time. Instead, we should
   think of a run-loop executing global plugins first, then
   authentication plugins, then API/Consumer-specific plugins. Such an
   approach as of today would be extremely disruptive and break many
   assumptions made in existing plugins, both default ones and ones in
   the wild.
3. The major flaw with this approach is that the run-loop depends on the
   datastore cache, which itself results in DB calls whenever a miss is
   encountered. However, relying on the datastore cache in the log phase
   is a very bad idea, since any sort of cache miss would trigger DB
   requests, which aren't supported in the log phase of NGINX due
   (rightfully so) to the lack of cosocket support in this phase.

Proposed solution
-----------------

This could be seen as a hack, or as a slightly more complex run-loop with
some state. We take advantage of the fact that all of our plugins (and,
very likely, most third-party plugins out there) use the `responses`
module to send their HTTP responses and short-circuit requests.

The introduction of a flag in the request's context *delays* such a
response, which gives the run-loop a chance to finish building the list
of plugins to execute (but subsequent plugins do not run anymore once a
plugin has short-circuited the request). Once the list of plugins to
execute is complete, we finally short-circuit the execution of the access
phase, not giving Kong a chance to run the "after access" handler, thus
not falsely leading other plugins into believing the request was proxied.
Once the log phase kicks in, it will undoubtedly execute the registered
plugins, even if their priority was lower than that of the
short-circuiting plugin.

This way, we've achieved the desired result with minimal impact:

* no plugin needs to update its `priority` constant
* no plugin needs to see its code updated, as long as it uses the
  `responses` module
* the performance impact is minimal; we are only doing a few `ngx.ctx`
  accesses and there is no need to re-run the plugins iterators
* the code change is minimal

Changes
-------

* Implemented a `ctx.delay_response` flag to play nice with the
  `responses` module. If set, we delay the flushing of the response until
  the plugins run-loop has finished running.
  Plugins can also make use of a custom flush callback for their delayed
  response if they do not wish to use the `responses.send` API. They can
  do so by setting `ctx.delayed_response = true` and
  `ctx.delayed_response_callback` to a function accepting `ngx.ctx` as
  its sole argument.
* Ensure all plugins follow the correct pattern of always calling
  `responses.send()` with a `return` statement.
* Implement regression tests for the subsequent phases that run upon
  short-circuiting.

Fix #490
Fix #892
From #3079
---
 kong/core/plugins_iterator.lua                |   3 +-
 kong/init.lua                                 |  12 +-
 kong/plugins/acl/handler.lua                  |   2 +-
 kong/plugins/oauth2/access.lua                |  15 +--
 kong/tools/responses.lua                      |  27 ++++
 spec/01-unit/009-responses_spec.lua           |  66 +++++++++
 .../05-proxy/03-plugins_triggering_spec.lua   | 126 ++++++++++++++++++
 .../kong/plugins/dummy/handler.lua            |  13 ++
 .../kong/plugins/dummy/schema.lua             |   1 +
 9 files changed, 253 insertions(+), 12 deletions(-)

diff --git a/kong/core/plugins_iterator.lua b/kong/core/plugins_iterator.lua
index 75e45522247a..1a758715b1b4 100644
--- a/kong/core/plugins_iterator.lua
+++ b/kong/core/plugins_iterator.lua
@@ -41,7 +41,8 @@ local function load_plugin_configuration(api_id, consumer_id, plugin_name)
                                          load_plugin_into_memory,
                                          api_id, consumer_id, plugin_name)
   if err then
-    responses.send_HTTP_INTERNAL_SERVER_ERROR(err)
+    ngx.ctx.delay_response = false
+    return responses.send_HTTP_INTERNAL_SERVER_ERROR(err)
   end
   if plugin ~= nil and plugin.enabled then
     return plugin.config or {}
diff --git a/kong/init.lua b/kong/init.lua
index 91e6349397e4..4be510c5e2b4 100644
--- a/kong/init.lua
+++ b/kong/init.lua
@@ -358,10 +358,20 @@ function Kong.access()
   local ctx = ngx.ctx
   core.access.before(ctx)
 
+  ctx.delay_response = true
+
   for plugin, plugin_conf in plugins_iterator(singletons.loaded_plugins, true) do
-    plugin.handler:access(plugin_conf)
+    if not ctx.delayed_response then
+      plugin.handler:access(plugin_conf)
+    end
   end
 
+  if ctx.delayed_response then
+    return responses.flush_delayed_response(ctx)
+  end
+
+  ctx.delay_response = false
+
   core.access.after(ctx)
 end
diff --git a/kong/plugins/acl/handler.lua b/kong/plugins/acl/handler.lua
index 761e38c7a404..70d924191a87 100644
--- a/kong/plugins/acl/handler.lua
+++ b/kong/plugins/acl/handler.lua
@@ -70,7 +70,7 @@ function ACLHandler:access(conf)
   local acls, err = singletons.cache:get(cache_key, nil,
                                          load_acls_into_memory, consumer_id)
   if err then
-    responses.send_HTTP_INTERNAL_SERVER_ERROR(err)
+    return responses.send_HTTP_INTERNAL_SERVER_ERROR(err)
   end
   if not acls then
     acls = EMPTY
diff --git a/kong/plugins/oauth2/access.lua b/kong/plugins/oauth2/access.lua
index 9a4b521d637a..b1f836600127 100644
--- a/kong/plugins/oauth2/access.lua
+++ b/kong/plugins/oauth2/access.lua
@@ -553,17 +553,14 @@ function _M.execute(conf)
   if ngx.req.get_method() == "POST" then
     local uri = ngx.var.uri
 
-    local from, _ = string_find(uri, "/oauth2/token", nil, true)
-
+    local from = string_find(uri, "/oauth2/token", nil, true)
     if from then
-      issue_token(conf)
-
-    else
-      from, _ = string_find(uri, "/oauth2/authorize", nil, true)
+      return issue_token(conf)
+    end
 
-      if from then
-        authorize(conf)
-      end
+    from = string_find(uri, "/oauth2/authorize", nil, true)
+    if from then
+      return authorize(conf)
     end
   end
 
diff --git a/kong/tools/responses.lua b/kong/tools/responses.lua
index 581ef2b63bca..1d442f3bd768 100644
--- a/kong/tools/responses.lua
+++ b/kong/tools/responses.lua
@@ -21,6 +21,8 @@
 local cjson = require "cjson.safe"
 local meta = require "kong.meta"
 
+local type = type
+
 --local server_header = _KONG._NAME .. "/" .. _KONG._VERSION
 local server_header = meta._NAME .. "/" .. meta._VERSION
@@ -102,6 +104,18 @@ local function send_response(status_code)
   -- @param content (Optional) The content to send as a response.
   -- @return ngx.exit (Exit current context)
   return function(content, headers)
+    local ctx = ngx.ctx
+
+    if ctx.delay_response and not ctx.delayed_response then
+      ctx.delayed_response = {
+        status_code = status_code,
+        content = content,
+        headers = headers,
+      }
+
+      return
+    end
+
     if status_code == _M.status_codes.HTTP_INTERNAL_SERVER_ERROR then
       if content then
         ngx.log(ngx.ERR, tostring(content))
@@ -137,6 +151,19 @@ local function send_response(status_code)
   end
 end
 
+function _M.flush_delayed_response(ctx)
+  ctx.delay_response = false
+
+  if type(ctx.delayed_response_callback) == "function" then
+    ctx.delayed_response_callback(ctx)
+    return -- avoid tail call
+  end
+
+  _M.send(ctx.delayed_response.status_code,
+          ctx.delayed_response.content,
+          ctx.delayed_response.headers)
+end
+
 -- Generate sugar methods (closures) for the most used HTTP status codes.
 for status_code_name, status_code in pairs(_M.status_codes) do
   _M["send_" .. status_code_name] = send_response(status_code)
diff --git a/spec/01-unit/009-responses_spec.lua b/spec/01-unit/009-responses_spec.lua
index 793542296f9b..7fe313ecf31f 100644
--- a/spec/01-unit/009-responses_spec.lua
+++ b/spec/01-unit/009-responses_spec.lua
@@ -119,4 +119,70 @@ describe("Response helpers", function()
       assert.stub(ngx.exit).was.called_with(501)
     end)
   end)
+
+  describe("delayed response", function()
+    it("does not call ngx.say/ngx.exit if `ctx.delayed_response = true`", function()
+      ngx.ctx.delay_response = true
+
+      responses.send(401, "Unauthorized", { ["X-Hello"] = "world" })
+      assert.stub(ngx.say).was_not_called()
+      assert.stub(ngx.exit).was_not_called()
+      assert.not_equal("world", ngx.header["X-Hello"])
+    end)
+
+    it("flush_delayed_response() sends delayed response's status/header/body", function()
+      ngx.ctx.delay_response = true
+
+      responses.send(401, "Unauthorized", { ["X-Hello"] = "world" })
+      responses.flush_delayed_response(ngx.ctx)
+
+      assert.stub(ngx.say).was.called_with("{\"message\":\"Unauthorized\"}")
+      assert.stub(ngx.exit).was.called_with(401)
+      assert.equal("world", ngx.header["X-Hello"])
+      assert.is_false(ngx.ctx.delay_response)
+    end)
+
+    it("delayed response cannot be overridden", function()
+      ngx.ctx.delay_response = true
+
+      responses.send(401, "Unauthorized")
+      responses.send(200, "OK")
+      responses.flush_delayed_response(ngx.ctx)
+
+      assert.stub(ngx.say).was.called_with("{\"message\":\"Unauthorized\"}")
+      assert.stub(ngx.exit).was.called_with(401)
+    end)
+
+    it("flush_delayed_response() uses custom callback if set", function()
+      local s = spy.new(function(ctx) end)
+
+      do
+        local old_type = _G.type
+
+        -- luacheck: ignore
+        _G.type = function(v)
+          if v == s then
+            return "function"
+          end
+
+          return old_type(v)
+        end
+
+        finally(function()
+          _G.type = old_type
+        end)
+      end
+
+      package.loaded["kong.tools.responses"] = nil
+      responses = require "kong.tools.responses"
+
+      ngx.ctx.delay_response = true
+      ngx.ctx.delayed_response_callback = s
+
+      responses.send(401, "Unauthorized", { ["X-Hello"] = "world" })
+      responses.flush_delayed_response(ngx.ctx)
+
+      assert.spy(s).was.called_with(ngx.ctx)
+    end)
+  end)
 end)
diff --git a/spec/02-integration/05-proxy/03-plugins_triggering_spec.lua b/spec/02-integration/05-proxy/03-plugins_triggering_spec.lua
index 2b2ebfc19167..1abe01896d3e 100644
--- 
a/spec/02-integration/05-proxy/03-plugins_triggering_spec.lua +++ b/spec/02-integration/05-proxy/03-plugins_triggering_spec.lua @@ -165,6 +165,132 @@ describe("Plugins triggering", function() assert.equal("5", res.headers["x-ratelimit-limit-hour"]) end) + describe("short-circuited requests", function() + local FILE_LOG_PATH = os.tmpname() + + setup(function() + if client then + client:close() + end + + helpers.stop_kong() + helpers.dao:truncate_tables() + + local api = assert(helpers.dao.apis:insert { + name = "example", + hosts = { "mock_upstream" }, + upstream_url = helpers.mock_upstream_url, + }) + + -- plugin able to short-circuit a request + assert(helpers.dao.plugins:insert { + name = "key-auth", + api_id = api.id, + }) + + -- response/body filter plugin + assert(helpers.dao.plugins:insert { + name = "dummy", + api_id = api.id, + config = { + append_body = "appended from body filtering", + } + }) + + -- log phase plugin + assert(helpers.dao.plugins:insert { + name = "file-log", + api_id = api.id, + config = { + path = FILE_LOG_PATH, + }, + }) + + assert(helpers.start_kong { + nginx_conf = "spec/fixtures/custom_nginx.template", + }) + + client = helpers.proxy_client() + end) + + teardown(function() + if client then + client:close() + end + + os.remove(FILE_LOG_PATH) + + helpers.stop_kong() + end) + + it("execute a log plugin", function() + local utils = require "kong.tools.utils" + local cjson = require "cjson" + local pl_path = require "pl.path" + local pl_file = require "pl.file" + local pl_stringx = require "pl.stringx" + + local uuid = utils.uuid() + + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "mock_upstream", + ["X-UUID"] = uuid, + -- /!\ no key credential + } + }) + assert.res_status(401, res) + + -- TEST: ensure that our logging plugin was executed and wrote + -- something to disk. + + helpers.wait_until(function() + return pl_path.exists(FILE_LOG_PATH) and pl_path.getsize(FILE_LOG_PATH) > 0 + end, 3) + + local log = pl_file.read(FILE_LOG_PATH) + local log_message = cjson.decode(pl_stringx.strip(log)) + assert.equal("127.0.0.1", log_message.client_ip) + assert.equal(uuid, log_message.request.headers["x-uuid"]) + end) + + it("execute a header_filter plugin", function() + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "mock_upstream", + } + }) + assert.res_status(401, res) + + -- TEST: ensure that the dummy plugin was executed by checking + -- that headers have been injected in the header_filter phase + -- Plugins such as CORS need to run on short-circuited requests + -- as well. 
+ + assert.not_nil(res.headers["dummy-plugin"]) + end) + + it("execute a body_filter plugin", function() + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "mock_upstream", + } + }) + local body = assert.res_status(401, res) + + -- TEST: ensure that the dummy plugin was executed by checking + -- that the body filtering phase has run + + assert.matches("appended from body filtering", body, nil, true) + end) + end) + describe("anonymous reports execution", function() -- anonymous reports are implemented as a plugin which is being executed -- by the plugins runloop, but which doesn't have a schema diff --git a/spec/fixtures/custom_plugins/kong/plugins/dummy/handler.lua b/spec/fixtures/custom_plugins/kong/plugins/dummy/handler.lua index 6d7e208ac62e..d60b899dcea0 100644 --- a/spec/fixtures/custom_plugins/kong/plugins/dummy/handler.lua +++ b/spec/fixtures/custom_plugins/kong/plugins/dummy/handler.lua @@ -21,6 +21,19 @@ function DummyHandler:header_filter(conf) DummyHandler.super.header_filter(self) ngx.header["Dummy-Plugin"] = conf.resp_header_value + + if conf.append_body then + ngx.header["Content-Length"] = nil + end +end + + +function DummyHandler:body_filter(conf) + DummyHandler.super.body_filter(self) + + if conf.append_body and not ngx.arg[2] then + ngx.arg[1] = string.sub(ngx.arg[1], 1, -2) .. conf.append_body + end end diff --git a/spec/fixtures/custom_plugins/kong/plugins/dummy/schema.lua b/spec/fixtures/custom_plugins/kong/plugins/dummy/schema.lua index ccee388296a2..b193b332fe39 100644 --- a/spec/fixtures/custom_plugins/kong/plugins/dummy/schema.lua +++ b/spec/fixtures/custom_plugins/kong/plugins/dummy/schema.lua @@ -1,5 +1,6 @@ return { fields = { resp_header_value = { type = "string", default = "1" }, + append_body = { type = "string" }, } } From 31dd3d1fae362a5db7630a6e9a1781336191891e Mon Sep 17 00:00:00 2001 From: Robert Date: Tue, 12 Dec 2017 22:02:45 -0800 Subject: [PATCH 24/74] chore(cli) remove deprecated compile command From #3069 --- kong-0.11.2-0.rockspec | 1 - kong.conf.default | 2 +- kong/cmd/compile.lua | 42 ------------------- kong/cmd/init.lua | 1 - .../02-integration/02-cmd/03-compile_spec.lua | 20 --------- ...{04-reload_spec.lua => 03-reload_spec.lua} | 0 ...5-version_spec.lua => 04-version_spec.lua} | 0 .../{06-check_spec.lua => 05-check_spec.lua} | 0 ...7-restart_spec.lua => 06-restart_spec.lua} | 0 ...{08-health_spec.lua => 07-health_spec.lua} | 0 .../{09-quit_spec.lua => 08-quit_spec.lua} | 0 ...0-prepare_spec.lua => 09-prepare_spec.lua} | 0 12 files changed, 1 insertion(+), 65 deletions(-) delete mode 100644 kong/cmd/compile.lua delete mode 100644 spec/02-integration/02-cmd/03-compile_spec.lua rename spec/02-integration/02-cmd/{04-reload_spec.lua => 03-reload_spec.lua} (100%) rename spec/02-integration/02-cmd/{05-version_spec.lua => 04-version_spec.lua} (100%) rename spec/02-integration/02-cmd/{06-check_spec.lua => 05-check_spec.lua} (100%) rename spec/02-integration/02-cmd/{07-restart_spec.lua => 06-restart_spec.lua} (100%) rename spec/02-integration/02-cmd/{08-health_spec.lua => 07-health_spec.lua} (100%) rename spec/02-integration/02-cmd/{09-quit_spec.lua => 08-quit_spec.lua} (100%) rename spec/02-integration/02-cmd/{10-prepare_spec.lua => 09-prepare_spec.lua} (100%) diff --git a/kong-0.11.2-0.rockspec b/kong-0.11.2-0.rockspec index 838fb0c8d49d..41b449192e8c 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.11.2-0.rockspec @@ -62,7 +62,6 @@ build = { ["kong.cmd.check"] = "kong/cmd/check.lua", 
["kong.cmd.reload"] = "kong/cmd/reload.lua", ["kong.cmd.restart"] = "kong/cmd/restart.lua", - ["kong.cmd.compile"] = "kong/cmd/compile.lua", ["kong.cmd.prepare"] = "kong/cmd/prepare.lua", ["kong.cmd.migrations"] = "kong/cmd/migrations.lua", ["kong.cmd.health"] = "kong/cmd/health.lua", diff --git a/kong.conf.default b/kong.conf.default index 0f828da9d840..604a7c41865d 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -4,7 +4,7 @@ # # The commented-out settings shown in this file represent the default values. # -# This file is read when `kong start` or `kong compile` are used. Kong +# This file is read when `kong start` or `kong prepare` are used. Kong # generates the Nginx configuration with the settings specified in this file. # # All environment variables prefixed with `KONG_` and capitalized will override diff --git a/kong/cmd/compile.lua b/kong/cmd/compile.lua deleted file mode 100644 index 9504a9e1e7b2..000000000000 --- a/kong/cmd/compile.lua +++ /dev/null @@ -1,42 +0,0 @@ -local prefix_handler = require "kong.cmd.utils.prefix_handler" -local conf_loader = require "kong.conf_loader" -local log = require "kong.cmd.utils.log" - -local function execute(args) - log.warn("'kong compile' is deprecated, use 'kong prepare' instead") - - local conf = assert(conf_loader(args.conf)) - local kong_nginx_conf = assert(prefix_handler.compile_kong_conf(conf)) - print(kong_nginx_conf) -end - -local lapp = [[ -Usage: kong compile [OPTIONS] - -[DEPRECATED] This command is deprecated. Use 'kong prepare' instead. - -Compile the Nginx configuration file containing Kong's servers -contexts from a given Kong configuration file. - -Example usage: - kong compile -c kong.conf > /usr/local/openresty/nginx-kong.conf - - This file can then be included in an OpenResty configuration: - - http { - # ... - include 'nginx-kong.conf'; - } - -Note: - Third-party services such as Serf need to be properly configured - and started for Kong to be fully compatible while embedded. - -Options: - -c,--conf (optional string) configuration file -]] - -return { - lapp = lapp, - execute = execute -} diff --git a/kong/cmd/init.lua b/kong/cmd/init.lua index f2b0e4240bdb..b20c7e43d47a 100644 --- a/kong/cmd/init.lua +++ b/kong/cmd/init.lua @@ -19,7 +19,6 @@ local cmds = { reload = true, health = true, check = true, - compile = true, prepare = true, migrations = true, version = true, diff --git a/spec/02-integration/02-cmd/03-compile_spec.lua b/spec/02-integration/02-cmd/03-compile_spec.lua deleted file mode 100644 index 9154c758f162..000000000000 --- a/spec/02-integration/02-cmd/03-compile_spec.lua +++ /dev/null @@ -1,20 +0,0 @@ -local helpers = require "spec.helpers" - -describe("kong compile", function() - it("compiles a Kong NGINX config", function() - local _, _, stdout = assert(helpers.kong_exec "compile") - assert.matches("init_by_lua_block", stdout) - assert.matches("init_worker_by_lua_block", stdout) - assert.matches("server_name kong", stdout) - assert.matches("server_name kong_admin", stdout) - assert.matches("listen 0.0.0.0:8000", stdout, nil, true) - assert.matches("listen 127.0.0.1:8001", stdout, nil, true) - assert.matches("listen 0.0.0.0:8443 ssl", stdout, nil, true) - end) - it("accepts a custom Kong conf", function() - local _, _, stdout = assert(helpers.kong_exec("compile --conf " .. 
helpers.test_conf_path)) - assert.matches("listen 0.0.0.0:9000", stdout, nil, true) - assert.matches("listen 127.0.0.1:9001", stdout, nil, true) - assert.matches("listen 0.0.0.0:9443 ssl", stdout, nil, true) - end) -end) diff --git a/spec/02-integration/02-cmd/04-reload_spec.lua b/spec/02-integration/02-cmd/03-reload_spec.lua similarity index 100% rename from spec/02-integration/02-cmd/04-reload_spec.lua rename to spec/02-integration/02-cmd/03-reload_spec.lua diff --git a/spec/02-integration/02-cmd/05-version_spec.lua b/spec/02-integration/02-cmd/04-version_spec.lua similarity index 100% rename from spec/02-integration/02-cmd/05-version_spec.lua rename to spec/02-integration/02-cmd/04-version_spec.lua diff --git a/spec/02-integration/02-cmd/06-check_spec.lua b/spec/02-integration/02-cmd/05-check_spec.lua similarity index 100% rename from spec/02-integration/02-cmd/06-check_spec.lua rename to spec/02-integration/02-cmd/05-check_spec.lua diff --git a/spec/02-integration/02-cmd/07-restart_spec.lua b/spec/02-integration/02-cmd/06-restart_spec.lua similarity index 100% rename from spec/02-integration/02-cmd/07-restart_spec.lua rename to spec/02-integration/02-cmd/06-restart_spec.lua diff --git a/spec/02-integration/02-cmd/08-health_spec.lua b/spec/02-integration/02-cmd/07-health_spec.lua similarity index 100% rename from spec/02-integration/02-cmd/08-health_spec.lua rename to spec/02-integration/02-cmd/07-health_spec.lua diff --git a/spec/02-integration/02-cmd/09-quit_spec.lua b/spec/02-integration/02-cmd/08-quit_spec.lua similarity index 100% rename from spec/02-integration/02-cmd/09-quit_spec.lua rename to spec/02-integration/02-cmd/08-quit_spec.lua diff --git a/spec/02-integration/02-cmd/10-prepare_spec.lua b/spec/02-integration/02-cmd/09-prepare_spec.lua similarity index 100% rename from spec/02-integration/02-cmd/10-prepare_spec.lua rename to spec/02-integration/02-cmd/09-prepare_spec.lua From 7c334ecf02a00ab489553c3a32133498d889d1e8 Mon Sep 17 00:00:00 2001 From: daurnimator Date: Tue, 5 Dec 2017 11:30:23 +1100 Subject: [PATCH 25/74] feat(cli) add /opt/openresty path to executable search Used by some OpenResty packages, e.g. 
https://aur.archlinux.org/packages/openresty/ From #3074 --- kong/cmd/utils/nginx_signals.lua | 1 + 1 file changed, 1 insertion(+) diff --git a/kong/cmd/utils/nginx_signals.lua b/kong/cmd/utils/nginx_signals.lua index a4e84efc01c7..337b893a60a2 100644 --- a/kong/cmd/utils/nginx_signals.lua +++ b/kong/cmd/utils/nginx_signals.lua @@ -9,6 +9,7 @@ local fmt = string.format local nginx_bin_name = "nginx" local nginx_search_paths = { "/usr/local/openresty/nginx/sbin", + "/opt/openresty/nginx/sbin", "" } local nginx_version_pattern = "^nginx.-openresty.-([%d%.]+)" From 9a3c742d7f0b087ac63c85ab6754691bee4706ba Mon Sep 17 00:00:00 2001 From: daurnimator Date: Tue, 5 Dec 2017 11:37:40 +1100 Subject: [PATCH 26/74] tests(helpers) find NGINX executable using kong.cmd.utils.nginx_signals From #3074 --- kong/cmd/utils/nginx_signals.lua | 6 +++--- spec/helpers.lua | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/kong/cmd/utils/nginx_signals.lua b/kong/cmd/utils/nginx_signals.lua index 337b893a60a2..a23b4579328c 100644 --- a/kong/cmd/utils/nginx_signals.lua +++ b/kong/cmd/utils/nginx_signals.lua @@ -49,7 +49,7 @@ end local _M = {} -local function find_nginx_bin() +function _M.find_nginx_bin() log.debug("searching for OpenResty 'nginx' executable") local found @@ -71,7 +71,7 @@ local function find_nginx_bin() end function _M.start(kong_conf) - local nginx_bin, err = find_nginx_bin() + local nginx_bin, err = _M.find_nginx_bin() if not nginx_bin then return nil, err end @@ -107,7 +107,7 @@ function _M.reload(kong_conf) return nil, "nginx not running in prefix: " .. kong_conf.prefix end - local nginx_bin, err = find_nginx_bin() + local nginx_bin, err = _M.find_nginx_bin() if not nginx_bin then return nil, err end diff --git a/spec/helpers.lua b/spec/helpers.lua index 209a6325282a..ae7ee2fed7a9 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -24,6 +24,7 @@ local pl_file = require "pl.file" local pl_dir = require "pl.dir" local cjson = require "cjson.safe" local http = require "resty.http" +local nginx_signals = require "kong.cmd.utils.nginx_signals" local log = require "kong.cmd.utils.log" log.set_lvl(log.levels.quiet) -- disable stdout logs in tests @@ -36,7 +37,8 @@ package.path = CUSTOM_PLUGIN_PATH .. ";" .. package.path -- a numerical representation of it. -- Ex: 1.11.2.2 -> 11122 local function openresty_ver_num() - local ok, _, _, stderr = pl_utils.executeex("nginx -V") + local nginx_bin = assert(nginx_signals.find_nginx_bin()) + local ok, _, _, stderr = pl_utils.executeex(string.format("%s -V", nginx_bin)) if not ok then error("could not execute 'nginx -V': " .. 
stderr) end From 660ba8401415d3a990e1badf5f40cdca10ebc37e Mon Sep 17 00:00:00 2001 From: daurnimator Date: Tue, 5 Dec 2017 12:24:42 +1100 Subject: [PATCH 27/74] refactor(*) use luaossl instead of luacrypto From #3074 Closes #3073 --- Makefile | 2 +- kong-0.11.2-0.rockspec | 2 +- kong/plugins/aws-lambda/v4.lua | 6 +-- kong/plugins/basic-auth/crypto.lua | 11 +++++- kong/plugins/hmac-auth/access.lua | 8 ++-- kong/plugins/jwt/daos.lua | 25 ++++++------ kong/plugins/jwt/jwt_parser.lua | 38 +++++++++++-------- .../20-hmac-auth/03-access_spec.lua | 29 +++++++------- .../20-hmac-auth/04-invalidations_spec.lua | 4 +- 9 files changed, 70 insertions(+), 55 deletions(-) diff --git a/Makefile b/Makefile index 627e4267a500..48fe2a91c5c5 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ OPENSSL_DIR ?= /usr/local/opt/openssl .PHONY: install dev lint test test-integration test-plugins test-all install: - @luarocks make OPENSSL_DIR=$(OPENSSL_DIR) + @luarocks make OPENSSL_DIR=$(OPENSSL_DIR) CRYPTO_DIR=$(OPENSSL_DIR) dev: install @for rock in $(DEV_ROCKS) ; do \ diff --git a/kong-0.11.2-0.rockspec b/kong-0.11.2-0.rockspec index 41b449192e8c..0c1b3bb8dbcb 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.11.2-0.rockspec @@ -24,7 +24,7 @@ dependencies = { "luatz == 0.3", "lua_system_constants == 0.1.2", "lua-resty-iputils == 0.3.0", - "luacrypto == 0.3.2", + "luaossl == 20171028", "luasyslog == 1.0.0", "lua_pack == 1.0.5", "lua-resty-dns-client == 0.6.2", diff --git a/kong/plugins/aws-lambda/v4.lua b/kong/plugins/aws-lambda/v4.lua index dc421beaa2f9..4ba5c36092be 100644 --- a/kong/plugins/aws-lambda/v4.lua +++ b/kong/plugins/aws-lambda/v4.lua @@ -3,7 +3,7 @@ local resty_sha256 = require "resty.sha256" local pl_string = require "pl.stringx" -local crypto = require "crypto" +local openssl_hmac = require "openssl.hmac" local ALGORITHM = "AWS4-HMAC-SHA256" @@ -14,8 +14,8 @@ for i = 0, 255 do CHAR_TO_HEX[char] = hex end -local function hmac(key, msg) - return crypto.hmac.digest("sha256", msg, key, true) +local function hmac(secret, data) + return openssl_hmac.new(secret, "sha256"):final(data) end local function hash(str) diff --git a/kong/plugins/basic-auth/crypto.lua b/kong/plugins/basic-auth/crypto.lua index d1aaf466c9d3..eaff62d3fa73 100644 --- a/kong/plugins/basic-auth/crypto.lua +++ b/kong/plugins/basic-auth/crypto.lua @@ -1,8 +1,15 @@ -- Module to encrypt the basic-auth credentials password field -local crypto = require "crypto" +local openssl_digest = require "openssl.digest" local format = string.format +local function tohex(s) + s = s:gsub(".", function(c) + return string.format("%.2x", c:byte(1)) + end) + return s +end + --- Salt the password -- Password is salted with the credential's consumer_id (long enough, unique) -- @param credential The basic auth credential table @@ -16,6 +23,6 @@ return { -- @return hash of the salted credential's password encrypt = function(credential) local salted = salt_password(credential) - return crypto.digest("sha1", salted) + return tohex(openssl_digest.new("sha1"):final(salted)) end } diff --git a/kong/plugins/hmac-auth/access.lua b/kong/plugins/hmac-auth/access.lua index 1a0481ee9e8b..16f323092bb3 100644 --- a/kong/plugins/hmac-auth/access.lua +++ b/kong/plugins/hmac-auth/access.lua @@ -2,7 +2,7 @@ local utils = require "kong.tools.utils" local responses = require "kong.tools.responses" local constants = require "kong.constants" local singletons = require "kong.singletons" -local crypto = require "crypto" +local openssl_hmac = require "openssl.hmac" local resty_sha256 
= require "resty.sha256" local math_abs = math.abs @@ -45,13 +45,13 @@ local hmac = { return ngx_hmac_sha1(secret, data) end, ["hmac-sha256"] = function(secret, data) - return crypto.hmac.digest("sha256", data, secret, true) + return openssl_hmac.new(secret, "sha256"):final(data) end, ["hmac-sha384"] = function(secret, data) - return crypto.hmac.digest("sha384", data, secret, true) + return openssl_hmac.new(secret, "sha384"):final(data) end, ["hmac-sha512"] = function(secret, data) - return crypto.hmac.digest("sha512", data, secret, true) + return openssl_hmac.new(secret, "sha512"):final(data) end } diff --git a/kong/plugins/jwt/daos.lua b/kong/plugins/jwt/daos.lua index 495b2652ddf9..27d75676eccc 100644 --- a/kong/plugins/jwt/daos.lua +++ b/kong/plugins/jwt/daos.lua @@ -1,6 +1,6 @@ local utils = require "kong.tools.utils" local Errors = require "kong.dao.errors" -local crypto = require "crypto" +local openssl_pkey = require "openssl.pkey" local SCHEMA = { primary_key = {"id"}, @@ -16,18 +16,19 @@ local SCHEMA = { algorithm = {type = "string", enum = {"HS256", "RS256", "RS512", "ES256"}, default = 'HS256'} }, self_check = function(schema, plugin_t, dao, is_update) - if plugin_t.algorithm == "RS256" and plugin_t.rsa_public_key == nil then - return false, Errors.schema "no mandatory 'rsa_public_key'" + if plugin_t.algorithm == "RS256" then + if plugin_t.rsa_public_key == nil then + return false, Errors.schema "no mandatory 'rsa_public_key'" + elseif not pcall(openssl_pkey.new, plugin_t.rsa_public_key) then + return false, Errors.schema "'rsa_public_key' format is invalid" + end + elseif plugin_t.algorithm == "RS512" then + if plugin_t.rsa_public_key == nil then + return false, Errors.schema "no mandatory 'rsa_public_key'" + elseif not pcall(openssl_pkey.new, plugin_t.rsa_public_key) then + return false, Errors.schema "'rsa_public_key' format is invalid" + end end - if plugin_t.algorithm == "RS256" and crypto.pkey.from_pem(plugin_t.rsa_public_key) == nil then - return false, Errors.schema "'rsa_public_key' format is invalid" - end - if plugin_t.algorithm == "RS512" and plugin_t.rsa_public_key == nil then - return false, Errors.schema "no mandatory 'rsa_public_key'" - end - if plugin_t.algorithm == "RS512" and crypto.pkey.from_pem(plugin_t.rsa_public_key) == nil then - return false, Errors.schema "'rsa_public_key' format is invalid" - end return true end, } diff --git a/kong/plugins/jwt/jwt_parser.lua b/kong/plugins/jwt/jwt_parser.lua index 1baabb1c7afe..b080ce8abddd 100644 --- a/kong/plugins/jwt/jwt_parser.lua +++ b/kong/plugins/jwt/jwt_parser.lua @@ -7,7 +7,9 @@ local json = require "cjson" local utils = require "kong.tools.utils" -local crypto = require "crypto" +local openssl_digest = require "openssl.digest" +local openssl_hmac = require "openssl.hmac" +local openssl_pkey = require "openssl.pkey" local asn_sequence = require "kong.plugins.jwt.asn_sequence" local error = error @@ -23,14 +25,14 @@ local decode_base64 = ngx.decode_base64 --- Supported algorithms for signing tokens. 
local alg_sign = { - ["HS256"] = function(data, key) return crypto.hmac.digest("sha256", data, key, true) end, - --["HS384"] = function(data, key) return crypto.hmac.digest("sha384", data, key, true) end, - --["HS512"] = function(data, key) return crypto.hmac.digest("sha512", data, key, true) end - ["RS256"] = function(data, key) return crypto.sign('sha256', data, crypto.pkey.from_pem(key, true)) end, - ["RS512"] = function(data, key) return crypto.sign('sha512', data, crypto.pkey.from_pem(key, true)) end, + ["HS256"] = function(data, key) return openssl_hmac.new(key, "sha256"):final(data) end, + --["HS384"] = function(data, key) return openssl_hmac.new(key, "sha384"):final(data) end, + --["HS512"] = function(data, key) return openssl_hmac.new(key, "sha512"):final(data) end, + ["RS256"] = function(data, key) return openssl_pkey.new(key):sign(openssl_digest.new("sha256"):update(data)) end, + ["RS512"] = function(data, key) return openssl_pkey.new(key):sign(openssl_digest.new("sha512"):update(data)) end, ["ES256"] = function(data, key) - local pkeyPrivate = crypto.pkey.from_pem(key, true) - local signature = crypto.sign('sha256', data, pkeyPrivate) + local pkeyPrivate = openssl_pkey.new(key) + local signature = pkeyPrivate:sign(openssl_digest.new("sha256"):update(data)) local derSequence = asn_sequence.parse_simple_sequence(signature) local r = asn_sequence.unsign_integer(derSequence[1], 32) @@ -45,23 +47,29 @@ local alg_sign = { local alg_verify = { ["HS256"] = function(data, signature, key) return signature == alg_sign["HS256"](data, key) end, --["HS384"] = function(data, signature, key) return signature == alg_sign["HS384"](data, key) end, - --["HS512"] = function(data, signature, key) return signature == alg_sign["HS512"](data, key) end + --["HS512"] = function(data, signature, key) return signature == alg_sign["HS512"](data, key) end, ["RS256"] = function(data, signature, key) - local pkey = assert(crypto.pkey.from_pem(key), "Consumer Public Key is Invalid") - return crypto.verify('sha256', data, signature, pkey) + local pkey_ok, pkey = pcall(openssl_pkey.new, key) + assert(pkey_ok, "Consumer Public Key is Invalid") + local digest = openssl_digest.new('sha256'):update(data) + return pkey:verify(signature, digest) end, ["RS512"] = function(data, signature, key) - local pkey = assert(crypto.pkey.from_pem(key), "Consumer Public Key is Invalid") - return crypto.verify('sha512', data, signature, pkey) + local pkey_ok, pkey = pcall(openssl_pkey.new, key) + assert(pkey_ok, "Consumer Public Key is Invalid") + local digest = openssl_digest.new('sha512'):update(data) + return pkey:verify(signature, digest) end, ["ES256"] = function(data, signature, key) - local pkey = assert(crypto.pkey.from_pem(key), "Consumer Public Key is Invalid") + local pkey_ok, pkey = pcall(openssl_pkey.new, key) + assert(pkey_ok, "Consumer Public Key is Invalid") assert(#signature == 64, "Signature must be 64 bytes.") local asn = {} asn[1] = asn_sequence.resign_integer(string_sub(signature, 1, 32)) asn[2] = asn_sequence.resign_integer(string_sub(signature, 33, 64)) local signatureAsn = asn_sequence.create_simple_sequence(asn) - return crypto.verify('sha256', data, signatureAsn, pkey) + local digest = openssl_digest.new('sha256'):update(data) + return pkey:verify(signatureAsn, digest) end } diff --git a/spec/03-plugins/20-hmac-auth/03-access_spec.lua b/spec/03-plugins/20-hmac-auth/03-access_spec.lua index 211da320cb48..76890c61a634 100644 --- a/spec/03-plugins/20-hmac-auth/03-access_spec.lua +++ 
b/spec/03-plugins/20-hmac-auth/03-access_spec.lua @@ -1,11 +1,11 @@ local cjson = require "cjson" -local crypto = require "crypto" +local openssl_hmac = require "openssl.hmac" local helpers = require "spec.helpers" local utils = require "kong.tools.utils" local resty_sha256 = require "resty.sha256" local hmac_sha1_binary = function(secret, data) - return crypto.hmac.digest("sha1", data, secret, true) + return openssl_hmac.new(secret, "sha1"):final(data) end local SIGNATURE_NOT_VALID = "HMAC signature cannot be verified" @@ -639,8 +639,8 @@ describe("Plugin: hmac-auth (access)", function() it("should not pass with GET with wrong algorithm", function() local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") local encodedSignature = ngx.encode_base64( - crypto.hmac.digest("sha256","date: " .. date .. "\n" - .. "content-md5: md5" .. "\nGET /request HTTP/1.1", "secret", true)) + openssl_hmac.new("secret", "sha256"):final("date: " .. date .. "\n" + .. "content-md5: md5" .. "\nGET /request HTTP/1.1")) local hmacAuth = [[hmac username="bob",algorithm="hmac-sha",]] .. [[ headers="date content-md5 request-line",signature="]] .. encodedSignature .. [["]] @@ -662,9 +662,8 @@ describe("Plugin: hmac-auth (access)", function() it("should pass the right headers to the upstream server", function() local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") local encodedSignature = ngx.encode_base64( - crypto.hmac.digest("sha256","date: " .. date .. "\n" - .. "content-md5: md5" .. "\nGET /request HTTP/1.1", - "secret", true)) + openssl_hmac.new("secret", "sha256"):final("date: " .. date .. "\n" + .. "content-md5: md5" .. "\nGET /request HTTP/1.1")) local hmacAuth = [[hmac username="bob",algorithm="hmac-sha256",]] .. [[ headers="date content-md5 request-line",signature="]] .. encodedSignature .. [["]] @@ -1097,8 +1096,8 @@ describe("Plugin: hmac-auth (access)", function() it("should pass with GET with hmac-sha384", function() local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") local encodedSignature = ngx.encode_base64( - crypto.hmac.digest("sha384","date: " .. date .. "\n" - .. "content-md5: md5" .. "\nGET /request HTTP/1.1", "secret", true)) + openssl_hmac.new("secret", "sha384"):final("date: " .. date .. "\n" + .. "content-md5: md5" .. "\nGET /request HTTP/1.1")) local hmacAuth = [[hmac username="bob", algorithm="hmac-sha384", ]] .. [[headers="date content-md5 request-line", signature="]] .. encodedSignature .. [["]] @@ -1119,8 +1118,8 @@ describe("Plugin: hmac-auth (access)", function() it("should pass with GET with hmac-sha512", function() local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") local encodedSignature = ngx.encode_base64( - crypto.hmac.digest("sha512","date: " .. date .. "\n" - .. "content-md5: md5" .. "\nGET /request HTTP/1.1", "secret", true)) + openssl_hmac.new("secret", "sha512"):final("date: " .. date .. "\n" + .. "content-md5: md5" .. "\nGET /request HTTP/1.1")) local hmacAuth = [[hmac username="bob", algorithm="hmac-sha512", ]] .. [[headers="date content-md5 request-line", signature="]] .. encodedSignature .. [["]] @@ -1141,8 +1140,8 @@ describe("Plugin: hmac-auth (access)", function() it("should not pass with hmac-sha512", function() local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") local encodedSignature = ngx.encode_base64( - crypto.hmac.digest("sha512","date: " .. date .. "\n" - .. "content-md5: md5" .. "\nGET /request HTTP/1.1", "secret", true)) + openssl_hmac.new("secret", "sha512"):final("date: " .. date .. "\n" + .. "content-md5: md5" .. 
"\nGET /request HTTP/1.1")) local hmacAuth = [[hmac username="bob", algorithm="hmac-sha512", ]] .. [[headers="date content-md5 request-line", signature="]] .. encodedSignature .. [["]] @@ -1178,8 +1177,8 @@ describe("Plugin: hmac-auth (access)", function() it("should pass with hmac-sha1", function() local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") local encodedSignature = ngx.encode_base64( - crypto.hmac.digest("sha1","date: " .. date .. "\n" - .. "content-md5: md5" .. "\nGET /request HTTP/1.1", "secret", true)) + openssl_hmac.new("secret", "sha1"):final("date: " .. date .. "\n" + .. "content-md5: md5" .. "\nGET /request HTTP/1.1")) local hmacAuth = [[hmac username="bob", algorithm="hmac-sha1", ]] .. [[headers="date content-md5 request-line", signature="]] .. encodedSignature .. [["]] diff --git a/spec/03-plugins/20-hmac-auth/04-invalidations_spec.lua b/spec/03-plugins/20-hmac-auth/04-invalidations_spec.lua index 835ba9587cc7..d3cd22db8a08 100644 --- a/spec/03-plugins/20-hmac-auth/04-invalidations_spec.lua +++ b/spec/03-plugins/20-hmac-auth/04-invalidations_spec.lua @@ -1,7 +1,7 @@ local helpers = require "spec.helpers" local cjson = require "cjson" -local crypto = require "crypto" +local openssl_hmac = require "openssl.hmac" describe("Plugin: hmac-auth (invalidations)", function() local client_proxy, client_admin, consumer, credential @@ -48,7 +48,7 @@ describe("Plugin: hmac-auth (invalidations)", function() end) local function hmac_sha1_binary(secret, data) - return crypto.hmac.digest("sha1", data, secret, true) + return openssl_hmac.new(secret, "sha1"):final(data) end local function get_authorization(username) From 152e3882f6ae08eb063cb2cc184b80e5e6b85910 Mon Sep 17 00:00:00 2001 From: daurnimator Date: Mon, 11 Dec 2017 11:56:20 +1100 Subject: [PATCH 28/74] refactor(basic-auth) use lua-resty-string instead of luaossl From #3074 --- kong/plugins/basic-auth/crypto.lua | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/kong/plugins/basic-auth/crypto.lua b/kong/plugins/basic-auth/crypto.lua index eaff62d3fa73..95d4106d9bda 100644 --- a/kong/plugins/basic-auth/crypto.lua +++ b/kong/plugins/basic-auth/crypto.lua @@ -1,15 +1,9 @@ -- Module to encrypt the basic-auth credentials password field -local openssl_digest = require "openssl.digest" +local resty_sha1 = require "resty.sha1" +local to_hex = require "resty.string".to_hex local format = string.format -local function tohex(s) - s = s:gsub(".", function(c) - return string.format("%.2x", c:byte(1)) - end) - return s -end - --- Salt the password -- Password is salted with the credential's consumer_id (long enough, unique) -- @param credential The basic auth credential table @@ -23,6 +17,8 @@ return { -- @return hash of the salted credential's password encrypt = function(credential) local salted = salt_password(credential) - return tohex(openssl_digest.new("sha1"):final(salted)) + local digest = resty_sha1:new() + assert(digest:update(salted)) + return to_hex(digest:final()) end } From 9c88e66870eb973ba6caa8a031dd1873edf64a8b Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Wed, 13 Dec 2017 09:25:32 -0800 Subject: [PATCH 29/74] feat(tcp-log) TLS handshake support for TCP logs --- kong-0.11.2-0.rockspec | 2 + kong/plugins/tcp-log/handler.lua | 9 +++++ kong/plugins/tcp-log/migrations/cassandra.lua | 22 +++++++++++ kong/plugins/tcp-log/migrations/postgres.lua | 22 +++++++++++ kong/plugins/tcp-log/schema.lua | 4 +- .../03-plugins/01-tcp-log/01-tcp-log_spec.lua | 39 +++++++++++++++++++ spec/helpers.lua | 22 
+++++++++-- 7 files changed, 116 insertions(+), 4 deletions(-) create mode 100644 kong/plugins/tcp-log/migrations/cassandra.lua create mode 100644 kong/plugins/tcp-log/migrations/postgres.lua diff --git a/kong-0.11.2-0.rockspec b/kong-0.11.2-0.rockspec index 0c1b3bb8dbcb..41cb27ffefa8 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.11.2-0.rockspec @@ -151,6 +151,8 @@ build = { ["kong.plugins.tcp-log.handler"] = "kong/plugins/tcp-log/handler.lua", ["kong.plugins.tcp-log.schema"] = "kong/plugins/tcp-log/schema.lua", + ["kong.plugins.tcp-log.migrations.cassandra"] = "kong/plugins/tcp-log/migrations/cassandra.lua", + ["kong.plugins.tcp-log.migrations.postgres"] = "kong/plugins/tcp-log/migrations/postgres.lua", ["kong.plugins.udp-log.handler"] = "kong/plugins/udp-log/handler.lua", ["kong.plugins.udp-log.schema"] = "kong/plugins/udp-log/schema.lua", diff --git a/kong/plugins/tcp-log/handler.lua b/kong/plugins/tcp-log/handler.lua index fa6d0c3c1452..bd97f6e71876 100644 --- a/kong/plugins/tcp-log/handler.lua +++ b/kong/plugins/tcp-log/handler.lua @@ -27,6 +27,15 @@ local function log(premature, conf, message) return end + if conf.tls then + ok, err = sock:sslhandshake(true, conf.tls_sni, false) + if not ok then + ngx.log(ngx.ERR, "[tcp-log] failed to perform TLS handshake to ", + host, ":", port, ": ", err) + return + end + end + ok, err = sock:send(cjson.encode(message) .. "\r\n") if not ok then ngx.log(ngx.ERR, "[tcp-log] failed to send data to " .. host .. ":" .. tostring(port) .. ": ", err) diff --git a/kong/plugins/tcp-log/migrations/cassandra.lua b/kong/plugins/tcp-log/migrations/cassandra.lua new file mode 100644 index 000000000000..0ed8e2cfc2fd --- /dev/null +++ b/kong/plugins/tcp-log/migrations/cassandra.lua @@ -0,0 +1,22 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator + + +return { + { + name = "2017-12-13-120000_tcp-log_tls", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "tcp-log") do + if not ok then + return config + end + config.tls = false + local ok, err = update(config) + if not ok then + return err + end + end + end, + down = function(_, _, dao) end -- not implemented + }, +} + diff --git a/kong/plugins/tcp-log/migrations/postgres.lua b/kong/plugins/tcp-log/migrations/postgres.lua new file mode 100644 index 000000000000..0ed8e2cfc2fd --- /dev/null +++ b/kong/plugins/tcp-log/migrations/postgres.lua @@ -0,0 +1,22 @@ +local plugin_config_iterator = require("kong.dao.migrations.helpers").plugin_config_iterator + + +return { + { + name = "2017-12-13-120000_tcp-log_tls", + up = function(_, _, dao) + for ok, config, update in plugin_config_iterator(dao, "tcp-log") do + if not ok then + return config + end + config.tls = false + local ok, err = update(config) + if not ok then + return err + end + end + end, + down = function(_, _, dao) end -- not implemented + }, +} + diff --git a/kong/plugins/tcp-log/schema.lua b/kong/plugins/tcp-log/schema.lua index 8186fe17b795..7c99f62ffe61 100644 --- a/kong/plugins/tcp-log/schema.lua +++ b/kong/plugins/tcp-log/schema.lua @@ -3,6 +3,8 @@ return { host = { required = true, type = "string" }, port = { required = true, type = "number" }, timeout = { default = 10000, type = "number" }, - keepalive = { default = 60000, type = "number" } + keepalive = { default = 60000, type = "number" }, + tls = { default = false, type = "boolean" }, + tls_sni = { type = "string" }, } } diff --git a/spec/03-plugins/01-tcp-log/01-tcp-log_spec.lua 
b/spec/03-plugins/01-tcp-log/01-tcp-log_spec.lua index 8035dae3aa3c..4b750713eb7e 100644 --- a/spec/03-plugins/01-tcp-log/01-tcp-log_spec.lua +++ b/spec/03-plugins/01-tcp-log/01-tcp-log_spec.lua @@ -15,6 +15,12 @@ describe("Plugin: tcp-log (log)", function() upstream_url = helpers.mock_upstream_url, }) + local api2 = assert(helpers.dao.apis:insert { + name = "api-2", + hosts = { "tcp_logging_tls.com" }, + upstream_url = helpers.mock_upstream_url, + }) + assert(helpers.dao.plugins:insert { api_id = api1.id, name = "tcp-log", @@ -24,6 +30,16 @@ describe("Plugin: tcp-log (log)", function() }, }) + assert(helpers.dao.plugins:insert { + api_id = api2.id, + name = "tcp-log", + config = { + host = "127.0.0.1", + port = TCP_PORT, + tls = true, + }, + }) + assert(helpers.start_kong({ nginx_conf = "spec/fixtures/custom_nginx.template", })) @@ -81,4 +97,27 @@ describe("Plugin: tcp-log (log)", function() assert.True(log_message.latencies.proxy < 3000) assert.True(log_message.latencies.request >= log_message.latencies.kong + log_message.latencies.proxy) end) + + it("performs a TLS handshake on the remote TCP server", function() + local thread = helpers.tcp_server(TCP_PORT, { tls = true }) + + -- Making the request + local r = assert(client:send { + method = "GET", + path = "/request", + headers = { + host = "tcp_logging_tls.com", + }, + }) + assert.response(r).has.status(200) + + -- Getting back the TCP server input + local ok, res = thread:join() + assert.True(ok) + assert.is_string(res) + + -- Making sure it's alright + local log_message = cjson.decode(res) + assert.equal("127.0.0.1", log_message.client_ip) + end) end) diff --git a/spec/helpers.lua b/spec/helpers.lua index ae7ee2fed7a9..9d147c0d2527 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -310,11 +310,13 @@ end -- (single read). -- @name tcp_server -- @param `port` The port where the server will be listening to +-- @param `opts A table of options defining the server's behavior -- @return `thread` A thread object -local function tcp_server(port, ...) +local function tcp_server(port, opts, ...) local threads = require "llthreads2.ex" + opts = opts or {} local thread = threads.new({ - function(port) + function(port, opts) local socket = require "socket" local server = assert(socket.tcp()) server:settimeout(10) @@ -322,13 +324,27 @@ local function tcp_server(port, ...) assert(server:bind("*", port)) assert(server:listen()) local client = assert(server:accept()) + + if opts.tls then + local ssl = require "ssl" + local params = { + mode = "server", + protocol = "any", + key = "spec/fixtures/kong_spec.key", + certificate = "spec/fixtures/kong_spec.crt", + } + + client = ssl.wrap(client, params) + client:dohandshake() + end + local line = assert(client:receive()) client:send(line .. "\n") client:close() server:close() return line end - }, port) + }, port, opts) return thread:start(...) end From 8a55a31fff9a7a7456b42198336726e6cfb59e57 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Thu, 14 Dec 2017 10:11:11 -0800 Subject: [PATCH 30/74] chore(makefile) proper OPENSSL_DIR defaults Detect the default OPENSSL_DIR include path based on the OS. The variable can still be explicitly overridden. This fixes a regression introduced by 54691fb. 
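For instance, a build against a custom OpenSSL location could still be invoked as `make dev OPENSSL_DIR=/opt/openssl` (the path here is illustrative); the OS-based value introduced below is only a default.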
--- Makefile | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Makefile b/Makefile index 48fe2a91c5c5..f0a6fe9db47c 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,14 @@ +OS := $(shell uname) + DEV_ROCKS = "busted 2.0.rc12" "luacheck 0.20.0" "lua-llthreads2 0.1.4" BUSTED_ARGS ?= -v TEST_CMD ?= bin/busted $(BUSTED_ARGS) + +ifeq ($(OS), Darwin) OPENSSL_DIR ?= /usr/local/opt/openssl +else +OPENSSL_DIR ?= /usr +endif .PHONY: install dev lint test test-integration test-plugins test-all From b9f775e1e0ff37c2f6bee5207e248e2662708958 Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Thu, 7 Dec 2017 23:21:35 +0200 Subject: [PATCH 31/74] fix(admin) unsupported media type (415) error reported on issue #1182 --- kong/api/init.lua | 28 +- .../04-admin_api/02-apis_routes_spec.lua | 478 +++++++++++------- .../04-admin_api/03-consumers_routes_spec.lua | 195 ++++++- 3 files changed, 500 insertions(+), 201 deletions(-) diff --git a/kong/api/init.lua b/kong/api/init.lua index faac6e7ab4b6..88013b27b442 100644 --- a/kong/api/init.lua +++ b/kong/api/init.lua @@ -8,6 +8,7 @@ local api_helpers = require "kong.api.api_helpers" local find = string.find +local sub = string.sub local app = lapis.Application() @@ -89,10 +90,31 @@ end app:before_filter(function(self) - if NEEDS_BODY[ngx.req.get_method()] - and not self.req.headers["content-type"] then - return responses.send_HTTP_UNSUPPORTED_MEDIA_TYPE() + if not NEEDS_BODY[ngx.req.get_method()] then + return + end + + local content_type = self.req.headers["content-type"] + if not content_type then + local content_length = self.req.headers["content-length"] + if content_length == "0" then + return + end + + if not content_length then + local _, err = ngx.req.socket() + if err == "no body" then + return + end + end + + elseif sub(content_type, 1, 16) == "application/json" or + sub(content_type, 1, 19) == "multipart/form-data" or + sub(content_type, 1, 33) == "application/x-www-form-urlencoded" then + return end + + return responses.send_HTTP_UNSUPPORTED_MEDIA_TYPE() end) diff --git a/spec/02-integration/04-admin_api/02-apis_routes_spec.lua b/spec/02-integration/04-admin_api/02-apis_routes_spec.lua index 54f3573f2621..3767eaa95cf6 100644 --- a/spec/02-integration/04-admin_api/02-apis_routes_spec.lua +++ b/spec/02-integration/04-admin_api/02-apis_routes_spec.lua @@ -13,7 +13,6 @@ local function it_content_types(title, fn) end dao_helpers.for_each_dao(function(kong_config) - describe("Admin API #" .. kong_config.database, function() local client local dao @@ -24,10 +23,8 @@ describe("Admin API #" .. kong_config.database, function() assert(helpers.start_kong{ database = kong_config.database }) - client = assert(helpers.admin_client()) end) teardown(function() - if client then client:close() end helpers.stop_kong() end) @@ -35,6 +32,10 @@ describe("Admin API #" .. kong_config.database, function() describe("POST", function() before_each(function() dao:truncate_tables() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end end) it_content_types("creates an API", function(content_type) return function() @@ -143,6 +144,8 @@ describe("Admin API #" .. kong_config.database, function() }) assert.res_status(201, res) + client = assert(helpers.admin_client()) + res = assert(client:send { method = "POST", path = "/apis", @@ -160,11 +163,14 @@ describe("Admin API #" .. 
kong_config.database, function() end) end) end) - end) describe("PUT", function() before_each(function() dao:truncate_tables() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end end) it_content_types("creates if not exists", function(content_type) @@ -206,7 +212,7 @@ describe("Admin API #" .. kong_config.database, function() -- -- Eventually, our Admin endpoint will follow a more appropriate -- behavior for PUT. - local res = assert(client:send { + local res = assert(helpers.admin_client():send { method = "PUT", path = "/apis", body = { @@ -235,6 +241,8 @@ describe("Admin API #" .. kong_config.database, function() local body = assert.res_status(201, res) local json = cjson.decode(body) + client = assert(helpers.admin_client()) + res = assert(client:send { method = "PUT", path = "/apis", @@ -275,6 +283,8 @@ describe("Admin API #" .. kong_config.database, function() upstream_url = "upstream_url is required" }, json) + client = assert(helpers.admin_client()) + -- Invalid parameter res = assert(client:send { method = "PUT", @@ -310,6 +320,8 @@ describe("Admin API #" .. kong_config.database, function() local body = assert.res_status(201, res) local json = cjson.decode(body) + client = assert(helpers.admin_client()) + res = assert(client:send { method = "PUT", path = "/apis", @@ -344,6 +356,12 @@ describe("Admin API #" .. kong_config.database, function() teardown(function() dao:truncate_tables() end) + before_each(function() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end + end) it("retrieves the first page", function() local res = assert(client:send { @@ -360,7 +378,7 @@ describe("Admin API #" .. kong_config.database, function() local offset for i = 1, 4 do - local res = assert(client:send { + local res = assert(helpers.admin_client():send { method = "GET", path = "/apis", query = {size = 3, offset = offset} @@ -423,259 +441,291 @@ describe("Admin API #" .. kong_config.database, function() end) end) - it("returns 405 on invalid method", function() - local methods = {"DELETE"} - for i = 1, #methods do + describe("DELETE", function() + before_each(function() + dao:truncate_tables() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end + end) + + it("returns 405 on invalid method", function() + local methods = {"DELETE"} + for i = 1, #methods do + local res = assert(client:send { + method = methods[i], + path = "/apis", + body = {}, -- tmp: body to allow POST/PUT to work + headers = {["Content-Type"] = "application/json"} + }) + local body = assert.response(res).has.status(405) + local json = cjson.decode(body) + assert.same({ message = "Method not allowed" }, json) + end + end) + end) + end) + + describe("/apis/{api}", function() + local api + setup(function() + dao:truncate_tables() + end) + before_each(function() + api = assert(dao.apis:insert { + name = "my-api", + uris = "/my-api", + upstream_url = "http://my-api.com" + }) + end) + after_each(function() + dao:truncate_tables() + end) + + describe("GET", function() + before_each(function() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end + end) + + it("retrieves by id", function() local res = assert(client:send { - method = methods[i], - path = "/apis", - body = {}, -- tmp: body to allow POST/PUT to work - headers = {["Content-Type"] = "application/json"} + method = "GET", + path = "/apis/" .. 
api.id + }) + local body = assert.res_status(200, res) + local json = cjson.decode(body) + assert.same(api, json) + end) + it("retrieves by name", function() + local res = assert(client:send { + method = "GET", + path = "/apis/" .. api.name }) - local body = assert.response(res).has.status(405) + local body = assert.res_status(200, res) local json = cjson.decode(body) - assert.same({ message = "Method not allowed" }, json) - end + assert.same(api, json) + end) + it("returns 404 if not found", function() + local res = assert(client:send { + method = "GET", + path = "/apis/_inexistent_" + }) + assert.res_status(404, res) + end) + it("ignores an invalid body", function() + local res = assert(client:send { + method = "GET", + path = "/apis/" .. api.id, + body = "this fails if decoded as json", + headers = { + ["Content-Type"] = "application/json", + } + }) + assert.res_status(200, res) + end) end) - describe("/apis/{api}", function() - local api - setup(function() - dao:truncate_tables() - end) + describe("PATCH", function() before_each(function() - api = assert(dao.apis:insert { - name = "my-api", - uris = "/my-api", - upstream_url = "http://my-api.com" - }) + client = assert(helpers.admin_client()) end) after_each(function() - dao:truncate_tables() + if client then client:close() end end) - describe("GET", function() - it("retrieves by id", function() + it_content_types("updates if found", function(content_type) + return function() local res = assert(client:send { - method = "GET", - path = "/apis/" .. api.id + method = "PATCH", + path = "/apis/" .. api.id, + body = { + name = "my-updated-api" + }, + headers = {["Content-Type"] = content_type} }) local body = assert.res_status(200, res) local json = cjson.decode(body) - assert.same(api, json) - end) - it("retrieves by name", function() + assert.equal("my-updated-api", json.name) + assert.equal(api.id, json.id) + + local in_db = assert(dao.apis:find {id = api.id}) + assert.same(json, in_db) + end + end) + it_content_types("updates a name from a name in path", function(content_type) + return function() local res = assert(client:send { - method = "GET", - path = "/apis/" .. api.name + method = "PATCH", + path = "/apis/" .. api.name, + body = { + name = "my-updated-api" + }, + headers = {["Content-Type"] = content_type} }) local body = assert.res_status(200, res) local json = cjson.decode(body) - assert.same(api, json) - end) - it("returns 404 if not found", function() + assert.equal("my-updated-api", json.name) + assert.equal(api.id, json.id) + + local in_db = assert(dao.apis:find {id = api.id}) + assert.same(json, in_db) + end + end) + it_content_types("updates uris", function(content_type) + return function() local res = assert(client:send { - method = "GET", - path = "/apis/_inexistent_" + method = "PATCH", + path = "/apis/" .. api.id, + body = { + uris = "/my-updated-api,/my-new-uri" + }, + headers = {["Content-Type"] = content_type} }) - assert.res_status(404, res) - end) - it("ignores an invalid body", function() + local body = assert.res_status(200, res) + local json = cjson.decode(body) + assert.same({ "/my-updated-api", "/my-new-uri" }, json.uris) + assert.equal(api.id, json.id) + + local in_db = assert(dao.apis:find {id = api.id}) + assert.same(json, in_db) + end + end) + it_content_types("updates strip_uri if not previously set", function(content_type) + return function() local res = assert(client:send { - method = "GET", + method = "PATCH", path = "/apis/" .. 
api.id, - body = "this fails if decoded as json", - headers = { - ["Content-Type"] = "application/json", - } + body = { + strip_uri = true + }, + headers = {["Content-Type"] = content_type} }) - assert.res_status(200, res) - end) + local body = assert.res_status(200, res) + local json = cjson.decode(body) + assert.True(json.strip_uri) + assert.equal(api.id, json.id) + + local in_db = assert(dao.apis:find {id = api.id}) + assert.same(json, in_db) + end end) + it_content_types("updates multiple fields at once", function(content_type) + return function() + local res = assert(client:send { + method = "PATCH", + path = "/apis/" .. api.id, + body = { + uris = "/my-updated-path", + hosts = "my-updated.tld" + }, + headers = {["Content-Type"] = content_type} + }) + local body = assert.res_status(200, res) + local json = cjson.decode(body) + assert.same({ "/my-updated-path" }, json.uris) + assert.same({ "my-updated.tld" }, json.hosts) + assert.equal(api.id, json.id) - describe("PATCH", function() - it_content_types("updates if found", function(content_type) - return function() + local in_db = assert(dao.apis:find {id = api.id}) + assert.same(json, in_db) + end + end) + it_content_types("removes optional field with ngx.null", function(content_type) + return function() + -- TODO: how should ngx.null work with application/www-form-urlencoded? + if content_type == "application/json" then local res = assert(client:send { method = "PATCH", path = "/apis/" .. api.id, body = { - name = "my-updated-api" + uris = ngx.null, + hosts = ngx.null, }, headers = {["Content-Type"] = content_type} }) local body = assert.res_status(200, res) local json = cjson.decode(body) - assert.equal("my-updated-api", json.name) + assert.is_nil(json.uris) + assert.is_nil(json.hosts) assert.equal(api.id, json.id) local in_db = assert(dao.apis:find {id = api.id}) assert.same(json, in_db) end - end) - it_content_types("updates a name from a name in path", function(content_type) - return function() - local res = assert(client:send { - method = "PATCH", - path = "/apis/" .. api.name, - body = { - name = "my-updated-api" - }, - headers = {["Content-Type"] = content_type} - }) - local body = assert.res_status(200, res) - local json = cjson.decode(body) - assert.equal("my-updated-api", json.name) - assert.equal(api.id, json.id) - - local in_db = assert(dao.apis:find {id = api.id}) - assert.same(json, in_db) - end - end) - it_content_types("updates uris", function(content_type) - return function() - local res = assert(client:send { - method = "PATCH", - path = "/apis/" .. api.id, - body = { - uris = "/my-updated-api,/my-new-uri" - }, - headers = {["Content-Type"] = content_type} - }) - local body = assert.res_status(200, res) - local json = cjson.decode(body) - assert.same({ "/my-updated-api", "/my-new-uri" }, json.uris) - assert.equal(api.id, json.id) + end + end) - local in_db = assert(dao.apis:find {id = api.id}) - assert.same(json, in_db) - end - end) - it_content_types("updates strip_uri if not previously set", function(content_type) + describe("errors", function() + it_content_types("returns 404 if not found", function(content_type) return function() local res = assert(client:send { method = "PATCH", - path = "/apis/" .. 
api.id, + path = "/apis/_inexistent_", body = { - strip_uri = true + uris = "/my-updated-path" }, headers = {["Content-Type"] = content_type} }) - local body = assert.res_status(200, res) - local json = cjson.decode(body) - assert.True(json.strip_uri) - assert.equal(api.id, json.id) - - local in_db = assert(dao.apis:find {id = api.id}) - assert.same(json, in_db) + assert.res_status(404, res) end end) - it_content_types("updates multiple fields at once", function(content_type) + it_content_types("handles invalid input", function(content_type) return function() local res = assert(client:send { method = "PATCH", path = "/apis/" .. api.id, body = { - uris = "/my-updated-path", - hosts = "my-updated.tld" + upstream_url = "api.com" }, headers = {["Content-Type"] = content_type} }) - local body = assert.res_status(200, res) + local body = assert.res_status(400, res) local json = cjson.decode(body) - assert.same({ "/my-updated-path" }, json.uris) - assert.same({ "my-updated.tld" }, json.hosts) - assert.equal(api.id, json.id) - - local in_db = assert(dao.apis:find {id = api.id}) - assert.same(json, in_db) - end - end) - it_content_types("removes optional field with ngx.null", function(content_type) - return function() - -- TODO: how should ngx.null work with application/www-form-urlencoded? - if content_type == "application/json" then - local res = assert(client:send { - method = "PATCH", - path = "/apis/" .. api.id, - body = { - uris = ngx.null, - hosts = ngx.null, - }, - headers = {["Content-Type"] = content_type} - }) - local body = assert.res_status(200, res) - local json = cjson.decode(body) - assert.is_nil(json.uris) - assert.is_nil(json.hosts) - assert.equal(api.id, json.id) - - local in_db = assert(dao.apis:find {id = api.id}) - assert.same(json, in_db) - end + assert.same({ upstream_url = "upstream_url is not a url" }, json) end end) + end) + end) - describe("errors", function() - it_content_types("returns 404 if not found", function(content_type) - return function() - local res = assert(client:send { - method = "PATCH", - path = "/apis/_inexistent_", - body = { - uris = "/my-updated-path" - }, - headers = {["Content-Type"] = content_type} - }) - assert.res_status(404, res) - end - end) - it_content_types("handles invalid input", function(content_type) - return function() - local res = assert(client:send { - method = "PATCH", - path = "/apis/" .. api.id, - body = { - upstream_url = "api.com" - }, - headers = {["Content-Type"] = content_type} - }) - local body = assert.res_status(400, res) - local json = cjson.decode(body) - assert.same({ upstream_url = "upstream_url is not a url" }, json) - end - end) - end) + describe("DELETE", function() + before_each(function() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end end) - describe("DELETE", function() - it("deletes an API by id", function() - local res = assert(client:send { - method = "DELETE", - path = "/apis/" .. api.id - }) - local body = assert.res_status(204, res) - assert.equal("", body) - end) - it("deletes an API by name", function() + it("deletes an API by id", function() + local res = assert(client:send { + method = "DELETE", + path = "/apis/" .. api.id + }) + local body = assert.res_status(204, res) + assert.equal("", body) + end) + it("deletes an API by name", function() + local res = assert(client:send { + method = "DELETE", + path = "/apis/" .. 
api.name + }) + local body = assert.res_status(204, res) + assert.equal("", body) + end) + describe("errors", function() + it("returns 404 if not found", function() local res = assert(client:send { method = "DELETE", - path = "/apis/" .. api.name + path = "/apis/_inexistent_" }) - local body = assert.res_status(204, res) - assert.equal("", body) - end) - describe("error", function() - it("returns 404 if not found", function() - local res = assert(client:send { - method = "DELETE", - path = "/apis/_inexistent_" - }) - assert.res_status(404, res) - end) + assert.res_status(404, res) end) end) end) + end) describe("/apis/{api}/plugins", function() local api @@ -693,6 +743,13 @@ describe("Admin API #" .. kong_config.database, function() end) describe("POST", function() + before_each(function() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end + end) + it_content_types("creates a plugin config", function(content_type) return function() local res = assert(client:send { @@ -728,9 +785,6 @@ describe("Admin API #" .. kong_config.database, function() end end) describe("errors", function() - -- TODO fix the weird nesting issues in this file that - -- require us to rescope client - local client before_each(function() client = assert(helpers.admin_client()) end) @@ -811,6 +865,13 @@ describe("Admin API #" .. kong_config.database, function() end) describe("PUT", function() + before_each(function() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end + end) + it_content_types("creates if not exists", function(content_type) return function() local res = assert(client:send { @@ -966,6 +1027,13 @@ describe("Admin API #" .. kong_config.database, function() end) describe("GET", function() + before_each(function() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end + end) + it("retrieves the first page", function() assert(dao.plugins:insert { name = "key-auth", @@ -1002,6 +1070,13 @@ describe("Admin API #" .. kong_config.database, function() end) describe("GET", function() + before_each(function() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end + end) + it("retrieves by id", function() local res = assert(client:send { method = "GET", @@ -1040,6 +1115,13 @@ describe("Admin API #" .. kong_config.database, function() end) describe("PATCH", function() + before_each(function() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end + end) + it_content_types("updates if found", function(content_type) return function() local res = assert(client:send { @@ -1145,6 +1227,13 @@ describe("Admin API #" .. kong_config.database, function() end) describe("DELETE", function() + before_each(function() + client = assert(helpers.admin_client()) + end) + after_each(function() + if client then client:close() end + end) + it("deletes a plugin configuration", function() local res = assert(client:send { method = "DELETE", @@ -1166,8 +1255,6 @@ describe("Admin API #" .. 
kong_config.database, function() end) end) -end) - describe("Admin API request size", function() local client setup(function() @@ -1224,3 +1311,4 @@ describe("Admin API request size", function() assert.res_status(413, res) end) end) +end) diff --git a/spec/02-integration/04-admin_api/03-consumers_routes_spec.lua b/spec/02-integration/04-admin_api/03-consumers_routes_spec.lua index c3e990666003..e006d188ad49 100644 --- a/spec/02-integration/04-admin_api/03-consumers_routes_spec.lua +++ b/spec/02-integration/04-admin_api/03-consumers_routes_spec.lua @@ -15,10 +15,8 @@ describe("Admin API", function() setup(function() helpers.run_migrations() assert(helpers.start_kong()) - client = helpers.admin_client() end) teardown(function() - if client then client:close() end helpers.stop_kong() end) @@ -37,6 +35,11 @@ describe("Admin API", function() username = "83825bb5-38c7-4160-8c23-54dd2b007f31", -- uuid format custom_id = "1a2b" }) + client = helpers.admin_client() + end) + + after_each(function() + if client then client:close() end end) describe("/consumers", function() @@ -109,6 +112,71 @@ describe("Admin API", function() assert.same({ custom_id = "already exists with value '1234'" }, json) end end) + it("returns 415 on invalid content-type", function() + local res = assert(client:request { + method = "POST", + path = "/consumers", + body = '{"hello": "world"}', + headers = {["Content-Type"] = "invalid"} + }) + assert.res_status(415, res) + end) + it("returns 415 on missing content-type with body ", function() + local res = assert(client:request { + method = "POST", + path = "/consumers", + body = "invalid" + }) + assert.res_status(415, res) + end) + it("returns 400 on missing body with application/json", function() + local res = assert(client:request { + method = "POST", + path = "/consumers", + headers = {["Content-Type"] = "application/json"} + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "Cannot parse JSON body" }, json) + end) + it("returns 400 on missing body with multipart/form-data", function() + local res = assert(client:request { + method = "POST", + path = "/consumers", + headers = {["Content-Type"] = "multipart/form-data"} + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ + custom_id = "At least a 'custom_id' or a 'username' must be specified", + username = "At least a 'custom_id' or a 'username' must be specified", + }, json) + end) + it("returns 400 on missing body with multipart/x-www-form-urlencoded", function() + local res = assert(client:request { + method = "POST", + path = "/consumers", + headers = {["Content-Type"] = "application/x-www-form-urlencoded"} + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ + custom_id = "At least a 'custom_id' or a 'username' must be specified", + username = "At least a 'custom_id' or a 'username' must be specified", + }, json) + end) + it("returns 400 on missing body with no content-type header", function() + local res = assert(client:request { + method = "POST", + path = "/consumers", + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ + custom_id = "At least a 'custom_id' or a 'username' must be specified", + username = "At least a 'custom_id' or a 'username' must be specified", + }, json) + end) end) end) @@ -173,7 +241,7 @@ describe("Admin API", function() -- -- Eventually, our Admin endpoint will follow a more appropriate -- behavior for 
PUT. - local res = assert(client:send { + local res = assert(helpers.admin_client():send { method = "PUT", path = "/consumers", body = { @@ -233,6 +301,71 @@ describe("Admin API", function() assert.same({ username = "already exists with value 'alice'" }, json) end end) + it("returns 415 on invalid content-type", function() + local res = assert(client:request { + method = "PUT", + path = "/consumers", + body = '{"hello": "world"}', + headers = {["Content-Type"] = "invalid"} + }) + assert.res_status(415, res) + end) + it("returns 415 on missing content-type with body ", function() + local res = assert(client:request { + method = "PUT", + path = "/consumers", + body = "invalid" + }) + assert.res_status(415, res) + end) + it("returns 400 on missing body with application/json", function() + local res = assert(client:request { + method = "PUT", + path = "/consumers", + headers = {["Content-Type"] = "application/json"} + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "Cannot parse JSON body" }, json) + end) + it("returns 400 on missing body with multipart/form-data", function() + local res = assert(client:request { + method = "PUT", + path = "/consumers", + headers = {["Content-Type"] = "multipart/form-data"} + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ + custom_id = "At least a 'custom_id' or a 'username' must be specified", + username = "At least a 'custom_id' or a 'username' must be specified", + }, json) + end) + it("returns 400 on missing body with multipart/x-www-form-urlencoded", function() + local res = assert(client:request { + method = "PUT", + path = "/consumers", + headers = {["Content-Type"] = "application/x-www-form-urlencoded"} + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ + custom_id = "At least a 'custom_id' or a 'username' must be specified", + username = "At least a 'custom_id' or a 'username' must be specified", + }, json) + end) + it("returns 400 on missing body with no content-type header", function() + local res = assert(client:request { + method = "PUT", + path = "/consumers", + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ + custom_id = "At least a 'custom_id' or a 'username' must be specified", + username = "At least a 'custom_id' or a 'username' must be specified", + }, json) + end) end) end) @@ -428,6 +561,62 @@ describe("Admin API", function() assert.same({ message = "empty body" }, json) end end) + it("returns 415 on invalid content-type", function() + local res = assert(client:request { + method = "PATCH", + path = "/consumers/" .. consumer.id, + body = '{"hello": "world"}', + headers = {["Content-Type"] = "invalid"} + }) + assert.res_status(415, res) + end) + it("returns 415 on missing content-type with body ", function() + local res = assert(client:request { + method = "PATCH", + path = "/consumers/" .. consumer.id, + body = "invalid" + }) + assert.res_status(415, res) + end) + it("returns 400 on missing body with application/json", function() + local res = assert(client:request { + method = "PATCH", + path = "/consumers/" .. 
consumer.id, + headers = {["Content-Type"] = "application/json"} + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "Cannot parse JSON body" }, json) + end) + it("returns 400 on missing body with multipart/form-data", function() + local res = assert(client:request { + method = "PATCH", + path = "/consumers/" .. consumer.id, + headers = {["Content-Type"] = "multipart/form-data"} + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "empty body" }, json) + end) + it("returns 400 on missing body with multipart/x-www-form-urlencoded", function() + local res = assert(client:request { + method = "PATCH", + path = "/consumers/" .. consumer.id, + headers = {["Content-Type"] = "application/x-www-form-urlencoded"} + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "empty body" }, json) + end) + it("returns 400 on missing body with no content-type header", function() + local res = assert(client:request { + method = "PATCH", + path = "/consumers/" .. consumer.id, + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same({ message = "empty body" }, json) + end) end) end) From 07202fd85be25751cff4df6a71763e28273d4cce Mon Sep 17 00:00:00 2001 From: hbagdi Date: Thu, 14 Dec 2017 17:14:26 -0800 Subject: [PATCH 32/74] feat(admin) /targets endpoint return active ones only Breaking change: The endpoints to list active and all Targets associated with an Upstream have been changed as follows: * `/upstreams/:upstream_name_or_id/targets/active` is removed. * `/upstreams/:upstream_name_or_id/targets/` now only lists the active Targets instead of listing all of them. * `/upstreams/:upstream_name_or_id/targets/all` is introduced, which returns a list of all the Targets (old and current), as per the previous behavior of the `/targets/` endpoint. 
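To make the new layout concrete, here is a minimal, hypothetical sketch in the style of the integration specs further below; it assumes the spec helpers and an upstream named `my-upstream`:

  local helpers = require "spec.helpers"
  local client  = helpers.admin_client()

  -- lists only the Targets currently active in the load balancer:
  local res = assert(client:send {
    method = "GET",
    path   = "/upstreams/my-upstream/targets",
  })
  assert.response(res).has.status(200)

  -- lists the full Target history, i.e. the old behavior of /targets/:
  res = assert(client:send {
    method = "GET",
    path   = "/upstreams/my-upstream/targets/all",
  })
  assert.response(res).has.status(200)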
From #3049 Closes #2791 --- kong/api/routes/upstreams.lua | 34 ++--- .../04-admin_api/08-targets_routes_spec.lua | 136 +++++++++--------- 2 files changed, 85 insertions(+), 85 deletions(-) diff --git a/kong/api/routes/upstreams.lua b/kong/api/routes/upstreams.lua index ce96f445128a..93f04f9d6f7c 100644 --- a/kong/api/routes/upstreams.lua +++ b/kong/api/routes/upstreams.lua @@ -107,23 +107,6 @@ return { self.params.upstream_id = self.upstream.id end, - GET = function(self, dao_factory) - crud.paginated_set(self, dao_factory.targets) - end, - - POST = function(self, dao_factory, helpers) - clean_history(self.params.upstream_id, dao_factory) - - crud.post(self.params, dao_factory.targets) - end, - }, - - ["/upstreams/:upstream_name_or_id/targets/active"] = { - before = function(self, dao_factory, helpers) - crud.find_upstream_by_name_or_id(self, dao_factory, helpers) - self.params.upstream_id = self.upstream.id - end, - GET = function(self, dao_factory) self.params.active = nil @@ -171,6 +154,23 @@ return { total = active_n, data = active, } + end, + + POST = function(self, dao_factory, helpers) + clean_history(self.params.upstream_id, dao_factory) + + crud.post(self.params, dao_factory.targets) + end, + }, + + ["/upstreams/:upstream_name_or_id/targets/all"] = { + before = function(self, dao_factory, helpers) + crud.find_upstream_by_name_or_id(self, dao_factory, helpers) + self.params.upstream_id = self.upstream.id + end, + + GET = function(self, dao_factory) + crud.paginated_set(self, dao_factory.targets) end }, diff --git a/spec/02-integration/04-admin_api/08-targets_routes_spec.lua b/spec/02-integration/04-admin_api/08-targets_routes_spec.lua index 59fab906a974..7fb8f3a98954 100644 --- a/spec/02-integration/04-admin_api/08-targets_routes_spec.lua +++ b/spec/02-integration/04-admin_api/08-targets_routes_spec.lua @@ -170,6 +170,65 @@ describe("Admin API", function() end) end) + describe("GET", function() + local upstream_name3 = "example.com" + local apis = {} + + before_each(function() + local upstream3 = assert(helpers.dao.upstreams:insert { + name = upstream_name3, + }) + + -- testing various behaviors + -- for each index in weights, create a number of targets, + -- each with its weight as each element of the sub-array + local weights = { + { 10, 0 }, -- two targets, eventually resulting in down + { 10, 0, 10 }, -- three targets, eventually resulting in up + { 10 }, -- one target, up + { 10, 10 }, -- two targets, up (we should only see one) + { 10, 50, 0 }, -- three targets, two up in a row, eventually down + { 10, 0, 20, 0 }, -- four targets, eventually down + } + + for i = 1, #weights do + for j = 1, #weights[i] do + ngx.sleep(0.01) + apis[i] = assert(helpers.dao.targets:insert { + target = "api-" .. tostring(i) .. ":80", + weight = weights[i][j], + upstream_id = upstream3.id + }) + end + end + end) + + it("only shows active targets", function() + for _, append in ipairs({ "", "/" }) do + local res = assert(client:send { + method = "GET", + path = "/upstreams/" .. upstream_name3 .. "/targets" .. 
append, + }) + assert.response(res).has.status(200) + local json = assert.response(res).has.jsonbody() + + -- we got three active targets for this upstream + assert.equal(3, #json.data) + assert.equal(3, json.total) + + -- when multiple active targets are present, we only see the last one + assert.equal(apis[4].id, json.data[1].id) + + -- validate the remaining returned targets + -- note the backwards order, because we walked the targets backwards + assert.equal(apis[3].target, json.data[2].target) + assert.equal(apis[2].target, json.data[3].target) + end + end) + end) + end) + + describe("/upstreams/{upstream}/targets/all/", function() describe("GET", function() before_each(function() for i = 1, 10 do @@ -184,7 +243,7 @@ describe("Admin API", function() it("retrieves the first page", function() local res = assert(client:send { methd = "GET", - path = "/upstreams/" .. upstream_name .. "/targets/", + path = "/upstreams/" .. upstream_name .. "/targets/all", }) assert.response(res).has.status(200) local json = assert.response(res).has.jsonbody() @@ -198,7 +257,7 @@ describe("Admin API", function() for i = 1, 4 do local res = assert(client:send { method = "GET", - path = "/upstreams/" .. upstream_name .. "/targets/", + path = "/upstreams/" .. upstream_name .. "/targets/all", query = {size = 3, offset = offset} }) assert.response(res).has.status(200) @@ -223,7 +282,7 @@ describe("Admin API", function() it("handles invalid filters", function() local res = assert(client:send { method = "GET", - path = "/upstreams/" .. upstream_name .. "/targets/", + path = "/upstreams/" .. upstream_name .. "/targets/all", query = {foo = "bar"}, }) local body = assert.response(res).has.status(400) @@ -233,7 +292,7 @@ describe("Admin API", function() it("ignores an invalid body", function() local res = assert(client:send { methd = "GET", - path = "/upstreams/" .. upstream_name .. "/targets/", + path = "/upstreams/" .. upstream_name .. "/targets/all", body = "this fails if decoded as json", headers = { ["Content-Type"] = "application/json", @@ -255,7 +314,7 @@ describe("Admin API", function() it("data property is an empty array", function() local res = assert(client:send { method = "GET", - path = "/upstreams/" .. upstream_name2 .. "/targets/", + path = "/upstreams/" .. upstream_name2 .. "/targets/all", }) local body = assert.response(res).has.status(200) local json = cjson.decode(body) @@ -265,65 +324,6 @@ describe("Admin API", function() end) end) - describe("/upstreams/{upstream}/targets/active/", function() - describe("GET", function() - local upstream_name3 = "example.com" - local apis = {} - - before_each(function() - local upstream3 = assert(helpers.dao.upstreams:insert { - name = upstream_name3, - }) - - -- testing various behaviors - -- for each index in weights, create a number of targets, - -- each with its weight as each element of the sub-array - local weights = { - { 10, 0 }, -- two targets, eventually resulting in down - { 10, 0, 10 }, -- three targets, eventually resulting in up - { 10 }, -- one target, up - { 10, 10 }, -- two targets, up (we should only see one) - { 10, 50, 0 }, -- three targets, two up in a row, eventually down - { 10, 0, 20, 0 }, -- four targets, eventually down - } - - for i = 1, #weights do - for j = 1, #weights[i] do - ngx.sleep(0.01) - apis[i] = assert(helpers.dao.targets:insert { - target = "api-" .. tostring(i) .. 
":80", - weight = weights[i][j], - upstream_id = upstream3.id - }) - end - end - end) - - it("only shows active targets", function() - for _, append in ipairs({ "", "/" }) do - local res = assert(client:send { - method = "GET", - path = "/upstreams/" .. upstream_name3 .. "/targets/active" .. append, - }) - assert.response(res).has.status(200) - local json = assert.response(res).has.jsonbody() - - -- we got three active targets for this upstream - assert.equal(3, #json.data) - assert.equal(3, json.total) - - -- when multiple active targets are present, we only see the last one - assert.equal(apis[4].id, json.data[1].id) - - -- validate the remaining returned targets - -- note the backwards order, because we walked the targets backwards - assert.equal(apis[3].target, json.data[2].target) - assert.equal(apis[2].target, json.data[3].target) - end - end) - end) - end) - describe("/upstreams/{upstream}/targets/{target}", function() describe("DELETE", function() local target @@ -357,7 +357,7 @@ describe("Admin API", function() local targets = assert(client:send { method = "GET", - path = "/upstreams/" .. upstream_name4 .. "/targets/", + path = "/upstreams/" .. upstream_name4 .. "/targets/all", }) assert.response(targets).has.status(200) local json = assert.response(targets).has.jsonbody() @@ -366,7 +366,7 @@ describe("Admin API", function() local active = assert(client:send { method = "GET", - path = "/upstreams/" .. upstream_name4 .. "/targets/active", + path = "/upstreams/" .. upstream_name4 .. "/targets", }) assert.response(active).has.status(200) json = assert.response(active).has.jsonbody() @@ -384,7 +384,7 @@ describe("Admin API", function() local targets = assert(client:send { method = "GET", - path = "/upstreams/" .. upstream_name4 .. "/targets/", + path = "/upstreams/" .. upstream_name4 .. "/targets/all", }) assert.response(targets).has.status(200) local json = assert.response(targets).has.jsonbody() @@ -393,7 +393,7 @@ describe("Admin API", function() local active = assert(client:send { method = "GET", - path = "/upstreams/" .. upstream_name4 .. "/targets/active", + path = "/upstreams/" .. upstream_name4 .. 
"/targets", }) assert.response(active).has.status(200) json = assert.response(active).has.jsonbody() From 7824312c07e5b64cc13ce340906bffc0817154d3 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Thu, 14 Dec 2017 18:21:28 -0800 Subject: [PATCH 33/74] chore(deps) bump minimum OpenResty required version to 1.11.2.5 See: https://openresty.org/en/changelog-1011002.html From #3097 --- .travis.yml | 2 +- kong/meta.lua | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index bc894a803f4a..0d75615aac7a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,7 +22,7 @@ env: - LUAROCKS=2.4.3 - OPENSSL=1.0.2n - CASSANDRA=2.2.8 - - OPENRESTY_BASE=1.11.2.4 + - OPENRESTY_BASE=1.11.2.5 - OPENRESTY_LATEST=1.13.6.1 - OPENRESTY=$OPENRESTY_BASE - DOWNLOAD_CACHE=$HOME/download-cache diff --git a/kong/meta.lua b/kong/meta.lua index 870e5a6c9c84..597466cfaa50 100644 --- a/kong/meta.lua +++ b/kong/meta.lua @@ -18,6 +18,6 @@ return { -- third-party dependencies' required version, as they would be specified -- to lua-version's `set()` in the form {from, to} _DEPENDENCIES = { - nginx = {"1.11.2.4", "1.13.6.1"}, + nginx = {"1.11.2.5", "1.13.6.1"}, } } From bc7efc4ec9ab35f215bb91d6683d361455e8e4fe Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Mon, 6 Nov 2017 15:22:14 -0200 Subject: [PATCH 34/74] chore(deps) update dependencies for health checks support --- kong-0.11.2-0.rockspec | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kong-0.11.2-0.rockspec b/kong-0.11.2-0.rockspec index 41cb27ffefa8..626b93633dc6 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.11.2-0.rockspec @@ -27,9 +27,10 @@ dependencies = { "luaossl == 20171028", "luasyslog == 1.0.0", "lua_pack == 1.0.5", - "lua-resty-dns-client == 0.6.2", - "lua-resty-worker-events == 0.3.0", + "lua-resty-dns-client == 0.6.3", + "lua-resty-worker-events == 0.3.1", "lua-resty-mediador == 0.1.2", + "lua-resty-healthcheck == 0.2.0", } build = { type = "builtin", From 3931ade674b31421f3d614f480a93f418b354f7b Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Tue, 14 Nov 2017 16:06:30 -0200 Subject: [PATCH 35/74] feat(utils) add deep_merge utility function Backport from commit fd5c8dc39127e9362975faefd30d915f0710fa77 by @kikito --- kong/tools/utils.lua | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/kong/tools/utils.lua b/kong/tools/utils.lua index cd42df2d1f3d..4cad2e73a966 100644 --- a/kong/tools/utils.lua +++ b/kong/tools/utils.lua @@ -425,6 +425,28 @@ function _M.shallow_copy(orig) return copy end +--- Merges two tables recursively +-- For each subtable in t1 and t2, an equivalent (but different) table will +-- be created in the resulting merge. If t1 and t2 have a subtable with in the +-- same key k, res[k] will be a deep merge of both subtables. +-- Metatables are not taken into account. +-- Keys are copied by reference (if tables are used as keys they will not be +-- duplicated) +-- @param t1 one of the tables to merge +-- @param t2 one of the tables to merge +-- @return Returns a table representing a deep merge of the new table +function _M.deep_merge(t1, t2) + local res = _M.deep_copy(t1) + for k, v in pairs(t2) do + if type(v) == "table" and type(res[k]) == "table" then + res[k] = _M.deep_merge(res[k], v) + else + res[k] = _M.deep_copy(v) -- returns v when it is not a table + end + end + return res +end + local err_list_mt = {} --- Concatenates lists into a new table. 
From 82c489d5c2d6378ba7df964a0837d832f17a518c Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Fri, 10 Nov 2017 16:30:27 -0200 Subject: [PATCH 36/74] fix(dao) support self_check() on incomplete upstream objects --- kong/dao/schemas/upstreams.lua | 62 +++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/kong/dao/schemas/upstreams.lua b/kong/dao/schemas/upstreams.lua index 84d9a5c790a9..8f36da0ccd3a 100644 --- a/kong/dao/schemas/upstreams.lua +++ b/kong/dao/schemas/upstreams.lua @@ -65,15 +65,17 @@ return { self_check = function(schema, config, dao, is_updating) -- check the name - local p = utils.normalize_ip(config.name) - if not p then - return false, Errors.schema("Invalid name; must be a valid hostname") - end - if p.type ~= "name" then - return false, Errors.schema("Invalid name; no ip addresses allowed") - end - if p.port then - return false, Errors.schema("Invalid name; no port allowed") + if config.name then + local p = utils.normalize_ip(config.name) + if not p then + return false, Errors.schema("Invalid name; must be a valid hostname") + end + if p.type ~= "name" then + return false, Errors.schema("Invalid name; no ip addresses allowed") + end + if p.port then + return false, Errors.schema("Invalid name; no port allowed") + end end if config.hash_on_header then @@ -98,32 +100,36 @@ return { "but no header name provided") end - if config.hash_on == "none" then - if config.hash_fallback ~= "none" then - return false, Errors.schema("Cannot set fallback if primary " .. - "'hash_on' is not set") - end - - else - if config.hash_on == config.hash_fallback then - if config.hash_on ~= "header" then - return false, Errors.schema("Cannot set fallback and primary " .. - "hashes to the same value") + if config.hash_on and config.hash_fallback then + if config.hash_on == "none" then + if config.hash_fallback ~= "none" then + return false, Errors.schema("Cannot set fallback if primary " .. + "'hash_on' is not set") + end - else - local upper_hash_on = config.hash_on_header:upper() - local upper_hash_fallback = config.hash_fallback_header:upper() - if upper_hash_on == upper_hash_fallback then - return false, Errors.schema("Cannot set fallback and primary ".. + else + if config.hash_on == config.hash_fallback then + if config.hash_on ~= "header" then + return false, Errors.schema("Cannot set fallback and primary " .. "hashes to the same value") + + else + local upper_hash_on = config.hash_on_header:upper() + local upper_hash_fallback = config.hash_fallback_header:upper() + if upper_hash_on == upper_hash_fallback then + return false, Errors.schema("Cannot set fallback and primary ".. 
+ "hashes to the same value") + end end end end end - -- check the slots number - if config.slots < SLOTS_MIN or config.slots > SLOTS_MAX then - return false, Errors.schema(SLOTS_MSG) + if config.slots then + -- check the slots number + if config.slots < SLOTS_MIN or config.slots > SLOTS_MAX then + return false, Errors.schema(SLOTS_MSG) + end end return true From af94b110a8dc6bbb785056f00a202e9766d8fedd Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Fri, 10 Nov 2017 16:29:22 -0200 Subject: [PATCH 37/74] feat(dao) add health checks config to upstreams entity --- kong/dao/migrations/cassandra.lua | 36 +++++++++ kong/dao/migrations/postgres.lua | 42 ++++++++++ kong/dao/schemas/upstreams.lua | 123 ++++++++++++++++++++++++++++++ 3 files changed, 201 insertions(+) diff --git a/kong/dao/migrations/cassandra.lua b/kong/dao/migrations/cassandra.lua index 25d3ff2c0eab..f43a3b30425b 100644 --- a/kong/dao/migrations/cassandra.lua +++ b/kong/dao/migrations/cassandra.lua @@ -520,4 +520,40 @@ return { end, down = function(_, _, dao) end -- n.a. since the columns will be dropped }, + { + name = "2017-11-07-192000_upstream_healthchecks", + up = [[ + ALTER TABLE upstreams ADD healthchecks text; + ]], + down = [[ + ALTER TABLE upstreams DROP healthchecks; + ]] + }, + { + name = "2017-11-07-192100_upstream_healthchecks_2", + up = function(_, _, dao) + local rows, err = dao.db:query([[ + SELECT * FROM upstreams; + ]]) + if err then + return err + end + + local upstreams = require("kong.dao.schemas.upstreams") + local default = upstreams.fields.healthchecks.default + + for _, row in ipairs(rows) do + if not row.healthchecks then + + local _, err = dao.upstreams:update({ + healthchecks = default, + }, { id = row.id }) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end + }, } diff --git a/kong/dao/migrations/postgres.lua b/kong/dao/migrations/postgres.lua index 8537c4bd42d7..aeb43d88a652 100644 --- a/kong/dao/migrations/postgres.lua +++ b/kong/dao/migrations/postgres.lua @@ -581,4 +581,46 @@ return { end, down = function(_, _, dao) end -- n.a. since the columns will be dropped }, + { + name = "2017-11-07-192000_upstream_healthchecks", + up = [[ + DO $$ + BEGIN + ALTER TABLE upstreams ADD COLUMN healthchecks json; + EXCEPTION WHEN duplicate_column THEN + -- Do nothing, accept existing state + END$$; + + ]], + down = [[ + ALTER TABLE upstreams DROP COLUMN IF EXISTS healthchecks; + ]] + }, + { + name = "2017-11-07-192100_upstream_healthchecks_2", + up = function(_, _, dao) + local rows, err = dao.db:query([[ + SELECT * FROM upstreams; + ]]) + if err then + return err + end + + local upstreams = require("kong.dao.schemas.upstreams") + local default = upstreams.fields.healthchecks.default + + for _, row in ipairs(rows) do + if not row.healthchecks then + + local _, err = dao.upstreams:update({ + healthchecks = default, + }, { id = row.id }) + if err then + return err + end + end + end + end, + down = function(_, _, dao) end + }, } diff --git a/kong/dao/schemas/upstreams.lua b/kong/dao/schemas/upstreams.lua index 8f36da0ccd3a..cf409850f943 100644 --- a/kong/dao/schemas/upstreams.lua +++ b/kong/dao/schemas/upstreams.lua @@ -1,10 +1,128 @@ local Errors = require "kong.dao.errors" local utils = require "kong.tools.utils" +local match = string.match +local sub = string.sub local DEFAULT_SLOTS = 100 local SLOTS_MIN, SLOTS_MAX = 10, 2^16 local SLOTS_MSG = "number of slots must be between " .. SLOTS_MIN .. " and " .. 
SLOTS_MAX + +local function check_nonnegative(arg) + if arg < 0 then + return false, "must be greater than or equal to 0" + end +end + + +local function check_positive_int(t) + if t < 1 or t > 2^31 - 1 or math.floor(t) ~= t then + return false, "must be an integer between 1 and " .. 2^31 - 1 + end + + return true +end + + +local function check_http_path(arg) + if match(arg, "^%s*$") then + return false, "path is empty" + end + if sub(arg, 1, 1) ~= "/" then + return false, "must be prefixed with slash" + end + return true +end + + +local function check_http_statuses(arg) + for _, s in ipairs(arg) do + if type(s) ~= "number" then + return false, "array element is not a number" + end + + if math.floor(s) ~= s then + return false, "must be an integer" + end + + -- Accept any three-digit status code, + -- applying Postel's law in case of nonstandard HTTP codes + if s < 100 or s > 999 then + return false, "invalid status code '" .. s .. + "': must be between 100 and 999" + end + end + return true +end + + +-- same fields as lua-resty-healthcheck library +local healthchecks_defaults = { + active = { + timeout = 1, + concurrency = 10, + http_path = "/", + healthy = { + interval = 0, -- 0 = disabled by default + http_statuses = { 200, 302 }, + successes = 2, + }, + unhealthy = { + interval = 0, -- 0 = disabled by default + http_statuses = { 429, 404, + 500, 501, 502, 503, 504, 505 }, + tcp_failures = 2, + timeouts = 3, + http_failures = 5, + }, + }, + passive = { + healthy = { + http_statuses = { 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, + 300, 301, 302, 303, 304, 305, 306, 307, 308 }, + successes = 5, + }, + unhealthy = { + http_statuses = { 429, 500, 503 }, + tcp_failures = 2, + timeouts = 7, + http_failures = 5, + }, + }, +} + + +local funcs = { + timeout = check_nonnegative, + concurrency = check_positive_int, + interval = check_nonnegative, + successes = check_positive_int, + tcp_failures = check_positive_int, + timeouts = check_positive_int, + http_failures = check_positive_int, + http_path = check_http_path, + http_statuses = check_http_statuses, +} + + +local function gen_schema(tbl) + local ret = {} + for k, v in pairs(tbl) do + if type(v) == "number" or type(v) == "string" then + ret[k] = { type = type(v), default = v, func = funcs[k] } + + elseif type(v) == "table" then + if v[1] then + ret[k] = { type = "array", default = v, func = funcs[k] } + else + ret[k] = { type = "table", schema = gen_schema(v), default = v } + end + end + end + return { fields = ret } +end + + return { table = "upstreams", primary_key = {"id"}, @@ -61,6 +179,11 @@ return { type = "number", default = DEFAULT_SLOTS, }, + healthchecks = { + type = "table", + schema = gen_schema(healthchecks_defaults), + default = healthchecks_defaults, + }, }, self_check = function(schema, config, dao, is_updating) From 23377712128410ec23976d5370fc531314b71b8d Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Tue, 5 Dec 2017 12:51:58 -0200 Subject: [PATCH 38/74] tests(healthchecks) test validation of health check config --- spec/01-unit/007-entities_schemas_spec.lua | 110 +++++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/spec/01-unit/007-entities_schemas_spec.lua b/spec/01-unit/007-entities_schemas_spec.lua index 3babbb5c1313..05881ede4b39 100644 --- a/spec/01-unit/007-entities_schemas_spec.lua +++ b/spec/01-unit/007-entities_schemas_spec.lua @@ -748,6 +748,116 @@ describe("Entities Schemas", function() assert.is_nil(check) end) + it("should verify healthcheck configuration", function() + + -- tests for 
failure + local tests = { + {{ active = { timeout = -1 }}, "greater than or equal to 0" }, + {{ active = { concurrency = 0.5 }}, "must be an integer" }, + {{ active = { concurrency = -10 }}, "must be an integer" }, + {{ active = { http_path = "" }}, "is empty" }, + {{ active = { http_path = "ovo" }}, "must be prefixed with slash" }, + {{ active = { healthy = { interval = -1 }}}, "greater than or equal to 0" }, + {{ active = { healthy = { http_statuses = 404 }}}, "not an array" }, + {{ active = { healthy = { http_statuses = { "ovo" }}}}, "not a number" }, + {{ active = { healthy = { http_statuses = { -1 }}}}, "status code" }, + {{ active = { healthy = { http_statuses = { 99 }}}}, "status code" }, + {{ active = { healthy = { http_statuses = { 1000 }}}}, "status code" }, + {{ active = { healthy = { http_statuses = { 111.314 }}}}, "must be an integer" }, + {{ active = { healthy = { successes = 0.5 }}}, "must be an integer" }, + {{ active = { healthy = { successes = 0 }}}, "must be an integer" }, + {{ active = { healthy = { successes = -1 }}}, "an integer between" }, + {{ active = { unhealthy = { interval = -1 }}}, "greater than or equal to 0" }, + {{ active = { unhealthy = { http_statuses = 404 }}}, "not an array" }, + {{ active = { unhealthy = { http_statuses = { "ovo" }}}}, "not a number" }, + {{ active = { unhealthy = { http_statuses = { -1 }}}}, "status code" }, + {{ active = { unhealthy = { http_statuses = { 99 }}}}, "status code" }, + {{ active = { unhealthy = { http_statuses = { 1000 }}}}, "status code" }, + {{ active = { unhealthy = { tcp_failures = 0.5 }}}, "must be an integer" }, + {{ active = { unhealthy = { tcp_failures = 0 }}}, "must be an integer" }, + {{ active = { unhealthy = { tcp_failures = -1 }}}, "an integer between" }, + {{ active = { unhealthy = { timeouts = 0.5 }}}, "must be an integer" }, + {{ active = { unhealthy = { timeouts = 0 }}}, "must be an integer" }, + {{ active = { unhealthy = { timeouts = -1 }}}, "an integer between" }, + {{ active = { unhealthy = { http_failures = 0.5 }}}, "must be an integer" }, + {{ active = { unhealthy = { http_failures = -1 }}}, "an integer between" }, + {{ passive = { healthy = { http_statuses = 404 }}}, "not an array" }, + {{ passive = { healthy = { http_statuses = { "ovo" }}}}, "not a number" }, + {{ passive = { healthy = { http_statuses = { -1 }}}}, "status code" }, + {{ passive = { healthy = { http_statuses = { 99 }}}}, "status code" }, + {{ passive = { healthy = { http_statuses = { 1000 }}}}, "status code" }, + {{ passive = { healthy = { successes = 0.5 }}}, "must be an integer" }, + {{ passive = { healthy = { successes = 0 }}}, "must be an integer" }, + {{ passive = { healthy = { successes = -1 }}}, "an integer between" }, + {{ passive = { unhealthy = { http_statuses = 404 }}}, "not an array" }, + {{ passive = { unhealthy = { http_statuses = { "ovo" }}}}, "not a number" }, + {{ passive = { unhealthy = { http_statuses = { -1 }}}}, "status code" }, + {{ passive = { unhealthy = { http_statuses = { 99 }}}}, "status code" }, + {{ passive = { unhealthy = { http_statuses = { 1000 }}}}, "status code" }, + {{ passive = { unhealthy = { tcp_failures = 0.5 }}}, "must be an integer" }, + {{ passive = { unhealthy = { tcp_failures = 0 }}}, "must be an integer" }, + {{ passive = { unhealthy = { tcp_failures = -1 }}}, "an integer between" }, + {{ passive = { unhealthy = { timeouts = 0.5 }}}, "must be an integer" }, + {{ passive = { unhealthy = { timeouts = 0 }}}, "must be an integer" }, + {{ passive = { unhealthy = { timeouts = -1 }}}, "an integer 
between" }, + {{ passive = { unhealthy = { http_failures = 0.5 }}}, "must be an integer" }, + {{ passive = { unhealthy = { http_failures = 0 }}}, "must be an integer" }, + {{ passive = { unhealthy = { http_failures = -1 }}}, "an integer between" }, + } + for _, test in ipairs(tests) do + local entity = { + name = "x", + healthchecks = test[1], + } + + -- convert nested table to field name + local path = { "healthchecks" } + local t = test[1] + while type(t) == "table" and type(next(t)) == "string" do + table.insert(path, (next(t))) + t = t[next(t)] + end + local field_name = table.concat(path, ".") + + local valid, errors = validate_entity(entity, upstreams_schema) + assert.is_false(valid) + assert.match(test[2], errors[field_name]) + end + + -- tests for success + tests = { + { active = { timeout = 0.5 }}, + { active = { timeout = 1 }}, + { active = { concurrency = 2 }}, + { active = { http_path = "/" }}, + { active = { http_path = "/test" }}, + { active = { healthy = { interval = 0 }}}, + { active = { healthy = { http_statuses = { 200, 300 } }}}, + { active = { healthy = { successes = 2 }}}, + { active = { unhealthy = { interval = 0 }}}, + { active = { unhealthy = { http_statuses = { 404 }}}}, + { active = { unhealthy = { tcp_failures = 3 }}}, + { active = { unhealthy = { timeouts = 9 }}}, + { active = { unhealthy = { http_failures = 2 }}}, + { passive = { healthy = { http_statuses = { 200, 201 } }}}, + { passive = { healthy = { successes = 2 }}}, + { passive = { unhealthy = { http_statuses = { 400, 500 } }}}, + { passive = { unhealthy = { tcp_failures = 8 }}}, + { passive = { unhealthy = { timeouts = 1 }}}, + { passive = { unhealthy = { http_failures = 2 }}}, + } + for _, test in ipairs(tests) do + local entity = { + name = "x", + healthchecks = test, + } + + local valid = validate_entity(entity, upstreams_schema) + assert.is_true(valid) + end + + end) + it("should require (optional) slots in a valid range", function() local valid, errors, check, _ local data = { name = "valid.host.name" } From c0cb1b27147c326f35c426a808c9ede5a0d122c3 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Mon, 6 Nov 2017 15:24:25 -0200 Subject: [PATCH 39/74] feat(healthchecks) event-based balancer with health checks Reworks the management of balancer objects from being on-demand, on each request, to be event-based, responding to DAO events. Balancers are now initialized on system startup, and updated based on propagated worker-events and cluster-events. These changes allow healthchecks to update properly. Attaches a healthchecker object to each balancer. The balancers report HTTP statuses and TCP failures to the healthchecker. Moves objects that were attached to the balancer object using fields such as `__target_history` into weakly-keyed local tables. 
--- kong/constants.lua | 1 + kong/core/balancer.lua | 744 +++++++++++++++++++++------- kong/core/handler.lua | 125 +++-- kong/init.lua | 18 +- kong/templates/nginx_kong.lua | 1 + spec/fixtures/custom_nginx.template | 1 + 6 files changed, 663 insertions(+), 227 deletions(-) diff --git a/kong/constants.lua b/kong/constants.lua index 7ae1e987ff47..d538dca681b7 100644 --- a/kong/constants.lua +++ b/kong/constants.lua @@ -72,5 +72,6 @@ return { "kong_cache", "kong_process_events", "kong_cluster_events", + "kong_healthchecks", }, } diff --git a/kong/core/balancer.lua b/kong/core/balancer.lua index 18f2dcb86f28..dca3530d9c3b 100644 --- a/kong/core/balancer.lua +++ b/kong/core/balancer.lua @@ -1,126 +1,145 @@ local pl_tablex = require "pl.tablex" -local responses = require "kong.tools.responses" local singletons = require "kong.singletons" -local dns_client = require "resty.dns.client" -- due to startup/require order, cannot use the one from 'singletons' here -local ring_balancer = require "resty.dns.balancer" + +-- due to startup/require order, cannot use the ones from 'singletons' here +local dns_client = require "resty.dns.client" local table_concat = table.concat local crc32 = ngx.crc32_short local toip = dns_client.toip local log = ngx.log -local ERROR = ngx.ERR +local ERR = ngx.ERR +local WARN = ngx.WARN local DEBUG = ngx.DEBUG local EMPTY_T = pl_tablex.readonly {} ---=========================================================== +-- for unit-testing purposes only +local _load_upstreams_dict_into_memory +local _load_upstream_into_memory +local _load_targets_into_memory + + +--============================================================================== -- Ring-balancer based resolution ---=========================================================== -local balancers = {} -- table holding our balancer objects, indexed by upstream name - --- caching logic; --- we retain 3 entities: --- 1) list of upstreams: to be invalidated on any upstream change --- 2) individual upstreams: to be invalidated on individual basis --- 3) target history for an upstream, invalidated when: +--============================================================================== + + +-- table holding our balancer objects, indexed by upstream name +local balancers = {} + + +-- objects whose lifetimes are bound to that of a balancer +local healthcheckers = setmetatable({}, { __mode = "k" }) +local healthchecker_callbacks = setmetatable({}, { __mode = "k" }) +local target_histories = setmetatable({}, { __mode = "k" }) + + +-- Caching logic +-- +-- We retain 3 entities in singletons.cache: +-- +-- 1) `"balancer:upstreams"` - a list of upstreams +-- to be invalidated on any upstream change +-- 2) `"balancer:upstreams:" .. id` - individual upstreams +-- to be invalidated on individual basis +-- 3) `"balancer:targets:" .. id` +-- target history for an upstream, invalidated: -- a) along with the upstream it belongs to -- b) upon any target change for the upstream (can only add entries) +-- -- Distinction between 1 and 2 makes it possible to invalidate individual -- upstreams, instead of all at once forcing to rebuild all balancers --- Implements a simple dictionary with all upstream-ids indexed --- by their name. 
-local function load_upstreams_dict_into_memory() - log(DEBUG, "fetching all upstreams") - local upstreams, err = singletons.dao.upstreams:find_all() - if err then - return nil, err - end - - -- build a dictionary, indexed by the upstream name - local upstreams_dict = {} - for _, up in ipairs(upstreams) do - upstreams_dict[up.name] = up.id - end - -- check whether any of our existing balancers has been deleted - for upstream_name in pairs(balancers) do - if not upstreams_dict[upstream_name] then - -- this one was deleted, so also clear the balancer object - balancers[upstream_name] = nil +local function stop_healthchecker(balancer) + local healthchecker = healthcheckers[balancer] + if healthchecker then + local ok, err = healthchecker:clear() + if not ok then + log(ERR, "[healthchecks] error clearing healthcheck data: ", err) end + healthchecker:stop() end - - return upstreams_dict + healthcheckers[balancer] = nil end --- delete a balancer object from our internal cache -local function invalidate_balancer(upstream_name) - balancers[upstream_name] = nil -end --- loads a single upstream entity -local function load_upstream_into_memory(upstream_id) - log(DEBUG, "fetching upstream: ", tostring(upstream_id)) +local get_upstream_by_id +do + ------------------------------------------------------------------------------ + -- Loads a single upstream entity. + -- @param upstream_id string + -- @return the upstream table, or nil+error + local function load_upstream_into_memory(upstream_id) + log(DEBUG, "fetching upstream: ", tostring(upstream_id)) - local upstream, err = singletons.dao.upstreams:find_all {id = upstream_id} - if not upstream then - return nil, err + local upstream, err = singletons.dao.upstreams:find_all {id = upstream_id} + if not upstream then + return nil, err + end + + return upstream[1] -- searched by id, so only 1 row in the returned set end + _load_upstream_into_memory = load_upstream_into_memory - return upstream[1] -- searched by id, so only 1 row in the returned set + get_upstream_by_id = function(upstream_id) + local upstream_cache_key = "balancer:upstreams:" .. upstream_id + return singletons.cache:get(upstream_cache_key, nil, + load_upstream_into_memory, upstream_id) + end end --- finds and returns an upstream entity. This functions covers --- caching, invalidation, db access, et al. --- @return upstream table, or `false` if not found, or nil+error -local function get_upstream(upstream_name) - local upstreams_dict, err = singletons.cache:get("balancer:upstreams", nil, - load_upstreams_dict_into_memory) - if err then - return nil, err - end - local upstream_id = upstreams_dict[upstream_name] - if not upstream_id then - return false -- no upstream by this name - end +local fetch_target_history +do + ------------------------------------------------------------------------------ + -- Loads the target history from the DAO. + -- @param upstream_id Upstream uuid for which to load the target history + -- @return The target history array, with target entity tables. + local function load_targets_into_memory(upstream_id) + log(DEBUG, "fetching targets for upstream: ",tostring(upstream_id)) - local upstream_cache_key = "balancer:upstreams:" .. 
upstream_id - return singletons.cache:get(upstream_cache_key, nil, - load_upstream_into_memory, upstream_id) -end + local target_history, err = singletons.dao.targets:find_all {upstream_id = upstream_id} + if not target_history then + return nil, err + end --- loads the target history for an upstream --- @param upstream_id Upstream uuid for which to load the target history -local function load_targets_into_memory(upstream_id) - log(DEBUG, "fetching targets for upstream: ",tostring(upstream_id)) + -- perform some raw data updates + for _, target in ipairs(target_history) do + -- split `target` field into `name` and `port` + local port + target.name, port = string.match(target.target, "^(.-):(%d+)$") + target.port = tonumber(port) - local target_history, err = singletons.dao.targets:find_all {upstream_id = upstream_id} - if err then - return nil, err - end + -- need exact order, so create sort-key by created-time and uuid + target.order = target.created_at .. ":" .. target.id + end - -- perform some raw data updates - for _, target in ipairs(target_history) do - -- split `target` field into `name` and `port` - local port - target.name, port = string.match(target.target, "^(.-):(%d+)$") - target.port = tonumber(port) + table.sort(target_history, function(a,b) + return a.order < b.order + end) - -- need exact order, so create sort-key by created-time and uuid - target.order = target.created_at .. ":" .. target.id + return target_history end + _load_targets_into_memory = load_targets_into_memory - table.sort(target_history, function(a,b) - return a.order < b.order - end) - return target_history + ------------------------------------------------------------------------------ + -- Fetch target history, from cache or the DAO. + -- @param upstream The upstream entity object + -- @return The target history array, with target entity tables. + fetch_target_history = function(upstream) + local targets_cache_key = "balancer:targets:" .. upstream.id + return singletons.cache:get(targets_cache_key, nil, + load_targets_into_memory, upstream.id) + end end --- applies the history of lb transactions from index `start` forward --- @param rb ring-balancer object + +-------------------------------------------------------------------------------- +-- Applies the history of lb transactions from index `start` forward. +-- @param rb ring balancer object -- @param history list of targets/transactions to be applied -- @param start the index where to start in the `history` parameter -- @return true @@ -135,7 +154,7 @@ local function apply_history(rb, history, start) assert(rb:removeHost(target.name, target.port)) end - rb.__targets_history[i] = { + target_histories[rb][i] = { name = target.name, port = target.port, weight = target.weight, @@ -146,96 +165,388 @@ local function apply_history(rb, history, start) return true end + +local function populate_healthchecker(hc, balancer) + for weight, addr, host in balancer:addressIter() do + if weight > 0 then + local ipaddr = addr.ip + local port = addr.port + local hostname = host.hostname + local ok, err = hc:add_target(ipaddr, port, hostname) + if ok then + -- Get existing health status which may have been initialized + -- with data from another worker, and apply to the new balancer. 
+        local tgt_status = hc:get_target_status(ipaddr, port)
+        balancer:setPeerStatus(tgt_status, ipaddr, port, hostname)
+
+      else
+        log(ERR, "[healthchecks] failed adding target: ", err)
+      end
+    end
+  end
+end
+
+
+local create_balancer
+do
+  local ring_balancer = require "resty.dns.balancer"
+
+  local create_healthchecker
+  do
+    local healthcheck -- delay initialization
+
+    ------------------------------------------------------------------------------
+    -- Callback function that informs the healthchecker when targets are added
+    -- to or removed from a balancer.
+    -- @param balancer the ring balancer object that triggers this callback.
+    -- @param action "added" or "removed"
+    -- @param ip string
+    -- @param port number
+    -- @param hostname string
+    local function ring_balancer_callback(balancer, action, ip, port, hostname)
+      local healthchecker = healthcheckers[balancer]
+      if action == "added" then
+        local ok, err = healthchecker:add_target(ip, port, hostname)
+        if not ok then
+          log(ERR, "[healthchecks] failed adding a target: ", err)
+        end
+
+      elseif action == "removed" then
+        local ok, err = healthchecker:remove_target(ip, port)
+        if not ok then
+          log(ERR, "[healthchecks] failed removing a target: ", err)
+        end
+
+      else
+        log(WARN, "[healthchecks] unknown status from balancer: ",
+                  tostring(action))
+      end
+    end
+
+    -- @param healthchecker The healthchecker object
+    -- @param balancer The balancer object
+    local function attach_healthchecker_to_balancer(healthchecker, balancer)
+      local hc_callback = function(tgt, event)
+        local ok, err = true, nil
+        if event == healthchecker.events.healthy then
+          ok, err = balancer:setPeerStatus(true, tgt.ip, tgt.port, tgt.hostname)
+        elseif event == healthchecker.events.unhealthy then
+          ok, err = balancer:setPeerStatus(false, tgt.ip, tgt.port, tgt.hostname)
+        end
+        if not ok then
+          log(ERR, "[healthchecks] failed setting peer status: ", err)
+        end
+      end
+
+      -- Register event using a weak-reference in worker-events,
+      -- and attach lifetime of callback to that of the balancer.
+      singletons.worker_events.register_weak(hc_callback, healthchecker.EVENT_SOURCE)
+      healthchecker_callbacks[balancer] = hc_callback
+
+      -- The lifetime of the healthchecker is based on that of the balancer.
+      healthcheckers[balancer] = healthchecker
+
+      balancer.report_http_status = function(ip, port, status)
+        local ok, err = healthchecker:report_http_status(ip, port, status,
+                                                         "passive")
+        if not ok then
+          log(ERR, "[healthchecks] failed reporting status: ", err)
+        end
+      end
+
+      balancer.report_tcp_failure = function(ip, port)
+        local ok, err = healthchecker:report_tcp_failure(ip, port, nil,
+                                                         "passive")
+        if not ok then
+          log(ERR, "[healthchecks] failed reporting status: ", err)
+        end
+      end
+    end
+
+    ----------------------------------------------------------------------------
+    -- Create a healthchecker object.
+    -- @param upstream An upstream entity table.
+    create_healthchecker = function(balancer, upstream)
+      if not healthcheck then
+        healthcheck = require("resty.healthcheck") -- delayed initialization
+      end
+      local healthchecker, err = healthcheck.new({
+        name = upstream.name,
+        shm_name = "kong_healthchecks",
+        checks = upstream.healthchecks,
+      })
+      if not healthchecker then
+        log(ERR, "[healthchecks] error creating health checker: ", err)
+        return nil, err
+      end
+
+      populate_healthchecker(healthchecker, balancer)
+
+      attach_healthchecker_to_balancer(healthchecker, balancer)
+
+      -- only enable the callback after the target history has been replayed. 
+ balancer:setCallback(ring_balancer_callback) + end + end + + ------------------------------------------------------------------------------ + -- @return The new balancer object, or nil+error + create_balancer = function(upstream, history, start) + local balancer, err = ring_balancer.new({ + wheelSize = upstream.slots, + order = upstream.orderlist, + dns = dns_client, + }) + if not balancer then + return nil, err + end + + target_histories[balancer] = {} + + if not history then + history, err = fetch_target_history(upstream) + if not history then + return nil, err + end + start = 1 + end + + apply_history(balancer, history, start) + + create_healthchecker(balancer, upstream) + + -- only make the new balancer available for other requests after it + -- is fully set up. + balancers[upstream.name] = balancer + + return balancer + end +end + + +-------------------------------------------------------------------------------- +-- Compare the target history of the upstream with that of the +-- current balancer object, updating or recreating the balancer if necessary. +-- @param upstream The upstream entity object +-- @param balancer The ring balancer object +-- @return true if all went well, or nil + error in case of failures. +local function check_target_history(upstream, balancer) + -- Fetch the upstream's targets, from cache or the db + local new_history, err = fetch_target_history(upstream) + if err then + return nil, err + end + + local old_history = target_histories[balancer] + + -- check history state + local old_size = #old_history + local new_size = #new_history + + if old_size == new_size and + (old_history[old_size] or EMPTY_T).order == + (new_history[new_size] or EMPTY_T).order then + -- No history update is necessary in the balancer object. + return true + end + + -- last entries in history don't match, so we must do some updates. + + -- compare balancer history with db-loaded history + local last_equal_index = 0 -- last index where history is the same + for i, entry in ipairs(old_history) do + if entry.order ~= (new_history[i] or EMPTY_T).order then + last_equal_index = i - 1 + break + end + end + + if last_equal_index == old_size then + -- history is the same, so we only need to add new entries + apply_history(balancer, new_history, last_equal_index + 1) + return true + end + + -- history not the same. + -- TODO: ideally we would undo the last ones until we're equal again + -- and can replay changes, but not supported by ring-balancer yet. + -- for now; create a new balancer from scratch + + stop_healthchecker(balancer) + + local new_balancer, err = create_balancer(upstream, new_history, 1) + if not new_balancer then + return nil, err + end + + return true +end + + +local get_all_upstreams +do + ------------------------------------------------------------------------------ + -- Implements a simple dictionary with all upstream-ids indexed + -- by their name. 
+  -- @return The upstreams dictionary, a map with upstream names as string keys
+  -- and upstream ids as values, or nil+error
+  local function load_upstreams_dict_into_memory()
+    log(DEBUG, "fetching all upstreams")
+    local upstreams, err = singletons.dao.upstreams:find_all()
+    if err then
+      return nil, err
+    end
+
+    -- build a dictionary, indexed by the upstream name
+    local upstreams_dict = {}
+    for _, up in ipairs(upstreams) do
+      upstreams_dict[up.name] = up.id
+    end
+
+    return upstreams_dict
+  end
+  _load_upstreams_dict_into_memory = load_upstreams_dict_into_memory
+
+
+  ------------------------------------------------------------------------------
+  -- Finds and returns the dictionary of all upstreams. This function covers
+  -- caching, invalidation, db access, et al.
+  -- @return the upstreams dictionary (a map of upstream names to upstream
+  -- ids), or nil+error
+  get_all_upstreams = function()
+    local upstreams_dict, err = singletons.cache:get("balancer:upstreams", nil,
+                                            load_upstreams_dict_into_memory)
+    if err then
+      return nil, err
+    end
+
+    return upstreams_dict
+  end
+end
+
+
+------------------------------------------------------------------------------
+-- Finds and returns an upstream entity. This function covers
+-- caching, invalidation, db access, et al.
+-- @param upstream_name string.
+-- @return upstream table, or `false` if not found, or nil+error
+local function get_upstream_by_name(upstream_name)
+  local upstreams_dict, err = get_all_upstreams()
+  if err then
+    return nil, err
+  end
+
+  local upstream_id = upstreams_dict[upstream_name]
+  if not upstream_id then
+    return false -- no upstream by this name
+  end
+
+  return get_upstream_by_id(upstream_id)
+end
+
+
 -- looks up a balancer for the target.
 -- @param target the table with the target details
--- @return balancer+upstream if found, `false` if not found, or nil+error on error
-local get_balancer = function(target)
-  -- NOTE: only called upon first lookup, so `cache_only` limitations do not apply here
+-- @param no_create (optional) if true, do not attempt to create
+-- (for thorough testing purposes)
+-- @return balancer if found, `false` if not found, or nil+error on error
+local function get_balancer(target, no_create)
+  -- NOTE: only called upon first lookup, so `cache_only` limitations
+  -- do not apply here
   local hostname = target.host
 
   -- first go and find the upstream object, from cache or the db
-  local upstream, err = get_upstream(hostname)
-
+  local upstream, err = get_upstream_by_name(hostname)
   if upstream == false then
-    return false -- no upstream by this name
+    return false -- no upstream by this name
   end
-
   if err then
-    return nil, err -- there was an error
+    return nil, err -- there was an error
   end
 
-  -- we've got the upstream, now fetch its targets, from cache or the db
-  local targets_cache_key = "balancer:targets:" .. 
upstream.id - local targets_history, err = singletons.cache:get(targets_cache_key, nil, - load_targets_into_memory, - upstream.id) - if err then - return nil, err + local balancer = balancers[upstream.name] + if not balancer then + if no_create then + return nil, "balancer not found" + else + log(ERR, "balancer not found for ", upstream.name, ", will create it") + return create_balancer(upstream) + end + end + + return balancer, upstream +end + + +--============================================================================== +-- Event Callbacks +--============================================================================== + + +-------------------------------------------------------------------------------- +-- Called on any changes to a target. +-- @param operation "create", "update" or "delete" +-- @param upstream Target table with `upstream_id` field +local function on_target_event(operation, target) + local upstream_id = target.upstream_id + + singletons.cache:invalidate_local("balancer:targets:" .. upstream_id) + + local upstream = get_upstream_by_id(upstream_id) + if not upstream then + log(ERR, "target ", operation, ": upstream not found for ", upstream_id) + return end local balancer = balancers[upstream.name] if not balancer then - -- no balancer yet (or invalidated) so create a new one - balancer, err = ring_balancer.new({ - wheelSize = upstream.slots, - dns = dns_client, - }) + log(ERR, "target ", operation, ": balancer not found for ", upstream.name) + return + end - if not balancer then - return balancer, err + local ok, err = check_target_history(upstream, balancer) + if not ok then + log(ERR, "failed checking target history for ", upstream.name, ": ", err) + end +end + + +-------------------------------------------------------------------------------- +-- Called on any changes to an upstream. +-- @param operation "create", "update" or "delete" +-- @param upstream Upstream table with `id` and `name` fields +local function on_upstream_event(operation, upstream) + + if operation == "create" then + local _, err = create_balancer(upstream) + if err then + log(ERR, "failed creating balancer for ", upstream.name, ": ", err) end - -- NOTE: we're inserting a foreign entity in the balancer, to keep track of - -- target-history changes! - balancer.__targets_history = {} - balancers[upstream.name] = balancer - end + elseif operation == "delete" or operation == "update" then - -- check history state - -- NOTE: in the code below variables are similarly named, but the - -- ones with `__`-prefixed, are the ones on the `balancer` object, and the - -- regular ones are the ones we just fetched and are comparing against. - local __size = #balancer.__targets_history - local size = #targets_history - - if __size ~= size or - (balancer.__targets_history[__size] or EMPTY_T).order ~= - (targets_history[size] or EMPTY_T).order then - -- last entries in history don't match, so we must do some updates. - - -- compare balancer history with db-loaded history - local last_equal_index = 0 -- last index where history is the same - for i, entry in ipairs(balancer.__targets_history) do - if entry.order ~= (targets_history[i] or EMPTY_T).order then - last_equal_index = i - 1 - break - end + if operation == "delete" then + singletons.cache:invalidate_local("balancer:upstreams") end + singletons.cache:invalidate_local("balancer:upstreams:" .. upstream.id) + singletons.cache:invalidate_local("balancer:targets:" .. 
upstream.id) - if last_equal_index == __size then - -- history is the same, so we only need to add new entries - apply_history(balancer, targets_history, last_equal_index + 1) + local balancer = balancers[upstream.name] + if balancer then + stop_healthchecker(balancer) + end + if operation == "delete" then + balancers[upstream.name] = nil else - -- history not the same. - -- TODO: ideally we would undo the last ones until we're equal again - -- and can replay changes, but not supported by ring-balancer yet. - -- for now; create a new balancer from scratch - balancer, err = ring_balancer.new({ - wheelSize = upstream.slots, - dns = dns_client, - }) - if not balancer then - return balancer, err + local _, err = create_balancer(upstream) + if err then + log(ERR, "failed recreating balancer for ", upstream.name, ": ", err) end - - balancer.__targets_history = {} - balancers[upstream.name] = balancer -- overwrite our existing one - apply_history(balancer, targets_history, 1) end + end - return balancer, upstream end @@ -281,21 +592,56 @@ local create_hash = function(upstream) return nil end end - -- nothing found, leave without a hash + -- nothing found, leave without a hash end ---=========================================================== + +--============================================================================== +-- Initialize balancers +--============================================================================== + + +local function init() + local upstreams, err = get_all_upstreams() + if not upstreams then + log(ngx.STDERR, "failed loading initial list of upstreams: ", err) + return + end + + local oks, errs = 0, 0 + for name, id in pairs(upstreams) do + local upstream = get_upstream_by_id(id) + local ok, err = create_balancer(upstream) + if ok ~= nil then + oks = oks + 1 + else + log(ngx.STDERR, "failed creating balancer for ", name, ": ", err) + errs = errs + 1 + end + end + log(DEBUG, "initialized ", oks, " balancer(s), ", errs, " error(s)") +end + + +--============================================================================== -- Main entry point when resolving ---=========================================================== +--============================================================================== + +-------------------------------------------------------------------------------- -- Resolves the target structure in-place (fields `ip`, `port`, and `hostname`). -- -- If the hostname matches an 'upstream' pool, then it must be balanced in that --- pool, in this case any port number provided will be ignored, as the pool provides it. +-- pool, in this case any port number provided will be ignored, as the pool +-- provides it. -- --- @param target the data structure as defined in `core.access.before` where it is created --- @return true on success, nil+error otherwise +-- @param target the data structure as defined in `core.access.before` where +-- it is created. +-- @param silent Do not produce body data (to be used in OpenResty contexts +-- which do not support sending it) +-- @return true on success, nil+error message+status code otherwise local function execute(target) + if target.type ~= "name" then -- it's an ip address (v4 or v6), so nothing we can do... 
target.ip = target.host @@ -304,8 +650,10 @@ local function execute(target) return true end - -- when tries == 0 it runs before the `balancer` context (in the `access` context), - -- when tries >= 2 then it performs a retry in the `balancer` context + -- when tries == 0, + -- it runs before the `balancer` context (in the `access` context), + -- when tries >= 2, + -- then it performs a retry in the `balancer` context local dns_cache_only = target.try_count ~= 0 local balancer, upstream, hash_value @@ -317,13 +665,13 @@ local function execute(target) -- first try, so try and find a matching balancer/upstream object balancer, upstream = get_balancer(target) if balancer == nil then -- `false` means no balancer, `nil` is error - return nil, upstream + return nil, upstream, 500 end if balancer then -- store for retries target.balancer = balancer - + -- calculate hash-value -- only add it if it doesn't exist, in case a plugin inserted one hash_value = target.hash_value @@ -334,54 +682,66 @@ local function execute(target) end end + local ip, port, hostname if balancer then -- have to invoke the ring-balancer - local ip, port, hostname = balancer:getPeer(hash_value, - target.try_count, - dns_cache_only) - if not ip then - if port == "No peers are available" then - -- in this case a "503 service unavailable", others will be a 500. - log(ERROR, "failure to get a peer from the ring-balancer '", - target.host, "': ", port) - return responses.send(503) - end - - return nil, port -- some other error + ip, port, hostname = balancer:getPeer(hash_value, + target.try_count, + dns_cache_only) + if not ip and port == "No peers are available" then + return nil, "failure to get a peer from the ring-balancer", 503 end - - target.ip = ip - target.port = port - target.hostname = hostname target.hash_value = hash_value - return true + + else + -- have to do a regular DNS lookup + ip, port = toip(target.host, target.port, dns_cache_only) + hostname = target.host + if not ip and port == "dns server error: 3 name error" then + return nil, "name resolution failed", 503 + end end - -- have to do a regular DNS lookup - local ip, port = toip(target.host, target.port, dns_cache_only) if not ip then - if port == "dns server error: 3 name error" then - -- in this case a "503 service unavailable", others will be a 500. 
- log(ERROR, "name resolution failed for '", tostring(target.host), - "': ", port) - return responses.send(503) - end - return nil, port + return nil, port, 500 end target.ip = ip target.port = port - target.hostname = target.host + target.hostname = hostname return true end + +-------------------------------------------------------------------------------- +-- for unit-testing purposes only +local function _get_healthchecker(balancer) + return healthcheckers[balancer] +end + + +-------------------------------------------------------------------------------- +-- for unit-testing purposes only +local function _get_target_history(balancer) + return target_histories[balancer] +end + + return { + init = init, execute = execute, - invalidate_balancer = invalidate_balancer, - - -- ones below are exported for test purposes - _load_upstreams_dict_into_memory = load_upstreams_dict_into_memory, - _load_upstream_into_memory = load_upstream_into_memory, - _load_targets_into_memory = load_targets_into_memory, + on_target_event = on_target_event, + on_upstream_event = on_upstream_event, + get_upstream_by_name = get_upstream_by_name, + get_all_upstreams = get_all_upstreams, + + -- ones below are exported for test purposes only + _create_balancer = create_balancer, + _get_balancer = get_balancer, + _get_healthchecker = _get_healthchecker, + _get_target_history = _get_target_history, + _load_upstreams_dict_into_memory = _load_upstreams_dict_into_memory, + _load_upstream_into_memory = _load_upstream_into_memory, + _load_targets_into_memory = _load_targets_into_memory, _create_hash = create_hash, } diff --git a/kong/core/handler.lua b/kong/core/handler.lua index b2658f3c59ad..3fe454b325fe 100644 --- a/kong/core/handler.lua +++ b/kong/core/handler.lua @@ -85,6 +85,12 @@ return { local cluster_events = singletons.cluster_events + -- initialize balancers + + + balancer.init() + + -- events dispatcher @@ -184,56 +190,107 @@ return { end, "crud", "ssl_certificates") - -- targets invalidations + -- target updates + -- worker_events local handler: event received from DAO worker_events.register(function(data) - log(DEBUG, "[events] Target updated, invalidating target in balancer") + local operation = data.operation local target = data.entity - - cache:invalidate("balancer:targets:" .. 
target.upstream_id) + -- => to worker_events node handler + local ok, err = worker_events.post("balancer", "targets", { + operation = data.operation, + entity = data.entity, + }) + if not ok then + log(ERR, "failed broadcasting target ", + operation, " to workers: ", err) + end + -- => to cluster_events handler + local key = fmt("%s:%s", operation, target.upstream_id) + ok, err = cluster_events:broadcast("balancer:targets", key) + if not ok then + log(ERR, "failed broadcasting target ", operation, " to cluster: ", err) + end end, "crud", "targets") - -- balancer invalidations + -- worker_events node handler + worker_events.register(function(data) + local operation = data.operation + local target = data.entity + -- => to balancer update + balancer.on_target_event(operation, target) + end, "balancer", "targets") - worker_events.register(function(data) - log(DEBUG, "[events] Upstream updated, invalidating balancer") - local upstream = data.entity - local ok, err = worker_events.post("balancer", "invalidate", upstream) + -- cluster_events handler + cluster_events:subscribe("balancer:targets", function(data) + local operation, key = unpack(utils.split(data, ":")) + -- => to worker_events node handler + local ok, err = worker_events.post("balancer", "targets", { + operation = operation, + entity = { + upstream_id = key, + } + }) if not ok then - log(ERR, "failed broadcasting balancer invalidation to workers: ", err) + log(ERR, "failed broadcasting target ", operation, " to workers: ", err) end + end) - local data = fmt("%s:%s", upstream.id, upstream.name) - local ok, err = cluster_events:broadcast("balancer:invalidate", data) + + -- upstream updates + + + -- worker_events local handler: event received from DAO + worker_events.register(function(data) + local operation = data.operation + local upstream = data.entity + -- => to worker_events node handler + local ok, err = worker_events.post("balancer", "upstreams", { + operation = data.operation, + entity = data.entity, + }) + if not ok then + log(ERR, "failed broadcasting upstream ", + operation, " to workers: ", err) + end + -- => to cluster_events handler + local key = fmt("%s:%s:%s", operation, upstream.id, upstream.name) + ok, err = cluster_events:broadcast("balancer:upstreams", key) if not ok then - log(ERR, "failed broadcasting balancer invalidation to cluster: ", err) + log(ERR, "failed broadcasting upstream ", operation, " to cluster: ", err) end end, "crud", "upstreams") - worker_events.register(function(upstream) - cache:invalidate_local("balancer:upstreams") - cache:invalidate_local("balancer:upstreams:" .. upstream.id) - cache:invalidate_local("balancer:targets:" .. 
upstream.id) - balancer.invalidate_balancer(upstream.name) - end, "balancer", "invalidate") + -- worker_events node handler + worker_events.register(function(data) + local operation = data.operation + local upstream = data.entity + -- => to balancer update + balancer.on_upstream_event(operation, upstream) + end, "balancer", "upstreams") - cluster_events:subscribe("balancer:invalidate", function(data) - local upstream_id, upstream_name = unpack(utils.split(data, ":")) - local ok, err = worker_events.post("balancer", "invalidate", { - id = upstream_id, - name = upstream_name, + cluster_events:subscribe("balancer:upstreams", function(data) + local operation, id, name = unpack(utils.split(data, ":")) + -- => to worker_events node handler + local ok, err = worker_events.post("balancer", "upstreams", { + operation = operation, + entity = { + id = id, + name = name, + } }) if not ok then - log(ERR, "failed broadcasting balancer invalidation to workers: ", err) + log(ERR, "failed broadcasting upstream ", operation, " to workers: ", err) end end) + end }, certificate = { @@ -389,12 +446,14 @@ return { end end - local ok, err = balancer.execute(ctx.balancer_address) + local ok, err, errcode = balancer.execute(ctx.balancer_address) if not ok then - return responses.send_HTTP_INTERNAL_SERVER_ERROR( - "failed the initial dns/balancer resolve for '" .. - ctx.balancer_address.host .. "' with: " .. - tostring(err)) + if errcode == 500 then + err = "failed the initial dns/balancer resolve for '" .. + ctx.balancer_address.host .. "' with: " .. + tostring(err) + end + return responses.send(errcode, err) end do @@ -489,6 +548,12 @@ return { log = { after = function(ctx) reports.log() + local addr = ctx.balancer_address + + -- Report HTTP status for health checks + if addr and addr.balancer and addr.ip then + addr.balancer.report_http_status(addr.ip, addr.port, ngx.status) + end end } } diff --git a/kong/init.lua b/kong/init.lua index 4be510c5e2b4..db19a906b0bb 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -299,12 +299,20 @@ function Kong.balancer() local previous_try = tries[addr.try_count - 1] previous_try.state, previous_try.code = get_last_failure() - local ok, err = balancer_execute(addr) + -- Report HTTP status for health checks + if addr.balancer then + if previous_try.state == "failed" then + addr.balancer.report_tcp_failure(addr.ip, addr.port) + else + addr.balancer.report_http_status(addr.ip, addr.port, previous_try.code) + end + end + + local ok, err, errcode = balancer_execute(addr) if not ok then ngx_log(ngx_ERR, "failed to retry the dns/balancer resolver for ", tostring(addr.host), "' with: ", tostring(err)) - - return responses.send(500) + return ngx.exit(errcode) end else @@ -326,8 +334,7 @@ function Kong.balancer() ngx_log(ngx_ERR, "failed to set the current peer (address: ", tostring(addr.ip), " port: ", tostring(addr.port),"): ", tostring(err)) - - return responses.send(500) + return ngx.exit(500) end ok, err = set_timeouts(addr.connect_timeout / 1000, @@ -356,6 +363,7 @@ end function Kong.access() local ctx = ngx.ctx + core.access.before(ctx) ctx.delay_response = true diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua index 5ab65ca37bd5..8a6abd644faa 100644 --- a/kong/templates/nginx_kong.lua +++ b/kong/templates/nginx_kong.lua @@ -32,6 +32,7 @@ lua_shared_dict kong 5m; lua_shared_dict kong_cache ${{MEM_CACHE_SIZE}}; lua_shared_dict kong_process_events 5m; lua_shared_dict kong_cluster_events 5m; +lua_shared_dict kong_healthchecks 5m; > if database == "cassandra" 
then lua_shared_dict kong_cassandra 5m; > end diff --git a/spec/fixtures/custom_nginx.template b/spec/fixtures/custom_nginx.template index 176a0a863b7e..0f3e4ae80cd6 100644 --- a/spec/fixtures/custom_nginx.template +++ b/spec/fixtures/custom_nginx.template @@ -45,6 +45,7 @@ http { lua_shared_dict kong_cache ${{MEM_CACHE_SIZE}}; lua_shared_dict kong_process_events 5m; lua_shared_dict kong_cluster_events 5m; + lua_shared_dict kong_healthchecks 5m; > if database == "cassandra" then lua_shared_dict kong_cassandra 5m; > end From e9418030f0133f72a3b44db985594e40187b25f1 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Thu, 14 Dec 2017 15:01:02 -0200 Subject: [PATCH 40/74] tests(balancer) unit tests for event-based balancer with health checks --- spec/01-unit/011-balancer_spec.lua | 187 ++++++++++++++++++++++++++++- 1 file changed, 185 insertions(+), 2 deletions(-) diff --git a/spec/01-unit/011-balancer_spec.lua b/spec/01-unit/011-balancer_spec.lua index b39eca0d6622..e4aeb46435fc 100644 --- a/spec/01-unit/011-balancer_spec.lua +++ b/spec/01-unit/011-balancer_spec.lua @@ -9,6 +9,7 @@ describe("Balancer", function() setup(function() balancer = require "kong.core.balancer" singletons = require "kong.singletons" + singletons.worker_events = require "resty.worker.events" singletons.dao = {} singletons.dao.upstreams = { find_all = function(self) @@ -16,11 +17,22 @@ describe("Balancer", function() end } + singletons.worker_events.configure({ + shm = "kong_process_events", -- defined by "lua_shared_dict" + timeout = 5, -- life time of event data in shm + interval = 1, -- poll interval (seconds) + + wait_interval = 0.010, -- wait before retry fetching event data + wait_max = 0.5, -- max wait time before discarding event + }) + UPSTREAMS_FIXTURES = { {id = "a", name = "mashape", slots = 10, orderlist = {1,2,3,4,5,6,7,8,9,10} }, {id = "b", name = "kong", slots = 10, orderlist = {10,9,8,7,6,5,4,3,2,1} }, {id = "c", name = "gelato", slots = 20, orderlist = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20} }, {id = "d", name = "galileo", slots = 20, orderlist = {20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1} }, + {id = "e", name = "upstream_e", slots = 10, orderlist = {1,2,3,4,5,6,7,8,9,10} }, + {id = "f", name = "upstream_f", slots = 10, orderlist = {1,2,3,4,5,6,7,8,9,10} }, } singletons.dao.targets = { @@ -77,7 +89,156 @@ describe("Balancer", function() target = "mashape.com:80", weight = 10, }, + -- 3rd upstream: e (removed and re-added) + { + id = "e1", + created_at = "001", + upstream_id = "e", + target = "127.0.0.1:2112", + weight = 10, + }, + { + id = "e2", + created_at = "002", + upstream_id = "e", + target = "127.0.0.1:2112", + weight = 0, + }, + { + id = "e3", + created_at = "003", + upstream_id = "e", + target = "127.0.0.1:2112", + weight = 10, + }, + -- 4th upstream: f (removed and not re-added) + { + id = "f1", + created_at = "001", + upstream_id = "f", + target = "127.0.0.1:5150", + weight = 10, + }, + { + id = "f2", + created_at = "002", + upstream_id = "f", + target = "127.0.0.1:5150", + weight = 0, + }, + { + id = "f3", + created_at = "003", + upstream_id = "f", + target = "127.0.0.1:2112", + weight = 10, + }, } + + local function find_all_in_fixture_fn(fixture) + return function(self, match_on) + local ret = {} + for _, rec in ipairs(fixture) do + for key, val in pairs(match_on or {}) do + if rec[key] ~= val then + rec = nil + break + end + end + if rec then table.insert(ret, rec) end + end + return ret + end + end + + singletons.dao = { + targets = { + find_all = 
find_all_in_fixture_fn(TARGETS_FIXTURES) + }, + upstreams = { + find_all = find_all_in_fixture_fn(UPSTREAMS_FIXTURES) + }, + } + + singletons.cache = { + _cache = {}, + get = function(self, key, _, loader, arg) + local v = self._cache[key] + if v == nil then + v = loader(arg) + self._cache[key] = v + end + return v + end, + invalidate_local = function(self, key) + self._cache[key] = nil + end + } + + + end) + + describe("create_balancer()", function() + local dns_client = require("resty.dns.client") + dns_client.init() + + it("creates a balancer with a healthchecker", function() + local my_balancer = balancer._create_balancer(UPSTREAMS_FIXTURES[1]) + assert.truthy(my_balancer) + local hc = balancer._get_healthchecker(my_balancer) + local target_history = { + { name = "mashape.com", port = 80, order = "001:a3", weight = 10 }, + { name = "mashape.com", port = 80, order = "002:a2", weight = 10 }, + { name = "mashape.com", port = 80, order = "002:a4", weight = 10 }, + { name = "mashape.com", port = 80, order = "003:a1", weight = 10 }, + } + assert.same(target_history, balancer._get_target_history(my_balancer)) + assert.truthy(hc) + hc:stop() + end) + end) + + describe("get_balancer()", function() + local dns_client = require("resty.dns.client") + dns_client.init() + + setup(function() + -- In these tests, we pass `true` to get_balancer + -- to ensure that the upstream was created by `balancer.init()` + balancer.init() + end) + + it("balancer and healthchecker match; remove and re-add", function() + local my_balancer = balancer._get_balancer({ host = "upstream_e" }, true) + assert.truthy(my_balancer) + local target_history = { + { name = "127.0.0.1", port = 2112, order = "001:e1", weight = 10 }, + { name = "127.0.0.1", port = 2112, order = "002:e2", weight = 0 }, + { name = "127.0.0.1", port = 2112, order = "003:e3", weight = 10 }, + } + assert.same(target_history, balancer._get_target_history(my_balancer)) + local hc = balancer._get_healthchecker(my_balancer) + assert.truthy(hc) + assert.same(1, #hc.targets) + assert.truthy(hc.targets["127.0.0.1"]) + assert.truthy(hc.targets["127.0.0.1"][2112]) + end) + + it("balancer and healthchecker match; remove and not re-add", function() + local my_balancer = balancer._get_balancer({ host = "upstream_f" }, true) + assert.truthy(my_balancer) + local target_history = { + { name = "127.0.0.1", port = 5150, order = "001:f1", weight = 10 }, + { name = "127.0.0.1", port = 5150, order = "002:f2", weight = 0 }, + { name = "127.0.0.1", port = 2112, order = "003:f3", weight = 10 }, + } + assert.same(target_history, balancer._get_target_history(my_balancer)) + local hc = balancer._get_healthchecker(my_balancer) + assert.truthy(hc) + assert.same(1, #hc.targets) + assert.truthy(hc.targets["127.0.0.1"]) + assert.truthy(hc.targets["127.0.0.1"][2112]) + end) end) describe("load_upstreams_dict_into_memory()", function() @@ -96,6 +257,28 @@ describe("Balancer", function() end) end) + describe("get_all_upstreams()", function() + it("gets a map of all upstream names to ids", function() + local upstreams_dict = balancer.get_all_upstreams() + + local fixture_dict = {} + for _, upstream in ipairs(UPSTREAMS_FIXTURES) do + fixture_dict[upstream.name] = upstream.id + end + + assert.same(fixture_dict, upstreams_dict) + end) + end) + + describe("get_upstream_by_name()", function() + it("retrieves a complete upstream based on its name", function() + for _, fixture in ipairs(UPSTREAMS_FIXTURES) do + local upstream = balancer.get_upstream_by_name(fixture.name) + assert.same(fixture, 
upstream) + end + end) + end) + describe("load_targets_into_memory()", function() local targets local upstream @@ -126,12 +309,12 @@ describe("Balancer", function() end, }) backup = { ngx.req, ngx.var, ngx.ctx } - ngx.req = { get_headers = function() return headers end } + ngx.req = { get_headers = function() return headers end } -- luacheck: ignore ngx.var = {} ngx.ctx = {} end) after_each(function() - ngx.req = backup[1] + ngx.req = backup[1] -- luacheck: ignore ngx.var = backup[2] ngx.ctx = backup[3] end) From acecd6a77fd66369954ffd033b884d7bde04aa12 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Mon, 13 Nov 2017 16:22:40 -0200 Subject: [PATCH 41/74] feat(healthchecks) report HTTP status of proxied upstreams only Use ctx.KONG_PROXIED to differentiate between responses produced by an upstream vs. responses produced by Kong plugins. --- kong/core/handler.lua | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/kong/core/handler.lua b/kong/core/handler.lua index 3fe454b325fe..7c7a509ee7b8 100644 --- a/kong/core/handler.lua +++ b/kong/core/handler.lua @@ -550,9 +550,12 @@ return { reports.log() local addr = ctx.balancer_address - -- Report HTTP status for health checks - if addr and addr.balancer and addr.ip then - addr.balancer.report_http_status(addr.ip, addr.port, ngx.status) + -- If response was produced by an upstream (ie, not by a Kong plugin) + if ctx.KONG_PROXIED == true then + -- Report HTTP status for health checks + if addr and addr.balancer and addr.ip then + addr.balancer.report_http_status(addr.ip, addr.port, ngx.status) + end end end } From 751f8a0f2d8bc8a5d112c9f071ba75d068f8bf09 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Wed, 15 Nov 2017 16:56:00 -0200 Subject: [PATCH 42/74] tests(healthchecks) integration tests for passive health checks --- .../05-proxy/09-balancer_spec.lua | 274 ++++++++++++++++-- 1 file changed, 245 insertions(+), 29 deletions(-) diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index d1ff571ec38a..eca94ffa214a 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -4,16 +4,60 @@ local helpers = require "spec.helpers" local dao_helpers = require "spec.02-integration.03-dao.helpers" local PORT = 21000 +local utils = require "kong.tools.utils" + +local healthchecks_defaults = { + active = { + timeout = 1, + concurrency = 10, + http_path = "/", + healthy = { + interval = 0, -- 0 = disabled by default + http_statuses = { 200, 302 }, + successes = 2, + }, + unhealthy = { + interval = 0, -- 0 = disabled by default + http_statuses = { 429, 404, + 500, 501, 502, 503, 504, 505 }, + tcp_failures = 2, + timeouts = 3, + http_failures = 5, + }, + }, + passive = { + healthy = { + http_statuses = { 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, + 300, 301, 302, 303, 304, 305, 306, 307, 308 }, + successes = 5, + }, + unhealthy = { + http_statuses = { 429, 500, 503 }, + tcp_failures = 2, + timeouts = 7, + http_failures = 5, + }, + }, +} + +local function healthchecks_config(config) + return utils.deep_merge(healthchecks_defaults, config) +end local TEST_LOG = false -- extra verbose logging of test server --- modified http-server. Accepts (sequentially) a number of incoming --- connections, and returns the number of succesful ones. --- Also features a timeout setting. -local function http_server(timeout, count, port, no_timeout) +-- Modified http-server. 
Accepts (sequentially) a number of incoming
-- connections and then rejects a given number of connections.
-- @param timeout Server timeout.
-- @param ok_count Number of 200 OK responses to give.
-- @param port Port number to use.
-- @param fail_count (optional, default 0) Number of 500 errors to respond.
-- @return Returns the number of successful and failure responses.
+local function http_server(timeout, ok_count, port, fail_count)
+  fail_count = fail_count or 0
   local threads = require "llthreads2.ex"
   local thread = threads.new({
-    function(timeout, count, port, no_timeout, TEST_LOG)
+    function(timeout, ok_count, port, fail_count)
 
       local function test_log(...)
         if not TEST_LOG then
@@ -33,67 +77,127 @@ local function http_server(timeout, count, port, no_timeout)
       assert(server:bind("*", port))
       assert(server:listen())
 
+      local handshake_done = false
+
       local expire = socket.gettime() + timeout
-      assert(server:settimeout(0.1))
+      assert(server:settimeout(0.5))
       test_log("test http server on port ", port, " started")
 
-      local success = 0
-      while count > 0 do
+      local ok_responses, fail_responses = 0, 0
+      local total_reqs = ok_count + fail_count
+      local n_reqs = 0
+      while n_reqs < total_reqs do
         local client, err
         client, err = server:accept()
         if err == "timeout" then
           if socket.gettime() > expire then
             server:close()
-            if no_timeout then
-              return success
-            else
-              error("timeout")
-            end
+            break
           end
 
         elseif not client then
           server:close()
           error(err)
 
         else
-          count = count - 1
-
           local lines = {}
           local line, err
           while #lines < 7 do
             line, err = client:receive()
             if err then
               break
+            elseif #line == 0 then
+              break
             else
               table.insert(lines, line)
             end
           end
-          if err then
+          if err and err ~= "closed" then
             client:close()
             server:close()
             error(err)
           end
+          local got_handshake = lines[1]:match("/handshake")
+          if got_handshake then
+            client:send("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n")
+            client:close()
+            handshake_done = true
 
-          local s = client:send("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n")
-          client:close()
-          if s then
-            success = success + 1
+          elseif lines[1]:match("/shutdown") then
+            client:send("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n")
+            client:close()
+            break
+
+          elseif handshake_done and not got_handshake then
+            n_reqs = n_reqs + 1
+            local do_ok = ok_count > 0
+            local response
+            if do_ok then
+              ok_count = ok_count - 1
+              response = "HTTP/1.1 200 OK"
+            else
+              response = "HTTP/1.1 500 Internal Server Error"
+            end
+            local sent = client:send(response .. 
"\r\nConnection: close\r\n\r\n") + client:close() + if sent then + if do_ok then + ok_responses = ok_responses + 1 + else + fail_responses = fail_responses + 1 + end + end + else + error("got a request before the handshake was complete") end - test_log("test http server on port ", port, ": ", success, "/", - (success + count)," requests handled") + test_log("test http server on port ", port, ": ", ok_responses, " oks, ", + fail_responses," fails handled") end end - server:close() test_log("test http server on port ", port, " closed") - return success + return ok_responses, fail_responses end - }, timeout, count, port, no_timeout, TEST_LOG) + }, timeout, ok_count, port, fail_count, TEST_LOG) local server = thread:start() - ngx.sleep(0.2) -- attempt to make sure server is started for failing CI tests + + local expire = ngx.now() + timeout + repeat + local res, err + local pok = pcall(function() + local client = helpers.http_client("127.0.0.1", port, 10) + if client then + res, err = client:send { + method = "GET", + path = "/handshake", + headers = { ["Host"] = "whatever" } + } + client:close() + else + err = "waiting" + end + if err then + ngx.sleep(0.01) -- busy-wait + end + end) + until (ngx.now() > expire) or (pok and not err) + return server end +local function request_immediate_shutdown(host, port) + local pok, client = pcall(helpers.http_client, host, port) + if not pok then + return + end + client:send { + method = "GET", + path = "/shutdown", + headers = { ["Host"] = "whatever" } + } + client:close() +end + dao_helpers.for_each_dao(function(kong_config) describe("Ring-balancer #" .. kong_config.database, function() @@ -115,6 +219,115 @@ dao_helpers.for_each_dao(function(kong_config) collectgarbage() end) + describe("#healthchecks", function() + local upstream + + local slots = 20 + + before_each(function() + helpers.run_migrations() + assert(helpers.dao.apis:insert { + name = "balancer.test", + hosts = { "balancer.test" }, + upstream_url = "http://service.xyz.v1/path", + }) + upstream = assert(helpers.dao.upstreams:insert { + name = "service.xyz.v1", + slots = slots, + }) + assert(helpers.dao.targets:insert { + target = "127.0.0.1:" .. PORT, + weight = 10, + upstream_id = upstream.id, + }) + assert(helpers.dao.targets:insert { + target = "127.0.0.1:" .. (PORT+1), + weight = 10, + upstream_id = upstream.id, + }) + + helpers.start_kong() + end) + + after_each(function() + helpers.stop_kong(nil, true) + end) + + it("perform passive health checks", function() + + for fails = 1, slots do + + -- configure healthchecks + local api_client = helpers.admin_client() + assert(api_client:send { + method = "PATCH", + path = "/upstreams/" .. upstream.name, + headers = { + ["Content-Type"] = "application/json", + -- ["Kong-Debug"] = "1", + }, + body = { + healthchecks = healthchecks_config { + passive = { + unhealthy = { + http_failures = fails, + } + } + } + }, + }) + api_client:close() + + local timeout = 10 + local requests = upstream.slots * 2 -- go round the balancer twice + + -- setup target servers: + -- server2 will only respond for part of the test, + -- then server1 will take over. 
+ local server2_oks = math.floor(requests / 4) + local server1 = http_server(timeout, requests - server2_oks - fails, PORT) + local server2 = http_server(timeout, server2_oks, PORT+1, fails) + + -- Go hit them with our test requests + local client_oks, client_fails = 0, 0 + + for _ = 1, requests do + local client = helpers.proxy_client() + local res = client:send { + method = "GET", + path = "/", + headers = { + ["Host"] = "balancer.test" + } + } + if res.status == 200 then + client_oks = client_oks + 1 + elseif res.status == 500 then + client_fails = client_fails + 1 + end + client:close() + end + + -- collect server results; hitcount + local _, ok1, fail1 = server1:join() + local _, ok2, fail2 = server2:join() + + -- verify + assert.are.equal(requests * 0.75 - fails, ok1) + assert.are.equal(requests * 0.25, ok2) + assert.are.equal(0, fail1) + assert.are.equal(fails, fail2) + + assert.are.equal(requests - fails, client_oks) + assert.are.equal(fails, client_fails) + end + end) + + pending("perform active health checks", function() + end) + + end) + describe("Balancing", function() local client, api_client, upstream1, upstream2, target1, target2 @@ -221,8 +434,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream2.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, requests, PORT+2, true) - local server2 = http_server(timeout, requests, PORT+3, true) + local server1 = http_server(timeout, requests, PORT+2, 0, true) + local server2 = http_server(timeout, requests, PORT+3, 0, true) -- Go hit them with our test requests for _ = 1, requests do @@ -231,12 +444,15 @@ dao_helpers.for_each_dao(function(kong_config) path = "/", headers = { ["Host"] = "hashing.test", - ["hashme"] = "just a value", + ["hashme"] = "just a value", } }) assert.response(res).has.status(200) end + request_immediate_shutdown("127.0.0.1", PORT + 2) + request_immediate_shutdown("127.0.0.1", PORT + 3) + -- collect server results; hitcount -- one should get all the hits, the other 0, and hence a timeout local _, count1 = server1:join() From 6aaf5ca9143458efc42675264aebed55e401330a Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Mon, 27 Nov 2017 18:30:29 -0200 Subject: [PATCH 43/74] tests(healthchecks) integration tests for active health checks --- .../05-proxy/09-balancer_spec.lua | 434 +++++++++++------- 1 file changed, 273 insertions(+), 161 deletions(-) diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index eca94ffa214a..5179d79400fd 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -40,12 +40,36 @@ local healthchecks_defaults = { }, } + local function healthchecks_config(config) return utils.deep_merge(healthchecks_defaults, config) end + local TEST_LOG = false -- extra verbose logging of test server + +local function direct_request(host, port, path) + local pok, client = pcall(helpers.http_client, host, port) + if not pok then + return nil, "pcall" + end + if not client then + return nil, "client" + end + local _, err = client:send { + method = "GET", + path = path, + headers = { ["Host"] = "whatever" } + } + client:close() + if err then + return nil, err + end + return true +end + + -- Modified http-server. Accepts (sequentially) a number of incoming -- connections and then rejects a given number of connections. -- @param timeout Server timeout. 
@@ -83,6 +107,8 @@ local function http_server(timeout, ok_count, port, fail_count) assert(server:settimeout(0.5)) test_log("test http server on port ", port, " started") + local healthy = true + local ok_responses, fail_responses = 0, 0 local total_reqs = ok_count + fail_count local n_reqs = 0 @@ -94,9 +120,11 @@ local function http_server(timeout, ok_count, port, fail_count) server:close() break end + elseif not client then server:close() error(err) + else local lines = {} local line, err @@ -104,13 +132,14 @@ local function http_server(timeout, ok_count, port, fail_count) line, err = client:receive() if err then break + elseif #line == 0 then break + else table.insert(lines, line) end end - if err and err ~= "closed" then client:close() server:close() @@ -127,6 +156,24 @@ local function http_server(timeout, ok_count, port, fail_count) client:close() break + elseif lines[1]:match("/status") then + if healthy then + client:send("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n") + else + client:send("HTTP/1.1 500 Internal Server Error\r\nConnection: close\r\n\r\n") + end + client:close() + + elseif lines[1]:match("/healthy") then + healthy = true + client:send("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n") + client:close() + + elseif lines[1]:match("/unhealthy") then + healthy = false + client:send("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n") + client:close() + elseif handshake_done and not got_handshake then n_reqs = n_reqs + 1 local do_ok = ok_count > 0 @@ -134,6 +181,7 @@ local function http_server(timeout, ok_count, port, fail_count) if do_ok then ok_count = ok_count - 1 response = "HTTP/1.1 200 OK" + else response = "HTTP/1.1 500 Internal Server Error" end @@ -142,10 +190,12 @@ local function http_server(timeout, ok_count, port, fail_count) if sent then if do_ok then ok_responses = ok_responses + 1 + else fail_responses = fail_responses + 1 end end + else error("got a request before the handshake was complete") end @@ -163,41 +213,38 @@ local function http_server(timeout, ok_count, port, fail_count) local expire = ngx.now() + timeout repeat - local res, err - local pok = pcall(function() - local client = helpers.http_client("127.0.0.1", port, 10) - if client then - res, err = client:send { - method = "GET", - path = "/handshake", - headers = { ["Host"] = "whatever" } - } - client:close() - else - err = "waiting" - end - if err then - ngx.sleep(0.01) -- busy-wait - end - end) - until (ngx.now() > expire) or (pok and not err) + local _, err = direct_request("127.0.0.1", port, "/handshake") + if err then + ngx.sleep(0.01) -- poll-wait + end + until (ngx.now() > expire) or not err return server end -local function request_immediate_shutdown(host, port) - local pok, client = pcall(helpers.http_client, host, port) - if not pok then - return + +local function client_requests(n, headers) + local oks, fails = 0, 0 + for _ = 1, n do + local client = helpers.proxy_client() + local res = client:send { + method = "GET", + path = "/", + headers = headers or { + ["Host"] = "balancer.test" + } + } + if res.status == 200 then + oks = oks + 1 + elseif res.status == 500 then + fails = fails + 1 + end + client:close() end - client:send { - method = "GET", - path = "/shutdown", - headers = { ["Host"] = "whatever" } - } - client:close() + return oks, fails end + dao_helpers.for_each_dao(function(kong_config) describe("Ring-balancer #" .. 
kong_config.database, function() @@ -241,11 +288,10 @@ dao_helpers.for_each_dao(function(kong_config) upstream_id = upstream.id, }) assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. (PORT+1), + target = "127.0.0.1:" .. (PORT + 1), weight = 10, upstream_id = upstream.id, }) - helpers.start_kong() end) @@ -264,7 +310,6 @@ dao_helpers.for_each_dao(function(kong_config) path = "/upstreams/" .. upstream.name, headers = { ["Content-Type"] = "application/json", - -- ["Kong-Debug"] = "1", }, body = { healthchecks = healthchecks_config { @@ -289,32 +334,15 @@ dao_helpers.for_each_dao(function(kong_config) local server2 = http_server(timeout, server2_oks, PORT+1, fails) -- Go hit them with our test requests - local client_oks, client_fails = 0, 0 - - for _ = 1, requests do - local client = helpers.proxy_client() - local res = client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "balancer.test" - } - } - if res.status == 200 then - client_oks = client_oks + 1 - elseif res.status == 500 then - client_fails = client_fails + 1 - end - client:close() - end + local client_oks, client_fails = client_requests(requests) -- collect server results; hitcount local _, ok1, fail1 = server1:join() local _, ok2, fail2 = server2:join() -- verify - assert.are.equal(requests * 0.75 - fails, ok1) - assert.are.equal(requests * 0.25, ok2) + assert.are.equal(requests - server2_oks - fails, ok1) + assert.are.equal(server2_oks, ok2) assert.are.equal(0, fail1) assert.are.equal(fails, fail2) @@ -323,7 +351,161 @@ dao_helpers.for_each_dao(function(kong_config) end end) - pending("perform active health checks", function() + it("perform active health checks -- up then down", function() + + local healthcheck_interval = 0.01 + + for fails = 1, 5 do + + -- configure healthchecks + local api_client = helpers.admin_client() + assert(api_client:send { + method = "PATCH", + path = "/upstreams/" .. upstream.name, + headers = { + ["Content-Type"] = "application/json", + }, + body = { + healthchecks = healthchecks_config { + active = { + http_path = "/status", + healthy = { + interval = healthcheck_interval, + successes = 1, + }, + unhealthy = { + interval = healthcheck_interval, + http_failures = fails, + }, + } + } + }, + }) + api_client:close() + + local timeout = 10 + local requests = upstream.slots * 2 -- go round the balancer twice + + -- setup target servers: + -- server2 will only respond for part of the test, + -- then server1 will take over. 
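+          -- The `ngx.sleep((2 + fails) * healthcheck_interval)` call used below
+          -- gives the active checker time to observe `fails` consecutive probe
+          -- failures (plus a two-interval margin) before asserting that server1
+          -- receives all remaining traffic.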
+ local server2_oks = math.floor(requests / 4) + local server1 = http_server(timeout, requests - server2_oks, PORT) + local server2 = http_server(timeout, server2_oks, PORT+1) + + -- Phase 1: server1 and server2 take requests + local client_oks, client_fails = client_requests(server2_oks * 2) + + -- Phase 2: server2 goes unhealthy + direct_request("127.0.0.1", PORT + 1, "/unhealthy") + + -- Give time for healthchecker to detect + ngx.sleep((2 + fails) * healthcheck_interval) + + -- Phase 3: server1 takes all requests + do + local p3oks, p3fails = client_requests(requests - (server2_oks * 2)) + client_oks = client_oks + p3oks + client_fails = client_fails + p3fails + end + + -- collect server results; hitcount + local _, ok1, fail1 = server1:join() + local _, ok2, fail2 = server2:join() + + -- verify + assert.are.equal(requests - server2_oks, ok1) + assert.are.equal(server2_oks, ok2) + assert.are.equal(0, fail1) + assert.are.equal(0, fail2) + + assert.are.equal(requests, client_oks) + assert.are.equal(0, client_fails) + end + end) + + it("perform active health checks -- automatic recovery", function() + + local healthcheck_interval = 0.01 + + for nchecks = 1, 5 do + + -- configure healthchecks + local api_client = helpers.admin_client() + assert(api_client:send { + method = "PATCH", + path = "/upstreams/" .. upstream.name, + headers = { + ["Content-Type"] = "application/json", + }, + body = { + healthchecks = healthchecks_config { + active = { + http_path = "/status", + healthy = { + interval = healthcheck_interval, + successes = nchecks, + }, + unhealthy = { + interval = healthcheck_interval, + http_failures = nchecks, + }, + } + } + }, + }) + api_client:close() + + local timeout = 10 + + -- setup target servers: + -- server2 will only respond for part of the test, + -- then server1 will take over. + local server1_oks = upstream.slots * 2 + local server2_oks = upstream.slots + local server1 = http_server(timeout, server1_oks, PORT) + local server2 = http_server(timeout, server2_oks, PORT+1) + + -- 1) server1 and server2 take requests + local oks, fails = client_requests(upstream.slots) + + -- server2 goes unhealthy + direct_request("127.0.0.1", PORT + 1, "/unhealthy") + -- Give time for healthchecker to detect + ngx.sleep((2 + nchecks) * healthcheck_interval) + + -- 2) server1 takes all requests + do + local o, f = client_requests(upstream.slots) + oks = oks + o + fails = fails + f + end + + -- server2 goes healthy again + direct_request("127.0.0.1", PORT + 1, "/healthy") + -- Give time for healthchecker to detect + ngx.sleep((2 + nchecks) * healthcheck_interval) + + -- 3) server1 and server2 take requests again + do + local o, f = client_requests(upstream.slots) + oks = oks + o + fails = fails + f + end + + -- collect server results; hitcount + local _, ok1, fail1 = server1:join() + local _, ok2, fail2 = server2:join() + + -- verify + assert.are.equal(upstream.slots * 2, ok1) + assert.are.equal(upstream.slots, ok2) + assert.are.equal(0, fail1) + assert.are.equal(0, fail2) + + assert.are.equal(upstream.slots * 3, oks) + assert.are.equal(0, fails) + end end) end) @@ -349,7 +531,7 @@ dao_helpers.for_each_dao(function(kong_config) upstream_id = upstream1.id, }) target2 = assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. (PORT+1), + target = "127.0.0.1:" .. (PORT + 1), weight = 10, upstream_id = upstream1.id, }) @@ -367,12 +549,12 @@ dao_helpers.for_each_dao(function(kong_config) hash_on_header = "hashme", }) assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. 
PORT+2, + target = "127.0.0.1:" .. PORT + 2, weight = 10, upstream_id = upstream2.id, }) assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. (PORT+3), + target = "127.0.0.1:" .. (PORT + 3), weight = 10, upstream_id = upstream2.id, }) @@ -410,24 +592,16 @@ dao_helpers.for_each_dao(function(kong_config) local server2 = http_server(timeout, requests/2, PORT+1) -- Go hit them with our test requests - for _ = 1, requests do - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "balancer.test" - } - }) - assert.response(res).has.status(200) - end + local oks = client_requests(requests) + assert.are.equal(requests, oks) -- collect server results; hitcount local _, count1 = server1:join() local _, count2 = server2:join() -- verify - assert.are.equal(requests/2, count1) - assert.are.equal(requests/2, count2) + assert.are.equal(requests / 2, count1) + assert.are.equal(requests / 2, count2) end) it("over multiple targets, with hashing", function() local timeout = 5 @@ -438,20 +612,14 @@ dao_helpers.for_each_dao(function(kong_config) local server2 = http_server(timeout, requests, PORT+3, 0, true) -- Go hit them with our test requests - for _ = 1, requests do - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "hashing.test", - ["hashme"] = "just a value", - } - }) - assert.response(res).has.status(200) - end + local oks = client_requests(requests, { + ["Host"] = "hashing.test", + ["hashme"] = "just a value", + }) + assert.are.equal(requests, oks) - request_immediate_shutdown("127.0.0.1", PORT + 2) - request_immediate_shutdown("127.0.0.1", PORT + 3) + direct_request("127.0.0.1", PORT + 2, "/shutdown") + direct_request("127.0.0.1", PORT + 3, "/shutdown") -- collect server results; hitcount -- one should get all the hits, the other 0, and hence a timeout @@ -472,24 +640,16 @@ dao_helpers.for_each_dao(function(kong_config) local server2 = http_server(timeout, requests/2, PORT+1) -- Go hit them with our test requests - for _ = 1, requests do - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "balancer.test" - } - }) - assert.response(res).has.status(200) - end + local oks = client_requests(requests) + assert.are.equal(requests, oks) -- collect server results; hitcount local _, count1 = server1:join() local _, count2 = server2:join() -- verify - assert.are.equal(requests/2, count1) - assert.are.equal(requests/2, count2) + assert.are.equal(requests / 2, count1) + assert.are.equal(requests / 2, count2) -- add a new target 3 local res = assert(api_client:send { @@ -499,8 +659,8 @@ dao_helpers.for_each_dao(function(kong_config) ["Content-Type"] = "application/json" }, body = { - target = "127.0.0.1:" .. (PORT+2), - weight = target1.weight/2 , -- shift proportions from 50/50 to 40/40/20 + target = "127.0.0.1:" .. 
(PORT + 2), + weight = target1.weight / 2 , -- shift proportions from 50/50 to 40/40/20 }, }) assert.response(res).has.status(201) @@ -514,16 +674,8 @@ dao_helpers.for_each_dao(function(kong_config) local server3 = http_server(timeout, requests * 0.2, PORT+2) -- Go hit them with our test requests - for _ = 1, requests do - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "balancer.test" - } - }) - assert.response(res).has.status(200) - end + local oks = client_requests(requests) + assert.are.equal(requests, oks) -- collect server results; hitcount _, count1 = server1:join() @@ -544,24 +696,16 @@ dao_helpers.for_each_dao(function(kong_config) local server2 = http_server(timeout, requests/2, PORT+1) -- Go hit them with our test requests - for _ = 1, requests do - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "balancer.test" - } - }) - assert.response(res).has.status(200) - end + local oks = client_requests(requests) + assert.are.equal(requests, oks) -- collect server results; hitcount local _, count1 = server1:join() local _, count2 = server2:join() -- verify - assert.are.equal(requests/2, count1) - assert.are.equal(requests/2, count2) + assert.are.equal(requests / 2, count1) + assert.are.equal(requests / 2, count2) -- modify weight for target 2, set to 0 local res = assert(api_client:send { @@ -584,16 +728,8 @@ dao_helpers.for_each_dao(function(kong_config) server1 = http_server(timeout, requests, PORT) -- Go hit them with our test requests - for _ = 1, requests do - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "balancer.test" - } - }) - assert.response(res).has.status(200) - end + local oks = client_requests(requests) + assert.are.equal(requests, oks) -- collect server results; hitcount _, count1 = server1:join() @@ -610,24 +746,16 @@ dao_helpers.for_each_dao(function(kong_config) local server2 = http_server(timeout, requests/2, PORT+1) -- Go hit them with our test requests - for _ = 1, requests do - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "balancer.test" - } - }) - assert.response(res).has.status(200) - end + local oks = client_requests(requests) + assert.are.equal(requests, oks) -- collect server results; hitcount local _, count1 = server1:join() local _, count2 = server2:join() -- verify - assert.are.equal(requests/2, count1) - assert.are.equal(requests/2, count2) + assert.are.equal(requests / 2, count1) + assert.are.equal(requests / 2, count2) -- modify weight for target 2 local res = assert(api_client:send { @@ -651,16 +779,8 @@ dao_helpers.for_each_dao(function(kong_config) server2 = http_server(timeout, requests * 0.6, PORT+1) -- Go hit them with our test requests - for _ = 1, requests do - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "balancer.test" - } - }) - assert.response(res).has.status(200) - end + local oks = client_requests(requests) + assert.are.equal(requests, oks) -- collect server results; hitcount _, count1 = server1:join() @@ -679,24 +799,16 @@ dao_helpers.for_each_dao(function(kong_config) local server2 = http_server(timeout, requests/2, PORT+1) -- Go hit them with our test requests - for _ = 1, requests do - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - ["Host"] = "balancer.test" - } - }) - assert.response(res).has.status(200) - end + local oks = client_requests(requests) + assert.are.equal(requests, 
oks)
 
       -- collect server results; hitcount
       local _, count1 = server1:join()
       local _, count2 = server2:join()
 
       -- verify
-      assert.are.equal(requests/2, count1)
-      assert.are.equal(requests/2, count2)
+      assert.are.equal(requests / 2, count1)
+      assert.are.equal(requests / 2, count2)
 
       -- modify weight for both targets, set to 0
       local res = assert(api_client:send {

From 585349d5d516e46bb77cfe7efffd3f949a371945 Mon Sep 17 00:00:00 2001
From: Hisham Muhammad <hisham@gobolinux.org>
Date: Fri, 1 Dec 2017 15:09:05 -0200
Subject: [PATCH 44/74] tests(balancer) support sequences of responses in
 http_server

Allow the internal http_server to produce a series of success and
failure responses (e.g. 10 successes, followed by 5 failures,
followed by 10 successes).
---
 .../05-proxy/09-balancer_spec.lua | 110 ++++++++++--------
 1 file changed, 64 insertions(+), 46 deletions(-)

diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua
index 5179d79400fd..5578d91fa748 100644
--- a/spec/02-integration/05-proxy/09-balancer_spec.lua
+++ b/spec/02-integration/05-proxy/09-balancer_spec.lua
@@ -73,15 +73,15 @@ end
 -- Modified http-server. Accepts (sequentially) a number of incoming
 -- connections and then rejects a given number of connections.
 -- @param timeout Server timeout.
--- @param ok_count Number of 200 OK responses to give.
 -- @param port Port number to use.
--- @param fail_count (optional, default 0) Number of 500 errors to respond.
+-- @param counts Array of response counts to give,
+-- odd entries are 200s, even entries are 500s
+-- @param test_log (optional, default false) Produce detailed logs
 -- @return Returns the number of successful and failure responses.
-local function http_server(timeout, ok_count, port, fail_count)
-  fail_count = fail_count or 0
+local function http_server(timeout, port, counts, test_log)
   local threads = require "llthreads2.ex"
   local thread = threads.new({
-    function(timeout, ok_count, port, fail_count)
+    function(timeout, port, counts, TEST_LOG)
 
       local function test_log(...)
@@ -110,8 +110,12 @@ local function http_server(timeout, ok_count, port, fail_count)
       local healthy = true
 
       local ok_responses, fail_responses = 0, 0
-      local total_reqs = ok_count + fail_count
+      local total_reqs = 0
+      for _, c in pairs(counts) do
+        total_reqs = total_reqs + c
+      end
       local n_reqs = 0
+      local reply_200 = true
       while n_reqs < total_reqs do
         local client, err
         client, err = server:accept()
@@ -176,21 +180,29 @@ local function http_server(timeout, ok_count, port, fail_count)
 
           elseif handshake_done and not got_handshake then
             n_reqs = n_reqs + 1
-            local do_ok = ok_count > 0
+
+            while counts[1] == 0 do
+              table.remove(counts, 1)
+              reply_200 = not reply_200
+            end
+            if not counts[1] then
+              error("unexpected request")
+            end
+            if counts[1] > 0 then
+              counts[1] = counts[1] - 1
+            end
+
             local response
-            if do_ok then
-              ok_count = ok_count - 1
+            if reply_200 then
               response = "HTTP/1.1 200 OK"
-            else
               response = "HTTP/1.1 500 Internal Server Error"
             end
             local sent = client:send(response .. 
"\r\nConnection: close\r\n\r\n") client:close() if sent then - if do_ok then + if reply_200 then ok_responses = ok_responses + 1 - else fail_responses = fail_responses + 1 end @@ -207,7 +219,7 @@ local function http_server(timeout, ok_count, port, fail_count) test_log("test http server on port ", port, " closed") return ok_responses, fail_responses end - }, timeout, ok_count, port, fail_count, TEST_LOG) + }, timeout, port, counts, test_log or TEST_LOG) local server = thread:start() @@ -301,7 +313,7 @@ dao_helpers.for_each_dao(function(kong_config) it("perform passive health checks", function() - for fails = 1, slots do + for nfails = 1, slots do -- configure healthchecks local api_client = helpers.admin_client() @@ -315,7 +327,7 @@ dao_helpers.for_each_dao(function(kong_config) healthchecks = healthchecks_config { passive = { unhealthy = { - http_failures = fails, + http_failures = nfails, } } } @@ -330,8 +342,13 @@ dao_helpers.for_each_dao(function(kong_config) -- server2 will only respond for part of the test, -- then server1 will take over. local server2_oks = math.floor(requests / 4) - local server1 = http_server(timeout, requests - server2_oks - fails, PORT) - local server2 = http_server(timeout, server2_oks, PORT+1, fails) + local server1 = http_server(timeout, PORT, { + requests - server2_oks - nfails + }) + local server2 = http_server(timeout, PORT + 1, { + server2_oks, + nfails + }) -- Go hit them with our test requests local client_oks, client_fails = client_requests(requests) @@ -341,13 +358,13 @@ dao_helpers.for_each_dao(function(kong_config) local _, ok2, fail2 = server2:join() -- verify - assert.are.equal(requests - server2_oks - fails, ok1) + assert.are.equal(requests - server2_oks - nfails, ok1) assert.are.equal(server2_oks, ok2) assert.are.equal(0, fail1) - assert.are.equal(fails, fail2) + assert.are.equal(nfails, fail2) - assert.are.equal(requests - fails, client_oks) - assert.are.equal(fails, client_fails) + assert.are.equal(requests - nfails, client_oks) + assert.are.equal(nfails, client_fails) end end) @@ -355,7 +372,7 @@ dao_helpers.for_each_dao(function(kong_config) local healthcheck_interval = 0.01 - for fails = 1, 5 do + for nfails = 1, 5 do -- configure healthchecks local api_client = helpers.admin_client() @@ -375,7 +392,7 @@ dao_helpers.for_each_dao(function(kong_config) }, unhealthy = { interval = healthcheck_interval, - http_failures = fails, + http_failures = nfails, }, } } @@ -390,8 +407,8 @@ dao_helpers.for_each_dao(function(kong_config) -- server2 will only respond for part of the test, -- then server1 will take over. local server2_oks = math.floor(requests / 4) - local server1 = http_server(timeout, requests - server2_oks, PORT) - local server2 = http_server(timeout, server2_oks, PORT+1) + local server1 = http_server(timeout, PORT, { requests - server2_oks }) + local server2 = http_server(timeout, PORT + 1, { server2_oks }) -- Phase 1: server1 and server2 take requests local client_oks, client_fails = client_requests(server2_oks * 2) @@ -400,7 +417,7 @@ dao_helpers.for_each_dao(function(kong_config) direct_request("127.0.0.1", PORT + 1, "/unhealthy") -- Give time for healthchecker to detect - ngx.sleep((2 + fails) * healthcheck_interval) + ngx.sleep((2 + nfails) * healthcheck_interval) -- Phase 3: server1 takes all requests do @@ -463,8 +480,8 @@ dao_helpers.for_each_dao(function(kong_config) -- then server1 will take over. 
local server1_oks = upstream.slots * 2 local server2_oks = upstream.slots - local server1 = http_server(timeout, server1_oks, PORT) - local server2 = http_server(timeout, server2_oks, PORT+1) + local server1 = http_server(timeout, PORT, { server1_oks }) + local server2 = http_server(timeout, PORT + 1, { server2_oks }) -- 1) server1 and server2 take requests local oks, fails = client_requests(upstream.slots) @@ -588,8 +605,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, requests/2, PORT) - local server2 = http_server(timeout, requests/2, PORT+1) + local server1 = http_server(timeout, PORT, { requests / 2 }) + local server2 = http_server(timeout, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -608,8 +625,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream2.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, requests, PORT+2, 0, true) - local server2 = http_server(timeout, requests, PORT+3, 0, true) + local server1 = http_server(timeout, PORT + 2, { requests }, true) + local server2 = http_server(timeout, PORT + 3, { requests }, true) -- Go hit them with our test requests local oks = client_requests(requests, { @@ -636,8 +653,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, requests/2, PORT) - local server2 = http_server(timeout, requests/2, PORT+1) + local server1 = http_server(timeout, PORT, { requests / 2 }) + local server2 = http_server(timeout, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -669,9 +686,10 @@ dao_helpers.for_each_dao(function(kong_config) ----------------------------------------- -- setup target servers - server1 = http_server(timeout, requests * 0.4, PORT) - server2 = http_server(timeout, requests * 0.4, PORT+1) - local server3 = http_server(timeout, requests * 0.2, PORT+2) + local server3 + server1 = http_server(timeout, PORT, { requests * 0.4 }) + server2 = http_server(timeout, PORT + 1, { requests * 0.4 }) + server3 = http_server(timeout, PORT + 2, { requests * 0.2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -692,8 +710,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, requests/2, PORT) - local server2 = http_server(timeout, requests/2, PORT+1) + local server1 = http_server(timeout, PORT, { requests / 2 }) + local server2 = http_server(timeout, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -725,7 +743,7 @@ dao_helpers.for_each_dao(function(kong_config) ----------------------------------------- -- setup target servers - server1 = http_server(timeout, requests, PORT) + server1 = http_server(timeout, PORT, { requests }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -742,8 +760,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, requests/2, PORT) - local server2 = http_server(timeout, requests/2, PORT+1) + local server1 = 
http_server(timeout, PORT, { requests / 2 }) + local server2 = http_server(timeout, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -775,8 +793,8 @@ dao_helpers.for_each_dao(function(kong_config) ----------------------------------------- -- setup target servers - server1 = http_server(timeout, requests * 0.4, PORT) - server2 = http_server(timeout, requests * 0.6, PORT+1) + server1 = http_server(timeout, PORT, { requests * 0.4 }) + server2 = http_server(timeout, PORT + 1, { requests * 0.6 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -795,8 +813,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, requests/2, PORT) - local server2 = http_server(timeout, requests/2, PORT+1) + local server1 = http_server(timeout, PORT, { requests / 2 }) + local server2 = http_server(timeout, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) From 5e70e639625f3453f6103df3de18f6b3b205b7e6 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Fri, 1 Dec 2017 15:06:15 -0200 Subject: [PATCH 45/74] feat(admin) add endpoints for setting health Adds two new endpoints to Target entities: * `/upstreams/:id/targets/:target/healthy` * `/upstreams/:id/targets/:target/unhealthy` These post an event that is forwarded to all relevant healthcheckers, updating their immediate status. This is useful for manually re-enabling a target that has been disabled by passive health checks. --- kong/api/routes/upstreams.lua | 42 +++++++++++++++++++++++++++++++++++ kong/core/balancer.lua | 24 ++++++++++++++++++++ kong/core/handler.lua | 12 ++++++++++ 3 files changed, 78 insertions(+) diff --git a/kong/api/routes/upstreams.lua b/kong/api/routes/upstreams.lua index 93f04f9d6f7c..0d44b8a1f6cf 100644 --- a/kong/api/routes/upstreams.lua +++ b/kong/api/routes/upstreams.lua @@ -1,7 +1,11 @@ local crud = require "kong.api.crud_helpers" local app_helpers = require "lapis.application" local responses = require "kong.tools.responses" +local balancer = require "kong.core.balancer" +local singletons = require "kong.singletons" +local utils = require "kong.tools.utils" local cjson = require "cjson" +local cluster_events = singletons.cluster_events -- clean the target history for a given upstream @@ -68,6 +72,25 @@ local function clean_history(upstream_id, dao_factory) end end + +local function post_health(is_healthy) + return function(self, _) + local addr = utils.normalize_ip(self.target.target) + local ip, port = utils.format_host(addr.host), addr.port + local _, err = balancer.post_health(self.upstream, ip, port, is_healthy) + if err then + return app_helpers.yield_error(err) + end + + local health = is_healthy and 1 or 0 + local packet = ("%s|%d|%d|%s"):format(ip, port, health, self.upstream.name) + cluster_events:broadcast("balancer:post_health", packet) + + return responses.send_HTTP_NO_CONTENT() + end +end + + return { ["/upstreams/"] = { GET = function(self, dao_factory) @@ -195,5 +218,24 @@ return { return responses.send_HTTP_NO_CONTENT() end + }, + + ["/upstreams/:upstream_name_or_id/targets/:target_or_id/healthy"] = { + before = function(self, dao_factory, helpers) + crud.find_upstream_by_name_or_id(self, dao_factory, helpers) + crud.find_target_by_target_or_id(self, dao_factory, helpers) + end, + + POST = post_health(true), + }, + + 
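+  -- Example usage (upstream and target names here are illustrative): a
+  -- POST to /upstreams/service.xyz.v1/targets/127.0.0.1:80/unhealthy marks
+  -- that target down across all workers; the matching /healthy endpoint
+  -- re-enables it.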
["/upstreams/:upstream_name_or_id/targets/:target_or_id/unhealthy"] = { + before = function(self, dao_factory, helpers) + crud.find_upstream_by_name_or_id(self, dao_factory, helpers) + crud.find_target_by_target_or_id(self, dao_factory, helpers) + end, + + POST = post_health(false), } + } diff --git a/kong/core/balancer.lua b/kong/core/balancer.lua index dca3530d9c3b..d45cc027415f 100644 --- a/kong/core/balancer.lua +++ b/kong/core/balancer.lua @@ -713,6 +713,29 @@ local function execute(target) end +-------------------------------------------------------------------------------- +-- Update health status and broadcast to workers +-- @param upstream a table with upstream data +-- @param ip target IP +-- @param port target port +-- @param is_healthy boolean: true if healthy, false if unhealthy +-- @return true if posting event was successful, nil+error otherwise +local function post_health(upstream, ip, port, is_healthy) + + local balancer = balancers[upstream.name] + if not balancer then + return nil, "Upstream " .. tostring(upstream.name) .. " has no balancer" + end + + local healthchecker = healthcheckers[balancer] + if not healthchecker then + return nil, "no healthchecker found for " .. tostring(upstream.name) + end + + return healthchecker:set_target_status(ip, port, is_healthy) +end + + -------------------------------------------------------------------------------- -- for unit-testing purposes only local function _get_healthchecker(balancer) @@ -734,6 +757,7 @@ return { on_upstream_event = on_upstream_event, get_upstream_by_name = get_upstream_by_name, get_all_upstreams = get_all_upstreams, + post_health = post_health, -- ones below are exported for test purposes only _create_balancer = create_balancer, diff --git a/kong/core/handler.lua b/kong/core/handler.lua index 7c7a509ee7b8..f23a52218895 100644 --- a/kong/core/handler.lua +++ b/kong/core/handler.lua @@ -241,6 +241,18 @@ return { end) + -- manual health updates + cluster_events:subscribe("balancer:post_health", function(data) + local ip, port, health, name = data:match("([^|]+)|([^|]+)|([^|]+)|(.*)") + port = tonumber(port) + local upstream = { name = name } + local ok, err = balancer.post_health(upstream, ip, port, health == "1") + if not ok then + log(ERR, "failed posting health of ", name, " to workers: ", err) + end + end) + + -- upstream updates From 36ba9759eb2a6323cf496d791501cf0d9ca1f084 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Fri, 1 Dec 2017 15:10:16 -0200 Subject: [PATCH 46/74] tests(admin) tests for /healthy and /unhealthy endpoints Add tests for: * manual recovery using /healthy endpoint: test scenario using passive healthchecks as circuit breaker, then using the endpoint to restore the target. * manual shutdown using the /unhealthy endpoint. --- .../05-proxy/09-balancer_spec.lua | 166 ++++++++++++++++++ 1 file changed, 166 insertions(+) diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index 5578d91fa748..4ee45a35de34 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -70,6 +70,25 @@ local function direct_request(host, port, path) end +local function post_target_endpoint(upstream_name, port, endpoint) + local url = "/upstreams/" .. upstream_name + .. "/targets/127.0.0.1:" .. port + .. "/" .. 
endpoint + + local api_client = helpers.admin_client() + local res, err = assert(api_client:send { + method = "POST", + path = url, + headers = { + ["Content-Type"] = "application/json", + }, + body = {}, + }) + api_client:close() + return res, err +end + + -- Modified http-server. Accepts (sequentially) a number of incoming -- connections and then rejects a given number of connections. -- @param timeout Server timeout. @@ -525,6 +544,153 @@ dao_helpers.for_each_dao(function(kong_config) end end) + it("perform passive health checks -- manual recovery", function() + + for nfails = 1, 5 do + + -- configure healthchecks + local api_client = helpers.admin_client() + assert(api_client:send { + method = "PATCH", + path = "/upstreams/" .. upstream.name, + headers = { + ["Content-Type"] = "application/json", + }, + body = { + healthchecks = healthchecks_config { + passive = { + unhealthy = { + http_failures = nfails, + } + } + } + }, + }) + api_client:close() + + local timeout = 10 + + -- setup target servers: + -- server2 will only respond for part of the test, + -- then server1 will take over. + local server1_oks = upstream.slots * 2 + local server2_oks = upstream.slots + local server1 = http_server(timeout, PORT, { + server1_oks - nfails + }) + local server2 = http_server(timeout, PORT + 1, { + server2_oks / 2, + nfails, + server2_oks / 2 + }) + + -- 1) server1 and server2 take requests + local oks, fails = client_requests(upstream.slots) + + -- 2) server1 takes all requests once server2 produces + -- `nfails` failures (even though server2 will be ready + -- to respond 200 again after `nfails`) + do + local o, f = client_requests(upstream.slots) + oks = oks + o + fails = fails + f + end + + -- manually bring it back using the endpoint + post_target_endpoint(upstream.name, PORT + 1, "healthy") + + -- 3) server1 and server2 take requests again + do + local o, f = client_requests(upstream.slots) + oks = oks + o + fails = fails + f + end + + -- collect server results; hitcount + local _, ok1, fail1 = server1:join() + local _, ok2, fail2 = server2:join() + + -- verify + assert.are.equal(server1_oks - nfails, ok1) + assert.are.equal(server2_oks, ok2) + assert.are.equal(0, fail1) + assert.are.equal(nfails, fail2) + + assert.are.equal(upstream.slots * 3 - nfails, oks) + assert.are.equal(nfails, fails) + end + end) + + it("perform passive health checks -- manual shutdown", function() + + -- configure healthchecks + local api_client = helpers.admin_client() + assert(api_client:send { + method = "PATCH", + path = "/upstreams/" .. upstream.name, + headers = { + ["Content-Type"] = "application/json", + }, + body = { + healthchecks = healthchecks_config { + passive = { + unhealthy = { + http_failures = 1, + } + } + } + }, + }) + api_client:close() + + local timeout = 10 + + -- setup target servers: + -- server2 will only respond for part of the test, + -- then server1 will take over. 
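+          -- server2's single-entry counts array would let it keep answering
+          -- 200s; traffic stops only because the test POSTs to the /unhealthy
+          -- endpoint below, and resumes once /healthy is posted.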
+          local server1_oks = upstream.slots * 2
+          local server2_oks = upstream.slots
+          local server1 = http_server(timeout, PORT, { server1_oks })
+          local server2 = http_server(timeout, PORT + 1, { server2_oks })
+
+          -- 1) server1 and server2 take requests
+          local oks, fails = client_requests(upstream.slots)
+
+          -- manually bring it down using the endpoint
+          post_target_endpoint(upstream.name, PORT + 1, "unhealthy")
+
+          -- 2) server1 takes all requests
+          do
+            local o, f = client_requests(upstream.slots)
+            oks = oks + o
+            fails = fails + f
+          end
+
+          -- manually bring it back using the endpoint
+          post_target_endpoint(upstream.name, PORT + 1, "healthy")
+
+          -- 3) server1 and server2 take requests again
+          do
+            local o, f = client_requests(upstream.slots)
+            oks = oks + o
+            fails = fails + f
+          end
+
+          -- collect server results; hitcount
+          local _, ok1, fail1 = server1:join()
+          local _, ok2, fail2 = server2:join()
+
+          -- verify
+          assert.are.equal(upstream.slots * 2, ok1)
+          assert.are.equal(upstream.slots, ok2)
+          assert.are.equal(0, fail1)
+          assert.are.equal(0, fail2)
+
+          assert.are.equal(upstream.slots * 3, oks)
+          assert.are.equal(0, fails)
+
+        end)
+
       end)
 
     describe("Balancing", function()

From 9660ca2d5648cb7fa2218b02eee7892c462bdc4b Mon Sep 17 00:00:00 2001
From: Hisham Muhammad <hisham@gobolinux.org>
Date: Tue, 12 Dec 2017 13:22:54 -0200
Subject: [PATCH 47/74] tests(balancer) abstract away localhost

Remove hardcoded references to 127.0.0.1 in balancer integration
tests. This sets the stage for IPv6 tests, pending an update of
lua-resty-dns-client.
---
 .../05-proxy/09-balancer_spec.lua | 118 +++++++++---------
 1 file changed, 62 insertions(+), 56 deletions(-)

diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua
index 4ee45a35de34..c146a27d7a2d 100644
--- a/spec/02-integration/05-proxy/09-balancer_spec.lua
+++ b/spec/02-integration/05-proxy/09-balancer_spec.lua
@@ -3,6 +3,8 @@
 local helpers = require "spec.helpers"
 local dao_helpers = require "spec.02-integration.03-dao.helpers"
 
+local localhost = "127.0.0.1"
+local ipv = "ipv4"
 local PORT = 21000
 
 local utils = require "kong.tools.utils"
@@ -70,11 +72,11 @@ local function direct_request(host, port, path)
 end
 
 
-local function post_target_endpoint(upstream_name, port, endpoint)
+local function post_target_endpoint(upstream_name, host, port, endpoint)
   local url = "/upstreams/" .. upstream_name
-              .. "/targets/127.0.0.1:" .. port
+              .. "/targets/"
+              .. utils.format_host(host, port)
               .. "/" .. endpoint
-
   local api_client = helpers.admin_client()
   local res, err = assert(api_client:send {
     method = "POST",
@@ -92,16 +94,16 @@ end
 -- Modified http-server. Accepts (sequentially) a number of incoming
 -- connections and then rejects a given number of connections.
 -- @param timeout Server timeout.
+-- @param host Host name to use (IPv4 or IPv6 localhost).
 -- @param port Port number to use.
 -- @param counts Array of response counts to give,
 -- odd entries are 200s, even entries are 500s
 -- @param test_log (optional, default false) Produce detailed logs
 -- @return Returns the number of successful and failure responses.
-local function http_server(timeout, port, counts, test_log)
+local function http_server(timeout, host, port, counts, test_log)
   local threads = require "llthreads2.ex"
   local thread = threads.new({
-    function(timeout, port, counts, TEST_LOG)
+    function(timeout, host, port, counts, TEST_LOG)
       local function test_log(...)
 
if not TEST_LOG then return @@ -115,7 +117,12 @@ local function http_server(timeout, port, counts, test_log) end local socket = require "socket" - local server = assert(socket.tcp()) + local server + if host:match(":") then + server = assert(socket.tcp6()) + else + server = assert(socket.tcp()) + end assert(server:setoption('reuseaddr', true)) assert(server:bind("*", port)) assert(server:listen()) @@ -238,13 +245,13 @@ local function http_server(timeout, port, counts, test_log) test_log("test http server on port ", port, " closed") return ok_responses, fail_responses end - }, timeout, port, counts, test_log or TEST_LOG) + }, timeout, host, port, counts, test_log or TEST_LOG) local server = thread:start() local expire = ngx.now() + timeout repeat - local _, err = direct_request("127.0.0.1", port, "/handshake") + local _, err = direct_request(host, port, "/handshake") if err then ngx.sleep(0.01) -- poll-wait end @@ -278,7 +285,7 @@ end dao_helpers.for_each_dao(function(kong_config) - describe("Ring-balancer #" .. kong_config.database, function() + describe("Ring-balancer #" .. kong_config.database .. " #" .. ipv, function() local config_db setup(function() @@ -304,22 +311,22 @@ dao_helpers.for_each_dao(function(kong_config) before_each(function() helpers.run_migrations() - assert(helpers.dao.apis:insert { + helpers.dao.apis:insert { name = "balancer.test", hosts = { "balancer.test" }, upstream_url = "http://service.xyz.v1/path", - }) + } upstream = assert(helpers.dao.upstreams:insert { name = "service.xyz.v1", slots = slots, }) assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. PORT, + target = utils.format_host(localhost, PORT), weight = 10, upstream_id = upstream.id, }) assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. (PORT + 1), + target = utils.format_host(localhost, PORT + 1), weight = 10, upstream_id = upstream.id, }) @@ -361,10 +368,10 @@ dao_helpers.for_each_dao(function(kong_config) -- server2 will only respond for part of the test, -- then server1 will take over. local server2_oks = math.floor(requests / 4) - local server1 = http_server(timeout, PORT, { + local server1 = http_server(timeout, localhost, PORT, { requests - server2_oks - nfails }) - local server2 = http_server(timeout, PORT + 1, { + local server2 = http_server(timeout, localhost, PORT + 1, { server2_oks, nfails }) @@ -426,14 +433,14 @@ dao_helpers.for_each_dao(function(kong_config) -- server2 will only respond for part of the test, -- then server1 will take over. local server2_oks = math.floor(requests / 4) - local server1 = http_server(timeout, PORT, { requests - server2_oks }) - local server2 = http_server(timeout, PORT + 1, { server2_oks }) + local server1 = http_server(timeout, localhost, PORT, { requests - server2_oks }) + local server2 = http_server(timeout, localhost, PORT + 1, { server2_oks }) -- Phase 1: server1 and server2 take requests local client_oks, client_fails = client_requests(server2_oks * 2) -- Phase 2: server2 goes unhealthy - direct_request("127.0.0.1", PORT + 1, "/unhealthy") + direct_request(localhost, PORT + 1, "/unhealthy") -- Give time for healthchecker to detect ngx.sleep((2 + nfails) * healthcheck_interval) @@ -499,14 +506,14 @@ dao_helpers.for_each_dao(function(kong_config) -- then server1 will take over. 
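+          -- `localhost` (formatted via utils.format_host above) centralizes the
+          -- loopback address so an IPv6 loopback can be substituted later, per
+          -- this patch's intent.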
local server1_oks = upstream.slots * 2 local server2_oks = upstream.slots - local server1 = http_server(timeout, PORT, { server1_oks }) - local server2 = http_server(timeout, PORT + 1, { server2_oks }) + local server1 = http_server(timeout, localhost, PORT, { server1_oks }) + local server2 = http_server(timeout, localhost, PORT + 1, { server2_oks }) -- 1) server1 and server2 take requests local oks, fails = client_requests(upstream.slots) -- server2 goes unhealthy - direct_request("127.0.0.1", PORT + 1, "/unhealthy") + direct_request(localhost, PORT + 1, "/unhealthy") -- Give time for healthchecker to detect ngx.sleep((2 + nchecks) * healthcheck_interval) @@ -518,7 +525,7 @@ dao_helpers.for_each_dao(function(kong_config) end -- server2 goes healthy again - direct_request("127.0.0.1", PORT + 1, "/healthy") + direct_request(localhost, PORT + 1, "/healthy") -- Give time for healthchecker to detect ngx.sleep((2 + nchecks) * healthcheck_interval) @@ -547,7 +554,6 @@ dao_helpers.for_each_dao(function(kong_config) it("perform passive health checks -- manual recovery", function() for nfails = 1, 5 do - -- configure healthchecks local api_client = helpers.admin_client() assert(api_client:send { @@ -575,10 +581,10 @@ dao_helpers.for_each_dao(function(kong_config) -- then server1 will take over. local server1_oks = upstream.slots * 2 local server2_oks = upstream.slots - local server1 = http_server(timeout, PORT, { + local server1 = http_server(timeout, localhost, PORT, { server1_oks - nfails }) - local server2 = http_server(timeout, PORT + 1, { + local server2 = http_server(timeout, localhost, PORT + 1, { server2_oks / 2, nfails, server2_oks / 2 @@ -597,7 +603,7 @@ dao_helpers.for_each_dao(function(kong_config) end -- manually bring it back using the endpoint - post_target_endpoint(upstream.name, PORT + 1, "healthy") + post_target_endpoint(upstream.name, localhost, PORT + 1, "healthy") -- 3) server1 and server2 take requests again do @@ -650,14 +656,14 @@ dao_helpers.for_each_dao(function(kong_config) -- then server1 will take over. local server1_oks = upstream.slots * 2 local server2_oks = upstream.slots - local server1 = http_server(timeout, PORT, { server1_oks }) - local server2 = http_server(timeout, PORT + 1, { server2_oks }) + local server1 = http_server(timeout, localhost, PORT, { server1_oks }) + local server2 = http_server(timeout, localhost, PORT + 1, { server2_oks }) -- 1) server1 and server2 take requests local oks, fails = client_requests(upstream.slots) -- manually bring it down using the endpoint - post_target_endpoint(upstream.name, PORT + 1, "unhealthy") + post_target_endpoint(upstream.name, localhost, PORT + 1, "unhealthy") -- 2) server1 takes all requests do @@ -667,7 +673,7 @@ dao_helpers.for_each_dao(function(kong_config) end -- manually bring it back using the endpoint - post_target_endpoint(upstream.name, PORT + 1, "healthy") + post_target_endpoint(upstream.name, localhost, PORT + 1, "healthy") -- 3) server1 and server2 take requests again do @@ -709,12 +715,12 @@ dao_helpers.for_each_dao(function(kong_config) slots = 10, }) target1 = assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. PORT, + target = utils.format_host(localhost, PORT), weight = 10, upstream_id = upstream1.id, }) target2 = assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. 
(PORT + 1), + target = utils.format_host(localhost, PORT + 1), weight = 10, upstream_id = upstream1.id, }) @@ -732,12 +738,12 @@ dao_helpers.for_each_dao(function(kong_config) hash_on_header = "hashme", }) assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. PORT + 2, + target = utils.format_host(localhost, PORT + 2), weight = 10, upstream_id = upstream2.id, }) assert(helpers.dao.targets:insert { - target = "127.0.0.1:" .. (PORT + 3), + target = utils.format_host(localhost, PORT + 3), weight = 10, upstream_id = upstream2.id, }) @@ -771,8 +777,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, PORT, { requests / 2 }) - local server2 = http_server(timeout, PORT + 1, { requests / 2 }) + local server1 = http_server(timeout, localhost, PORT, { requests / 2 }) + local server2 = http_server(timeout, localhost, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -791,8 +797,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream2.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, PORT + 2, { requests }, true) - local server2 = http_server(timeout, PORT + 3, { requests }, true) + local server1 = http_server(timeout, localhost, PORT + 2, { requests }, true) + local server2 = http_server(timeout, localhost, PORT + 3, { requests }, true) -- Go hit them with our test requests local oks = client_requests(requests, { @@ -801,8 +807,8 @@ dao_helpers.for_each_dao(function(kong_config) }) assert.are.equal(requests, oks) - direct_request("127.0.0.1", PORT + 2, "/shutdown") - direct_request("127.0.0.1", PORT + 3, "/shutdown") + direct_request(localhost, PORT + 2, "/shutdown") + direct_request(localhost, PORT + 3, "/shutdown") -- collect server results; hitcount -- one should get all the hits, the other 0, and hence a timeout @@ -819,8 +825,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, PORT, { requests / 2 }) - local server2 = http_server(timeout, PORT + 1, { requests / 2 }) + local server1 = http_server(timeout, localhost, PORT, { requests / 2 }) + local server2 = http_server(timeout, localhost, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -842,7 +848,7 @@ dao_helpers.for_each_dao(function(kong_config) ["Content-Type"] = "application/json" }, body = { - target = "127.0.0.1:" .. 
(PORT + 2), + target = utils.format_host(localhost, PORT + 2), weight = target1.weight / 2 , -- shift proportions from 50/50 to 40/40/20 }, }) @@ -853,9 +859,9 @@ dao_helpers.for_each_dao(function(kong_config) -- setup target servers local server3 - server1 = http_server(timeout, PORT, { requests * 0.4 }) - server2 = http_server(timeout, PORT + 1, { requests * 0.4 }) - server3 = http_server(timeout, PORT + 2, { requests * 0.2 }) + server1 = http_server(timeout, localhost, PORT, { requests * 0.4 }) + server2 = http_server(timeout, localhost, PORT + 1, { requests * 0.4 }) + server3 = http_server(timeout, localhost, PORT + 2, { requests * 0.2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -876,8 +882,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, PORT, { requests / 2 }) - local server2 = http_server(timeout, PORT + 1, { requests / 2 }) + local server1 = http_server(timeout, localhost, PORT, { requests / 2 }) + local server2 = http_server(timeout, localhost, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -909,7 +915,7 @@ dao_helpers.for_each_dao(function(kong_config) ----------------------------------------- -- setup target servers - server1 = http_server(timeout, PORT, { requests }) + server1 = http_server(timeout, localhost, PORT, { requests }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -926,8 +932,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, PORT, { requests / 2 }) - local server2 = http_server(timeout, PORT + 1, { requests / 2 }) + local server1 = http_server(timeout, localhost, PORT, { requests / 2 }) + local server2 = http_server(timeout, localhost, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -959,8 +965,8 @@ dao_helpers.for_each_dao(function(kong_config) ----------------------------------------- -- setup target servers - server1 = http_server(timeout, PORT, { requests * 0.4 }) - server2 = http_server(timeout, PORT + 1, { requests * 0.6 }) + server1 = http_server(timeout, localhost, PORT, { requests * 0.4 }) + server2 = http_server(timeout, localhost, PORT + 1, { requests * 0.6 }) -- Go hit them with our test requests local oks = client_requests(requests) @@ -979,8 +985,8 @@ dao_helpers.for_each_dao(function(kong_config) local requests = upstream1.slots * 2 -- go round the balancer twice -- setup target servers - local server1 = http_server(timeout, PORT, { requests / 2 }) - local server2 = http_server(timeout, PORT + 1, { requests / 2 }) + local server1 = http_server(timeout, localhost, PORT, { requests / 2 }) + local server2 = http_server(timeout, localhost, PORT + 1, { requests / 2 }) -- Go hit them with our test requests local oks = client_requests(requests) From 26dae699ae5faddd9080c2cc049feeec73f4ef4c Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Mon, 4 Dec 2017 11:54:21 -0200 Subject: [PATCH 48/74] tests(healthchecks) assert Kong-generated errors do not count Adds a test that attests that a Kong-generated error (in this case a `401 Unauthorized` generated by the key-auth plugin) does not add up to the health checker event counter. This integration test: 1. configures healthchecks with a 1-error threshold 2. 
adds a key-auth plugin with no configured consumer 3. runs request, which fails with 401, but doesn't hit the 1-error threshold 4. deletes the plugin 5. starts proxied servers, and runs two full rounds of the balancer wheel 6. each server reports one full round of the wheel worth of successes, meaning they are both healthy and were not affected by the 401. --- .../05-proxy/09-balancer_spec.lua | 96 ++++++++++++++++++- 1 file changed, 91 insertions(+), 5 deletions(-) diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index c146a27d7a2d..b18d46c35f8d 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -7,6 +7,7 @@ local localhost = "127.0.0.1" local ipv = "ipv4" local PORT = 21000 local utils = require "kong.tools.utils" +local cjson = require "cjson" local healthchecks_defaults = { active = { @@ -263,6 +264,7 @@ end local function client_requests(n, headers) local oks, fails = 0, 0 + local last_status for _ = 1, n do local client = helpers.proxy_client() local res = client:send { @@ -274,12 +276,13 @@ local function client_requests(n, headers) } if res.status == 200 then oks = oks + 1 - elseif res.status == 500 then + elseif res.status > 399 then fails = fails + 1 end + last_status = res.status client:close() end - return oks, fails + return oks, fails, last_status end @@ -337,6 +340,89 @@ dao_helpers.for_each_dao(function(kong_config) helpers.stop_kong(nil, true) end) + it("do not count Kong-generated errors as failures", function() + + -- configure healthchecks with a 1-error threshold + local api_client = helpers.admin_client() + assert(api_client:send { + method = "PATCH", + path = "/upstreams/" .. upstream.name, + headers = { + ["Content-Type"] = "application/json", + }, + body = { + healthchecks = healthchecks_config { + passive = { + healthy = { + successes = 1, + }, + unhealthy = { + http_statuses = { 401, 500 }, + http_failures = 1, + tcp_failures = 1, + timeouts = 1, + }, + } + } + }, + }) + api_client:close() + + -- add a plugin + api_client = helpers.admin_client() + local res = assert(api_client:send { + method = "POST", + path = "/apis/balancer.test/plugins/", + headers = { + ["Content-Type"] = "application/json", + }, + body = { + name = "key-auth", + }, + }) + local plugin_id = cjson.decode((res:read_body())).id + assert.string(plugin_id) + api_client:close() + + -- run request: fails with 401, but doesn't hit the 1-error threshold + local oks, fails, last_status = client_requests(1) + assert.same(0, oks) + assert.same(1, fails) + assert.same(401, last_status) + + -- delete the plugin + api_client = helpers.admin_client() + assert(api_client:send { + method = "DELETE", + path = "/apis/balancer.test/plugins/" .. 
plugin_id, + headers = { + ["Content-Type"] = "application/json", + }, + body = {}, + }) + api_client:close() + + -- start servers, they are unaffected by the failure above + local timeout = 10 + local server1 = http_server(timeout, localhost, PORT, { upstream.slots }) + local server2 = http_server(timeout, localhost, PORT + 1, { upstream.slots }) + + oks, fails = client_requests(upstream.slots * 2) + assert.same(upstream.slots * 2, oks) + assert.same(0, fails) + + -- collect server results + local _, ok1, fail1 = server1:join() + local _, ok2, fail2 = server2:join() + + -- both servers were fully operational + assert.same(upstream.slots, ok1) + assert.same(upstream.slots, ok2) + assert.same(0, fail1) + assert.same(0, fail2) + + end) + it("perform passive health checks", function() for nfails = 1, slots do @@ -579,10 +665,10 @@ dao_helpers.for_each_dao(function(kong_config) -- setup target servers: -- server2 will only respond for part of the test, -- then server1 will take over. - local server1_oks = upstream.slots * 2 + local server1_oks = upstream.slots * 2 - nfails local server2_oks = upstream.slots local server1 = http_server(timeout, localhost, PORT, { - server1_oks - nfails + server1_oks }) local server2 = http_server(timeout, localhost, PORT + 1, { server2_oks / 2, @@ -617,7 +703,7 @@ dao_helpers.for_each_dao(function(kong_config) local _, ok2, fail2 = server2:join() -- verify - assert.are.equal(server1_oks - nfails, ok1) + assert.are.equal(server1_oks, ok1) assert.are.equal(server2_oks, ok2) assert.are.equal(0, fail1) assert.are.equal(nfails, fail2) From c9ae0936750c31caea121faf8127a0bcf0d14155 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Thu, 14 Dec 2017 15:49:13 -0200 Subject: [PATCH 49/74] chore(deps) bump lua-resty-dns-client to 1.0.0 --- kong-0.11.2-0.rockspec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong-0.11.2-0.rockspec b/kong-0.11.2-0.rockspec index 626b93633dc6..0dffe4ea17cd 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.11.2-0.rockspec @@ -27,7 +27,7 @@ dependencies = { "luaossl == 20171028", "luasyslog == 1.0.0", "lua_pack == 1.0.5", - "lua-resty-dns-client == 0.6.3", + "lua-resty-dns-client == 1.0.0", "lua-resty-worker-events == 0.3.1", "lua-resty-mediador == 0.1.2", "lua-resty-healthcheck == 0.2.0", From 928e2a83cfd6b06e3e109fd831b622d3cf0e6116 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Thu, 14 Dec 2017 15:52:57 -0200 Subject: [PATCH 50/74] tests(balancer) add ipv6 tests in targets --- .ci/run_tests.sh | 2 +- spec/02-integration/05-proxy/09-balancer_spec.lua | 13 +++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/.ci/run_tests.sh b/.ci/run_tests.sh index 2b307c4f84cf..c1b0e09dbf94 100755 --- a/.ci/run_tests.sh +++ b/.ci/run_tests.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -e -export BUSTED_ARGS="-o gtest -v --exclude-tags=flaky" +export BUSTED_ARGS="-o gtest -v --exclude-tags=flaky,ipv6" export TEST_CMD="bin/busted $BUSTED_ARGS" createuser --createdb kong diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index b18d46c35f8d..b304ccbd6187 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -3,8 +3,6 @@ local helpers = require "spec.helpers" local dao_helpers = require "spec.02-integration.03-dao.helpers" -local localhost = "127.0.0.1" -local ipv = "ipv4" local PORT = 21000 local utils = require "kong.tools.utils" local cjson = require "cjson" @@ -286,6 +284,15 @@ 
local function client_requests(n, headers) end +local localhosts = { + ipv4 = "127.0.0.1", + ipv6 = "0000:0000:0000:0000:0000:0000:0000:0001", +} + + +for ipv, localhost in pairs(localhosts) do + + dao_helpers.for_each_dao(function(kong_config) describe("Ring-balancer #" .. kong_config.database .. " #" .. ipv, function() @@ -1142,3 +1149,5 @@ dao_helpers.for_each_dao(function(kong_config) end) end) -- for 'database type' + +end From 14df6830c6e8730a51c9d07f14eb613c15248da0 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Fri, 15 Dec 2017 10:20:05 -0800 Subject: [PATCH 51/74] hotfix(log-serializer) avoid redundant `request.request_*` properties Reflecting back on #2445, the chosen names feel very redundant when accessed from a queryable interface. The same way, `upstream_uri` seems to have been wrongly added under the `request` scope, where it doesn't belong. This solution has the benefit of being less breaking as well. (Only one field gets renamed). From #3098 --- kong/plugins/log-serializers/basic.lua | 8 ++++---- spec/01-unit/012-log_serializer_spec.lua | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/kong/plugins/log-serializers/basic.lua b/kong/plugins/log-serializers/basic.lua index 811ab0ba9e93..2196faed9d3c 100644 --- a/kong/plugins/log-serializers/basic.lua +++ b/kong/plugins/log-serializers/basic.lua @@ -15,14 +15,14 @@ function _M.serialize(ngx) return { request = { - request_uri = ngx.var.request_uri, - upstream_uri = ngx.var.upstream_uri, - request_url = ngx.var.scheme .. "://" .. ngx.var.host .. ":" .. ngx.var.server_port .. ngx.var.request_uri, + uri = ngx.var.request_uri, + url = ngx.var.scheme .. "://" .. ngx.var.host .. ":" .. ngx.var.server_port .. ngx.var.request_uri, querystring = ngx.req.get_uri_args(), -- parameters, as a table method = ngx.req.get_method(), -- http method headers = ngx.req.get_headers(), size = ngx.var.request_length }, + upstream_uri = ngx.var.upstream_uri, response = { status = ngx.status, headers = ngx.resp.get_headers(), @@ -45,4 +45,4 @@ function _M.serialize(ngx) } end -return _M \ No newline at end of file +return _M diff --git a/spec/01-unit/012-log_serializer_spec.lua b/spec/01-unit/012-log_serializer_spec.lua index 534f83cb6e3f..ddae4bd9f03e 100644 --- a/spec/01-unit/012-log_serializer_spec.lua +++ b/spec/01-unit/012-log_serializer_spec.lua @@ -58,10 +58,10 @@ describe("Log Serializer", function() assert.same({"header1", "header2"}, res.request.headers) assert.equal("POST", res.request.method) assert.same({"arg1", "arg2"}, res.request.querystring) - assert.equal("http://test.com:80/request_uri", res.request.request_url) - assert.equal("/upstream_uri", res.request.upstream_uri) + assert.equal("http://test.com:80/request_uri", res.request.url) + assert.equal("/upstream_uri", res.upstream_uri) assert.equal(200, res.request.size) - assert.equal("/request_uri", res.request.request_uri) + assert.equal("/request_uri", res.request.uri) -- Response assert.is_table(res.response) @@ -147,4 +147,4 @@ describe("Log Serializer", function() assert.is_nil(res.tries) end) end) -end) \ No newline at end of file +end) From 263bedca6aedcbca4e801e299100dbc74e99b1c4 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Fri, 15 Dec 2017 10:51:28 -0800 Subject: [PATCH 52/74] chore(*) trim all trailing whitespaces (bis) New trailing whitespaces removal this time on top of the next branch. 
--- kong/dao/migrations/helpers.lua | 2 +- spec/03-plugins/26-oauth2/03-access_spec.lua | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kong/dao/migrations/helpers.lua b/kong/dao/migrations/helpers.lua index de3dd6f87e07..9010e887aa1d 100644 --- a/kong/dao/migrations/helpers.lua +++ b/kong/dao/migrations/helpers.lua @@ -36,7 +36,7 @@ function _M.plugin_config_iterator(dao, plugin_name) -- de-serialize in case of Cassandra local json, err = json_decode(row.config) if not json then - return nil, ("json decoding error '%s' while decoding '%s'"):format( + return nil, ("json decoding error '%s' while decoding '%s'"):format( tostring(err), tostring(row.config)) end row.config = json diff --git a/spec/03-plugins/26-oauth2/03-access_spec.lua b/spec/03-plugins/26-oauth2/03-access_spec.lua index 2fca131b0f86..ad453898fc8d 100644 --- a/spec/03-plugins/26-oauth2/03-access_spec.lua +++ b/spec/03-plugins/26-oauth2/03-access_spec.lua @@ -2669,4 +2669,4 @@ dao_helpers.for_each_dao(function(kong_config) end) -end) \ No newline at end of file +end) From 9727e12dcdfe924f22854bb005c05beb94499482 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Mon, 18 Dec 2017 10:18:15 -0800 Subject: [PATCH 53/74] feat(reports) execute defined ping values as functions --- kong/core/reports.lua | 3 +++ spec/01-unit/013-reports_spec.lua | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/kong/core/reports.lua b/kong/core/reports.lua index 3190c3ba1fd8..f7282ff9c291 100644 --- a/kong/core/reports.lua +++ b/kong/core/reports.lua @@ -81,6 +81,9 @@ local function send_report(signal_type, t, host, port) end v = json + + elseif type(v) == "function" then + v = v() end mutable_idx = mutable_idx + 1 diff --git a/spec/01-unit/013-reports_spec.lua b/spec/01-unit/013-reports_spec.lua index e940131d1de6..c8c33ea094ea 100644 --- a/spec/01-unit/013-reports_spec.lua +++ b/spec/01-unit/013-reports_spec.lua @@ -12,7 +12,8 @@ describe("reports", function() reports.send("stub", { hello = "world", - foo = "bar" + foo = "bar", + baz = function() return "bat" end, }, "127.0.0.1", 8189) local ok, res = thread:join() @@ -26,6 +27,7 @@ describe("reports", function() assert.matches("foo=bar", res, nil, true) assert.matches("hello=world", res, nil, true) assert.matches("signal=stub", res, nil, true) + assert.matches("baz=bat", res, nil, true) end) it("doesn't send if not enabled", function() reports.toggle(false) From 57c249712a007344c2c554e02eb7af8874027ef6 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Mon, 18 Dec 2017 14:23:22 -0800 Subject: [PATCH 54/74] fix(tests) silence unnecessary stderr output in reports tests --- spec/01-unit/013-reports_spec.lua | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/spec/01-unit/013-reports_spec.lua b/spec/01-unit/013-reports_spec.lua index c8c33ea094ea..065077b9bcf3 100644 --- a/spec/01-unit/013-reports_spec.lua +++ b/spec/01-unit/013-reports_spec.lua @@ -46,6 +46,11 @@ describe("reports", function() end) describe("retrieve_redis_version()", function() + setup(function() + _G.ngx = ngx + _G.ngx.log = function() return end + end) + before_each(function() package.loaded["kong.core.reports"] = nil reports = require "kong.core.reports" From 5c6a5afa81baafc5a37d72b1dde99fcfa68f9919 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Tue, 19 Dec 2017 13:01:29 -0200 Subject: [PATCH 55/74] chore(deps) bump lua-resty-healthcheck to 0.3.0 --- kong-0.11.2-0.rockspec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong-0.11.2-0.rockspec 
b/kong-0.11.2-0.rockspec index 0dffe4ea17cd..55f08336d50b 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.11.2-0.rockspec @@ -30,7 +30,7 @@ dependencies = { "lua-resty-dns-client == 1.0.0", "lua-resty-worker-events == 0.3.1", "lua-resty-mediador == 0.1.2", - "lua-resty-healthcheck == 0.2.0", + "lua-resty-healthcheck == 0.3.0", } build = { type = "builtin", From c9fa92101add6fadf3e62042da9dd21594cbbd65 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Tue, 19 Dec 2017 13:10:14 -0200 Subject: [PATCH 56/74] hotfix(healthchecks) disable circuit-breaker functionality by default To avoid unexpected behaviors for users, this sets all health counter thresholds to zero by default, effectively disabling the circuit-breaker functionality of passive healthchecks by default. Users need to opt in by supplying threshold values to enable the cut-off of targets based on analysis of ongoing traffic. This commit requires functionality introduced in lua-resty-healthcheck 0.3.0. --- kong/dao/schemas/upstreams.lua | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/kong/dao/schemas/upstreams.lua b/kong/dao/schemas/upstreams.lua index cf409850f943..c8e10ff94d43 100644 --- a/kong/dao/schemas/upstreams.lua +++ b/kong/dao/schemas/upstreams.lua @@ -63,30 +63,30 @@ local healthchecks_defaults = { concurrency = 10, http_path = "/", healthy = { - interval = 0, -- 0 = disabled by default + interval = 0, -- 0 = probing disabled by default http_statuses = { 200, 302 }, - successes = 2, + successes = 0, -- 0 = disabled by default }, unhealthy = { - interval = 0, -- 0 = disabled by default + interval = 0, -- 0 = probing disabled by default http_statuses = { 429, 404, 500, 501, 502, 503, 504, 505 }, - tcp_failures = 2, - timeouts = 3, - http_failures = 5, + tcp_failures = 0, -- 0 = disabled by default + timeouts = 0, -- 0 = disabled by default + http_failures = 0, -- 0 = disabled by default }, }, passive = { healthy = { http_statuses = { 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, 300, 301, 302, 303, 304, 305, 306, 307, 308 }, - successes = 5, + successes = 0, }, unhealthy = { http_statuses = { 429, 500, 503 }, - tcp_failures = 2, - timeouts = 7, - http_failures = 5, + tcp_failures = 0, -- 0 = circuit-breaker disabled by default + timeouts = 0, -- 0 = circuit-breaker disabled by default + http_failures = 0, -- 0 = circuit-breaker disabled by default }, }, } From 6ee0614de644c94c3eb583242fa2c6ba56b4617e Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Tue, 19 Dec 2017 14:28:02 -0800 Subject: [PATCH 57/74] hotfix(migrations) check major C* version against new field Regression introduced by #3085. This highlights the dangers of using the DAO in our migrations. While each migration should be frozen in time and always considered valid given its parents have previously run, using our DAO in the migrations introduces a time factor as the DAO undergoes changes over time that might make previously written migrations suddenly become invalid (sometimes in worse ways than this, if the schema of entities has changed).
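As a minimal sketch of why the parsed numeric field matters here (plain Lua; the version value and the parsing line are illustrative, only `major_version_n` comes from the patch below):

    -- the cluster reports its version as a raw string
    local release_version = "2.2.8"

    -- release_version < 3   -- would raise:
    --                       -- "attempt to compare string with number"

    -- parsing the major version up front yields a safely comparable number
    local major_version_n = tonumber(release_version:match("^(%d+)"))
    assert(major_version_n == 2)

    if major_version_n < 3 then
      -- take the pre-3.0 schema introspection path, as in the diff below
    end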
From #3109 --- kong/dao/migrations/cassandra.lua | 2 +- spec/02-integration/03-dao/01-factory_spec.lua | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/kong/dao/migrations/cassandra.lua b/kong/dao/migrations/cassandra.lua index f43a3b30425b..96657cbfd1bb 100644 --- a/kong/dao/migrations/cassandra.lua +++ b/kong/dao/migrations/cassandra.lua @@ -319,7 +319,7 @@ return { up = function(db, kong_config) local keyspace_name = kong_config.cassandra_keyspace - if db.release_version < 3 then + if db.major_version_n < 3 then local rows, err = db:query([[ SELECT * FROM system.schema_columns diff --git a/spec/02-integration/03-dao/01-factory_spec.lua b/spec/02-integration/03-dao/01-factory_spec.lua index 0506d8ace68b..18561bb75dec 100644 --- a/spec/02-integration/03-dao/01-factory_spec.lua +++ b/spec/02-integration/03-dao/01-factory_spec.lua @@ -52,5 +52,14 @@ helpers.for_each_dao(function(kong_conf) assert.is_string(info.version) assert.not_equal("unknown", info.version) end) + + if kong_conf.database == "cassandra" then + it("[cassandra] sets the 'major_version_n' field on the DB", function() + local factory = assert(Factory.new(kong_conf)) + assert(factory:init()) + + assert.is_number(factory.db.major_version_n) + end) + end end) end) From 5b48f12a1d779d62e16035f2358fc1a88393531a Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Wed, 20 Dec 2017 16:17:41 -0800 Subject: [PATCH 58/74] hotfix(upstreams) ensure 0 is a valid value for health check fields A regression introduced in 3e240b8722141d65b84ac69ab5c2bbc3522eb69d. --- kong/dao/schemas/upstreams.lua | 23 ++++++++-- spec/01-unit/007-entities_schemas_spec.lua | 49 ++++++++++++++-------- 2 files changed, 51 insertions(+), 21 deletions(-) diff --git a/kong/dao/schemas/upstreams.lua b/kong/dao/schemas/upstreams.lua index c8e10ff94d43..5a6525ae46f7 100644 --- a/kong/dao/schemas/upstreams.lua +++ b/kong/dao/schemas/upstreams.lua @@ -24,6 +24,21 @@ local function check_positive_int(t) end +local function check_positive_int_or_zero(t) + if t == 0 then + return true + end + + local ok = check_positive_int(t) + if not ok then + return false, "must be 0 (disabled), or an integer between 1 and " + .. 
2 ^31 - 1 + end + + return true +end + + local function check_http_path(arg) if match(arg, "^%s*$") then return false, "path is empty" @@ -96,10 +111,10 @@ local funcs = { timeout = check_nonnegative, concurrency = check_positive_int, interval = check_nonnegative, - successes = check_positive_int, - tcp_failures = check_positive_int, - timeouts = check_positive_int, - http_failures = check_positive_int, + successes = check_positive_int_or_zero, + tcp_failures = check_positive_int_or_zero, + timeouts = check_positive_int_or_zero, + http_failures = check_positive_int_or_zero, http_path = check_http_path, http_statuses = check_http_statuses, } diff --git a/spec/01-unit/007-entities_schemas_spec.lua b/spec/01-unit/007-entities_schemas_spec.lua index 05881ede4b39..43083b20eef2 100644 --- a/spec/01-unit/007-entities_schemas_spec.lua +++ b/spec/01-unit/007-entities_schemas_spec.lua @@ -754,6 +754,7 @@ describe("Entities Schemas", function() local tests = { {{ active = { timeout = -1 }}, "greater than or equal to 0" }, {{ active = { concurrency = 0.5 }}, "must be an integer" }, + {{ active = { concurrency = 0 }}, "must be an integer" }, {{ active = { concurrency = -10 }}, "must be an integer" }, {{ active = { http_path = "" }}, "is empty" }, {{ active = { http_path = "ovo" }}, "must be prefixed with slash" }, @@ -764,8 +765,8 @@ describe("Entities Schemas", function() {{ active = { healthy = { http_statuses = { 99 }}}}, "status code" }, {{ active = { healthy = { http_statuses = { 1000 }}}}, "status code" }, {{ active = { healthy = { http_statuses = { 111.314 }}}}, "must be an integer" }, - {{ active = { healthy = { successes = 0.5 }}}, "must be an integer" }, - {{ active = { healthy = { successes = 0 }}}, "must be an integer" }, + {{ active = { healthy = { successes = 0.5 }}}, "must be 0 (disabled), or an integer" }, + --{{ active = { healthy = { successes = 0 }}}, "must be an integer" }, {{ active = { healthy = { successes = -1 }}}, "an integer between" }, {{ active = { unhealthy = { interval = -1 }}}, "greater than or equal to 0" }, {{ active = { unhealthy = { http_statuses = 404 }}}, "not an array" }, @@ -773,36 +774,37 @@ describe("Entities Schemas", function() {{ active = { unhealthy = { http_statuses = { -1 }}}}, "status code" }, {{ active = { unhealthy = { http_statuses = { 99 }}}}, "status code" }, {{ active = { unhealthy = { http_statuses = { 1000 }}}}, "status code" }, - {{ active = { unhealthy = { tcp_failures = 0.5 }}}, "must be an integer" }, - {{ active = { unhealthy = { tcp_failures = 0 }}}, "must be an integer" }, + {{ active = { unhealthy = { tcp_failures = 0.5 }}}, "must be 0 (disabled), or an integer" }, + --{{ active = { unhealthy = { tcp_failures = 0 }}}, "must be an integer" }, {{ active = { unhealthy = { tcp_failures = -1 }}}, "an integer between" }, - {{ active = { unhealthy = { timeouts = 0.5 }}}, "must be an integer" }, - {{ active = { unhealthy = { timeouts = 0 }}}, "must be an integer" }, + {{ active = { unhealthy = { timeouts = 0.5 }}}, "must be 0 (disabled), or an integer" }, + --{{ active = { unhealthy = { timeouts = 0 }}}, "must be an integer" }, {{ active = { unhealthy = { timeouts = -1 }}}, "an integer between" }, - {{ active = { unhealthy = { http_failures = 0.5 }}}, "must be an integer" }, + {{ active = { unhealthy = { http_failures = 0.5 }}}, "must be 0 (disabled), or an integer" }, {{ active = { unhealthy = { http_failures = -1 }}}, "an integer between" }, {{ passive = { healthy = { http_statuses = 404 }}}, "not an array" }, {{ passive = { healthy = { 
http_statuses = { "ovo" }}}}, "not a number" }, {{ passive = { healthy = { http_statuses = { -1 }}}}, "status code" }, {{ passive = { healthy = { http_statuses = { 99 }}}}, "status code" }, {{ passive = { healthy = { http_statuses = { 1000 }}}}, "status code" }, - {{ passive = { healthy = { successes = 0.5 }}}, "must be an integer" }, - {{ passive = { healthy = { successes = 0 }}}, "must be an integer" }, + {{ passive = { healthy = { successes = 0.5 }}}, "must be 0 (disabled), or an integer" }, + --{{ passive = { healthy = { successes = 0 }}}, "must be an integer" }, {{ passive = { healthy = { successes = -1 }}}, "an integer between" }, {{ passive = { unhealthy = { http_statuses = 404 }}}, "not an array" }, {{ passive = { unhealthy = { http_statuses = { "ovo" }}}}, "not a number" }, {{ passive = { unhealthy = { http_statuses = { -1 }}}}, "status code" }, {{ passive = { unhealthy = { http_statuses = { 99 }}}}, "status code" }, {{ passive = { unhealthy = { http_statuses = { 1000 }}}}, "status code" }, - {{ passive = { unhealthy = { tcp_failures = 0.5 }}}, "must be an integer" }, - {{ passive = { unhealthy = { tcp_failures = 0 }}}, "must be an integer" }, + {{ passive = { unhealthy = { tcp_failures = 0.5 }}}, "must be 0 (disabled), or an integer" }, + --{{ passive = { unhealthy = { tcp_failures = 0 }}}, "must be an integer" }, {{ passive = { unhealthy = { tcp_failures = -1 }}}, "an integer between" }, - {{ passive = { unhealthy = { timeouts = 0.5 }}}, "must be an integer" }, - {{ passive = { unhealthy = { timeouts = 0 }}}, "must be an integer" }, + {{ passive = { unhealthy = { timeouts = 0.5 }}}, "must be 0 (disabled), or an integer" }, + --{{ passive = { unhealthy = { timeouts = 0 }}}, "must be an integer" }, {{ passive = { unhealthy = { timeouts = -1 }}}, "an integer between" }, - {{ passive = { unhealthy = { http_failures = 0.5 }}}, "must be an integer" }, - {{ passive = { unhealthy = { http_failures = 0 }}}, "must be an integer" }, + {{ passive = { unhealthy = { http_failures = 0.5 }}}, "must be 0 (disabled), or an integer" }, + --{{ passive = { unhealthy = { http_failures = 0 }}}, "must be an integer" }, {{ passive = { unhealthy = { http_failures = -1 }}}, "an integer between" }, + --]] } for _, test in ipairs(tests) do local entity = { @@ -821,7 +823,7 @@ describe("Entities Schemas", function() local valid, errors = validate_entity(entity, upstreams_schema) assert.is_false(valid) - assert.match(test[2], errors[field_name]) + assert.match(test[2], errors[field_name], nil, true) end -- tests for success @@ -858,10 +860,23 @@ describe("Entities Schemas", function() end) + it("creates an upstream with the default values", function() + local default = upstreams_schema.fields.healthchecks.default + local entity = { + name = "x", + healthchecks = default, + } + + local valid, errors = validate_entity(entity, upstreams_schema) + assert.is_nil(errors) + assert.is_true(valid) + end) + it("should require (optional) slots in a valid range", function() local valid, errors, check, _ local data = { name = "valid.host.name" } - valid, _, _ = validate_entity(data, upstreams_schema) + valid, errors, _ = validate_entity(data, upstreams_schema) + assert.is_nil(errors) assert.is_true(valid) assert.equal(slots_default, data.slots) From 04aeb2bc5110d49571fa8f27b16a27bb408a7d99 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Thu, 14 Dec 2017 21:15:30 -0800 Subject: [PATCH 59/74] chore(*) 0.12.0rc1 bump --- kong-0.11.2-0.rockspec => kong-0.12.0rc1-0.rockspec | 4 ++-- kong/meta.lua | 6 +++--- 2 files 
changed, 5 insertions(+), 5 deletions(-) rename kong-0.11.2-0.rockspec => kong-0.12.0rc1-0.rockspec (99%) diff --git a/kong-0.11.2-0.rockspec b/kong-0.12.0rc1-0.rockspec similarity index 99% rename from kong-0.11.2-0.rockspec rename to kong-0.12.0rc1-0.rockspec index 55f08336d50b..0ca293c575ff 100644 --- a/kong-0.11.2-0.rockspec +++ b/kong-0.12.0rc1-0.rockspec @@ -1,9 +1,9 @@ package = "kong" -version = "0.11.2-0" +version = "0.12.0rc1-0" supported_platforms = {"linux", "macosx"} source = { url = "git://github.com/Kong/kong", - tag = "0.11.2" + tag = "0.12.0rc1" } description = { summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.", diff --git a/kong/meta.lua b/kong/meta.lua index 597466cfaa50..baaca1bec452 100644 --- a/kong/meta.lua +++ b/kong/meta.lua @@ -1,8 +1,8 @@ local version = setmetatable({ major = 0, - minor = 11, - patch = 2, - --suffix = "" + minor = 12, + patch = 0, + suffix = "rc1" }, { __tostring = function(t) return string.format("%d.%d.%d%s", t.major, t.minor, t.patch, From 170bbea26409a8ab9e7985d1dde52df1bbb2879d Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Fri, 22 Dec 2017 09:46:23 -0200 Subject: [PATCH 60/74] hotfix(balancer) proper upstream cache invalidation upon CRUD * Make sure that the cache for the overall list of upstreams is invalidated when an individual upstream is created. * Index balancers by upstream id, not name The old version of the balancer logic indexed balancers by upstream name. In the event of upstreams being updated, the list of balancers was cleaned up the next time the full list of upstreams was loaded. Now, we need to keep the balancer list up-to-date at all times, because the table of healthcheckers depends on it and we don't want stale healthcheckers to produce active healthcheck traffic. This patch changes balancer indexing from upstream name to upstream id, and includes a regression test to check for stale health checking. Signed-off-by: Thibault Charbonnier --- kong/core/balancer.lua | 19 +- .../05-proxy/09-balancer_spec.lua | 188 +++++++++++++++++- 2 files changed, 197 insertions(+), 10 deletions(-) diff --git a/kong/core/balancer.lua b/kong/core/balancer.lua index d45cc027415f..7a778fabe49b 100644 --- a/kong/core/balancer.lua +++ b/kong/core/balancer.lua @@ -317,7 +317,7 @@ do -- only make the new balancer available for other requests after it -- is fully set up.
- balancers[upstream.name] = balancer + balancers[upstream.id] = balancer return balancer end @@ -464,7 +464,7 @@ local function get_balancer(target, no_create) return nil, err -- there was an error end - local balancer = balancers[upstream.name] + local balancer = balancers[upstream.id] if not balancer then if no_create then return nil, "balancer not found" @@ -498,7 +498,7 @@ local function on_target_event(operation, target) return end - local balancer = balancers[upstream.name] + local balancer = balancers[upstream.id] if not balancer then log(ERR, "target ", operation, ": balancer not found for ", upstream.name) return @@ -518,6 +518,9 @@ end local function on_upstream_event(operation, upstream) if operation == "create" then + + singletons.cache:invalidate_local("balancer:upstreams") + local _, err = create_balancer(upstream) if err then log(ERR, "failed creating balancer for ", upstream.name, ": ", err) @@ -525,19 +528,17 @@ local function on_upstream_event(operation, upstream) elseif operation == "delete" or operation == "update" then - if operation == "delete" then - singletons.cache:invalidate_local("balancer:upstreams") - end + singletons.cache:invalidate_local("balancer:upstreams") singletons.cache:invalidate_local("balancer:upstreams:" .. upstream.id) singletons.cache:invalidate_local("balancer:targets:" .. upstream.id) - local balancer = balancers[upstream.name] + local balancer = balancers[upstream.id] if balancer then stop_healthchecker(balancer) end if operation == "delete" then - balancers[upstream.name] = nil + balancers[upstream.id] = nil else local _, err = create_balancer(upstream) if err then @@ -722,7 +723,7 @@ end -- @return true if posting event was successful, nil+error otherwise local function post_health(upstream, ip, port, is_healthy) - local balancer = balancers[upstream.name] + local balancer = balancers[upstream.id] if not balancer then return nil, "Upstream " .. tostring(upstream.name) .. 
" has no balancer" end diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index b304ccbd6187..450daa1786d2 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -133,6 +133,7 @@ local function http_server(timeout, host, port, counts, test_log) test_log("test http server on port ", port, " started") local healthy = true + local n_checks = 0 local ok_responses, fail_responses = 0, 0 local total_reqs = 0 @@ -192,6 +193,7 @@ local function http_server(timeout, host, port, counts, test_log) client:send("HTTP/1.1 500 Internal Server Error\r\nConnection: close\r\n\r\n") end client:close() + n_checks = n_checks + 1 elseif lines[1]:match("/healthy") then healthy = true @@ -242,7 +244,7 @@ local function http_server(timeout, host, port, counts, test_log) end server:close() test_log("test http server on port ", port, " closed") - return ok_responses, fail_responses + return ok_responses, fail_responses, n_checks end }, timeout, host, port, counts, test_log or TEST_LOG) @@ -284,6 +286,24 @@ local function client_requests(n, headers) end +local function api_send(method, path, body) + local api_client = helpers.admin_client() + local res, err = api_client:send({ + method = method, + path = path, + headers = { + ["Content-Type"] = "application/json" + }, + body = body, + }) + if not res then + return nil, err + end + api_client:close() + return res.status +end + + local localhosts = { ipv4 = "127.0.0.1", ipv6 = "0000:0000:0000:0000:0000:0000:0000:0001", @@ -314,6 +334,172 @@ dao_helpers.for_each_dao(function(kong_config) collectgarbage() end) + describe("Upstream entities", function() + + before_each(function() + helpers.stop_kong() + helpers.run_migrations() + helpers.start_kong() + end) + + after_each(function() + helpers.stop_kong(nil, true) + end) + + -- Regression test for a missing invalidation in 0.12rc1 + it("created via the API are functional", function() + assert.same(201, api_send("POST", "/upstreams", { + name = "test_upstream", slots = 10, + })) + assert.same(201, api_send("POST", "/upstreams/test_upstream/targets", { + target = utils.format_host(localhost, 2112), + })) + assert.same(201, api_send("POST", "/apis", { + name = "test_api", + hosts = "test_host.com", + upstream_url = "http://test_upstream", + })) + + local server = http_server(10, localhost, 2112, { 1 }) + + local oks, fails, last_status = client_requests(1, { + ["Host"] = "test_host.com" + }) + assert.same(200, last_status) + assert.same(1, oks) + assert.same(0, fails) + + local _, server_oks, server_fails = server:join() + assert.same(1, server_oks) + assert.same(0, server_fails) + end) + + it("can be renamed without producing stale cache", function() + -- create two upstreams, each with a target pointing to a server + for i = 1, 2 do + local name = "test_upstr_" .. i + assert.same(201, api_send("POST", "/upstreams", { + name = name, slots = 10, + healthchecks = healthchecks_config {} + })) + assert.same(201, api_send("POST", "/upstreams/" .. name .. "/targets", { + target = utils.format_host(localhost, 2000 + i), + })) + assert.same(201, api_send("POST", "/apis", { + name = "test_api_" .. i, + hosts = name .. ".com", + upstream_url = "http://" .. 
name, + })) + end + + -- start two servers + local server1 = http_server(10, localhost, 2001, { 1 }) + local server2 = http_server(10, localhost, 2002, { 1 }) + + -- rename upstream 2 + assert.same(200, api_send("PATCH", "/upstreams/test_upstr_2", { + name = "test_upstr_3", + })) + + -- rename upstream 1 to upstream 2's original name + assert.same(200, api_send("PATCH", "/upstreams/test_upstr_1", { + name = "test_upstr_2", + })) + + -- hit a request through upstream 1 using the new name + local oks, fails, last_status = client_requests(1, { + ["Host"] = "test_upstr_2.com" + }) + assert.same(200, last_status) + assert.same(1, oks) + assert.same(0, fails) + + -- rename upstream 2 + assert.same(200, api_send("PATCH", "/upstreams/test_upstr_3", { + name = "test_upstr_1", + })) + + -- a single request to upstream 2 just to make server 2 shutdown + client_requests(1, { ["Host"] = "test_upstr_1.com" }) + + -- collect results + local _, server1_oks, server1_fails = server1:join() + local _, server2_oks, server2_fails = server2:join() + assert.same({1, 0}, { server1_oks, server1_fails }) + assert.same({1, 0}, { server2_oks, server2_fails }) + end) + + it("do not leave a stale healthchecker when renamed", function() + local healthcheck_interval = 0.1 + -- create an upstream + assert.same(201, api_send("POST", "/upstreams", { + name = "test_upstr", slots = 10, + healthchecks = healthchecks_config { + active = { + http_path = "/status", + healthy = { + interval = healthcheck_interval, + successes = 1, + }, + unhealthy = { + interval = healthcheck_interval, + http_failures = 1, + }, + } + } + })) + assert.same(201, api_send("POST", "/upstreams/test_upstr/targets", { + target = utils.format_host(localhost, 2000), + })) + assert.same(201, api_send("POST", "/apis", { + name = "test_api", + hosts = "test_upstr.com", + upstream_url = "http://test_upstr", + })) + + -- start server + local server1 = http_server(10, localhost, 2000, { 1 }) + + -- rename upstream + assert.same(200, api_send("PATCH", "/upstreams/test_upstr", { + name = "test_upstr_2", + })) + + -- reconfigure healthchecks + assert.same(200, api_send("PATCH", "/upstreams/test_upstr_2", { + healthchecks = { + active = { + http_path = "/status", + healthy = { + interval = 0, + successes = 1, + }, + unhealthy = { + interval = 0, + http_failures = 1, + }, + } + } + })) + + -- give time for healthchecker to (not!) run + ngx.sleep(healthcheck_interval * 5) + + assert.same(200, api_send("PATCH", "/apis/test_api", { + upstream_url = "http://test_upstr_2", + })) + + -- a single request to upstream just to make server shutdown + client_requests(1, { ["Host"] = "test_upstr.com" }) + + -- collect results + local _, server1_oks, server1_fails, hcs = server1:join() + assert.same({1, 0}, { server1_oks, server1_fails }) + assert.truthy(hcs < 2) + end) + + end) + describe("#healthchecks", function() local upstream From 4ed5376dcd702881b0fb40e37aaaa6ed0b871eba Mon Sep 17 00:00:00 2001 From: Karen Inman Date: Tue, 2 Jan 2018 13:58:19 -0800 Subject: [PATCH 61/74] feat(reports) post-processing of return values from ping functions If a ping value is a function, execute the function and then proceed with evaluating the value. This allows functions to return tables that will be JSON-encoded before being added to the ping message. 
Signed-off-by: Thibault Charbonnier From #3131 --- kong/core/reports.lua | 7 ++++--- spec/01-unit/013-reports_spec.lua | 6 ++++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/kong/core/reports.lua b/kong/core/reports.lua index f7282ff9c291..81560020ef06 100644 --- a/kong/core/reports.lua +++ b/kong/core/reports.lua @@ -74,6 +74,10 @@ local function send_report(signal_type, t, host, port) for k, v in pairs(t) do if k == "unique_id" or (k ~= "created_at" and sub(k, -2) ~= "id") then + if type(v) == "function" then + v = v() + end + if type(v) == "table" then local json, err = cjson.encode(v) if err then @@ -81,9 +85,6 @@ local function send_report(signal_type, t, host, port) end v = json - - elseif type(v) == "function" then - v = v() end mutable_idx = mutable_idx + 1 diff --git a/spec/01-unit/013-reports_spec.lua b/spec/01-unit/013-reports_spec.lua index 065077b9bcf3..dfadf541e525 100644 --- a/spec/01-unit/013-reports_spec.lua +++ b/spec/01-unit/013-reports_spec.lua @@ -1,6 +1,8 @@ local meta = require "kong.meta" local helpers = require "spec.helpers" local reports = require "kong.core.reports" +local cjson = require "cjson" + describe("reports", function() describe("send()", function() @@ -14,6 +16,8 @@ describe("reports", function() hello = "world", foo = "bar", baz = function() return "bat" end, + foobar = function() return { foo = "bar" } end, + bazbat = { baz = "bat" }, }, "127.0.0.1", 8189) local ok, res = thread:join() @@ -28,6 +32,8 @@ describe("reports", function() assert.matches("hello=world", res, nil, true) assert.matches("signal=stub", res, nil, true) assert.matches("baz=bat", res, nil, true) + assert.matches("foobar=" .. cjson.encode({ foo = "bar" }), res, nil, true) + assert.matches("bazbat=" .. cjson.encode({ baz = "bat" }), res, nil, true) end) it("doesn't send if not enabled", function() reports.toggle(false) From 27a49f2f2b0e75e95659a92cfd0690f8db349ce9 Mon Sep 17 00:00:00 2001 From: Marco Palladino Date: Thu, 4 Jan 2018 15:06:12 -0800 Subject: [PATCH 62/74] feat(reports) ignore nil values from ping Signed-off-by: Thibault Charbonnier From #3133 --- kong/core/reports.lua | 6 ++++-- spec/01-unit/013-reports_spec.lua | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/kong/core/reports.lua b/kong/core/reports.lua index 81560020ef06..34c5af5f9f6f 100644 --- a/kong/core/reports.lua +++ b/kong/core/reports.lua @@ -87,8 +87,10 @@ local function send_report(signal_type, t, host, port) v = json end - mutable_idx = mutable_idx + 1 - _buffer[mutable_idx] = k .. "=" .. tostring(v) + if v ~= nil then + mutable_idx = mutable_idx + 1 + _buffer[mutable_idx] = k .. "=" .. tostring(v) + end end end diff --git a/spec/01-unit/013-reports_spec.lua b/spec/01-unit/013-reports_spec.lua index dfadf541e525..d8c3090d9be6 100644 --- a/spec/01-unit/013-reports_spec.lua +++ b/spec/01-unit/013-reports_spec.lua @@ -18,6 +18,7 @@ describe("reports", function() baz = function() return "bat" end, foobar = function() return { foo = "bar" } end, bazbat = { baz = "bat" }, + nilval = function() return nil end, }, "127.0.0.1", 8189) local ok, res = thread:join() @@ -32,6 +33,7 @@ describe("reports", function() assert.matches("hello=world", res, nil, true) assert.matches("signal=stub", res, nil, true) assert.matches("baz=bat", res, nil, true) + assert.not_matches("nilval", res, nil, true) assert.matches("foobar=" .. cjson.encode({ foo = "bar" }), res, nil, true) assert.matches("bazbat=" .. 
cjson.encode({ baz = "bat" }), res, nil, true) end) From 6c265b33229c35f0050bcb7eaa09104d7a0d8001 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Fri, 5 Jan 2018 11:16:15 -0800 Subject: [PATCH 63/74] chore(*) 0.12.0rc2 bump --- kong-0.12.0rc1-0.rockspec => kong-0.12.0rc2-0.rockspec | 4 ++-- kong/meta.lua | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) rename kong-0.12.0rc1-0.rockspec => kong-0.12.0rc2-0.rockspec (99%) diff --git a/kong-0.12.0rc1-0.rockspec b/kong-0.12.0rc2-0.rockspec similarity index 99% rename from kong-0.12.0rc1-0.rockspec rename to kong-0.12.0rc2-0.rockspec index 0ca293c575ff..525f4e242250 100644 --- a/kong-0.12.0rc1-0.rockspec +++ b/kong-0.12.0rc2-0.rockspec @@ -1,9 +1,9 @@ package = "kong" -version = "0.12.0rc1-0" +version = "0.12.0rc2-0" supported_platforms = {"linux", "macosx"} source = { url = "git://github.com/Kong/kong", - tag = "0.12.0rc1" + tag = "0.12.0rc2" } description = { summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.", diff --git a/kong/meta.lua b/kong/meta.lua index baaca1bec452..80b77e9bf49d 100644 --- a/kong/meta.lua +++ b/kong/meta.lua @@ -2,7 +2,7 @@ local version = setmetatable({ major = 0, minor = 12, patch = 0, - suffix = "rc1" + suffix = "rc2" }, { __tostring = function(t) return string.format("%d.%d.%d%s", t.major, t.minor, t.patch, From a30e0d8f13a43bf689863a61e99b5b94c3021a75 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Wed, 10 Jan 2018 19:21:45 -0800 Subject: [PATCH 64/74] docs(changelog) promote 0.12.0rc2 changeset to stable --- CHANGELOG.md | 31 ++++++------------------------- 1 file changed, 6 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 67eec7918e53..bf5c559541b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,10 +2,8 @@ - [Planned](#planned) - [Scheduled](#scheduled) - - [0.12.0](#0120) - [Released](#released) - - [0.12.0rc2](#0120rc2) - - [0.12.0rc1](#0120rc1) + - [0.12.0](#0120---20180116) - [0.11.2](#0112---20171129) - [0.11.1](#0111---20171024) - [0.10.4](#0104---20171024) @@ -30,10 +28,7 @@ Those releases do not have a fixed release date yet. This section describes upcoming releases that have a release date, along with a detailed changeset of their content. -## 0.12.0 - -Stable release planned for January 2018. See [0.12.0rc1](#0120rc1) -and [0.12.0rc2](#0120rc2). +*No scheduled releases yet.* [Back to TOC](#table-of-contents) @@ -42,23 +37,10 @@ and [0.12.0rc2](#0120rc2). This section describes publicly available releases and a detailed changeset of their content. -## [0.12.0rc2] - -* **Release Candidate**: 2018/01/05 -* **Stable**: January 2018 - -This release candidate fixes an issue from 0.12.0rc1 regarding database cache -invalidation upon Upstream creation and modification. - -[Back to TOC](#table-of-contents) - -## [0.12.0rc1] - -* **Release Candidate**: 2017/12/20 -* **Stable**: January 2018 +## [0.12.0] - 2018/01/16 -Our third major release of 2017 focuses on two new features we are very -excited about: **health checks** and **hash based load balancing**! +This major release focuses on two new features we are very excited about: +**health checks** and **hash based load balancing**! We also took this as an opportunity to fix a few prominent issues, sometimes at the expense of breaking changes but overall improving the flexibility and @@ -2170,8 +2152,7 @@ First version running with Cassandra. 
[Back to TOC](#table-of-contents) -[0.12.0rc2]: https://github.com/Kong/kong/compare/0.12.0rc1...0.12.0rc2 -[0.12.0rc1]: https://github.com/Kong/kong/compare/0.11.2...0.12.0rc1 +[0.12.0]: https://github.com/Kong/kong/compare/0.11.2...0.12.0 [0.11.2]: https://github.com/Kong/kong/compare/0.11.1...0.11.2 [0.11.1]: https://github.com/Kong/kong/compare/0.11.0...0.11.1 [0.10.4]: https://github.com/Kong/kong/compare/0.10.3...0.10.4 From 744d3b32e47323301bdbe402f9bf1cb2acbf88e8 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Thu, 11 Jan 2018 16:57:54 -0200 Subject: [PATCH 65/74] hotfix(runloop) ensure delayed response short-circuits plugin execution Prior to c6bde1a, the `responses.send` method was effectively an "abort" function, causing a plugin phase to stop executing. With the delayed execution of the actual response sending, this feature of `responses.send` was removed, breaking expectations of plugins which assumed that a function would stop running if a response was sent with `responses.send`. This PR restores the original behavior of aborting the ongoing phase of the plugin, while still retaining the benefits of c6bde1a, meaning that further phases and plugins still run. This issue was originally spotted by @nateslo for the key-auth plugin, and the precise issue was mapped out by @thibaultcha and originally described in PR #3413. From #3146 Signed-off-by: Thibault Charbonnier --- kong/init.lua | 3 +- kong/tools/responses.lua | 2 +- .../05-proxy/03-plugins_triggering_spec.lua | 128 ++++++++++++++---- .../kong/plugins/dummy/handler.lua | 7 + 4 files changed, 112 insertions(+), 28 deletions(-) diff --git a/kong/init.lua b/kong/init.lua index db19a906b0bb..e87d69dbef42 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -69,6 +69,7 @@ local ngx_DEBUG = ngx.DEBUG local ipairs = ipairs local assert = assert local tostring = tostring +local coroutine = coroutine local get_last_failure = ngx_balancer.get_last_failure local set_current_peer = ngx_balancer.set_current_peer local set_timeouts = ngx_balancer.set_timeouts @@ -370,7 +371,7 @@ function Kong.access() for plugin, plugin_conf in plugins_iterator(singletons.loaded_plugins, true) do if not ctx.delayed_response then - plugin.handler:access(plugin_conf) + coroutine.wrap(plugin.handler.access)(plugin.handler, plugin_conf) end end diff --git a/kong/tools/responses.lua b/kong/tools/responses.lua index 1d442f3bd768..66fb3ef134c4 100644 --- a/kong/tools/responses.lua +++ b/kong/tools/responses.lua @@ -113,7 +113,7 @@ local function send_response(status_code) headers = headers, } - return + coroutine.yield() end if status_code == _M.status_codes.HTTP_INTERNAL_SERVER_ERROR then diff --git a/spec/02-integration/05-proxy/03-plugins_triggering_spec.lua b/spec/02-integration/05-proxy/03-plugins_triggering_spec.lua index 1abe01896d3e..ca3c798b84d4 100644 --- a/spec/02-integration/05-proxy/03-plugins_triggering_spec.lua +++ b/spec/02-integration/05-proxy/03-plugins_triggering_spec.lua @@ -176,35 +176,67 @@ describe("Plugins triggering", function() helpers.stop_kong() helpers.dao:truncate_tables() - local api = assert(helpers.dao.apis:insert { - name = "example", - hosts = { "mock_upstream" }, - upstream_url = helpers.mock_upstream_url, - }) - -- plugin able to short-circuit a request - assert(helpers.dao.plugins:insert { - name = "key-auth", - api_id = api.id, - }) + do + local api = assert(helpers.dao.apis:insert { + name = "example", + hosts = { "mock_upstream" }, + upstream_url = 
helpers.mock_upstream_url, + }) + + -- plugin able to short-circuit a request + assert(helpers.dao.plugins:insert { + name = "key-auth", + api_id = api.id, + }) + + -- response/body filter plugin + assert(helpers.dao.plugins:insert { + name = "dummy", + api_id = api.id, + config = { + append_body = "appended from body filtering", + } + }) + + -- log phase plugin + assert(helpers.dao.plugins:insert { + name = "file-log", + api_id = api.id, + config = { + path = FILE_LOG_PATH, + }, + }) + end - -- response/body filter plugin - assert(helpers.dao.plugins:insert { - name = "dummy", - api_id = api.id, - config = { - append_body = "appended from body filtering", - } - }) - -- log phase plugin - assert(helpers.dao.plugins:insert { - name = "file-log", - api_id = api.id, - config = { - path = FILE_LOG_PATH, - }, - }) + do + -- API that will produce an error + local api_err = assert(helpers.dao.apis:insert { + name = "example_err", + hosts = { "mock_upstream_err" }, + upstream_url = helpers.mock_upstream_url, + }) + + -- plugin that produces an error + assert(helpers.dao.plugins:insert { + name = "dummy", + api_id = api_err.id, + config = { + append_body = "obtained even with error", + } + }) + + -- log phase plugin + assert(helpers.dao.plugins:insert { + name = "file-log", + api_id = api_err.id, + config = { + path = FILE_LOG_PATH, + }, + }) + end + assert(helpers.start_kong { nginx_conf = "spec/fixtures/custom_nginx.template", @@ -223,6 +255,10 @@ describe("Plugins triggering", function() helpers.stop_kong() end) + after_each(function() + os.execute("echo '' > " .. FILE_LOG_PATH) + end) + it("execute a log plugin", function() local utils = require "kong.tools.utils" local cjson = require "cjson" @@ -289,6 +325,46 @@ describe("Plugins triggering", function() assert.matches("appended from body filtering", body, nil, true) end) + + -- regression test for bug spotted in 0.12.0rc2 + it("responses.send stops plugin but runloop continues", function() + local utils = require "kong.tools.utils" + local cjson = require "cjson" + local pl_path = require "pl.path" + local pl_file = require "pl.file" + local pl_stringx = require "pl.stringx" + local uuid = utils.uuid() + + local res = assert(client:send { + method = "GET", + path = "/status/200?send_error=1", + headers = { + ["Host"] = "mock_upstream_err", + ["X-UUID"] = uuid, + } + }) + local body = assert.res_status(404, res) + + -- TEST: ensure that the dummy plugin stopped running after + -- running responses.send + + assert.not_equal("dummy", res.headers["dummy-plugin-access-header"]) + + -- ...but ensure that further phases are still executed + + -- header_filter phase of same plugin + assert.matches("obtained even with error", body, nil, true) + + -- access phase got a chance to inject the logging plugin + helpers.wait_until(function() + return pl_path.exists(FILE_LOG_PATH) and pl_path.getsize(FILE_LOG_PATH) > 0 + end, 3) + + local log = pl_file.read(FILE_LOG_PATH) + local log_message = cjson.decode(pl_stringx.strip(log)) + assert.equal("127.0.0.1", log_message.client_ip) + assert.equal(uuid, log_message.request.headers["x-uuid"]) + end) end) describe("anonymous reports execution", function() diff --git a/spec/fixtures/custom_plugins/kong/plugins/dummy/handler.lua b/spec/fixtures/custom_plugins/kong/plugins/dummy/handler.lua index d60b899dcea0..855cadb42431 100644 --- a/spec/fixtures/custom_plugins/kong/plugins/dummy/handler.lua +++ b/spec/fixtures/custom_plugins/kong/plugins/dummy/handler.lua @@ -1,4 +1,5 @@ local BasePlugin = require 
"kong.plugins.base_plugin" +local responses = require "kong.tools.responses" local DummyHandler = BasePlugin:extend() @@ -14,6 +15,12 @@ end function DummyHandler:access() DummyHandler.super.access(self) + + if ngx.req.get_uri_args()["send_error"] then + responses.send_HTTP_NOT_FOUND() + end + + ngx.header["Dummy-Plugin-Access-Header"] = "dummy" end From bc39469762405a19eae7663ced92fd1930fd4377 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Thu, 11 Jan 2018 16:38:34 -0800 Subject: [PATCH 66/74] docs(*) update DBs deprecation notices for 0.12 --- CHANGELOG.md | 13 +++++++++++++ UPGRADE.md | 27 ++++++++++++++------------- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bf5c559541b7..918b04c0318b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,19 @@ Path](https://github.com/Kong/kong/blob/master/UPGRADE.md#upgrade-to-012x) for more details regarding breaking changes and migrations before planning to upgrade your Kong cluster. +### Deprecation notices + +Starting with 0.12.0, we are announcing the deprecation of older versions +of our supported databases: + +- Support for PostgreSQL 9.4 is deprecated. Users are advised to upgrade to + 9.5+ +- Support for Cassandra 2.1 and below is deprecated. Users are advised to + upgrade to 2.2+ + +Note that the above deprecated versions are still supported in this release, +but will be dropped in subsequent ones. + ### Breaking changes ##### Core diff --git a/UPGRADE.md b/UPGRADE.md index 33ed0c3ca71d..9e3e6746d2ad 100644 --- a/UPGRADE.md +++ b/UPGRADE.md @@ -51,9 +51,22 @@ complete list of changes and new features. See below the breaking changes section for a detailed list of steps recommended to **run migrations** and upgrade from a previous version of Kong. +#### Deprecation notices + +Starting with 0.12.0, we are announcing the deprecation of older versions +of our supported databases: + +- Support for PostgreSQL 9.4 is deprecated. Users are advised to upgrade to + 9.5+ +- Support for Cassandra 2.1 and below is deprecated. Users are advised to + upgrade to 2.2+ + +Note that the above deprecated versions are still supported in this release, +but will be dropped in subsequent ones. + #### Breaking changes -#### Configuration +##### Configuration - Several updates were made to the NGINX configuration template. If you are using a custom template, you **must** apply those modifications. See below @@ -101,18 +114,6 @@ to **run migrations** and upgrade from a previous version of Kong. - In logging plugins, the `request.request_uri` field has been renamed to `request.url`. -#### Deprecations - -##### Databases - -- Starting with Kong 0.12.0, we have updated our databases support policy. - - Support for PostgreSQL 9.4 has been deprecated. We recommend using - PostgreSQL 9.5 or above. - - Support for Cassandra 2.0 has been deprecated. We recommend using - Cassandra 2.1 or above. - - Support for Redis versions 3.1 or below has been deprecated. We - recommend using Redis 3.2 or above. 
-
 ---
 If you use a custom NGINX configuration template from Kong 0.11, before

From d1f7a9d7294181ebacd27376e3220e446efd52f5 Mon Sep 17 00:00:00 2001
From: Thibault Charbonnier
Date: Thu, 11 Jan 2018 16:42:09 -0800
Subject: [PATCH 67/74] chore(*) 0.12.0 bump

---
 kong-0.12.0rc2-0.rockspec => kong-0.12.0-0.rockspec | 4 ++--
 kong/meta.lua                                       | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
 rename kong-0.12.0rc2-0.rockspec => kong-0.12.0-0.rockspec (99%)

diff --git a/kong-0.12.0rc2-0.rockspec b/kong-0.12.0-0.rockspec
similarity index 99%
rename from kong-0.12.0rc2-0.rockspec
rename to kong-0.12.0-0.rockspec
index 525f4e242250..6cebc13e5f85 100644
--- a/kong-0.12.0rc2-0.rockspec
+++ b/kong-0.12.0-0.rockspec
@@ -1,9 +1,9 @@
 package = "kong"
-version = "0.12.0rc2-0"
+version = "0.12.0-0"
 supported_platforms = {"linux", "macosx"}
 source = {
   url = "git://github.com/Kong/kong",
-  tag = "0.12.0rc2"
+  tag = "0.12.0"
 }
 description = {
   summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.",
diff --git a/kong/meta.lua b/kong/meta.lua
index 80b77e9bf49d..cb74b2dac106 100644
--- a/kong/meta.lua
+++ b/kong/meta.lua
@@ -2,7 +2,7 @@ local version = setmetatable({
   major = 0,
   minor = 12,
   patch = 0,
-  suffix = "rc2"
+  --suffix = ""
 }, {
   __tostring = function(t)
     return string.format("%d.%d.%d%s", t.major, t.minor, t.patch,

From 16dd9cab26c9ca8d92c607d4a6e7b164addee033 Mon Sep 17 00:00:00 2001
From: Thibault Charbonnier
Date: Wed, 17 Jan 2018 16:57:24 -0800
Subject: [PATCH 68/74] fix(busted) catch and propagate tests exit code

OpenResty's LuaJIT is compiled with Lua 5.2 compatibility, which changes
the return values of `os.execute()`: under Lua 5.2 semantics it returns
`ok, "exit"|"signal", code` instead of the raw exit status, hence the
third return value now being passed to `os.exit()`. Once again, this
breaking change messes with us; it seems like this change will haunt us
forever, whether in Kong or in the many third-party libraries we rely
upon and have already pushed fixes for... :sigh:

---
 bin/busted | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/bin/busted b/bin/busted
index 95ccbd1a4949..445a8cb9b3c7 100755
--- a/bin/busted
+++ b/bin/busted
@@ -42,7 +42,8 @@ if not os.getenv("KONG_BUSTED_RESPAWNED") then
   table.insert(script, table.concat(cmd, " "))
 
   -- recurse cli command, with proper variables (un)set for clean testing
-  return os.execute(table.concat(script, "; "))
+  local _, _, rc = os.execute(table.concat(script, "; "))
+  os.exit(rc)
 end
 
 require "luarocks.loader"

From 89693c5719f8a3858c68aaf35c773aaba988b95f Mon Sep 17 00:00:00 2001
From: Thibault Charbonnier
Date: Wed, 17 Jan 2018 17:40:44 -0800
Subject: [PATCH 69/74] tests(responses) fix delay_response test failure

Ensure we run this test the same way we run our business logic: inside
a coroutine.
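
For context, a rough sketch of why the coroutine is needed (illustrative
only, not the code under test): when `ngx.ctx.delay_response` is set,
`responses.send()` records the response and yields instead of calling
`ngx.say`/`ngx.exit`, so invoking it on the main thread raises
"attempt to yield from outside a coroutine":

    -- minimal sketch, assuming the delayed-response semantics above
    ngx.ctx.delay_response = true

    local co = coroutine.wrap(responses.send)
    co(401, "Unauthorized", { ["X-Hello"] = "world" })

    -- co() returns at the yield point: nothing has been written to the
    -- client yet; the recorded response is in ngx.ctx.delayed_response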
---
 spec/01-unit/009-responses_spec.lua | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/spec/01-unit/009-responses_spec.lua b/spec/01-unit/009-responses_spec.lua
index 7fe313ecf31f..7bca5c75ac24 100644
--- a/spec/01-unit/009-responses_spec.lua
+++ b/spec/01-unit/009-responses_spec.lua
@@ -121,10 +121,15 @@ describe("Response helpers", function()
   end)
 
   describe("delayed response", function()
-    it("does not call ngx.say/ngx.exit if `ctx.delayed_response = true`", function()
+    after_each(function()
+      ngx.ctx.delayed_response = nil
+    end)
+
+    it("yields and does not call ngx.say/ngx.exit if `ctx.delay_response = true`", function()
       ngx.ctx.delay_response = true
 
-      responses.send(401, "Unauthorized", { ["X-Hello"] = "world" })
+      local co = coroutine.wrap(responses.send)
+      co(401, "Unauthorized", { ["X-Hello"] = "world" })
 
       assert.stub(ngx.say).was_not_called()
       assert.stub(ngx.exit).was_not_called()
       assert.not_equal("world", ngx.header["X-Hello"])

From e7f4d59c79181c96c0c35cbd4a11aec7d9a6ddb2 Mon Sep 17 00:00:00 2001
From: Robert Paprocki
Date: Thu, 18 Jan 2018 07:26:09 -0800
Subject: [PATCH 70/74] fix(migrations) fix broken Upstreams migrations

There exist two problems with the behavior of the upstream object
migrations introduced in the 0.12.0 release:

- First, the DDL migrations to add both healthcheck and hashing column
  definitions were required before executing function-level migrations
  to fill in default object data. This is a result of the current DAO
  implementation, which requires that all Lua-schema definitions exist
  as column definitions in the underlying data store, even if the DAO
  update call does not reference the column in question.
- Second, the functional definitions load each row via a direct
  underlying DB call (as opposed to a DAO find_all()); this resulted in
  "table" schema types being represented as literal JSON strings,
  instead of Lua table types, by the C* driver. The Postgres
  implementation does not suffer from this, as the underlying
  representation of table data in Postgres-backed schemas is Postgres'
  JSON type; this is automagically deserialized to a Lua table upon
  retrieval. As the C* implementation offers no such behind-the-scenes
  transformation, a direct load of rows containing "table" schemas
  results in an incompatible data type when iterating over the returned
  rows.

The fix in this commit is to use the abstract DAO to load upstream rows
when leveraging C*.
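
To illustrate the difference (a sketch with made-up values, not code
from this patch):

    -- a raw query: the C* driver hands back "table" schema fields as
    -- the literal JSON string stored in the text column
    local rows = dao.db:query("SELECT * FROM upstreams")
    -- rows[1].healthchecks --> '{"active":{...}}' (a Lua string)

    -- the abstract DAO applies the schema marshalling, so the same
    -- field comes back as a Lua table on both Cassandra and Postgres
    local all_rows = dao.upstreams:find_all()
    -- all_rows[1].healthchecks --> { active = { ... } } (a Lua table)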
Fix #3156 Signed-off-by: Thibault Charbonnier --- kong/dao/migrations/cassandra.lua | 58 +++++++++++++--------------- kong/dao/migrations/postgres.lua | 63 +++++++++++++++---------------- 2 files changed, 57 insertions(+), 64 deletions(-) diff --git a/kong/dao/migrations/cassandra.lua b/kong/dao/migrations/cassandra.lua index 96657cbfd1bb..34f584bbd30b 100644 --- a/kong/dao/migrations/cassandra.lua +++ b/kong/dao/migrations/cassandra.lua @@ -481,6 +481,15 @@ return { ]], down = function(_, _, dao) end -- not implemented }, + { + name = "2017-11-07-192000_upstream_healthchecks", + up = [[ + ALTER TABLE upstreams ADD healthchecks text; + ]], + down = [[ + ALTER TABLE upstreams DROP healthchecks; + ]] + }, { name = "2017-10-27-134100_consistent_hashing_1", up = [[ @@ -497,63 +506,48 @@ return { ]] }, { - name = "2017-10-27-134100_consistent_hashing_2", + name = "2017-11-07-192100_upstream_healthchecks_2", up = function(_, _, dao) - local rows, err = dao.db:query([[ - SELECT * FROM upstreams; - ]]) + local rows, err = dao.upstreams:find_all() if err then return err end + local upstreams = require("kong.dao.schemas.upstreams") + local default = upstreams.fields.healthchecks.default + for _, row in ipairs(rows) do - if not row.hash_on or not row.hash_fallback then - row.hash_on = "none" - row.hash_fallback = "none" --- row.created_at = nil - local _, err = dao.upstreams:update(row, { id = row.id }) + if not row.healthchecks then + local _, err = dao.upstreams:update({ + healthchecks = default, + }, { id = row.id }) if err then return err end end end end, - down = function(_, _, dao) end -- n.a. since the columns will be dropped - }, - { - name = "2017-11-07-192000_upstream_healthchecks", - up = [[ - ALTER TABLE upstreams ADD healthchecks text; - ]], - down = [[ - ALTER TABLE upstreams DROP healthchecks; - ]] + down = function(_, _, dao) end }, { - name = "2017-11-07-192100_upstream_healthchecks_2", + name = "2017-10-27-134100_consistent_hashing_2", up = function(_, _, dao) - local rows, err = dao.db:query([[ - SELECT * FROM upstreams; - ]]) + local rows, err = dao.upstreams:find_all() if err then return err end - local upstreams = require("kong.dao.schemas.upstreams") - local default = upstreams.fields.healthchecks.default - for _, row in ipairs(rows) do - if not row.healthchecks then - - local _, err = dao.upstreams:update({ - healthchecks = default, - }, { id = row.id }) + if not row.hash_on or not row.hash_fallback then + row.hash_on = "none" + row.hash_fallback = "none" + local _, err = dao.upstreams:update(row, { id = row.id }) if err then return err end end end end, - down = function(_, _, dao) end + down = function(_, _, dao) end -- n.a. 
since the columns will be dropped
   },
 }
diff --git a/kong/dao/migrations/postgres.lua b/kong/dao/migrations/postgres.lua
index aeb43d88a652..423e53ea5703 100644
--- a/kong/dao/migrations/postgres.lua
+++ b/kong/dao/migrations/postgres.lua
@@ -542,6 +542,21 @@ return {
       ALTER TABLE apis ALTER COLUMN created_at SET DEFAULT CURRENT_TIMESTAMP(0);
     ]]
   },
+  {
+    name = "2017-11-07-192000_upstream_healthchecks",
+    up = [[
+      DO $$
+      BEGIN
+        ALTER TABLE upstreams ADD COLUMN healthchecks json;
+      EXCEPTION WHEN duplicate_column THEN
+        -- Do nothing, accept existing state
+      END$$;
+
+    ]],
+    down = [[
+      ALTER TABLE upstreams DROP COLUMN IF EXISTS healthchecks;
+    ]]
+  },
   {
     name = "2017-10-27-134100_consistent_hashing_1",
     up = [[
@@ -558,7 +573,7 @@ return {
     ]]
   },
   {
-    name = "2017-10-27-134100_consistent_hashing_2",
+    name = "2017-11-07-192100_upstream_healthchecks_2",
     up = function(_, _, dao)
       local rows, err = dao.db:query([[
         SELECT * FROM upstreams;
@@ -567,37 +582,24 @@ return {
         return err
       end
 
+      local upstreams = require("kong.dao.schemas.upstreams")
+      local default = upstreams.fields.healthchecks.default
+
       for _, row in ipairs(rows) do
-        if not row.hash_on or not row.hash_fallback then
-          row.hash_on = "none"
-          row.hash_fallback = "none"
-          row.created_at = nil
-          local _, err = dao.upstreams:update(row, { id = row.id })
+        if not row.healthchecks then
+          local _, err = dao.upstreams:update({
+            healthchecks = default,
+          }, { id = row.id })
           if err then
             return err
           end
         end
       end
     end,
-    down = function(_, _, dao) end -- n.a. since the columns will be dropped
-  },
-  {
-    name = "2017-11-07-192000_upstream_healthchecks",
-    up = [[
-      DO $$
-      BEGIN
-        ALTER TABLE upstreams ADD COLUMN healthchecks json;
-      EXCEPTION WHEN duplicate_column THEN
-        -- Do nothing, accept existing state
-      END$$;
-
-    ]],
-    down = [[
-      ALTER TABLE upstreams DROP COLUMN IF EXISTS healthchecks;
-    ]]
+    down = function(_, _, dao) end
   },
   {
-    name = "2017-11-07-192100_upstream_healthchecks_2",
+    name = "2017-10-27-134100_consistent_hashing_2",
     up = function(_, _, dao)
       local rows, err = dao.db:query([[
         SELECT * FROM upstreams;
@@ -606,21 +608,18 @@ return {
         return err
       end
 
-      local upstreams = require("kong.dao.schemas.upstreams")
-      local default = upstreams.fields.healthchecks.default
-
       for _, row in ipairs(rows) do
-        if not row.healthchecks then
-
-          local _, err = dao.upstreams:update({
-            healthchecks = default,
-          }, { id = row.id })
+        if not row.hash_on or not row.hash_fallback then
+          row.hash_on = "none"
+          row.hash_fallback = "none"
+          row.created_at = nil
+          local _, err = dao.upstreams:update(row, { id = row.id })
           if err then
             return err
           end
         end
       end
     end,
-    down = function(_, _, dao) end
+    down = function(_, _, dao) end -- n.a. since the columns will be dropped
   },
 }

From 1c414162afd480ced42c8cc1fa725edc736cccfa Mon Sep 17 00:00:00 2001
From: Thibault Charbonnier
Date: Wed, 17 Jan 2018 17:57:38 -0800
Subject: [PATCH 71/74] fix(runloop) propagate Lua errors from
 coroutine.wrap()

It seems like ngx_lua's `coroutine.wrap()` does not propagate Lua errors
to the parent thread the way PUC-Lua's and LuaJIT's coroutines do - this
seems to be an oversight for which a fix is currently being designed.
This ensures Lua errors happening inside plugins' access phase propagate
to the ngx_lua entry thread.
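
A quick illustration of the expected semantics (plain PUC-Lua/LuaJIT,
not ngx_lua; the pcall is only there to observe the propagation):

    local co = coroutine.wrap(function()
      error("boom")
    end)

    local ok, err = pcall(co)
    -- ok == false, err matches "boom": the error was re-thrown in the
    -- caller

Under ngx_lua the error does not currently surface in the parent thread
this way, which is why the runloop now treats any value returned by the
wrapped access handler as an error and short-circuits the request with
an HTTP 500.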
--- kong/init.lua | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kong/init.lua b/kong/init.lua index e87d69dbef42..91a7c9756184 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -371,7 +371,11 @@ function Kong.access() for plugin, plugin_conf in plugins_iterator(singletons.loaded_plugins, true) do if not ctx.delayed_response then - coroutine.wrap(plugin.handler.access)(plugin.handler, plugin_conf) + local err = coroutine.wrap(plugin.handler.access)(plugin.handler, plugin_conf) + if err then + ctx.delay_response = false + return responses.send_HTTP_INTERNAL_SERVER_ERROR(err) + end end end From 5252197da76a6035e39a7a3d8482995c4c33cd87 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Thu, 18 Jan 2018 16:41:20 -0800 Subject: [PATCH 72/74] docs(changelog) add 0.12.1 changes --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 918b04c0318b..8def48a23420 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ - [Planned](#planned) - [Scheduled](#scheduled) - [Released](#released) + - [0.12.1](#0121---20180118) - [0.12.0](#0120---20180116) - [0.11.2](#0112---20171129) - [0.11.1](#0111---20171024) @@ -37,6 +38,23 @@ a detailed changeset of their content. This section describes publicly available releases and a detailed changeset of their content. +## [0.12.1] - 2018/01/18 + +This release addresses a few issues encountered with 0.12.0, including one +which would prevent upgrading from a previous version. The [0.12 Upgrade +Path](https://github.com/Kong/kong/blob/master/UPGRADE.md#upgrade-to-012x) +is still relevant for upgrading existing clusters to 0.12.1. + +### Fixed + +- Fix a migration between previous Kong versions and 0.12.0. + [#3159](https://github.com/Kong/kong/pull/3159) +- Ensure Lua errors are propagated when thrown in the `access` handler by + plugins. 
+ [38580ff](https://github.com/Kong/kong/commit/38580ff547cbd4a557829e3ad135cd6a0f821f7c) + +[Back to TOC](#table-of-contents) + ## [0.12.0] - 2018/01/16 This major release focuses on two new features we are very excited about: From 52943f583e5422c8e53adbe3853cd8a1f726e64b Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Thu, 18 Jan 2018 16:42:32 -0800 Subject: [PATCH 73/74] chore(*) 0.12.1 bump --- kong-0.12.0-0.rockspec => kong-0.12.1-0.rockspec | 4 ++-- kong/meta.lua | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) rename kong-0.12.0-0.rockspec => kong-0.12.1-0.rockspec (99%) diff --git a/kong-0.12.0-0.rockspec b/kong-0.12.1-0.rockspec similarity index 99% rename from kong-0.12.0-0.rockspec rename to kong-0.12.1-0.rockspec index 6cebc13e5f85..895a3fea9b96 100644 --- a/kong-0.12.0-0.rockspec +++ b/kong-0.12.1-0.rockspec @@ -1,9 +1,9 @@ package = "kong" -version = "0.12.0-0" +version = "0.12.1-0" supported_platforms = {"linux", "macosx"} source = { url = "git://github.com/Kong/kong", - tag = "0.12.0" + tag = "0.12.1" } description = { summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.", diff --git a/kong/meta.lua b/kong/meta.lua index cb74b2dac106..a1f52d2e1211 100644 --- a/kong/meta.lua +++ b/kong/meta.lua @@ -1,7 +1,7 @@ local version = setmetatable({ major = 0, minor = 12, - patch = 0, + patch = 1, --suffix = "" }, { __tostring = function(t) From f35518dd82795d20796b587063d845340ba15836 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Thu, 18 Jan 2018 18:11:49 -0800 Subject: [PATCH 74/74] docs(changelog) add a missing diff link --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8def48a23420..7a91f40b5057 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2183,6 +2183,7 @@ First version running with Cassandra. [Back to TOC](#table-of-contents) +[0.12.1]: https://github.com/Kong/kong/compare/0.12.0...0.12.1 [0.12.0]: https://github.com/Kong/kong/compare/0.11.2...0.12.0 [0.11.2]: https://github.com/Kong/kong/compare/0.11.1...0.11.2 [0.11.1]: https://github.com/Kong/kong/compare/0.11.0...0.11.1