diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 7ab8506c7..a499c911a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -109,6 +109,8 @@ jobs: lint-code: runs-on: ubuntu-22.04 + permissions: + pull-requests: write needs: - check-changes if: > @@ -204,12 +206,42 @@ jobs: run: helm lint ./charts/heimdall - name: Kubeconform decision mode deployment run: | - helm template --set demo.enabled=true ./charts/heimdall > decision-demo.yaml - kubeconform --skip RuleSet -kubernetes-version ${{ env.KUBERNETES_API_VERSION }} decision-demo.yaml + helm template ./charts/heimdall > decision-config.yaml + kubeconform --skip RuleSet -kubernetes-version ${{ env.KUBERNETES_API_VERSION }} decision-config.yaml - name: Kubeconform proxy mode deployment run: | - helm template --set operationMode=proxy --set demo.enabled=true ./charts/heimdall > proxy-demo.yaml - kubeconform --skip RuleSet -kubernetes-version ${{ env.KUBERNETES_API_VERSION }} decision-demo.yaml + helm template --set operationMode=proxy ./charts/heimdall > proxy-config.yaml + kubeconform --skip RuleSet -kubernetes-version ${{ env.KUBERNETES_API_VERSION }} proxy-config.yaml + + unittest-helm-chart: + runs-on: ubuntu-22.04 + needs: + - check-changes + if: > + needs.check-changes.outputs.helm_chart_changed == 'true' || + needs.check-changes.outputs.ci_config_changed == 'true' + steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - name: Checkout repository + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - name: Setup k8s tools + uses: yokawasa/action-setup-kube-tools@5fe385031665158529decddddb51d6224422836e # v0.11.1 + with: + setup-tools: | + helm + kubeconform + helm: '${{ env.HELM_VERSION }}' + kubeconform: '${{ env.KUBECONFORM_VERSION }}' + - name: Install Helm Unittest + run: helm plugin install --version v0.6.0 https://github.com/helm-unittest/helm-unittest.git + - name: Run tests + run: | + helm unittest ./charts/heimdall + test: runs-on: ubuntu-22.04 @@ -691,9 +723,6 @@ jobs: replace: "${{ needs.prepare-release.outputs.tag_name }}" regex: false include: docs/** - - name: Build documentation - working-directory: ./docs - run: hugo --minify -d ./public - name: Update uri for redirecting to new version uses: jacobtomlinson/gha-find-replace@099c88fbf2a7da26b083521a8bfa13e4f0886b97 # v3 with: @@ -701,6 +730,16 @@ jobs: replace: "${{ needs.prepare-release.outputs.tag_name }}" regex: false include: docs/** + - name: Update used image tags to the released version + uses: jacobtomlinson/gha-find-replace@099c88fbf2a7da26b083521a8bfa13e4f0886b97 # v3 + with: + find: "heimdall:dev" + replace: "heimdall:${{ needs.prepare-release.outputs.tag_name }}" + regex: false + include: docs/** + - name: Build documentation + working-directory: ./docs + run: hugo --minify -d ./public - name: Update versions JSON document id: update-version-json run: | diff --git a/.golangci.yaml b/.golangci.yaml index fbc6b6ccb..05d9f06ef 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -28,12 +28,13 @@ issues: - maintidx - cyclop - gocognit - - goerr113 + - err113 - lll - errcheck - canonicalheader - mnd - err113 + - forcetypeassert linters-settings: exhaustive: diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 89aec035b..8f3e0a494 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.14.5-alpha" + ".": "0.15.0" } \ No 
newline at end of file diff --git a/ADOPTERS.md b/ADOPTERS.md index 04a04bc9b..de8295797 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -5,4 +5,5 @@ To add your organization to this list, open a pull request that adds your organi Please remember to [sign your commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits) to comply with our contributors guidelines. | Name | Since | Website | Use-Case | -|:-|:-|:-|:-| \ No newline at end of file +|:-|:-|:-|:-| +| Éphémère Creative | 2023 | https://ephemerecreative.ca | Heimdall helps us manage access to a portal where users can monitor running services. | diff --git a/CHANGELOG.md b/CHANGELOG.md index 4df74c29a..8d0897c93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,84 @@ # Changelog +## [0.15.0](https://github.com/dadrus/heimdall/compare/v0.14.0-alpha...v0.15.0) (2024-09-16) + + +### ⚠ BREAKING CHANGES + +* Made the usage of `if` clauses in authentication & authorization, and error pipelines consistent ([#1784](https://github.com/dadrus/heimdall/issues/1784)) +* Deprecated OTEL attributes replaced ([#1669](https://github.com/dadrus/heimdall/issues/1669)) +* Configuration of `signer` moved into `jwt` finalizer ([#1534](https://github.com/dadrus/heimdall/issues/1534)) +* Demo installation removed from the helm chart ([#1544](https://github.com/dadrus/heimdall/issues/1544)) +* Subject has been made immutable ([#1487](https://github.com/dadrus/heimdall/issues/1487)) +* Rule matching configuration API redesigned ([#1358](https://github.com/dadrus/heimdall/issues/1358)) +* Default rule rejects requests with encoded slashes in the path of the URL with `400 Bad Request` ([#1358](https://github.com/dadrus/heimdall/issues/1358)) +* Support for `rule_path_match_prefix` on endpoint configurations for `http_endpoint` and `cloud_blob` providers has been dropped ([#1358](https://github.com/dadrus/heimdall/issues/1358)) + +### Features + +* Glob expressions are context aware and use `.` for host related expressions and `/` for path related ones as separators ([#1358](https://github.com/dadrus/heimdall/issues/1358)) ([f2f6867](https://github.com/dadrus/heimdall/commit/f2f6867576b758312b1a85dc06fe52be3ae9d2ff)) +* Multiple rules can be defined for the same path, e.g. 
to have separate rules for read and write requests ([#1358](https://github.com/dadrus/heimdall/issues/1358)) ([f2f6867](https://github.com/dadrus/heimdall/commit/f2f6867576b758312b1a85dc06fe52be3ae9d2ff)) +* New endpoint auth type to create http message signatures for outbound requests according to RFC 9421 ([#1507](https://github.com/dadrus/heimdall/issues/1507)) ([672988d](https://github.com/dadrus/heimdall/commit/672988d2463ddf8abbade7cb9f0656d848682ae3)) +* Route based matching of rules ([#1766](https://github.com/dadrus/heimdall/issues/1766)) ([8ef379d](https://github.com/dadrus/heimdall/commit/8ef379db1d504440b6fa19794b7b38c173a730b0)) +* Support for backtracking while matching rules ([#1358](https://github.com/dadrus/heimdall/issues/1358)) ([f2f6867](https://github.com/dadrus/heimdall/commit/f2f6867576b758312b1a85dc06fe52be3ae9d2ff)) +* Support for free and single (named) wildcards for request path matching and access of the captured values from the pipeline ([#1358](https://github.com/dadrus/heimdall/issues/1358)) ([f2f6867](https://github.com/dadrus/heimdall/commit/f2f6867576b758312b1a85dc06fe52be3ae9d2ff)) + + +### Code Refactorings + +* Configuration of `signer` moved into `jwt` finalizer ([#1534](https://github.com/dadrus/heimdall/issues/1534)) ([4475745](https://github.com/dadrus/heimdall/commit/447574557d109be7f17844bc743eb9cc625427d9)) +* Default rule rejects requests with encoded slashes in the path of the URL with `400 Bad Request` ([#1358](https://github.com/dadrus/heimdall/issues/1358)) ([f2f6867](https://github.com/dadrus/heimdall/commit/f2f6867576b758312b1a85dc06fe52be3ae9d2ff)) +* Demo installation removed from the helm chart ([#1544](https://github.com/dadrus/heimdall/issues/1544)) ([f8770b3](https://github.com/dadrus/heimdall/commit/f8770b3bfa3599c37290677454baa4f52c12a7a7)) +* Deprecated OTEL attributes replaced ([#1669](https://github.com/dadrus/heimdall/issues/1669)) ([e5ed3a5](https://github.com/dadrus/heimdall/commit/e5ed3a57f5de3164200c285a811908c7a32fbfc8)) +* Made the usage of `if` clauses in authentication & authorization, and error pipelines consistent ([#1784](https://github.com/dadrus/heimdall/issues/1784)) ([2577f56](https://github.com/dadrus/heimdall/commit/2577f560b80c49e3e5a4b3da547245af98844843)) +* Rule matching configuration API redesigned ([#1358](https://github.com/dadrus/heimdall/issues/1358)) ([f2f6867](https://github.com/dadrus/heimdall/commit/f2f6867576b758312b1a85dc06fe52be3ae9d2ff)) +* Subject has been made immutable ([#1487](https://github.com/dadrus/heimdall/issues/1487)) ([6c4957f](https://github.com/dadrus/heimdall/commit/6c4957fd897de55de4b23563be4406423ba26b00)) +* Support for `rule_path_match_prefix` on endpoint configurations for `http_endpoint` and `cloud_blob` providers has been dropped ([#1358](https://github.com/dadrus/heimdall/issues/1358)) ([f2f6867](https://github.com/dadrus/heimdall/commit/f2f6867576b758312b1a85dc06fe52be3ae9d2ff)) + + +### Performance Improvements + +* O(log(n)) time complexity for lookup of rules ([#1358](https://github.com/dadrus/heimdall/issues/1358)) ([f2f6867](https://github.com/dadrus/heimdall/commit/f2f6867576b758312b1a85dc06fe52be3ae9d2ff)) + + +### Bug Fixes + +* Corrected the placement of namespace selector properties in the Helm chart's admission controller configuration 
([#1752](https://github.com/dadrus/heimdall/issues/1752)). ([4c059b3](https://github.com/dadrus/heimdall/commit/4c059b38510a1aa2d37d9103a3cb8935f4c2043b)) +* Fixed a nil pointer error in the Helm chart that occurred when a deployment was configured with custom annotations due to an incorrect reference in the deployment template ([#1752](https://github.com/dadrus/heimdall/issues/1752)). ([4c059b3](https://github.com/dadrus/heimdall/commit/4c059b38510a1aa2d37d9103a3cb8935f4c2043b)) +* Taking updates of certificates into account while collecting metrics ([#1534](https://github.com/dadrus/heimdall/issues/1534)) ([4475745](https://github.com/dadrus/heimdall/commit/447574557d109be7f17844bc743eb9cc625427d9)) +* Updated the admission controller configuration in the Helm chart to align with the redesigned structure done in v0.12.0-alpha release of heimdall ([#1752](https://github.com/dadrus/heimdall/issues/1752)). ([4c059b3](https://github.com/dadrus/heimdall/commit/4c059b38510a1aa2d37d9103a3cb8935f4c2043b)) + + +### Documentation + +* Guide for First-Party Authentication with OpenID Connect ([#1789](https://github.com/dadrus/heimdall/issues/1789)) ([8c6b9c3](https://github.com/dadrus/heimdall/commit/8c6b9c3c4fec7cc605fc8a1058e0847e7abb3947)) +* New integration guide for Envoy Gateway ([#1412](https://github.com/dadrus/heimdall/issues/1412)) ([526f381](https://github.com/dadrus/heimdall/commit/526f381c931cd58e9513716a1bc7fa9149c36e3d)) +* NGINX Ingress Controller guide updated to cover global integration options ([#1469](https://github.com/dadrus/heimdall/issues/1469)) ([a710a64](https://github.com/dadrus/heimdall/commit/a710a640fc1ce2cadfa37eb59a4fc0fa52c5120b)) +* Traefik guide updated to cover `Ingress`, `IngressRoute` and `HTTPRoute` based integration options ([#1420](https://github.com/dadrus/heimdall/issues/1420)) ([303095e](https://github.com/dadrus/heimdall/commit/303095e204c3ea753b06a2b90171462de19b1eb4)) + + +### Dependencies + +* update golang to v1.23.1 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update golang.org/x/exp digest to 701f63a ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update google.golang.org/genproto/googleapis/rpc digest to 8af14fe ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module github.com/go-playground/validator/v10 to v10.22.1 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module github.com/jellydator/ttlcache/v3 to v3.3.0 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module github.com/masterminds/sprig/v3 to v3.3.0 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module github.com/prometheus/client_golang to v1.20.3
([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module github.com/redis/rueidis to v1.0.45 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module github.com/redis/rueidis/rueidisotel to v1.0.45 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module github.com/rs/cors to v1.11.1 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc to v0.55.0 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module go.opentelemetry.io/contrib/instrumentation/host to v0.55.0 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp to v0.55.0 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module go.opentelemetry.io/contrib/instrumentation/runtime to v0.55.0 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module go.opentelemetry.io/contrib/propagators/autoprop to v0.55.0 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module gocloud.dev to v0.39.0 ([#1774](https://github.com/dadrus/heimdall/issues/1774)) ([4ffa9e4](https://github.com/dadrus/heimdall/commit/4ffa9e45227c177ba5f729b6111d6551de5a67a8)) +* update module google.golang.org/grpc to v1.66.2 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update module k8s.io/client-go to v0.31.1 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) +* update opentelemetry-go monorepo to v1.30.0 ([#1793](https://github.com/dadrus/heimdall/issues/1793)) ([54e6cad](https://github.com/dadrus/heimdall/commit/54e6cad5e4e8b909f646e2f0318f94388f793039)) + ## [0.14.5-alpha](https://github.com/dadrus/heimdall/compare/v0.14.4-alpha...v0.14.5-alpha) (2024-08-25) @@ -34,7 +113,7 @@ ### Bug Fixes -* OAuth2 `iss` claim verification in JWT/OIDC authenticators when used with `metadata_endpoint` ([#1660](https://github.com/dadrus/heimdall/issues/1660)) ([a9947f2](https://github.com/dadrus/heimdall/commit/a9947f20f412ca4133202ee7bc1e7b58f2903766)) +* OAuth2 `iss` claim verification in JWT/OIDC authenticators when used with `metadata_endpoint` 
([#1660](https://github.com/dadrus/heimdall/issues/1660)) by [@martin31821](https://github.com/martin31821) ([a9947f2](https://github.com/dadrus/heimdall/commit/a9947f20f412ca4133202ee7bc1e7b58f2903766)) * Trailing useless bytes ignored while parsing PEM content ([#1564](https://github.com/dadrus/heimdall/issues/1564)) ([0c52bd3](https://github.com/dadrus/heimdall/commit/0c52bd30d308dbd8985f3223ba36180dbb808a24)) diff --git a/DockerHub-README.md b/DockerHub-README.md index 0fd3ae149..b6ad356fd 100644 --- a/DockerHub-README.md +++ b/DockerHub-README.md @@ -107,9 +107,6 @@ mechanisms: type: jwt default_rule: - methods: - - GET - - POST execute: - authenticator: anonymous_authenticator - authorizer: deny_all_requests @@ -124,11 +121,12 @@ providers: Create a rule file (`rule.yaml`) with the following contents: ```yaml -version: "1alpha3" +version: "1alpha4" rules: - id: test-rule match: - url: http://<**>/<**> + routes: + - path: /** forward_to: host: upstream execute: diff --git a/Justfile b/Justfile index 0507e0e78..3b8dd04bc 100644 --- a/Justfile +++ b/Justfile @@ -16,12 +16,12 @@ lint-dockerfile: lint-helmchart: helm lint ./charts/heimdall - helm template --set demo.enabled=true ./charts/heimdall > /tmp/decision-demo.yaml - helm template --set operationMode=proxy --set demo.enabled=true ./charts/heimdall > /tmp/proxy-demo.yaml - kubeconform --skip RuleSet -kubernetes-version 1.27.0 /tmp/decision-demo.yaml - kubeconform --skip RuleSet -kubernetes-version 1.27.0 /tmp/proxy-demo.yaml - rm /tmp/decision-demo.yaml - rm /tmp/proxy-demo.yaml + helm template ./charts/heimdall > /tmp/decision-config.yaml + helm template --set operationMode=proxy ./charts/heimdall > /tmp/proxy-config.yaml + kubeconform --skip RuleSet -kubernetes-version 1.27.0 /tmp/decision-config.yaml + kubeconform --skip RuleSet -kubernetes-version 1.27.0 /tmp/proxy-config.yaml + rm /tmp/decision-config.yaml + rm /tmp/proxy-config.yaml lint: check-licenses lint-api lint-code lint-dockerfile lint-helmchart diff --git a/README.md b/README.md index 506e2537d..e2ddf11a6 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,8 @@ Head over to the [documentation](https://dadrus.github.io/heimdall/) for details ## Current state -The current implementation is a late alpha version. That means it does not solve all the problems heimdall aims to solve. With other words a lot of functionality is missing. In addition, alpha version means, there will be breaking changes. Nevertheless, the code base is very stable and pretty good tested. Functionality already supported can be found in [Release descriptions](https://github.com/dadrus/heimdall/releases). Planned features can be found in the defined [Milestones](https://github.com/dadrus/heimdall/milestones). +The project is considered production-ready and is already in use by multiple organizations worldwide. The code base is stable and well-tested. However, some features are still missing, and the development of these features might lead to breaking changes in future updates. For information on the currently supported functionality, please refer to the [Release descriptions](https://github.com/dadrus/heimdall/releases). Planned features can be found in the defined [Milestones](https://github.com/dadrus/heimdall/milestones). + ## If you ... 
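For illustration, a rule file in the new `1alpha4` format (as used by the updated `rule.yaml` example above) might look like the following sketch. It assumes the route-based match schema introduced by the `ruleset.yaml` CRD change further down in this diff; the rule ids and the upstream host are placeholders, and the mechanism names are taken from the README example.

```yaml
version: "1alpha4"
rules:
  # Two rules for the same path, e.g. to treat read and write requests differently
  # (see the corresponding changelog entry above).
  - id: api-read-rule                 # placeholder id
    match:
      routes:
        - path: /api/**               # free wildcard, as in the README example
      methods: [ GET, HEAD ]          # method matching now lives under `match`
    forward_to:
      host: upstream                  # placeholder upstream host
    execute:
      - authenticator: anonymous_authenticator
  - id: api-write-rule                # placeholder id
    match:
      routes:
        - path: /api/**
      methods: [ POST, PUT, DELETE ]
    forward_to:
      host: upstream
    execute:
      - authenticator: anonymous_authenticator
      - authorizer: deny_all_requests
```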
diff --git a/SECURITY.md b/SECURITY.md index 30ee49306..c349e88e0 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,9 +2,16 @@ ## Supported Versions -As long as the project is in its alpha or beta phase, security updates are provided with a new release only. -That means patches for older releases will not be provided. This policy will be updated as soon as the project is considered to have evolved to beta. +We provide security updates at least once a month for the last released minor version through patch releases. Currently, we do not offer patches for older versions. To ensure you receive timely security updates, please use the latest version of the project. ## Reporting a Vulnerability -Please report (suspected) security vulnerabilities by making use of [Security Advisories](https://github.com/dadrus/heimdall/security/advisories). Do not use GitHub issues for that. You will receive a response 48 hours. If the issue is confirmed, a new release addressing the reported vulnerability will be done as soon as possible (depending on complexity). +To report a security vulnerability, please use the [Security Advisories](https://github.com/dadrus/heimdall/security/advisories) feature. Do not use GitHub issues or other communication channels for security concerns. + +When you report a vulnerability, you will receive a response within 48 hours. If the issue is confirmed, we will issue a new release to address the vulnerability as soon as possible, depending on its complexity and severity. + +## Security Updates and Patching + +We prioritize timely response to confirmed security issues. Security updates will be provided in patch releases, and the release notes will detail the fixes and improvements made. + +To help you maintain a secure installation, we provide extensive guidance in the [Security chapter](https://dadrus.github.io/heimdall/dev/docs/operations/security/) of the documentation. In addition, the entire documentation includes relevant notes and best practices for securing your installation. diff --git a/charts/heimdall/Chart.yaml b/charts/heimdall/Chart.yaml index d72034a50..0ff8859fb 100644 --- a/charts/heimdall/Chart.yaml +++ b/charts/heimdall/Chart.yaml @@ -17,9 +17,9 @@ apiVersion: v2 name: heimdall description: A cloud native Identity Aware Proxy and Access Control Decision Service -version: 0.13.5 +version: 0.14.0 appVersion: latest -kubeVersion: ^1.19.0 +kubeVersion: ^1.27.0 type: application home: https://dadrus.github.io/heimdall/ @@ -43,5 +43,4 @@ keywords: - iap - auth-proxy - identity-aware-proxy - - decision-api - auth-filter diff --git a/charts/heimdall/README.adoc b/charts/heimdall/README.adoc index 55c42b9ff..2b2165395 100644 --- a/charts/heimdall/README.adoc +++ b/charts/heimdall/README.adoc @@ -4,9 +4,11 @@ This chart helps you to deploy heimdall in your Kubernetes cluster using Helm. +**Note:** Heimdall follows the https://kubernetes.io/releases/version-skew-policy/#supported-versions[Kubernetes support policy], and supports at least the latest three minor versions of Kubernetes. General functionality cannot be guaranteed for versions older than that. 
+ == Prerequisites -* A Kubernetes version >= 1.19 or >= 1.23 if you would like to use HPA +* A Kubernetes version >= 1.27 * https://helm.sh/docs/intro/install/[Helm] 3.0+ * https://git-scm.com/downloads[Git] (optional) @@ -48,29 +50,6 @@ If you need to override the name of the heimdall resources such as the deploymen If you want to have detailed CPU & memory profiling information, you can enable the profiling service and let your APM scrape the corresponding pprof data. For https://grafana.com/docs/phlare[Phlare], the corresponding annotations are set if the profiling service is enabled. -This chart does also support a demo installation, which can be enabled by using the `--set demo.enabled=true` flag during the installation. For the demo installation there is no need to provide a configuration file (actually, you shall not provide it, as it will break the demo setup), as some simple mechanisms are already part of that installation. In addition, a demo rule set, as well as a simple echo service will be installed. If you're using an NGINX Ingress Controller, the chart will do the required integration, so you can directly see heimdall in action, by e.g. using curl: - -* Send requests to a "public" (unprotected) endpoint -+ -[source,bash] ----- -$ curl -v -H "Host: demo-app" /heimdall-demo/pub/foo ----- - -* Send requests to an endpoint which expects a JWT with a `sub` claim set to `anonymous` -+ -[source,bash] ----- -$ curl -v -H "Host: demo-app" /heimdall-demo/anon/foo ----- - -* All other endpoints are not allowed to be called and will result in HTTP 403 -+ -[source,bash] ----- -$ curl -v -H "Host: demo-app" /heimdall-demo/foo ----- - === Installing the CRD By default, heimdall requires a custom resource definition (CRD) installed in the cluster. The Helm client will install it for you. If the CRD is not installed, you'll neither be able to deploy the corresponding rule sets, nor will heimdall be able to communicate with the api server. @@ -220,26 +199,6 @@ a| `operationMode` The mode of operation for the heimdall installation. Can be `proxy` or `decision` a| `decision` -a| `demo.enabled` - -Wether a demo installation should be done. If demo installation is chosen, you don't have to provide a `heimdall.yaml` config file, as the required configuration is included in the demo setup. -a| `false` - -a| `demo.forwardAuthMiddlewareAnnotation` - -Which annotation to use on the demo app Ingress rule for decision operation mode to let the Ingress Controller use heimdall as authentication middleware -a| `nginx.ingress.kubernetes.io/auth-url` - -a| `demo.forwardAuthMiddlewareResponseAnnotation` - -Which annotation to use on the demo app Ingress rule for decision operation mode to let the Ingress Controller forwarding the response headers coming from heimdall to the demo app. -a| `nginx.ingress.kubernetes.io/auth-response-headers` - -a| `demo.forwardAuthMiddlewareRequestUri` - -Which macro/variable to use to forward the request uri to heimdall. Depending on your Ingress Controller, it can be omitted. E.g. Traefik sends such information in a header. 
-a| `/$request_uri` - a| `image.repository` The image repository to load heimdall image from diff --git a/charts/heimdall/crds/ruleset.yaml b/charts/heimdall/crds/ruleset.yaml index 6c50e05b3..f7fd7bfa7 100644 --- a/charts/heimdall/crds/ruleset.yaml +++ b/charts/heimdall/crds/ruleset.yaml @@ -27,7 +27,7 @@ spec: singular: ruleset listKind: RuleSetList versions: - - name: v1alpha3 + - name: v1alpha4 served: true storage: true schema: @@ -51,7 +51,7 @@ spec: type: array minItems: 1 items: - description: A himedall rule defining the pipeline mechanisms + description: A heimdall rule defining the pipeline mechanisms type: object required: - id @@ -75,20 +75,105 @@ spec: description: How to match the rule type: object required: - - url + - routes properties: - url: - description: The url to match - type: string - maxLength: 512 - strategy: - description: Strategy to match the url. Can either be regex or glob. + routes: + description: Routes to match + type: array + minItems: 1 + items: + description: Definition of a single route + type: object + required: + - path + properties: + path: + description: The path to match + type: string + maxLength: 512 + path_params: + description: Optional matching definitions for the captured wildcard + type: array + items: + description: Matching definition for a single wildcard + type: object + required: + - name + - type + - value + properties: + name: + description: The name of a wildcard + type: string + maxLength: 64 + type: + description: The type of the matching expression + type: string + maxLength: 5 + enum: + - "exact" + - "glob" + - "regex" + value: + description: The actual matching expression + type: string + maxLength: 256 + backtracking_enabled: + description: Whether this rule allows backtracking. Defaults to the value inherited from the default rule + type: boolean + methods: + description: The HTTP methods to match + type: array + minItems: 1 + items: + type: string + maxLength: 16 + enum: + - "CONNECT" + - "!CONNECT" + - "DELETE" + - "!DELETE" + - "GET" + - "!GET" + - "HEAD" + - "!HEAD" + - "OPTIONS" + - "!OPTIONS" + - "PATCH" + - "!PATCH" + - "POST" + - "!POST" + - "PUT" + - "!PUT" + - "TRACE" + - "!TRACE" + - "ALL" + scheme: + description: The HTTP scheme, which should be matched. If not set, http and https are matched type: string maxLength: 5 - default: glob - enum: - - regex - - glob + hosts: + description: Optional expressions to match the host if required. If not set, all hosts are matched. + type: array + items: + description: Expression to match a host + type: object + required: + - type + - value + properties: + type: + description: The type of the host matching expression + type: string + maxLength: 5 + enum: + - "exact" + - "glob" + - "regex" + value: + description: The actual host matching expression + type: string + maxLength: 256 forward_to: description: Where to forward the request to. Required only if heimdall is used in proxy operation mode.
type: object @@ -125,33 +210,6 @@ spec: items: type: string maxLength: 128 - methods: - description: The allowed HTTP methods - type: array - minItems: 1 - items: - type: string - maxLength: 16 - enum: - - "CONNECT" - - "!CONNECT" - - "DELETE" - - "!DELETE" - - "GET" - - "!GET" - - "HEAD" - - "!HEAD" - - "OPTIONS" - - "!OPTIONS" - - "PATCH" - - "!PATCH" - - "POST" - - "!POST" - - "PUT" - - "!PUT" - - "TRACE" - - "!TRACE" - - "ALL" execute: description: The pipeline mechanisms to execute type: array diff --git a/charts/heimdall/templates/NOTES.txt b/charts/heimdall/templates/NOTES.txt index b952f8c3d..c18df6e49 100644 --- a/charts/heimdall/templates/NOTES.txt +++ b/charts/heimdall/templates/NOTES.txt @@ -12,23 +12,5 @@ services. Consult the Ingress Controller documentation of your choice on how to {{- else }} Heimdall is installed and configured to operate in proxy mode. - {{- if not .Values.demo.enabled }} - The actual integration depends pretty much on your requirements and setup. - {{- end }} {{- end }} - -{{- if .Values.demo.enabled }} - -The setup includes a demo app (which just echoes the request) and a rule set. If you're using NGINX as Ingress Controller, the abovesaid annotations are already added to the demo app ingress rule. So, to see heimdall in actions, just do. - -> curl -v -H "Host: demo-app" /heimdall-demo/pub/test -This endpoint is not protected. so heimdall will just allow the request - -> curl -v -H "Host: demo-app" /heimdall-demo/anon/test -This endpoint is configured to accept anonymous requests. So heimdall will allow the request, -but create a JWT with "sub" claim set to anonymous. - -Requests to any other endpoints will be answered with HTTP 403. E.g. -curl -v -H "Host: demo-app" /heimdall-demo/foo -{{- end }} \ No newline at end of file diff --git a/charts/heimdall/templates/heimdall/configmap.yaml b/charts/heimdall/templates/configmap.yaml similarity index 91% rename from charts/heimdall/templates/heimdall/configmap.yaml rename to charts/heimdall/templates/configmap.yaml index b471c5b45..0d9e5d234 100644 --- a/charts/heimdall/templates/heimdall/configmap.yaml +++ b/charts/heimdall/templates/configmap.yaml @@ -14,7 +14,6 @@ # # SPDX-License-Identifier: Apache-2.0 -{{- if not .Values.demo.enabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -49,11 +48,6 @@ data: {{- toYaml . | nindent 6 }} {{- end }} - {{- with .Values.signer }} - signer: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.mechanisms }} mechanisms: {{- toYaml . | nindent 6 }} @@ -67,5 +61,4 @@ data: {{- with .Values.providers }} providers: {{- toYaml . | nindent 6 }} - {{- end }} -{{- end }} \ No newline at end of file + {{- end }} \ No newline at end of file diff --git a/charts/heimdall/templates/demo/_helpers.tpl b/charts/heimdall/templates/demo/_helpers.tpl deleted file mode 100644 index 5d753cf4b..000000000 --- a/charts/heimdall/templates/demo/_helpers.tpl +++ /dev/null @@ -1,49 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "heimdall.demo.name" -}} -{{- $name := printf "%s-demo" (default .Chart.Name .Values.nameOverride) -}} -{{- $name | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "heimdall.demo.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- $name := printf "%s-demo" .Values.fullnameOverride -}} -{{- $name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := printf "%s-demo" (default .Chart.Name .Values.nameOverride) }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Common demo labels -*/}} -{{- define "heimdall.demo.labels" -}} -{{ include "heimdall.demo.selectorLabels" . }} -helm.sh/chart: {{ include "heimdall.chart" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -app.kubernetes.io/part-of: {{ include "heimdall.name" . }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "heimdall.demo.selectorLabels" -}} -app.kubernetes.io/name: {{ include "heimdall.name" . }}-demo-app -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - - diff --git a/charts/heimdall/templates/demo/configmap.yaml b/charts/heimdall/templates/demo/configmap.yaml deleted file mode 100644 index 1de165d69..000000000 --- a/charts/heimdall/templates/demo/configmap.yaml +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2022 Dimitrij Drus -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.demo.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "heimdall.fullname" . }}-config - namespace: {{ include "heimdall.namespace" . }} - labels: -{{ include "heimdall.labels" . | indent 4 }} -data: - heimdall.yaml: | - {{- with .Values.serve }} - serve: - {{- toYaml . | nindent 6 }} - {{- end }} - - {{- with .Values.log }} - log: - {{- toYaml . | nindent 6 }} - {{- else }} - log: - level: info - format: gelf - {{- end }} - - {{- with .Values.metrics }} - metrics: - {{- toYaml . | nindent 6 }} - {{- end }} - - {{- with .Values.tracing }} - tracing: - {{- toYaml . | nindent 6 }} - {{- end }} - - {{- with .Values.profiling }} - profiling: - {{- toYaml . | nindent 6 }} - {{- end }} - - {{- with .Values.signer }} - signer: - {{- toYaml . 
| nindent 6 }} - {{- end }} - - mechanisms: - authenticators: - - id: anonymous_authenticator - type: anonymous - authorizers: - - id: deny_all_requests - type: deny - - id: allow_all_requests - type: allow - finalizers: - - id: create_jwt - type: jwt - - id: noop_finalizer - type: noop - - default_rule: - methods: - - GET - - POST - execute: - - authenticator: anonymous_authenticator - - authorizer: deny_all_requests - - finalizer: create_jwt - - providers: - kubernetes: {} - {{- end }} \ No newline at end of file diff --git a/charts/heimdall/templates/demo/deployment.yaml b/charts/heimdall/templates/demo/deployment.yaml deleted file mode 100644 index 3a55d82f9..000000000 --- a/charts/heimdall/templates/demo/deployment.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2022 Dimitrij Drus -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.demo.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "heimdall.demo.fullname" . }} - namespace: heimdall-demo - labels: - {{- include "heimdall.demo.labels" . | nindent 4 }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/demo/configmap.yaml") . | sha256sum }} -spec: - selector: - matchLabels: - {{- include "heimdall.demo.selectorLabels" . | nindent 6 }} - template: - metadata: - labels: - {{- include "heimdall.demo.selectorLabels" . | nindent 8 }} - spec: - imagePullSecrets: - {{- toYaml .Values.image.pullSecrets | nindent 8 }} - # https://github.com/kubernetes/kubernetes/issues/57601 - automountServiceAccountToken: false - securityContext: - {{- toYaml .Values.deployment.pod.securityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }}-test-service - securityContext: - {{- toYaml .Values.deployment.securityContext | nindent 12 }} - image: "containous/whoami:latest" - imagePullPolicy: {{ .Values.image.pullPolicy }} - args: [ "--port", "8080", "--name", "{{ include "heimdall.demo.name" . }}" ] - ports: - - name: http-demo - protocol: TCP - containerPort: 8080 -{{- end }} \ No newline at end of file diff --git a/charts/heimdall/templates/demo/ingress.yaml b/charts/heimdall/templates/demo/ingress.yaml deleted file mode 100644 index 38d3a956b..000000000 --- a/charts/heimdall/templates/demo/ingress.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2022 Dimitrij Drus -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 - -{{ if .Values.demo.enabled -}} -{{- $service := "unset" -}} -{{- $port := "unset" -}} - {{- if eq .Values.operationMode "decision" -}} - {{- $service = include "heimdall.demo.fullname" . -}} - {{- $port = 8080 -}} - {{- else -}} - {{- $service = "heimdall-proxy" -}} - {{- $port = .Values.service.proxy.port -}} - {{- end }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: {{ include "heimdall.demo.fullname" . }} - namespace: heimdall-demo - labels: - {{- include "heimdall.demo.labels" . | nindent 4 }} - {{- if eq .Values.operationMode "decision" }} - annotations: - nginx.ingress.kubernetes.io/configuration-snippet: | - auth_request /_auth; - auth_request_set $authHeader0 $upstream_http_authorization; - proxy_set_header 'Authorization' $authHeader0; - nginx.ingress.kubernetes.io/server-snippet: | - location = /_auth { - internal; - proxy_method $request_method; - proxy_pass "http://{{ include "heimdall.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.decision.port }}{{ .Values.demo.forwardAuthMiddlewareRequestUri }}"; - proxy_pass_request_body off; - proxy_set_header Content-Length ""; - proxy_set_header Host $http_host; - } - {{- end }} -spec: - ingressClassName: nginx - rules: - - host: "demo-app" - http: - paths: - - path: "/heimdall-demo" - pathType: ImplementationSpecific - backend: - service: - name: {{ $service }} - port: - number: {{ $port }} -{{- end }} diff --git a/charts/heimdall/templates/demo/namespace.yaml b/charts/heimdall/templates/demo/namespace.yaml deleted file mode 100644 index f2419d247..000000000 --- a/charts/heimdall/templates/demo/namespace.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2022 Dimitrij Drus -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.demo.enabled }} -kind: Namespace -apiVersion: v1 -metadata: - name: heimdall-demo -{{- end }} \ No newline at end of file diff --git a/charts/heimdall/templates/demo/service.yaml b/charts/heimdall/templates/demo/service.yaml deleted file mode 100644 index c5cf85cec..000000000 --- a/charts/heimdall/templates/demo/service.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2022 Dimitrij Drus -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.demo.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "heimdall.demo.fullname" . }} - namespace: heimdall-demo - labels: - {{- include "heimdall.demo.labels" . 
| nindent 4 }} -spec: - type: ClusterIP - ports: - - port: 8080 - targetPort: http-demo - protocol: TCP - name: demo-port - selector: - {{- include "heimdall.demo.selectorLabels" . | nindent 4 }} - - {{- if eq .Values.operationMode "proxy" -}} - {{- $heimdallService := include "heimdall.fullname" . -}} - {{- $heimdallService = print $heimdallService "." .Release.Namespace ".svc.cluster.local" }} ---- -apiVersion: v1 -kind: Service -metadata: - name: heimdall-proxy - namespace: heimdall-demo - labels: - {{- include "heimdall.demo.labels" . | nindent 4 }} -spec: - type: ExternalName - externalName: {{ $heimdallService }} - ports: - - port: {{ .Values.service.proxy.port }} - selector: - {{- include "heimdall.demo.selectorLabels" . | nindent 4 }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/heimdall/templates/demo/test-rule.yaml b/charts/heimdall/templates/demo/test-rule.yaml deleted file mode 100644 index 1ce7b4b40..000000000 --- a/charts/heimdall/templates/demo/test-rule.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2022 Dimitrij Drus -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.demo.enabled }} -apiVersion: heimdall.dadrus.github.com/v1alpha3 -kind: RuleSet -metadata: - name: {{ include "heimdall.demo.fullname" . }}-test-rule - namespace: heimdall-demo - labels: - {{- include "heimdall.demo.labels" . | nindent 4 }} -spec: - rules: - - id: public-access - match: - url: http://<**>/pub/<**> - forward_to: - host: {{ include "heimdall.demo.fullname" . }}.heimdall-demo.svc.cluster.local:8080 - execute: - - authenticator: anonymous_authenticator - - authorizer: allow_all_requests - - finalizer: noop_finalizer - - id: anonymous-access - match: - url: http://<**>/anon/<**> - forward_to: - host: {{ include "heimdall.demo.fullname" . }}.heimdall-demo.svc.cluster.local:8080 - execute: - - authorizer: allow_all_requests - -{{- end }} \ No newline at end of file diff --git a/charts/heimdall/templates/heimdall/deployment.yaml b/charts/heimdall/templates/deployment.yaml similarity index 86% rename from charts/heimdall/templates/heimdall/deployment.yaml rename to charts/heimdall/templates/deployment.yaml index af4ff369f..0b25cd4f0 100644 --- a/charts/heimdall/templates/heimdall/deployment.yaml +++ b/charts/heimdall/templates/deployment.yaml @@ -14,11 +14,7 @@ # # SPDX-License-Identifier: Apache-2.0 -{{ $opMode := required "operationMode is not set! Call helm with --set operationMode=" .Values.operationMode -}} -{{- if not (or (eq $opMode "decision") (eq $opMode "proxy")) -}} -{{- required "A valid operationMode is required! 
Call helm with --set operationMode=" .Values.operationMode -}} -{{- end -}} -{{- $data := dict "Release" .Release "Values" .Values "Chart" .Chart "Component" "deployment" -}} +{{- $data := dict "Release" .Release "Values" .Values "Chart" .Chart "Component" "deployment" }} apiVersion: apps/v1 kind: Deployment metadata: @@ -27,9 +23,9 @@ metadata: labels: {{- include "heimdall.labels" $data | nindent 4 }} annotations: - checksum/config: {{ include (print $.Template.BasePath "/heimdall/configmap.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} {{- with .Values.deployment.annotations }} - {{- toYaml .Values.deployment.annotations | nindent 4 }} + {{- toYaml . | nindent 4 }} {{- end }} spec: {{- if not .Values.deployment.autoscaling.enabled }} @@ -41,7 +37,7 @@ spec: template: metadata: labels: - {{- include "heimdall.selectorLabels" $data | nindent 8 }} + {{- include "heimdall.labels" $data | nindent 8 }} annotations: {{- if contains "prometheus" (.Values.env.OTEL_METRICS_EXPORTER | default "") }} prometheus.io/scrape: "true" @@ -77,12 +73,12 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} args: - serve - - {{- if eq $opMode "decision" }} decision{{- else }} proxy{{- end }} + - {{- if eq .Values.operationMode "decision" }} decision{{- else }} proxy{{- end }} {{- with .Values.extraArgs }} {{- toYaml . | nindent 12 }} {{- end }} ports: - {{- if eq $opMode "decision" }} + {{- if eq .Values.operationMode "decision" }} - name: http-decision containerPort: {{ .Values.serve.decision.port }} protocol: TCP @@ -104,8 +100,7 @@ spec: protocol: TCP containerPort: {{ .Values.profiling.port }} {{- end }} - {{- $rules := .Values.rules | default dict }} - {{- $providers := $rules.providers | default dict }} + {{- $providers := .Values.providers | default dict }} {{- $kubernetes := $providers.kubernetes | default dict}} {{- if $kubernetes.tls }} - name: https-webhook diff --git a/charts/heimdall/templates/heimdall/hpa.yaml b/charts/heimdall/templates/hpa.yaml similarity index 74% rename from charts/heimdall/templates/heimdall/hpa.yaml rename to charts/heimdall/templates/hpa.yaml index 21878a3d5..4767e90b2 100644 --- a/charts/heimdall/templates/heimdall/hpa.yaml +++ b/charts/heimdall/templates/hpa.yaml @@ -15,18 +15,22 @@ # SPDX-License-Identifier: Apache-2.0 {{- if .Values.deployment.autoscaling.enabled }} +{{- if and (not .Values.deployment.autoscaling.targetCPUUtilizationPercentage) (not .Values.deployment.autoscaling.targetMemoryUtilizationPercentage) }} + {{- fail "autoscaling is enabled, but usage of both, the cpu and the memory metrics is disabled" }} +{{- end }} +{{- $data := dict "Release" .Release "Values" .Values "Chart" .Chart }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: - name: {{ include "heimdall.fullname" . }} - namespace: {{ include "heimdall.namespace" . }} + name: {{ include "heimdall.fullname" $data }} + namespace: {{ include "heimdall.namespace" $data }} labels: - {{- include "heimdall.labels" . | nindent 4 }} + {{- include "heimdall.labels" $data | nindent 4 }} spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: {{ include "heimdall.fullname" . 
}} + name: {{ include "heimdall.fullname" $data }} minReplicas: {{ .Values.deployment.autoscaling.minReplicas }} maxReplicas: {{ .Values.deployment.autoscaling.maxReplicas }} metrics: diff --git a/charts/heimdall/templates/heimdall/service.yaml b/charts/heimdall/templates/service.yaml similarity index 78% rename from charts/heimdall/templates/heimdall/service.yaml rename to charts/heimdall/templates/service.yaml index cefa17f96..261d50fac 100644 --- a/charts/heimdall/templates/heimdall/service.yaml +++ b/charts/heimdall/templates/service.yaml @@ -14,11 +14,7 @@ # # SPDX-License-Identifier: Apache-2.0 -{{ $opMode := required "operationMode is not set! Call helm with --set operationMode=" .Values.operationMode -}} -{{- if not (or (eq $opMode "decision") (eq $opMode "proxy")) -}} - {{- required "A valid operationMode is required! Call helm with --set operationMode=" .Values.operationMode -}} -{{- end -}} -{{- $data := dict "Release" .Release "Values" .Values "Chart" .Chart "Component" "service" -}} +{{- $data := dict "Release" .Release "Values" .Values "Chart" .Chart "Component" "service" }} apiVersion: v1 kind: Service metadata: @@ -35,7 +31,7 @@ spec: targetPort: http-management protocol: TCP name: {{ .Values.service.management.name }} - {{- if eq $opMode "decision" }} + {{- if eq .Values.operationMode "decision" }} - port: {{ .Values.service.decision.port }} targetPort: http-decision protocol: TCP @@ -46,8 +42,7 @@ spec: protocol: TCP name: {{ .Values.service.proxy.name }} {{- end }} - {{- $rules := default dict .Values.rules }} - {{- $providers := default dict $rules.providers }} + {{- $providers := default dict .Values.providers }} {{- $kubernetes := default dict $providers.kubernetes }} {{- if $kubernetes.tls }} - port: {{ .Values.service.admissionController.port }} diff --git a/charts/heimdall/templates/heimdall/serviceaccount.yaml b/charts/heimdall/templates/serviceaccount.yaml similarity index 96% rename from charts/heimdall/templates/heimdall/serviceaccount.yaml rename to charts/heimdall/templates/serviceaccount.yaml index dc1c6f41f..8ccc2687c 100644 --- a/charts/heimdall/templates/heimdall/serviceaccount.yaml +++ b/charts/heimdall/templates/serviceaccount.yaml @@ -21,8 +21,6 @@ metadata: namespace: {{ include "heimdall.namespace" . }} labels: {{- include "heimdall.labels" . | nindent 4 }} - annotations: - kubernetes.io/service-account.name: {{ include "heimdall.fullname" . 
}} automountServiceAccountToken: false --- diff --git a/charts/heimdall/templates/heimdall/admissioncontroller.yaml b/charts/heimdall/templates/validating_webhook.yaml similarity index 76% rename from charts/heimdall/templates/heimdall/admissioncontroller.yaml rename to charts/heimdall/templates/validating_webhook.yaml index 8f7bfecd1..a73a2bc93 100644 --- a/charts/heimdall/templates/heimdall/admissioncontroller.yaml +++ b/charts/heimdall/templates/validating_webhook.yaml @@ -1,5 +1,4 @@ -{{- $rules := default dict .Values.rules }} -{{- $providers := default dict $rules.providers }} +{{- $providers := default dict .Values.providers }} {{- $kubernetes := default dict $providers.kubernetes }} {{- if $kubernetes.tls }} # Only active if .Values.rules.providers.kubernetes.tls is configured @@ -14,23 +13,21 @@ metadata: annotations: {{- toYaml .Values.admissionController.annotations | nindent 4 }} webhooks: - - name: "admission-controller.heimdall.dadrus.github.com" + - name: admission-controller.heimdall.dadrus.github.com admissionReviewVersions: [ "v1" ] sideEffects: None timeoutSeconds: {{ .Values.admissionController.timeoutSeconds }} {{- with .Values.admissionController.namespaceSelector }} + namespaceSelector: {{- toYaml . | nindent 8 }} {{- end }} rules: - apiGroups: ["heimdall.dadrus.github.com"] - apiVersions: ["v1alpha3"] + apiVersions: ["v1alpha4"] operations: ["CREATE", "UPDATE"] resources: ["rulesets"] scope: "Namespaced" matchConditions: - {{- $rules := .Values.rules | default dict }} - {{- $providers := $rules.providers | default dict }} - {{- $kubernetes := $providers.kubernetes | default dict}} # Match only those rule sets, which relate to the configured auth class - name: 'auth-class-filter' expression: 'object.spec.authClassName == {{ default (quote "default") (quote $kubernetes.auth_class) }}' @@ -39,8 +36,8 @@ webhooks: caBundle: {{ . }} {{- end }} service: - namespace: {{ include "heimdall.namespace" $data }} - name: {{ include "heimdall.fullname" $data }} + namespace: {{ include "heimdall.namespace" . }} + name: {{ include "heimdall.fullname" . }} path: "/validate-ruleset" port: {{ .Values.service.admissionController.port }} {{- end }} \ No newline at end of file diff --git a/charts/heimdall/templates/validations/operation_mode.yaml b/charts/heimdall/templates/validations/operation_mode.yaml new file mode 100644 index 000000000..eb4972606 --- /dev/null +++ b/charts/heimdall/templates/validations/operation_mode.yaml @@ -0,0 +1,4 @@ +{{ $opMode := required "operationMode is not set! Call helm with --set operationMode=" .Values.operationMode }} +{{- if not (or (eq $opMode "decision") (eq $opMode "proxy")) }} + {{- fail "A valid operationMode is required! 
Call helm with --set operationMode=" }} +{{- end }} \ No newline at end of file diff --git a/charts/heimdall/tests/configmap_test.yaml b/charts/heimdall/tests/configmap_test.yaml new file mode 100644 index 000000000..a12c46833 --- /dev/null +++ b/charts/heimdall/tests/configmap_test.yaml @@ -0,0 +1,277 @@ +suite: test suite for configmap configuration +templates: + - configmap.yaml +tests: + - it: should be configured by default + asserts: + - isKind: + of: ConfigMap + - isAPIVersion: + of: v1 + + - it: name should be set with default name + asserts: + - equal: + path: metadata.name + value: RELEASE-NAME-heimdall-config + + - it: name should be set with overwritten name + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.name + value: test-release-foo-config + + - it: namespace should be set + release: + namespace: test-namespace + asserts: + - equal: + path: metadata.namespace + value: test-namespace + + - it: should set default labels with default values + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: latest + - matchRegex: + path: metadata.labels["helm.sh/chart"] + pattern: heimdall-* + + - it: should not have any annotations + asserts: + - notExists: + path: metadata.annotations + + - it: should include default entries if nothing is specified + asserts: + - equal: + path: data["heimdall.yaml"] + value: + |- + serve: + decision: + port: 4456 + management: + port: 4457 + proxy: + port: 4455 + profiling: + enabled: false + host: 0.0.0.0 + port: 10251 + + - it: should allow configuration of any heimdall setting + set: + serve: + decision: + host: 127.0.0.1 + tls: + key_store: + path: /path/to/keystore.pem + password: VerySecure! + timeout: + read: 1s + write: 2s + idle: 30s + buffer_limit: + read: 4KB + write: 10KB + trusted_proxies: + - 192.168.1.0/24 + respond: + verbose: true + with: + authentication_error: + code: 404 + authorization_error: + code: 404 + management: + host: 127.0.0.1 + tls: + key_store: + path: /path/to/keystore.pem + password: VerySecure! 
+ timeout: + read: 1s + write: 2s + idle: 30s + buffer_limit: + read: 4KB + write: 10KB + metrics: + enabled: true + profiling: + host: 192.168.2.10 + port: 9999 + tracing: + enabled: true + span_processor: batch + log: + level: debug + format: gelf + mechanisms: + authenticators: + - id: deny_all + type: unauthorized + - id: anon + type: anonymous + - id: jwt_auth + type: jwt + config: + jwks_endpoint: http://idp:8080/.well-known/jwks + assertions: + issuers: + - demo_issuer + authorizers: + - id: opa + type: remote + config: + endpoint: http://opa:8181/v1/data/{{ .Values.policy }} + payload: "{}" + expressions: + - expression: | + Payload.result == true + finalizers: + - id: create_jwt + type: jwt + config: + signer: + key_store: + path: /etc/heimdall/signer.pem + - id: noop + type: noop + default_rule: + execute: + - authenticator: deny_all + - finalizer: create_jwt + providers: + file_system: + src: /etc/heimdall/rules.yaml + watch: true + http_endpoint: + endpoints: + - url: http://foo.bar/ruleset1 + cloud_blob: + buckets: + - url: gs://my-bucket + kubernetes: + auth_class: foo + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: data["heimdall.yaml"] + value: + |- + serve: + decision: + buffer_limit: + read: 4KB + write: 10KB + host: 127.0.0.1 + port: 4456 + respond: + verbose: true + with: + authentication_error: + code: 404 + authorization_error: + code: 404 + timeout: + idle: 30s + read: 1s + write: 2s + tls: + key_store: + password: VerySecure! + path: /path/to/keystore.pem + trusted_proxies: + - 192.168.1.0/24 + management: + buffer_limit: + read: 4KB + write: 10KB + host: 127.0.0.1 + port: 4457 + timeout: + idle: 30s + read: 1s + write: 2s + tls: + key_store: + password: VerySecure! + path: /path/to/keystore.pem + proxy: + port: 4455 + log: + format: gelf + level: debug + metrics: + enabled: true + tracing: + enabled: true + span_processor: batch + profiling: + enabled: false + host: 192.168.2.10 + port: 9999 + mechanisms: + authenticators: + - id: deny_all + type: unauthorized + - id: anon + type: anonymous + - config: + assertions: + issuers: + - demo_issuer + jwks_endpoint: http://idp:8080/.well-known/jwks + id: jwt_auth + type: jwt + authorizers: + - config: + endpoint: http://opa:8181/v1/data/{{ .Values.policy }} + expressions: + - expression: | + Payload.result == true + payload: '{}' + id: opa + type: remote + finalizers: + - config: + signer: + key_store: + path: /etc/heimdall/signer.pem + id: create_jwt + type: jwt + - id: noop + type: noop + default_rule: + execute: + - authenticator: deny_all + - finalizer: create_jwt + providers: + cloud_blob: + buckets: + - url: gs://my-bucket + file_system: + src: /etc/heimdall/rules.yaml + watch: true + http_endpoint: + endpoints: + - url: http://foo.bar/ruleset1 + kubernetes: + auth_class: foo + tls: + key_store: + path: /path/to/file.pem diff --git a/charts/heimdall/tests/container_test.yaml b/charts/heimdall/tests/container_test.yaml new file mode 100644 index 000000000..10c01e253 --- /dev/null +++ b/charts/heimdall/tests/container_test.yaml @@ -0,0 +1,391 @@ +suite: test suite for container configuration +templates: + - deployment.yaml + - configmap.yaml +chart: + appVersion: v0.14.0 +tests: + - it: should have the default image when no value is specified + template: deployment.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: ghcr.io/dadrus/heimdall:v0.14.0 + + - it: should change image when image.tag value is specified + template: deployment.yaml + set: + image: + tag: 
v2.0.0 + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: ghcr.io/dadrus/heimdall:v2.0.0 + + - it: should change image when image.repository value is specified + template: deployment.yaml + set: + image: + repository: docker.io/dadrus/heimdall + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: docker.io/dadrus/heimdall:v0.14.0 + + - it: imagePullPolicy should be IfNotPresent by default + template: deployment.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].imagePullPolicy + value: IfNotPresent + + - it: should use custom imagePullPolicy if specified + template: deployment.yaml + set: + image.pullPolicy: Always + asserts: + - equal: + path: spec.template.spec.containers[0].imagePullPolicy + value: Always + + - it: should have security context configured by default + template: deployment.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext + value: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + + - it: should allow security context configuration and merge the provided configuration with the default one + template: deployment.yaml + set: + deployment.securityContext: + allowPrivilegeEscalation: true + foo: bar + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext + value: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + foo: bar + readOnlyRootFilesystem: true + + - it: should have liveness and readiness probes configured + template: deployment.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].livenessProbe + value: + httpGet: + path: /.well-known/health + port: http-management + - equal: + path: spec.template.spec.containers[0].readinessProbe + value: + httpGet: + path: /.well-known/health + port: http-management + + - it: should have no resource limits by default + template: deployment.yaml + asserts: + - isNullOrEmpty: + path: spec.template.spec.containers[0].resources + + - it: should have custom resource limits when specified + template: deployment.yaml + set: + deployment.resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "300m" + memory: "150Mi" + asserts: + - equal: + path: spec.template.spec.containers[0].resources + value: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "300m" + memory: "150Mi" + + - it: can configure environment variables + template: deployment.yaml + set: + env: + ENV_VAR_CONFIGMAP: + configMapKeyRef: + name: "my-configmap" + key: "EXAMPLE_KEY" + ENV_VAR_SIMPLE: "value" + envFrom: + - mountPath: /some/file.yaml + name: some-file-volume + readOnly: true + subPath: file.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].env + value: + - name: ENV_VAR_CONFIGMAP + valueFrom: + configMapKeyRef: + key: EXAMPLE_KEY + name: my-configmap + - name: ENV_VAR_SIMPLE + value: value + - equal: + path: spec.template.spec.containers[0].envFrom + value: + - mountPath: /some/file.yaml + name: some-file-volume + readOnly: true + subPath: file.yaml + + - it: should configure one volume mount for heimdall configuration by default + template: deployment.yaml + release: + name: test-release + asserts: + - equal: + path: spec.template.spec.volumes + value: + - configMap: + name: test-release-heimdall-config + name: heimdall-config-volume + - equal: + path: spec.template.spec.containers[0].volumeMounts + value: + - name: heimdall-config-volume + mountPath: /etc/heimdall + readOnly: true + + - it: should configure further volume 
mounts if specified + template: deployment.yaml + set: + deployment.volumes: + - name: some-volume-name + configMap: + name: some-config-map + deployment.volumeMounts: + - name: some-volume-name + mountPath: /some/path + asserts: + - equal: + path: spec.template.spec.volumes + value: + - configMap: + name: RELEASE-NAME-heimdall-config + name: heimdall-config-volume + - configMap: + name: some-config-map + name: some-volume-name + - equal: + path: spec.template.spec.containers[0].volumeMounts + value: + - name: heimdall-config-volume + mountPath: /etc/heimdall + readOnly: true + - name: some-volume-name + mountPath: /some/path + + - it: should start heimdall in decision mode by default + template: deployment.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].args + value: + - serve + - decision + + - it: should allow starting heimdall in proxy mode + template: deployment.yaml + set: + operationMode: proxy + asserts: + - equal: + path: spec.template.spec.containers[0].args + value: + - serve + - proxy + + - it: should allow specifying additional arguments while starting heimdall + template: deployment.yaml + set: + extraArgs: + - foo + - bar + asserts: + - equal: + path: spec.template.spec.containers[0].args + value: + - serve + - decision + - foo + - bar + + - it: should expose management and decision service ports by default + template: deployment.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].ports + value: + - containerPort: 4456 + name: http-decision + protocol: TCP + - containerPort: 4457 + name: http-management + protocol: TCP + + - it: should expose management and proxy service ports if operated in proxy mode + template: deployment.yaml + set: + operationMode: proxy + asserts: + - equal: + path: spec.template.spec.containers[0].ports + value: + - containerPort: 4455 + name: http-proxy + protocol: TCP + - containerPort: 4457 + name: http-management + protocol: TCP + + - it: should expose metrics port in addition to the standard ports if configured + template: deployment.yaml + set: + env: + OTEL_METRICS_EXPORTER: "prometheus" + asserts: + - equal: + path: spec.template.spec.containers[0].ports + value: + - containerPort: 4456 + name: http-decision + protocol: TCP + - containerPort: 4457 + name: http-management + protocol: TCP + - containerPort: 9464 + name: http-metrics + protocol: TCP + + - it: metrics port value can be configured + template: deployment.yaml + set: + env: + OTEL_METRICS_EXPORTER: "prometheus" + OTEL_EXPORTER_PROMETHEUS_PORT: 9999 + asserts: + - equal: + path: spec.template.spec.containers[0].ports + value: + - containerPort: 4456 + name: http-decision + protocol: TCP + - containerPort: 4457 + name: http-management + protocol: TCP + - containerPort: 9999 + name: http-metrics + protocol: TCP + + - it: should expose profiling port in addition to the standard ports if configured + template: deployment.yaml + set: + profiling.enabled: true + asserts: + - equal: + path: spec.template.spec.containers[0].ports + value: + - containerPort: 4456 + name: http-decision + protocol: TCP + - containerPort: 4457 + name: http-management + protocol: TCP + - containerPort: 10251 + name: http-profiling + protocol: TCP + + - it: profiling port value can be configured + template: deployment.yaml + set: + profiling.enabled: true + profiling.port: 8888 + asserts: + - equal: + path: spec.template.spec.containers[0].ports + value: + - containerPort: 4456 + name: http-decision + protocol: TCP + - containerPort: 4457 + name: http-management + protocol: TCP + 
- containerPort: 8888 + name: http-profiling + protocol: TCP + + - it: should expose admission controller web hook port if configured + template: deployment.yaml + set: + providers.kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: spec.template.spec.containers[0].ports + value: + - containerPort: 4456 + name: http-decision + protocol: TCP + - containerPort: 4457 + name: http-management + protocol: TCP + - containerPort: 4458 + name: https-webhook + protocol: TCP + + - it: should configure all possible ports if requested + template: deployment.yaml + set: + profiling.enabled: true + env: + OTEL_METRICS_EXPORTER: "prometheus" + providers.kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: spec.template.spec.containers[0].ports + value: + - containerPort: 4456 + name: http-decision + protocol: TCP + - containerPort: 4457 + name: http-management + protocol: TCP + - containerPort: 9464 + name: http-metrics + protocol: TCP + - containerPort: 10251 + name: http-profiling + protocol: TCP + - containerPort: 4458 + name: https-webhook + protocol: TCP \ No newline at end of file diff --git a/charts/heimdall/tests/deployment_test.yaml b/charts/heimdall/tests/deployment_test.yaml new file mode 100644 index 000000000..0235f8483 --- /dev/null +++ b/charts/heimdall/tests/deployment_test.yaml @@ -0,0 +1,417 @@ +suite: test suite for deployment configuration +templates: + - deployment.yaml + - configmap.yaml +tests: + - it: should be configured by default + template: deployment.yaml + asserts: + - isKind: + of: Deployment + - isAPIVersion: + of: apps/v1 + + - it: name should be set with default name + template: deployment.yaml + asserts: + - equal: + path: metadata.name + value: RELEASE-NAME-heimdall + + - it: name should be set with overwritten name + template: deployment.yaml + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.name + value: test-release-foo + + - it: namespace should be set + template: deployment.yaml + release: + namespace: test-namespace + asserts: + - equal: + path: metadata.namespace + value: test-namespace + + - it: should set default labels with default values + template: deployment.yaml + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: latest + - matchRegex: + path: metadata.labels["helm.sh/chart"] + pattern: heimdall-* + + - it: should set custom labels in addition to default ones if provided + template: deployment.yaml + set: + deployment.labels: + foo: bar + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: latest + foo: bar + - matchRegex: + path: metadata.labels["helm.sh/chart"] + pattern: heimdall-* + + - it: should set default labels with overwrites + template: deployment.yaml + chart: + appVersion: 1.0.0 + version: 2.0.0 + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.labels + value: + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: foo + app.kubernetes.io/version: 1.0.0 + helm.sh/chart: heimdall-2.0.0 + + - it: should set checksum/config annotation by default + template: deployment.yaml + asserts: + - equal: + path: metadata.annotations + value: + 
checksum/config: fd1d54e159488247d1aa0acf510afc7c24e8fa37e76663533a1fce9bc53b81a9 + + - it: should set custom annotations if provided + template: deployment.yaml + set: + deployment.annotations: + foo: bar + bar: foo + asserts: + - equal: + path: metadata.annotations + value: + foo: bar + checksum/config: fd1d54e159488247d1aa0acf510afc7c24e8fa37e76663533a1fce9bc53b81a9 + bar: foo + + - it: pod label selector is configured with the required labels and values + template: deployment.yaml + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: spec.selector.matchLabels + value: + app.kubernetes.io/instance: test-release + app.kubernetes.io/name: foo + - isSubset: + path: spec.template.metadata.labels + content: + app.kubernetes.io/instance: test-release + app.kubernetes.io/name: foo + + - it: pod template labels should match deployment labels + template: deployment.yaml + release: + name: test-release + chart: + appVersion: 1.0.0 + version: 2.0.0 + set: + deployment.labels: + foo: bar + asserts: + - equal: + path: spec.template.metadata.labels + value: + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: 1.0.0 + foo: bar + helm.sh/chart: heimdall-2.0.0 + - equal: + path: metadata.labels + value: + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: 1.0.0 + foo: bar + helm.sh/chart: heimdall-2.0.0 + + - it: should configure 2 replicas if autoscaling is disabled + template: deployment.yaml + set: + deployment.autoscaling.enabled: false + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should not configure replicas if autoscaling is enabled (default) + template: deployment.yaml + set: + deployment.autoscaling.enabled: true + asserts: + - notExists: + path: spec.replicas + + - it: template metadata annotations should not be set by default + template: deployment.yaml + asserts: + - isNullOrEmpty: + path: spec.template.metadata.annotations + + - it: should set prometheus scraping annotations on template metadata if metrics exporter is set to prometheus + template: deployment.yaml + set: + env: + OTEL_METRICS_EXPORTER: prometheus + asserts: + - equal: + path: spec.template.metadata.annotations + value: + prometheus.io/scrape: "true" + prometheus.io/path: "/metrics" + prometheus.io/port: "9464" + + - it: prometheus exporter port can be customized + template: deployment.yaml + set: + env: + OTEL_METRICS_EXPORTER: prometheus + OTEL_EXPORTER_PROMETHEUS_PORT: 8888 + asserts: + - equal: + path: spec.template.metadata.annotations + value: + prometheus.io/scrape: "true" + prometheus.io/path: "/metrics" + prometheus.io/port: "8888" + + - it: phlare annotations should be set if profiling is enabled + template: deployment.yaml + set: + profiling.enabled: true + asserts: + - equal: + path: spec.template.metadata.annotations + value: + phlare.grafana.com/scrape: "true" + phlare.grafana.com/port: "10251" + + - it: port for phlare scraping can be customized + template: deployment.yaml + set: + profiling.enabled: true + profiling.port: 8888 + asserts: + - equal: + path: spec.template.metadata.annotations + value: + phlare.grafana.com/scrape: "true" + phlare.grafana.com/port: "8888" + + - it: should set custom pod annotations if provided + template: deployment.yaml + set: + deployment.pod.annotations: + foo: bar + asserts: + - equal: + path: spec.template.metadata.annotations + value: + foo: 
bar + + - it: profiling, prometheus and custom pod annotations can be used together + template: deployment.yaml + set: + deployment.pod.annotations: + foo: bar + profiling.enabled: true + profiling.port: 8888 + env: + OTEL_METRICS_EXPORTER: prometheus + OTEL_EXPORTER_PROMETHEUS_PORT: 9999 + asserts: + - equal: + path: spec.template.metadata.annotations + value: + foo: bar + phlare.grafana.com/port: "8888" + phlare.grafana.com/scrape: "true" + prometheus.io/path: /metrics + prometheus.io/port: "9999" + prometheus.io/scrape: "true" + + - it: imagePullSecrets should be empty by default + template: deployment.yaml + asserts: + - isNullOrEmpty: + path: spec.template.spec.imagePullSecrets + + - it: should allow imagePullSecrets configuration + template: deployment.yaml + set: + image.pullSecrets: + - name: foobar + asserts: + - equal: + path: spec.template.spec.imagePullSecrets + value: + - name: foobar + + - it: should automount service account token + template: deployment.yaml + asserts: + - equal: + path: spec.template.spec.automountServiceAccountToken + value: true + + - it: should set service account name + template: deployment.yaml + release: + name: test-release + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: test-release-heimdall + + - it: should configure pod security context by default + template: deployment.yaml + asserts: + - equal: + path: spec.template.spec.securityContext + value: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + + - it: should allow pod security context configuration and merge the provided configuration with the default one + template: deployment.yaml + set: + deployment.pod.securityContext: + runAsNonRoot: false + runAsUser: 1 + foo: bar + asserts: + - equal: + path: spec.template.spec.securityContext + value: + foo: bar + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: false + runAsUser: 1 + + - it: should configure one volume for heimdall configuration by default + template: deployment.yaml + release: + name: test-release + asserts: + - equal: + path: spec.template.spec.volumes + value: + - configMap: + name: test-release-heimdall-config + name: heimdall-config-volume + + - it: should reference further volumes if configured + template: deployment.yaml + set: + deployment.volumes: + - name: some-volume-name + configMap: + name: some-config-map + asserts: + - equal: + path: spec.template.spec.volumes + value: + - configMap: + name: RELEASE-NAME-heimdall-config + name: heimdall-config-volume + - configMap: + name: some-config-map + name: some-volume-name + + - it: should have no node selectors configured by default + template: deployment.yaml + asserts: + - isNullOrEmpty: + path: spec.template.spec.nodeSelector + + - it: should set node selectors if specified + template: deployment.yaml + set: + deployment.nodeSelector: + foo: bar + asserts: + - equal: + path: spec.template.spec.nodeSelector + value: + foo: bar + + - it: should have no affinity configured by default + template: deployment.yaml + asserts: + - isNullOrEmpty: + path: spec.template.spec.affinity + + - it: should set affinity if specified + template: deployment.yaml + set: + deployment.affinity: + foo: bar + asserts: + - equal: + path: spec.template.spec.affinity + value: + foo: bar + + - it: should have no tolerations configured by default + template: deployment.yaml + asserts: + - isNullOrEmpty: + path: spec.template.spec.tolerations + + - it: should set tolerations if specified + template: deployment.yaml + set: + deployment.tolerations: 
+ - foo: bar + asserts: + - equal: + path: spec.template.spec.tolerations + value: + - foo: bar + + - it: should have one container with name set the name of the chart (heimdall) + template: deployment.yaml + asserts: + - lengthEqual: + path: spec.template.spec.containers + count: 1 + - equal: + path: spec.template.spec.containers[0].name + value: heimdall \ No newline at end of file diff --git a/charts/heimdall/tests/hpa_test.yaml b/charts/heimdall/tests/hpa_test.yaml new file mode 100644 index 000000000..81b256aab --- /dev/null +++ b/charts/heimdall/tests/hpa_test.yaml @@ -0,0 +1,192 @@ +suite: test suite for horizontal pod autoscaler +templates: + - hpa.yaml +tests: + - it: should be configured by default + asserts: + - isKind: + of: HorizontalPodAutoscaler + - isAPIVersion: + of: autoscaling/v2 + + - it: can be disabled + set: + deployment.autoscaling.enabled: false + asserts: + - hasDocuments: + count: 0 + + - it: name should be set with default name + asserts: + - equal: + path: metadata.name + value: RELEASE-NAME-heimdall + + - it: name should be set with overwritten name + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.name + value: test-release-foo + + - it: namespace should be set + release: + namespace: test-namespace + asserts: + - equal: + path: metadata.namespace + value: test-namespace + + - it: should set default labels with default values + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: latest + - matchRegex: + path: metadata.labels["helm.sh/chart"] + pattern: heimdall-* + + - it: should set default labels with overwrites + chart: + appVersion: 1.0.0 + version: 2.0.0 + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.labels + value: + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: foo + app.kubernetes.io/version: 1.0.0 + helm.sh/chart: heimdall-2.0.0 + + - it: should have no annotations + asserts: + - notExists: + path: metadata.annotations + + - it: should reference expected deployment + release: + name: test-release + asserts: + - equal: + path: spec.scaleTargetRef + value: + apiVersion: apps/v1 + kind: Deployment + name: test-release-heimdall + + - it: should set min and max replicas by default + asserts: + - equal: + path: spec.minReplicas + value: 2 + - equal: + path: spec.maxReplicas + value: 10 + + - it: min and max replicas can be configured + set: + deployment.autoscaling.minReplicas: 1 + deployment.autoscaling.maxReplicas: 4 + asserts: + - equal: + path: spec.minReplicas + value: 1 + - equal: + path: spec.maxReplicas + value: 4 + + - it: should configure cpu and memory metrics by default with default average utilization + asserts: + - equal: + path: spec.metrics + value: + - resource: + name: cpu + target: + averageUtilization: 80 + type: Utilization + type: Resource + - resource: + name: memory + target: + averageUtilization: 80 + type: Utilization + type: Resource + + - it: should configure cpu and memory metrics with specified average utilization + set: + deployment: + autoscaling: + targetCPUUtilizationPercentage: 40 + targetMemoryUtilizationPercentage: 50 + asserts: + - equal: + path: spec.metrics + value: + - resource: + name: cpu + target: + averageUtilization: 40 + type: Utilization + type: Resource + - resource: + name: memory + target: + 
averageUtilization: 50 + type: Utilization + type: Resource + + - it: cpu metrics can be disabled + set: + deployment: + autoscaling: + targetCPUUtilizationPercentage: 0 + asserts: + - equal: + path: spec.metrics + value: + - resource: + name: memory + target: + averageUtilization: 80 + type: Utilization + type: Resource + + - it: memory metrics can be disabled + set: + deployment: + autoscaling: + targetMemoryUtilizationPercentage: 0 + asserts: + - equal: + path: spec.metrics + value: + - resource: + name: cpu + target: + averageUtilization: 80 + type: Utilization + type: Resource + + - it: should error when both, cpu and memory metrics are disabled + set: + deployment: + autoscaling: + targetCPUUtilizationPercentage: 0 + targetMemoryUtilizationPercentage: 0 + asserts: + - failedTemplate: + errorMessage: autoscaling is enabled, but usage of both, the cpu and the memory metrics is disabled \ No newline at end of file diff --git a/charts/heimdall/tests/service_test.yaml b/charts/heimdall/tests/service_test.yaml new file mode 100644 index 000000000..8b2884762 --- /dev/null +++ b/charts/heimdall/tests/service_test.yaml @@ -0,0 +1,266 @@ +suite: test suite for service configuration +templates: + - service.yaml +tests: + - it: should be configured by default + asserts: + - isKind: + of: Service + - isAPIVersion: + of: v1 + + - it: name should be set with default name + asserts: + - equal: + path: metadata.name + value: RELEASE-NAME-heimdall + + - it: name should be set with overwritten name + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.name + value: test-release-foo + + - it: namespace should be set + release: + namespace: test-namespace + asserts: + - equal: + path: metadata.namespace + value: test-namespace + + - it: should set default labels with default values + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: latest + - matchRegex: + path: metadata.labels["helm.sh/chart"] + pattern: heimdall-* + + - it: should set custom labels in addition to default ones if provided + set: + service.labels: + foo: bar + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: latest + foo: bar + - matchRegex: + path: metadata.labels["helm.sh/chart"] + pattern: heimdall-* + + - it: should set default labels with overwrites + chart: + appVersion: 1.0.0 + version: 2.0.0 + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.labels + value: + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: foo + app.kubernetes.io/version: 1.0.0 + helm.sh/chart: heimdall-2.0.0 + + - it: should not set any annotations by default + asserts: + - isNullOrEmpty: + path: metadata.annotations + + - it: should not set custom annotations if provided + set: + service.annotations: + foo: bar + asserts: + - equal: + path: metadata.annotations + value: + foo: bar + + - it: should set service type to ClusterIP + asserts: + - equal: + path: spec.type + value: ClusterIP + + - it: should configure selector with the required labels and values + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: spec.selector + value: + app.kubernetes.io/instance: test-release + 
app.kubernetes.io/name: foo + + - it: should expose management and decision ports by default + asserts: + - equal: + path: spec.ports + value: + - name: management + port: 4457 + protocol: TCP + targetPort: http-management + - name: decision + port: 4456 + protocol: TCP + targetPort: http-decision + + - it: should expose management and proxy ports if operated in proxy mode + set: + operationMode: proxy + asserts: + - equal: + path: spec.ports + value: + - name: management + port: 4457 + protocol: TCP + targetPort: http-management + - name: proxy + port: 4455 + protocol: TCP + targetPort: http-proxy + + - it: should not expose metrics port in addition to the standard ports although it is configured + set: + env: + OTEL_METRICS_EXPORTER: "prometheus" + asserts: + - equal: + path: spec.ports + value: + - name: management + port: 4457 + protocol: TCP + targetPort: http-management + - name: decision + port: 4456 + protocol: TCP + targetPort: http-decision + + - it: should not expose profiling port in addition to the standard ports although it is configured + set: + profiling.enabled: true + asserts: + - equal: + path: spec.ports + value: + - name: management + port: 4457 + protocol: TCP + targetPort: http-management + - name: decision + port: 4456 + protocol: TCP + targetPort: http-decision + + - it: should expose admission controller web hook port if configured + set: + providers.kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: spec.ports + value: + - name: management + port: 4457 + protocol: TCP + targetPort: http-management + - name: decision + port: 4456 + protocol: TCP + targetPort: http-decision + - name: admission-controller + port: 4458 + protocol: TCP + targetPort: https-webhook + + - it: should use provided port and name values for each exposed service port in decision mode + set: + providers.kubernetes: + tls: + key_store: + path: /path/to/file.pem + service.management: + port: 1111 + name: man + service.decision: + port: 2222 + name: dec + service.admissionController: + port: 3333 + name: adm + asserts: + - equal: + path: spec.ports + value: + - name: man + port: 1111 + protocol: TCP + targetPort: http-management + - name: dec + port: 2222 + protocol: TCP + targetPort: http-decision + - name: adm + port: 3333 + protocol: TCP + targetPort: https-webhook + + - it: should use provided port and name values for each exposed service port in proxy mode + set: + operationMode: proxy + providers.kubernetes: + tls: + key_store: + path: /path/to/file.pem + service.management: + port: 1111 + name: man + service.proxy: + port: 2222 + name: prox + service.admissionController: + port: 3333 + name: adm + asserts: + - equal: + path: spec.ports + value: + - name: man + port: 1111 + protocol: TCP + targetPort: http-management + - name: prox + port: 2222 + protocol: TCP + targetPort: http-proxy + - name: adm + port: 3333 + protocol: TCP + targetPort: https-webhook diff --git a/charts/heimdall/tests/serviceaccount_test.yaml b/charts/heimdall/tests/serviceaccount_test.yaml new file mode 100644 index 000000000..f910f3775 --- /dev/null +++ b/charts/heimdall/tests/serviceaccount_test.yaml @@ -0,0 +1,171 @@ +suite: test suite for service account configuration +templates: + - serviceaccount.yaml +tests: + - it: should create service account config and related objects + asserts: + - hasDocuments: + count: 4 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + documentIndex: 0 + - containsDocument: + apiVersion: v1 + kind: Secret + documentIndex: 1 + - 
containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + documentIndex: 2 + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + documentIndex: 3 + + - it: should set a default name for the service account + asserts: + - equal: + path: metadata.name + value: RELEASE-NAME-heimdall + documentIndex: 0 + + - it: name for the service account should be set with overwritten name + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.name + value: test-release-foo + documentIndex: 0 + + - it: should set a default name for the account toke secret + asserts: + - equal: + path: metadata.name + value: RELEASE-NAME-heimdall-account-token + documentIndex: 1 + + - it: name for the account toke secret should be set with overwritten name + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.name + value: test-release-foo-account-token + documentIndex: 1 + + - it: should set a default name for the cluster role + asserts: + - equal: + path: metadata.name + value: RELEASE-NAME-heimdall-ruleset-accessor + documentIndex: 2 + + - it: name for the cluster role should be set with overwritten name + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.name + value: test-release-foo-ruleset-accessor + documentIndex: 2 + + - it: should set a default name for the cluster role binding + asserts: + - equal: + path: metadata.name + value: RELEASE-NAME-heimdall-ruleset-accessor + documentIndex: 3 + + - it: name for the cluster role binding should be set with overwritten name + release: + name: test-release + set: + nameOverride: foo + asserts: + - equal: + path: metadata.name + value: test-release-foo-ruleset-accessor + documentIndex: 3 + + - it: should set namespace for all configuration objects + release: + namespace: test-namespace + asserts: + - equal: + path: metadata.namespace + value: test-namespace + + - it: should set default labels with default values for all configuration objects + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: latest + - matchRegex: + path: metadata.labels["helm.sh/chart"] + pattern: heimdall-* + + - it: should not automount the service account token + asserts: + - equal: + path: automountServiceAccountToken + value: false + documentIndex: 0 + + - it: secret should be of type service account token and be annotated accordingly + release: + name: test-release + asserts: + - equal: + path: type + value: kubernetes.io/service-account-token + documentIndex: 1 + - equal: + path: metadata.annotations + value: + kubernetes.io/service-account.name: test-release-heimdall + documentIndex: 1 + + - it: cluster role should have expected rules + asserts: + - equal: + path: rules + value: + - apiGroups: [ "heimdall.dadrus.github.com" ] + resources: [ "rulesets", "rulesets/status" ] + verbs: [ "get", "watch", "list" ] + - apiGroups: [ "heimdall.dadrus.github.com" ] + resources: [ "rulesets/status" ] + verbs: [ "patch", "update" ] + documentIndex: 2 + + - it: cluster role binding should reference the expected service account and cluster role + release: + name: test-release + namespace: test + asserts: + - equal: + path: subjects + value: + - kind: ServiceAccount + name: test-release-heimdall + namespace: test + documentIndex: 3 + - equal: + path: roleRef + value: + 
kind: ClusterRole + name: test-release-heimdall-ruleset-accessor + apiGroup: rbac.authorization.k8s.io + documentIndex: 3 \ No newline at end of file diff --git a/charts/heimdall/tests/validating_webhook_test.yaml b/charts/heimdall/tests/validating_webhook_test.yaml new file mode 100644 index 000000000..a3fcb2352 --- /dev/null +++ b/charts/heimdall/tests/validating_webhook_test.yaml @@ -0,0 +1,386 @@ +suite: test suite for validating webhook configuration +templates: + - validating_webhook.yaml +tests: + - it: should not be configured by default + asserts: + - hasDocuments: + count: 0 + + - it: should not be configured by if there is no tls configuration for kubernetes provider + set: + providers: + kubernetes: + auth_class: foo + asserts: + - hasDocuments: + count: 0 + + - it: should be configured if tls settings are provided for the kubernetes provider + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - isKind: + of: ValidatingWebhookConfiguration + - isAPIVersion: + of: admissionregistration.k8s.io/v1 + + - it: name should be set with default name + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: metadata.name + value: RELEASE-NAME-heimdall-webhook + + - it: name should be set with overwritten name + release: + name: test-release + set: + nameOverride: foo + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: metadata.name + value: test-release-foo-webhook + + - it: namespace should be set + release: + namespace: test-namespace + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: metadata.namespace + value: test-namespace + + - it: should set default labels with default values + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: latest + - matchRegex: + path: metadata.labels["helm.sh/chart"] + pattern: heimdall-* + + - it: should set custom labels in addition to default ones if provided + set: + admissionController.labels: + foo: bar + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: heimdall + app.kubernetes.io/version: latest + foo: bar + - matchRegex: + path: metadata.labels["helm.sh/chart"] + pattern: heimdall-* + + - it: should set default labels with overwrites + chart: + appVersion: 1.0.0 + version: 2.0.0 + release: + name: test-release + set: + nameOverride: foo + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: metadata.labels + value: + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: foo + app.kubernetes.io/version: 1.0.0 + helm.sh/chart: heimdall-2.0.0 + + - it: should not set annotations by default + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - isNullOrEmpty: + path: metadata.annotations + + - it: should set custom annotations if provided + set: + admissionController.annotations: + foo: bar + bar: foo + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: 
metadata.annotations + value: + foo: bar + bar: foo + + - it: should configure one webhook + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - lengthEqual: + path: webhooks + count: 1 + + - it: should set expected webhook name + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: webhooks[0].name + value: admission-controller.heimdall.dadrus.github.com + + - it: should set admissionReviewVersions + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: webhooks[0].admissionReviewVersions + value: [ "v1" ] + + - it: should set side effects to none + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: webhooks[0].sideEffects + value: None + + - it: should configure webhook timeout to 5 seconds by default + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: webhooks[0].timeoutSeconds + value: 5 + + - it: webhook timeout should be configurable + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + admissionController.timeoutSeconds: 10 + asserts: + - equal: + path: webhooks[0].timeoutSeconds + value: 10 + + - it: should not configure namespace selector by default + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - notExists: + path: webhooks[0].namespaceSelector + + - it: namespace selector should be configurable + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + admissionController.namespaceSelector: + matchExpressions: + - key: runlevel, + operator: NotIn, + values: [ 0, 1 ] + asserts: + - equal: + path: webhooks[0].namespaceSelector + value: + matchExpressions: + - key: runlevel, + operator: NotIn, + values: + - 0 + - 1 + + - it: should configure expected rules + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: webhooks[0].rules + value: + - apiGroups: + - heimdall.dadrus.github.com + apiVersions: + - v1alpha4 + operations: + - CREATE + - UPDATE + resources: + - rulesets + scope: Namespaced + + - it: should configure default match conditions if no auth_class is specified + set: + providers: + kubernetes: + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: webhooks[0].matchConditions + value: + - expression: object.spec.authClassName == "default" + name: auth-class-filter + + - it: should configure match conditions based on specified auth_class + set: + providers: + kubernetes: + auth_class: foo + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: webhooks[0].matchConditions + value: + - expression: object.spec.authClassName == "foo" + name: auth-class-filter + + - it: should have client config configured + release: + name: foo + namespace: bar + set: + providers: + kubernetes: + auth_class: foo + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: webhooks[0].clientConfig + value: + service: + name: foo-heimdall + namespace: bar + path: /validate-ruleset + port: 4458 + + - it: should allow configuration of the ca bundle for the client config + release: + name: foo + namespace: bar + set: + admissionController.caBundle: foobar + providers: + kubernetes: + auth_class: foo + tls: + key_store: + path: /path/to/file.pem + asserts: + - equal: + path: webhooks[0].clientConfig + value: + caBundle: foobar + 
service:
+              name: foo-heimdall
+              namespace: bar
+              path: /validate-ruleset
+              port: 4458
+
+  - it: should use the port configured for the admission controller service
+    set:
+      admissionController.caBundle: foobar
+      service.admissionController.port: 9999
+      providers:
+        kubernetes:
+          auth_class: foo
+          tls:
+            key_store:
+              path: /path/to/file.pem
+    asserts:
+      - equal:
+          path: webhooks[0].clientConfig
+          value:
+            caBundle: foobar
+            service:
+              name: RELEASE-NAME-heimdall
+              namespace: NAMESPACE
+              path: /validate-ruleset
+              port: 9999
diff --git a/charts/heimdall/tests/validation_test.yaml b/charts/heimdall/tests/validation_test.yaml
new file mode 100644
index 000000000..7ed44a461
--- /dev/null
+++ b/charts/heimdall/tests/validation_test.yaml
@@ -0,0 +1,33 @@
+suite: test suite for validations
+templates:
+  - validations/operation_mode.yaml
+tests:
+  - it: should fail if operation mode is set to an unexpected value
+    set:
+      operationMode: foo
+    asserts:
+      - failedTemplate:
+          errorMessage: A valid operationMode is required! Call helm with --set operationMode=
+
+  - it: should fail if operation mode is empty
+    set:
+      operationMode: ""
+    asserts:
+      - failedTemplate:
+          errorMessage: operationMode is not set! Call helm with --set operationMode=
+
+  - it: should not fail by default
+    asserts:
+      - notFailedTemplate: { }
+
+  - it: should not fail if operation mode is set to decision
+    set:
+      operationMode: decision
+    asserts:
+      - notFailedTemplate: { }
+
+  - it: should not fail if operation mode is set to proxy
+    set:
+      operationMode: proxy
+    asserts:
+      - notFailedTemplate: { }
diff --git a/charts/heimdall/values.yaml b/charts/heimdall/values.yaml
index b01482777..7b5674b48 100644
--- a/charts/heimdall/values.yaml
+++ b/charts/heimdall/values.yaml
@@ -20,18 +20,6 @@
# or in each pod before the actual business service.
operationMode: decision # decision or proxy

-# If set to true, a demo deployment with exemplary rules, as these can be found in the documentation,
-# will be done
-demo:
-  enabled: false
-
-  # the demo setup assumes nginx ingress controller and will use the
-  # following values to configure the ingress rule
-  # change the values to what is required for your ingress controller
-  forwardAuthMiddlewareAnnotation: nginx.ingress.kubernetes.io/auth-url
-  forwardAuthMiddlewareResponseAnnotation: nginx.ingress.kubernetes.io/auth-response-headers
-  forwardAuthMiddlewareRequestUri: $request_uri
-
# Default values for heimdall.
image: repository: ghcr.io/dadrus/heimdall diff --git a/cmd/validate/config_test.go b/cmd/validate/config_test.go index 36d55b572..7753ff68e 100644 --- a/cmd/validate/config_test.go +++ b/cmd/validate/config_test.go @@ -64,6 +64,8 @@ func TestValidateConfig(t *testing.T) { } func TestRunValidateConfigCommand(t *testing.T) { + t.Parallel() + for _, tc := range []struct { uc string confFile string diff --git a/cmd/validate/ruleset.go b/cmd/validate/ruleset.go index 5fe271a5d..4efd7d949 100644 --- a/cmd/validate/ruleset.go +++ b/cmd/validate/ruleset.go @@ -18,18 +18,26 @@ package validate import ( "context" + "errors" "os" + "github.com/go-jose/go-jose/v4" "github.com/rs/zerolog" "github.com/spf13/cobra" "github.com/dadrus/heimdall/internal/config" + "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/keyholder" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/rules" - "github.com/dadrus/heimdall/internal/rules/event" "github.com/dadrus/heimdall/internal/rules/mechanisms" "github.com/dadrus/heimdall/internal/rules/provider/filesystem" + "github.com/dadrus/heimdall/internal/rules/rule" + "github.com/dadrus/heimdall/internal/watcher" ) +var errFunctionNotSupported = errors.New("function not supported") + // NewValidateRulesCommand represents the "validate rules" command. func NewValidateRulesCommand() *cobra.Command { cmd := &cobra.Command{ @@ -55,8 +63,6 @@ func NewValidateRulesCommand() *cobra.Command { } func validateRuleSet(cmd *cobra.Command, args []string) error { - const queueSize = 50 - envPrefix, _ := cmd.Flags().GetString("env-config-prefix") logger := zerolog.Nop() @@ -80,7 +86,13 @@ func validateRuleSet(cmd *cobra.Command, args []string) error { conf.Providers.FileSystem = map[string]any{"src": args[0]} - mFactory, err := mechanisms.NewFactory(conf, logger) + mFactory, err := mechanisms.NewMechanismFactory( + conf, + logger, + &watcher.NoopWatcher{}, + &noopRegistry{}, + &noopCertificateObserver{}, + ) if err != nil { return err } @@ -90,14 +102,29 @@ func validateRuleSet(cmd *cobra.Command, args []string) error { return err } - queue := make(event.RuleSetChangedEventQueue, queueSize) - - defer close(queue) - - provider, err := filesystem.NewProvider(conf, rules.NewRuleSetProcessor(queue, rFactory, logger), logger) + provider, err := filesystem.NewProvider(conf, rules.NewRuleSetProcessor(&noopRepository{}, rFactory), logger) if err != nil { return err } return provider.Start(context.Background()) } + +type noopRepository struct{} + +func (*noopRepository) FindRule(_ heimdall.Context) (rule.Rule, error) { + return nil, errFunctionNotSupported +} +func (*noopRepository) AddRuleSet(_ string, _ []rule.Rule) error { return nil } +func (*noopRepository) UpdateRuleSet(_ string, _ []rule.Rule) error { return errFunctionNotSupported } +func (*noopRepository) DeleteRuleSet(_ string) error { return errFunctionNotSupported } + +type noopRegistry struct{} + +func (*noopRegistry) AddKeyHolder(_ keyholder.KeyHolder) {} +func (*noopRegistry) Keys() []jose.JSONWebKey { return nil } + +type noopCertificateObserver struct{} + +func (*noopCertificateObserver) Add(_ certificate.Supplier) {} +func (*noopCertificateObserver) Start() error { return errFunctionNotSupported } diff --git a/cmd/validate/ruleset_test.go b/cmd/validate/ruleset_test.go index 7d8b29f90..e5ca3d324 100644 --- a/cmd/validate/ruleset_test.go +++ 
b/cmd/validate/ruleset_test.go @@ -18,17 +18,48 @@ package validate import ( "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" "os" + "path/filepath" "testing" + "github.com/drone/envsubst/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/dadrus/heimdall/internal/x/pkix/pemx" + "github.com/dadrus/heimdall/internal/x/stringx" "github.com/dadrus/heimdall/internal/x/testsupport" ) func TestValidateRuleset(t *testing.T) { - t.Parallel() + privKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + pemBytes, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(privKey, pemx.WithHeader("X-Key-ID", "key")), + ) + require.NoError(t, err) + + testDir := t.TempDir() + pemFile := filepath.Join(testDir, "keystore.pem") + configFile := filepath.Join(testDir, "test-config.yaml") + + t.Setenv("TEST_KEYSTORE_FILE", pemFile) + + err = os.WriteFile(pemFile, pemBytes, 0o600) + require.NoError(t, err) + + raw, err := os.ReadFile("test_data/config.yaml") + require.NoError(t, err) + + content, err := envsubst.EvalEnv(stringx.ToString(raw)) + require.NoError(t, err) + + err = os.WriteFile(configFile, []byte(content), 0o600) + require.NoError(t, err) for _, tc := range []struct { uc string @@ -47,13 +78,13 @@ func TestValidateRuleset(t *testing.T) { }, { uc: "invalid rule set file", - confFile: "test_data/config.yaml", + confFile: configFile, rulesFile: "doesnotexist.yaml", expError: os.ErrNotExist, }, { uc: "everything is valid", - confFile: "test_data/config.yaml", + confFile: configFile, rulesFile: "test_data/valid-ruleset.yaml", }, } { @@ -82,6 +113,32 @@ func TestValidateRuleset(t *testing.T) { } func TestRunValidateRulesCommand(t *testing.T) { + privKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + pemBytes, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(privKey, pemx.WithHeader("X-Key-ID", "key")), + ) + require.NoError(t, err) + + testDir := t.TempDir() + pemFile := filepath.Join(testDir, "keystore.pem") + configFile := filepath.Join(testDir, "test-config.yaml") + + t.Setenv("TEST_KEYSTORE_FILE", pemFile) + + err = os.WriteFile(pemFile, pemBytes, 0o600) + require.NoError(t, err) + + raw, err := os.ReadFile("test_data/config.yaml") + require.NoError(t, err) + + content, err := envsubst.EvalEnv(stringx.ToString(raw)) + require.NoError(t, err) + + err = os.WriteFile(configFile, []byte(content), 0o600) + require.NoError(t, err) + for _, tc := range []struct { uc string confFile string @@ -95,20 +152,20 @@ func TestRunValidateRulesCommand(t *testing.T) { }, { uc: "everything is valid for decision mode usage", - confFile: "test_data/config.yaml", + confFile: configFile, rulesFile: "test_data/valid-ruleset.yaml", }, { uc: "invalid for proxy usage", proxyMode: true, - confFile: "test_data/config.yaml", + confFile: configFile, rulesFile: "test_data/invalid-ruleset-for-proxy-usage.yaml", - expError: "no forward_to", + expError: "requires forward_to", }, { uc: "everything is valid for proxy mode usage", proxyMode: true, - confFile: "test_data/config.yaml", + confFile: configFile, rulesFile: "test_data/valid-ruleset.yaml", }, } { diff --git a/cmd/validate/test_data/config.yaml b/cmd/validate/test_data/config.yaml index 424aae07d..3304a5fdf 100644 --- a/cmd/validate/test_data/config.yaml +++ b/cmd/validate/test_data/config.yaml @@ -145,6 +145,11 @@ mechanisms: - id: jwt type: jwt config: + signer: + name: heimdall + key_id: "key" + key_store: + 
path: "${TEST_KEYSTORE_FILE}" ttl: 5m claims: | {"user": {{ quote .Subject.ID }} } @@ -163,22 +168,20 @@ mechanisms: type: default - id: authenticate_with_kratos type: redirect - if: | - ((type(Error) == authentication_error && Error.Source == "kratos_session_authenticator") || - type(Error) == authorization_error) && - Request.Header("Accept").contains("*/*") config: to: http://127.0.0.1:4433/self-service/login/browser?return_to={{ .Request.URL | urlenc }} default_rule: - methods: - - GET - - POST + backtracking_enabled: false execute: - authenticator: anonymous_authenticator - finalizer: jwt on_error: - error_handler: authenticate_with_kratos + if: | + ((type(Error) == authentication_error && Error.Source == "kratos_session_authenticator") || + type(Error) == authorization_error) && + Request.Header("Accept").contains("*/*") providers: file_system: @@ -189,8 +192,8 @@ providers: watch_interval: 5m endpoints: - url: http://foo.bar/rules.yaml - rule_path_match_prefix: /foo - enable_http_cache: true + http_cache: + enabled: true - url: http://bar.foo/rules.yaml headers: bla: bla @@ -209,10 +212,8 @@ providers: buckets: - url: gs://my-bucket prefix: service1 - rule_path_match_prefix: /service1 - url: gs://my-bucket prefix: service2 - rule_path_match_prefix: /service2 - url: s3://my-bucket/my-rule-set kubernetes: diff --git a/cmd/validate/test_data/invalid-ruleset-for-proxy-usage.yaml b/cmd/validate/test_data/invalid-ruleset-for-proxy-usage.yaml index eac2bd642..c5d22614e 100644 --- a/cmd/validate/test_data/invalid-ruleset-for-proxy-usage.yaml +++ b/cmd/validate/test_data/invalid-ruleset-for-proxy-usage.yaml @@ -1,13 +1,15 @@ -version: "1alpha3" +version: "1alpha4" name: test-rule-set rules: - id: rule:foo match: - url: http://foo.bar/<**> - strategy: glob -# methods: # reuses default -# - GET -# - POST + routes: + - path: /** + scheme: http + hosts: + - type: glob + value: foo.bar + methods: [ GET, POST ] execute: - authenticator: unauthorized_authenticator - authenticator: jwt_authenticator1 diff --git a/cmd/validate/test_data/valid-ruleset.yaml b/cmd/validate/test_data/valid-ruleset.yaml index 6008de8d3..1f5a0f1d5 100644 --- a/cmd/validate/test_data/valid-ruleset.yaml +++ b/cmd/validate/test_data/valid-ruleset.yaml @@ -1,19 +1,24 @@ -version: "1alpha3" +version: "1alpha4" name: test-rule-set rules: - id: rule:foo match: - url: http://foo.bar/<**> - strategy: glob + routes: + - path: /** + backtracking_enabled: true + scheme: http + hosts: + - type: glob + value: foo.bar + methods: + - POST + - PUT forward_to: host: bar.foo rewrite: strip_path_prefix: /foo add_path_prefix: /baz strip_query_parameters: [boo] -# methods: # reuses default -# - GET -# - POST execute: - authenticator: unauthorized_authenticator - authenticator: jwt_authenticator1 diff --git a/docker/Dockerfile b/docker/Dockerfile index 765e74382..da73869c2 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -18,7 +18,6 @@ RUN useradd -l -M -U -s "/usr/sbin/nologin" -d "/nonexistent" -u 10001 ${USER} WORKDIR /build COPY . . 
-RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.10.0 RUN go mod download && go mod verify &&\ CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} go build -trimpath -ldflags="-buildid= -w -s -X github.com/dadrus/heimdall/version.Version=${VERSION}" diff --git a/docs/config.yaml b/docs/config.yaml index 3528bf806..c559a764d 100644 --- a/docs/config.yaml +++ b/docs/config.yaml @@ -9,7 +9,7 @@ enableGitInfo: true security: enableInlineShortcodes: false exec: - allow: ['asciidoctor'] + allow: ['asciidoctor', 'git'] osEnv: ['(?i)^(PATH|PATHEXT|APPDATA|TMP|TEMP|TERM|GEM_PATH)$'] funcs: diff --git a/docs/content/_index.adoc b/docs/content/_index.adoc index f667e4b11..14322f76c 100644 --- a/docs/content/_index.adoc +++ b/docs/content/_index.adoc @@ -15,7 +15,7 @@ Use declarative techniques you are already familiar with [source, yaml] ---- -apiVersion: heimdall.dadrus.github.com/v1alpha3 +apiVersion: heimdall.dadrus.github.com/v1alpha4 kind: RuleSet metadata: name: My awesome service @@ -23,7 +23,12 @@ spec: rules: - id: my_api_rule match: - url: http://127.0.0.1:9090/api/<**> + routes: + - path: /api/** + scheme: http + hosts: + - type: exact + value: 127.0.0.1:9090 execute: - authenticator: keycloak - authorizer: opa diff --git a/docs/content/docs/concepts/operating_modes.adoc b/docs/content/docs/concepts/operating_modes.adoc index 4b18da8d9..1af801798 100644 --- a/docs/content/docs/concepts/operating_modes.adoc +++ b/docs/content/docs/concepts/operating_modes.adoc @@ -76,9 +76,14 @@ And there is a rule, which allows anonymous requests and sets a header with subj ---- id: rule:my-service:anonymous-api-access match: - url: http://my-backend-service/my-service/api -methods: - - GET + routes: + - path: /my-service/api + scheme: http + hosts: + - type: exact + value: my-backend-service + methods: + - GET execute: - authenticator: anonymous-authn - finalizer: id-header @@ -144,11 +149,12 @@ And there is a rule, which allows anonymous requests and sets a header with subj ---- id: rule:my-service:anonymous-api-access match: - url: <**>/my-service/api + routes: + - path: /my-service/api + methods: + - GET forward_to: host: my-backend-service:8888 -methods: - - GET execute: - authenticator: anonymous-authn - finalizer: id-header diff --git a/docs/content/docs/concepts/rules.adoc b/docs/content/docs/concepts/rules.adoc index 4ee4752f8..48b6403d5 100644 --- a/docs/content/docs/concepts/rules.adoc +++ b/docs/content/docs/concepts/rules.adoc @@ -34,26 +34,32 @@ To minimize the memory footprint, heimdall instanciates all defined mechanisms o The diagram below sketches the logic executed by heimdall for each and every incoming request. -[mermaid, format=svg, width=70%] +[mermaid, format=svg] .... 
flowchart TD - req[Request] --> findRule{1: any\nrule\nmatching\nurl?} - findRule -->|yes| methodCheck{2: method\nallowed?} - findRule -->|no| err1[404 Not Found] - methodCheck -->|yes| regularPipeline[3: execute\nauthentication & authorization\npipeline] - methodCheck -->|no| err2[405 Method Not Allowed] + req[Request] --> findRule{1: any\nrule\nmatching\nrequest?} + findRule -->|no| err2[404 Not Found] + findRule -->|yes| regularPipeline[2: execute\nauthentication & authorization\npipeline] regularPipeline --> failed{failed?} failed -->|yes| errPipeline[execute error pipeline] - failed -->|no| success[4: forward request,\nrespectively respond\nto the API gateway] - errPipeline --> errResult[5: result of the\nused error handler] + failed -->|no| success[3: forward request,\nrespectively respond\nto the API gateway] + errPipeline --> errResult[4: result of the\nused error handler] .... -. *Any rule matching url?* - This is the first step executed by heimdall in which it tries to find a rule matching the request url. The information about the scheme, host, path and query is taken either from the URL itself, or if present and allowed, from the `X-Forwarded-Proto`, `X-Forwarded-Host`, or `X-Forwarded-Uri` headers of the incoming request. The request is denied if there is no matching rule, respectively no default rule. Otherwise, the rule specific pipeline is executed. When heimdall is evaluating the rules against the request url it takes the first matching one. -. *Method allowed?* - As soon as a rule matching the request is found (which might also be the default rule if specified and there was no regular rule matching the request), a check is done whether the used HTTP method is allowed or not. The information about the HTTP method is either taken from the request itself or, if present and allowed, from the `X-Forwarded-Method` header. -. *Execute authentication & authorization pipeline* - when the above steps succeed, the mechanisms defined in this pipeline are executed. +. *Any rule matching request?* - This is the first step executed by heimdall in which it tries to find a link:{{< relref "#_matching_of_rules" >}}[matching rule]. If there is no matching rule, heimdall either falls back to the default rule if available, or the request is denied. Otherwise, the rule specific authentication & authorization pipeline is executed. +. *Execute authentication & authorization pipeline* - when a rule is matched, the mechanisms defined in its authentication & authorization pipeline are executed. . *Forward request, respectively respond to the API gateway* - when the above steps succeed, heimdall, depending on the link:{{< relref "/docs/concepts/operating_modes.adoc" >}}[operating mode], responds with, respectively forwards whatever was defined in the pipeline (usually this is a set of HTTP headers). Otherwise . *Execute error pipeline* is executed if any of the mechanisms, defined in the authentiction & authorization pipeline fail. This again results in a response, this time however, based on the definition in the used error handler. +== Matching of Rules + +As written above, an link:{{< relref "/docs/rules/regular_rule.adoc" >}}[upstream specific rule] is only executed when it matches an incoming request. + +The actual matching happens via the requests URL path, which is guaranteed to happen with O(log(n)) time complexity and is based on the path expressions specified in the loaded rules. These expressions support usage of (named) wildcards to capture segments of the matched path. 
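+
+For illustration, a match definition relying on such a path expression could look like the sketch below. It only mirrors the route snippets shown elsewhere in this documentation; the scheme, host and methods are placeholders, not taken from an actual rule set:
+
+[source, yaml]
+----
+match:
+  routes:
+    # ** is a wildcard covering the remainder of the path
+    - path: /api/**
+  scheme: http
+  hosts:
+    - type: glob
+      value: "*.example.com"
+  methods: [ GET, POST ]
+----
+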
The implementation ensures, that more specific path expressions are matched first regardless of the placement of rules in a link:{{< relref "/docs/concepts/provider.adoc#_rule_sets" >}}[rule set]. + +Additional conditions, like the host, the HTTP method, or application of regular or glob expressions can also be taken into account, allowing different rules for the same path expressions. The information about the HTTP method, scheme, host, path and query is taken either from the request itself, or if present and allowed, from the `X-Forwarded-Proto`, `X-Forwarded-Host`, `X-Forwarded-Uri` and `X-Forwarded-Method` headers of the incoming request. + +There is also an option to have backtracking to a rule with a less specific path expression, if the actual specific path is matched, but the above said additional conditions are not satisfied. == Default Rule & Inheritance @@ -71,7 +77,6 @@ Imagine, the concept of a rule is e.g. an interface written in Java defining the [source, java] ---- public interface Rule { - public boolean checkMethods(methods []String) public void executeAuthenticationStage(req Request) public void executeAuthorizationStage(req Request) public void executeFinalizationStage(req Request) @@ -84,8 +89,8 @@ And the logic described in link:{{< relref "#_execution_of_rules" >}}[Execution [source, java] ---- Rule rule = findMatchingRule(req) -if (!rule.checkMethods(req)) { - throw new MethodNotAllowedError() +if (rule == null) { + throw new NotFoundError() } try { @@ -110,7 +115,6 @@ Since there is some default behaviour in place, like error handling, if the erro [source, java] ---- public abstract class BaseRule implements Rule { - public abstract boolean checkMethods(methods []String) public abstract void executeAuthenticationStage(req Request) public void executeAuthorizationStage(req Request) {} public void executeFinalizationStage(req Request) {} @@ -118,12 +122,11 @@ public abstract class BaseRule implements Rule { } ---- -If there is no default rule configured, an upstream specific rule can then be considered as a class inheriting from that `BaseRule` and must implement at least the two `checkMethods` and `executeAuthenticationStage` methods, similar to what is shown below +If there is no default rule configured, an upstream specific rule can then be considered as a class inheriting from that `BaseRule` and must implement at least the `executeAuthenticationStage` method, similar to what is shown below [source, java] ---- public class MySpecificRule extends BaseRule { - public boolean checkMethods(methods []String) { ... } public void executeAuthenticationStage(req Request) { ... } } ---- @@ -133,7 +136,6 @@ If however, there is a default rule configured, on one hand, it can be considere [source, java] ---- public class DefaultRule extends BaseRule { - public boolean checkMethods(methods []String) { ... } public void executeAuthenticationStage(req Request) { ... } public void executeAuthorizationStage(req Request) { ... } public void executeFinalizationStage(req Request) { ... } @@ -141,7 +143,7 @@ public class DefaultRule extends BaseRule { } ---- -with at least the aforesaid two `checkMethods` and `executeAuthenticationStage` methods being implemented as this is also required for the regular rule. +with at least the aforesaid `executeAuthenticationStage` method being implemented, as this is also required for the regular rule. 
On the other hand, the definition of a regular, respectively upstream specific rule is then not a class deriving from the `BaseRule`, but from the `DefaultRule`. That way, upstream specific rules are only required, if the behavior of the default rule would not fit the given requirements of a particular service, respectively endpoint. So, if e.g. a rule requires only the authentication stage to be different from the default rule, you would only specify the required authentication mechanisms. That would result in something like shown in the snippet below. diff --git a/docs/content/docs/configuration/reference.adoc b/docs/content/docs/configuration/reference.adoc index b1d7726bd..003571bbe 100644 --- a/docs/content/docs/configuration/reference.adoc +++ b/docs/content/docs/configuration/reference.adoc @@ -156,13 +156,6 @@ profiling: host: 0.0.0.0 port: 9000 -signer: - name: foobar - key_store: - path: /opt/heimdall/keystore.pem - password: VeryInsecure! - key_id: foo - mechanisms: authenticators: - id: anonymous_authenticator @@ -322,6 +315,12 @@ mechanisms: - id: jwt type: jwt config: + signer: + name: foobar + key_store: + path: /opt/heimdall/keystore.pem + password: VeryInsecure! + key_id: foo ttl: 5m header: name: Foo @@ -358,24 +357,18 @@ mechanisms: type: redirect config: to: http://127.0.0.1:4433/self-service/login/browser?return_to={{ .Request.URL | urlenc }} - when: - - error: - - type: authentication_error - raised_by: kratos_session_authenticator - - type: authorization_error - request_headers: - Accept: - - '*/*' default_rule: - methods: - - GET - - POST + backtracking_enabled: false execute: - authenticator: anonymous_authenticator - finalizer: jwt on_error: - error_handler: authenticate_with_kratos + if: | + ((type(Error) == authentication_error && Error.Source == "kratos_session_authenticator") || + type(Error) == authorization_error) && + Request.Header("Accept").contains("*/*") providers: file_system: @@ -386,8 +379,8 @@ providers: watch_interval: 5m endpoints: - url: http://foo.bar/ruleset1 - expected_path_prefix: /foo/bar - enable_http_cache: false + http_cache: + enabled: false - url: http://foo.bar/ruleset2 retry: give_up_after: 5s @@ -406,10 +399,8 @@ providers: buckets: - url: gs://my-bucket prefix: service1 - rule_path_match_prefix: /service1 - url: azblob://my-bucket prefix: service2 - rule_path_match_prefix: /service2 - url: s3://my-bucket/my-rule-set kubernetes: diff --git a/docs/content/docs/configuration/types.adoc b/docs/content/docs/configuration/types.adoc index ac5fb22fd..fac8ac47c 100644 --- a/docs/content/docs/configuration/types.adoc +++ b/docs/content/docs/configuration/types.adoc @@ -341,6 +341,45 @@ config: ---- ==== +=== HTTP Message Signatures + +This strategy implements HTTP message signatures according to https://datatracker.ietf.org/doc/html/rfc9421[RFC 9421] to sign outbound requests. + +`type` must be set to `http_message_signatures`. `config` supports the following properties: + +* *`ttl`*: _link:{{< relref "#_duration" >}}[Duration]_ (optional) ++ +The TTL of the resulting signature. Defaults to 1m. Responsible for setting `created` and `expires` parameters in the resulting signature. + +* *`label`*: _string_ (optional) ++ +The label to use. Defaults to `sig`. + +* *`components`*: _string array_ (mandatory) ++ +The components to be covered by the signature. While the RFC allows for signatures that do not cover any components, this is considered a security risk. 
When using the `"content-digest"` component, Heimdall will compute hash values of the request body using `sha-256` and `sha-512` algorithms. It will then add a `Content-Digest` header with these hash values to the request, and this header will be included in the signature calculation. + +* *`signer`*: _link:{{< relref "/docs/configuration/types.adoc#_signer" >}}[Signer]_ (mandatory) ++ +The configuration of the key material used for signature creation purposes, as well as the name used for the `tag` parameter in the resulting signature. + +.Strategy configuration +==== + +[source, yaml] +---- +type: http_message_signatures +config: + ttl: 2m + label: foo + components: ["@method", "content-digest", "@authority", "x-my-fancy-header"] + signer: + name: bar + key-store: + path: /path/to/key.pem +---- +==== + === OAuth2 Client Credentials Grant Flow Strategy This strategy implements the https://datatracker.ietf.org/doc/html/rfc6749#section-4.4[OAuth2 Client Credentials Grant Flow] to obtain an access token expected by the endpoint. Heimdall caches the received access token. @@ -565,7 +604,8 @@ auth: headers: X-My-First-Header: foobar X-My-Second-Header: barfoo -enable_http_cache: true +http_cache: + enabled: true ---- ==== @@ -581,7 +621,6 @@ Following types are available: * `authorization_error` (*) - used if an authorizer failed to authorize the subject. E.g. an authorizer is configured to use an expression on the given subject and request context, but that expression returned with an error. Error of this type results by default in `403 Forbidden` response if the default error handler was used to handle such error. * `communication_error` (*) - this error is used to signal a communication error while communicating to a remote system during the execution of the pipeline of the matched rule. Timeouts of DNSs errors result in such an error. Error of this type results by default in `502 Bad Gateway` HTTP code if handled by the default error handler. * `internal_error` - used if heimdall run into an internal error condition while processing the request. E.g. something went wrong while unmarshalling a JSON object, or if there was a configuration error, which couldn't be raised while loading a rule, etc. Results by default in `500 Internal Server Error` response to the caller. -* `method_error` - this error is used to signal that a matched rule does not allow usage of the HTTP method used to submit the request. Error of this type results by default in `405 Method Not Allowed` HTTP code. * `no_rule_error` - this error is used to signal, there is no matching rule to handle the given request. Error of this type results by default in `404 Not Found` HTTP code. * `precondition_error` (*) - used if the request does not contain required/expected data. E.g. if an authenticator could not find a cookie configured. Error of this type results by default in `400 Bad Request` HTTP code if handled by the default error handler. @@ -771,42 +810,6 @@ This matcher enables matching scopes using wildcards. It goes beyond the link:{{ This matcher can only be used by explicitly setting the `matching_strategy` to `wildcard` and defining the required patterns in the `values` property. -== Subject - -This configuration type enables extraction of subject information from responses received by Heimdall from authentication services. Following properties are available. 
- -* *`id`*: _string_ (mandatory) -+ -A https://github.com/tidwall/gjson/blob/master/SYNTAX.md[GJSON Path] pointing to the id of the subject in the JSON object. - -* *`attributes`*: _string_ (optional) -+ -A https://github.com/tidwall/gjson/blob/master/SYNTAX.md[GJSON Path] pointing to the attributes of the subject in the JSON object. Defaults to `@this`. - -.Extracting subject id from an https://tools.ietf.org/html/rfc7662[OAuth2 Introspection] endpoint response. -==== - -This example shows how to extract the subject id from an https://tools.ietf.org/html/rfc7662[OAuth2 Introspection] endpoint response and set the subject attributes to the entire response - -[source, yaml] ----- -id: sub -attributes: @this ----- - -Setting `attributes` was actually not required, as `@this` would be set by default anyway. -==== - -.Extracting subject id from an https://www.ory.sh/docs/kratos/[Ory Kratos] "whoami" endpoint response -==== - -This example shows how to extract the subject id from an https://www.ory.sh/docs/kratos/[Ory Kratos] "whoami" endpoint response and set the subject attributes to the entire response. `attributes` is not configured, so default is used. - -[source, yaml] ----- -id: identity.id ----- -==== == Session Lifespan This configuration type enables the configuration of session lifespans, used for session validation for those authenticators, which act on non-standard protocols. Following properties are available. @@ -903,6 +906,114 @@ As you see, there is no need to define the time format as the times values appea ==== +== Signer + +When heimdall is used to issue signed objects, like JWTs, to enable upstream services to rely on authentic information, it acts as an issuer of such objects and requires corresponding configuration. + +Following properties are supported: + +* *`name`*: _string_ (optional) ++ +The name used to specify the issuer. E.g. if a JWT is generated, this value is used to set the `iss` claim. If not set, the value `heimdall` is used. + +* *`key_store`*: _link:{{< relref "/docs/configuration/types.adoc#_key_store" >}}[Key Store]_ (mandatory) ++ +The key store containing the cryptographic material. At least one private key must be present. + +* *`key_id`*: _string_ (optional) ++ +If the `key_store` contains multiple keys, this property can be used to specify the key to use (see also link:{{< relref "/docs/configuration/types.adoc#_key_id_lookup" >}}[Key-Id Lookup]). If not specified, the first key is used. If specified, but there is no key for the given key id present, an error is raised and heimdall will refuse to start. 
+ +.Possible configuration +==== +Imagine you have a PEM file located in `/opt/heimdall/keystore.pem` with the following contents: + +[source, txt] +---- +-----BEGIN EC PRIVATE KEY----- +X-Key-ID: foo + +MIGkAgEBBDBRLr783dIM5NHJnDDMRVBiFSF56xqHle5lZk1ZCyyow9wKZGuF4EWK +jRBISBkE3NSgBwYFK4EEACKhZANiAAQ+oGUOJpVjntIWuanYxpXe6oN5tKhzLhBX +GP1SOXiLhnPNnN2uZu9KwOoBzoZhr/Fxw+sziXmzHJwjluz78VOlFKyopxTfmxRZ +0qq3f/KHWdDtVvmTfT0O/ux9mg6mCJw= +-----END EC PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIByjCCAVGgAwIBAgIBATAKBggqhkjOPQQDAzAuMQswCQYDVQQGEwJFVTENMAsG +A1UEChMEVGVzdDEQMA4GA1UEAxMHVGVzdCBDQTAeFw0yMjA4MTUwOTE3MTFaFw0y +MjA4MTUxMDE3MTFaMDAxCzAJBgNVBAYTAkVVMQ0wCwYDVQQKEwRUZXN0MRIwEAYD +VQQDEwlUZXN0IEVFIDEwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQ+oGUOJpVjntIW +uanYxpXe6oN5tKhzLhBXGP1SOXiLhnPNnN2uZu9KwOoBzoZhr/Fxw+sziXmzHJwj +luz78VOlFKyopxTfmxRZ0qq3f/KHWdDtVvmTfT0O/ux9mg6mCJyjQTA/MA4GA1Ud +DwEB/wQEAwIHgDAMBgNVHQ4EBQQDYmFyMB8GA1UdIwQYMBaAFLO77bgPgZMKz11D +BVDUXvtNGeBnMAoGCCqGSM49BAMDA2cAMGQCMFRlx9Bq0MuSh5pDhDTqRq/MnxxD +W7qZg15AXoNnLrR60vV9gHjzkp1UkcU9viRIuAIwU0BjwDncp9z1seqKh+/eJV3f +xstQe2rzUEptWLIiPFoOBWZuw9wJ/Hunjik3a9T/ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIByjCCAVCgAwIBAgIBATAKBggqhkjOPQQDAzAuMQswCQYDVQQGEwJFVTENMAsG +A1UEChMEVGVzdDEQMA4GA1UEAxMHVGVzdCBDQTAeFw0yMjA4MTUwOTE3MTFaFw0y +MjA4MTYwOTE3MTFaMC4xCzAJBgNVBAYTAkVVMQ0wCwYDVQQKEwRUZXN0MRAwDgYD +VQQDEwdUZXN0IENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEf96tstMNdNoNfYjl +bGY6BvBFTsl9E3hpPnta7SJn6BqIYz6KEohDJ+8DXwUMVb5Ytr/QkEikg966HCY3 +A9TFBUdAs01TV8f2KoAPRQVrh+ccSLLJyACENfZ5VbGSQ0wso0IwQDAOBgNVHQ8B +Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUs7vtuA+BkwrPXUMF +UNRe+00Z4GcwCgYIKoZIzj0EAwMDaAAwZQIxAMPgE/Z+1Dcj+lH7jioE16Hig0HQ +FC4qBx1UU05H05Gs23ECB1hzD2qXikVpaNyuDgIwbogEu42wIwpDa5xdJIZcIhmz +DIuPvEscUDjU3C+1GPxmACcRMPv9QVUEcBAvZkfn +-----END CERTIFICATE----- +---- + +Then you can configure heimdall to use it like follows: + +[source, yaml] +---- +signer: + name: foobar + key_store: + path: /opt/heimdall/keystore.pem + key_id: foo +---- +==== + +== Subject + +This configuration type enables extraction of subject information from responses received by Heimdall from authentication services. Following properties are available. + +* *`id`*: _string_ (mandatory) ++ +A https://github.com/tidwall/gjson/blob/master/SYNTAX.md[GJSON Path] pointing to the id of the subject in the JSON object. + +* *`attributes`*: _string_ (optional) ++ +A https://github.com/tidwall/gjson/blob/master/SYNTAX.md[GJSON Path] pointing to the attributes of the subject in the JSON object. Defaults to `@this`. + +.Extracting subject id from an https://tools.ietf.org/html/rfc7662[OAuth2 Introspection] endpoint response. +==== + +This example shows how to extract the subject id from an https://tools.ietf.org/html/rfc7662[OAuth2 Introspection] endpoint response and set the subject attributes to the entire response + +[source, yaml] +---- +id: sub +attributes: @this +---- + +Setting `attributes` was actually not required, as `@this` would be set by default anyway. +==== + +.Extracting subject id from an https://www.ory.sh/docs/kratos/[Ory Kratos] "whoami" endpoint response +==== + +This example shows how to extract the subject id from an https://www.ory.sh/docs/kratos/[Ory Kratos] "whoami" endpoint response and set the subject attributes to the entire response. `attributes` is not configured, so default is used. 
+ +[source, yaml] +---- +id: identity.id +---- +==== + + == TLS Following are the supported TLS configuration properties: diff --git a/docs/content/docs/getting_started/installation.adoc b/docs/content/docs/getting_started/installation.adoc index 9e9eea4f9..52faf196a 100644 --- a/docs/content/docs/getting_started/installation.adoc +++ b/docs/content/docs/getting_started/installation.adoc @@ -91,8 +91,8 @@ Following tag patterns exist: + [source, bash] ---- -$ docker pull dadrus/heimdall:0.13.0-alpha && docker run dadrus/heimdall:0.13.0-alpha --version -heimdall version v0.13.0-alpha +$ docker pull dadrus/heimdall:0.15.0 && docker run dadrus/heimdall:0.15.0 --version +heimdall version v0.15.0 ---- * `latest` - will pull the most recent tagged release. @@ -100,7 +100,7 @@ heimdall version v0.13.0-alpha [source, bash] ---- $ docker pull dadrus/heimdall:latest && docker run dadrus/heimdall:latest --version -heimdall version v0.13.0-alpha +heimdall version 0.15.0 ---- == Helm Chart diff --git a/docs/content/docs/getting_started/protect_an_app.adoc b/docs/content/docs/getting_started/protect_an_app.adoc index 0e864941a..268b6f529 100644 --- a/docs/content/docs/getting_started/protect_an_app.adoc +++ b/docs/content/docs/getting_started/protect_an_app.adoc @@ -86,13 +86,14 @@ mechanisms: # <3> finalizers: - id: create_jwt # <8> type: jwt + config: + signer: + key_store: + path: /etc/heimdall/signer.pem - id: noop # <9> type: noop default_rule: # <10> - methods: - - GET - - POST execute: - authenticator: deny_all - finalizer: create_jwt @@ -109,20 +110,35 @@ providers: <5> These two lines define the `link:{{< relref "/docs/mechanisms/authenticators.adoc#_anonymous" >}}[anonymous]` authenticator named `anon`. It allows any request passing through and creates a subject with ID set to `anonymous`. You can find more information about the subject and other objects link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[here]. <6> This and the following lines define and configure the `link:{{< relref "/docs/mechanisms/authenticators.adoc#_jwt" >}}[jwt]` authenticator named `jwt_auth`. With the given configuration it will check whether a request contains an `Authorization` header with a bearer token in JWT format and validate it using key material fetched from the JWKS endpoint. It will reject all requests without a valid JWT or create a subject with ID set to the value of the `sub` claim from the token and add also add all claims as key-value map to subject's Attribute property. <7> Here we define and configure a `link:{{< relref "/docs/mechanisms/authorizers.adoc#_remote" >}}[remote]` authorizer named `opa`. Please note, how we allow overriding of particular settings, which application you'll find below, when we define the rules. -<8> The following two lines define the `link:{{< relref "/docs/mechanisms/finalizers.adoc#_jwt" >}}[jwt]` finalizer. Without any configuration, as used here, it will create a jwt out of the subject object with standard claims and set the `sub` claim to the value of subject's ID. The key material used for signature creation purpose will be generated on start up. This is fine for our demo purpose. For real scenarios you should definitely link:{{< relref "/docs/operations/security.adoc#_signatures" >}}[define it]. +<8> The following lines define the `link:{{< relref "/docs/mechanisms/finalizers.adoc#_jwt" >}}[jwt]` finalizer. 
With the given configuration, it will create a jwt out of the subject object with standard claims and set the `sub` claim to the value of subject's ID. The key material used for signature creation purpose is taken from the referenced key store. <9> These two lines conclude the definition of our mechanisms catalogue and define the `link:{{< relref "/docs/mechanisms/finalizers.adoc#_noop" >}}[noop]` finalizer, which as the type implies, does nothing. -<10> With the above catalogue in place, we can now define a link:{{< relref "/docs/rules/default_rule.adoc" >}}[default rule], which will kick in if no other rule matches the request. In addition, it acts as a link:{{< relref "/docs/concepts/rules.adoc#_default_rule_inheritance" >}}[base] for the definition of regular (upstream service specific) rules. In this case it allows only HTTP GET and POST requests and defines a secure default link:{{< relref "/docs/concepts/pipelines.adoc#_authentication_authorization_pipeline" >}}[authentication & authorization pipeline], which refuses any request by making use of the `deny_all` authenticator, and if the regular rule overrides that authenticator, will create a JWT thanks to the used `jwt` finalizer. +<10> With the above catalogue in place, we can now define a link:{{< relref "/docs/rules/default_rule.adoc" >}}[default rule], which will kick in if no other rule matches the request. In addition, it acts as a link:{{< relref "/docs/concepts/rules.adoc#_default_rule_inheritance" >}}[base] for the definition of regular (upstream service specific) rules. In this case it defines a secure default link:{{< relref "/docs/concepts/pipelines.adoc#_authentication_authorization_pipeline" >}}[authentication & authorization pipeline], which refuses any request by making use of the `deny_all` authenticator, and if the regular rule overrides that authenticator, will create a JWT thanks to the used `jwt` finalizer. <11> The last few lines of the configure the link:{{< relref "/docs/rules/providers.adoc#_filesystem" >}}[`file_system`] provider, which allows loading of regular rules from the file system. Btw. the provider is configured to watch for changes. So you can modify the rules, we're going to create, while playing around. +. Create a file, named `signer.pem` with the following content. This is our key store with a private key, you've seen in the configuration above. ++ +[source, yaml] +---- +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDALv/dRp6zvm6nmozmB/21viwFCUGBoisHz0v8LSRXGiM5aDywLFmMy +1jPnw29tz36gBwYFK4EEACKhZANiAAQgZkUS7PCh5tEXXvZk0LDQ4Xn4LSK+vKkI +zlCZl+oMgud8gacf4uG5ERgju1xdUyfewsXlwepTnWuwhXM7GdnwY5GOxZTwGn3X +XVwR/5tokqFVrFxt/5c1x7VdccF4nNM= +-----END EC PRIVATE KEY----- +---- ++ +WARNING: Do not use it for purposes beyond this tutorial! + . 
Now, create a rule file named `upstream-rules.yaml`, which will implement the authentication and authorization requirements of our service, and copy the following contents to it: + [source, yaml] ---- -version: "1alpha3" +version: "1alpha4" rules: - id: demo:public # <1> match: - url: http://<**>/public + routes: + - path: /public forward_to: host: upstream:8081 execute: @@ -131,7 +147,12 @@ rules: - id: demo:protected # <2> match: - url: http://<**>/<{user,admin}> + routes: + - path: /:user + path_params: + - name: user + type: glob + value: "{user,admin}" forward_to: host: upstream:8081 execute: @@ -224,12 +245,13 @@ version: '3.7' services: heimdall: # <1> - image: dadrus/heimdall:latest + image: dadrus/heimdall:dev ports: - "9090:4455" volumes: - ./heimdall-config.yaml:/etc/heimdall/config.yaml:ro - ./upstream-rules.yaml:/etc/heimdall/rules.yaml:ro + - ./signer.pem:/etc/heimdall/signer.pem:ro command: -c /etc/heimdall/config.yaml serve proxy upstream: # <2> @@ -249,7 +271,7 @@ services: volumes: - ./policy.rego:/etc/opa/policies/policy.rego:ro ---- -<1> These lines configure heimdall to use our config and rule file and to run in proxy operation mode. +<1> These lines configure heimdall to use our config, our key store, and the rule file and to run in proxy operation mode. <2> Here, we configure the "upstream" service. As already written above, it is a very simple service, which just echoes back everything it receives. <3> This is our NGINX, which mimics an IDP system and exposes an JWKS endpoint with our key material. <4> And these lines configure our OPA instance to use our authorization policy @@ -280,10 +302,11 @@ services: - traefik.http.middlewares.heimdall.forwardauth.authResponseHeaders=Authorization heimdall: # <3> - image: dadrus/heimdall:latest + image: dadrus/heimdall:dev volumes: - ./heimdall-config.yaml:/etc/heimdall/config.yaml:ro - ./upstream-rules.yaml:/etc/heimdall/rules.yaml:ro + - ./signer.pem:/etc/heimdall/signer.pem:ro command: -c /etc/heimdall/config.yaml serve decision upstream: # <4> @@ -311,7 +334,7 @@ services: + <1> These lines configure Traefik, which is used to dispatch the incoming requests and also forward all of them to heimdall before routing to the target service. We're using the ForwardAuth middleware here, which requires an additional configuration on the route level. <2> Here we configure Trafik to forward the requests to heimdall -<3> These lines configure heimdall to use our config and rule file and to run in decision operation mode. +<3> These lines configure heimdall to use our config, our key store, and the rule file and to run in decision operation mode. <4> Here, we configure the "upstream" service. As already written above, it is a very simple service, which just echoes back everything it receives. As also written above, we need to provide some route level configuration here to have the requests forwarded to heimdall. We could however also have a global configuration (which we decided not to do to avoid yet another configuration file). <5> This is our NGINX, which mimics an IDP system and exposes an JWKS endpoint with our key material. 
<6> And these lines configure our OPA instance to use our authorization policy @@ -402,7 +425,7 @@ Host: upstream:8081 User-Agent: curl/8.2.1 Accept: */* Accept-Encoding: gzip -Authorization: Bearer eyJhbGciOiJFUzM4NCIsImtpZCI6IjEzNTQxODg3NGFiNzQwN2I3ZWQ0MmU5MmM4NWIzY2ZkNDJmZDk5NDgiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE3MDk3NDI2NzYsImlhdCI6MTcwOTc0MjM3NiwiaXNzIjoiaGVpbWRhbGwiLCJqdGkiOiJhYTZkZDE1MC0yMzhiLTQ2YWEtOTIzMi00MDRjMWNiMGM4ZDMiLCJuYmYiOjE3MDk3NDIzNzYsInN1YiI6IjEifQ.84QF4F7-WKSAV4KcC2Z_7SG4VkiEXg0fUu1hLS-zKR8-SfpM3XVphLz3QVg4aDXe4AxiNIfqyA5rE9ZEFnYAlFfWOIt2R7i0PZh2gf1PBOQMj6cMLbDSUw_YZ9x1XWcf +Authorization: Bearer eyJhbGciOiJFUzM4NCIsImtpZCI6ImIzNDA3N2ZlNWI5NDczYzBjMmY3NDNmYWQ0MmY3ZDU0YWM3ZTFkN2EiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE3MTg2MzYwMDAsImlhdCI6MTcxODYzNTcwMCwiaXNzIjoiaGVpbWRhbGwiLCJqdGkiOiIyZjc0MjRmNy05ZWFkLTQ4MzItYmM2Yy0xM2FiNDY5NTNjOTQiLCJuYmYiOjE3MTg2MzU3MDAsInN1YiI6IjEifQ._xy_TRsQpiBPsdGi6gh1IOlyep62YpgxiqquXhg-guVdhpslS4PfVH139dv50GOX0fj3F31q8__8QWWvzPJCEI0aEaaMazIVZ24qjyFM2LJvX0o0ILePxfeDU3bhzN8i Forwarded: for=172.19.0.1;host=127.0.0.1:9090;proto=http ---- + diff --git a/docs/content/docs/mechanisms/authorizers.adoc b/docs/content/docs/mechanisms/authorizers.adoc index 1415321f9..f3d514b61 100644 --- a/docs/content/docs/mechanisms/authorizers.adoc +++ b/docs/content/docs/mechanisms/authorizers.adoc @@ -46,7 +46,7 @@ type: deny == Local (CEL) -This authorizer allows definition of authorization requirements based on information available about the authenticated subject, as well as the actual request by using https://github.com/google/cel-spec[CEL] based authorization expressions. Each expression is expected to return `true` to signal success. Otherwise, the authorization fails, resulting in the execution of the error handler mechanisms. +This authorizer allows definition of authorization requirements based on information available about the authenticated subject, the existing pipeline results, as well as the actual request by using https://github.com/google/cel-spec[CEL] based authorization expressions. Each expression is expected to return `true` to signal success. Otherwise, the authorization fails, resulting in the execution of the error handler mechanisms. To enable the usage of this authorizer, you have to set the `type` property to `cel`. @@ -54,7 +54,7 @@ Configuration using the `config` property is mandatory. Following properties are * *`expressions`*: _link:{{< relref "/docs/configuration/types.adoc#_authorization_expression">}}[Authorization Expression] array_ (mandatory, overridable) + -List of authorization expressions, which define the actual authorization logic. Each expression has access to the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] and the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. +List of authorization expressions, which define the actual authorization logic. Each expression has access to the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`], link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`], and the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. 
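Since the `Outputs` object is now available to these expressions, a minimal sketch of a `cel` authorizer acting on the result of a preceding mechanism could look as follows. The id `require_policy_decision`, the key `opa` and the `result` attribute are illustrative and assume a previous pipeline step stored a response like `{ "result": true }`:

[source, yaml]
----
- id: require_policy_decision
  type: cel
  config:
    expressions:
      # assumes a preceding mechanism with id "opa" stored { "result": true }
      - expression: |
          Outputs["opa"].result == true
        message: Access denied by policy
----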
.Authorization based on subject properties ==== @@ -103,7 +103,7 @@ The usage of this type of configuration makes sense in a pipeline, which combine This authorizer allows communication with other systems, like https://www.openpolicyagent.org/[Open Policy Agent], https://www.ory.sh/docs/keto/[Ory Keto], etc. for the actual authorization purpose. If the used endpoint answers with a not 2xx HTTP response code, this authorizer assumes, the authorization has failed, resulting in the execution of the error handler mechanisms. Otherwise, if no expressions for the verification of the response are defined, the authorizer assumes, the request has been authorized. If expressions are defined and do not fail, the authorization succeeds. -If your authorization system provides a payload in the response, heimdall inspects the `Content-Type` header to prepare the payload for further usage, e.g. for payload verification expressions, or for a link:{{< relref "#_local_cel" >}}[Local (CEL)] authorizer. If the content type does either end with `json` or is `application/x-www-form-urlencoded`, the payload is decoded, so key based access to the corresponding attributes is possible, otherwise it is made available as well, but as a simple string. In all cases this value is available for the authorization expressions as well as in the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[Subject's] `Attributes` property under a key named by the `id` of the authorizer (See also the example below). +If your authorization system provides a payload in the response, heimdall inspects the `Content-Type` header to prepare the payload for further usage, e.g. for payload verification expressions, or for a link:{{< relref "#_local_cel" >}}[Local (CEL)] authorizer. If the content type does either end with `json` or is `application/x-www-form-urlencoded`, the payload is decoded, so key based access to the corresponding attributes is possible, otherwise it is made available as well, but as a simple string. In all cases this value is available for the authorization expressions, as well as in the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`] property under a key named by the `id` of the authorizer (See also the example below). To enable the usage of this authorizer, you have to set the `type` property to `remote`. @@ -111,11 +111,11 @@ Configuration using the `config` property is mandatory. Following properties are * *`endpoint`*: _link:{{< relref "/docs/configuration/types.adoc#_endpoint">}}[Endpoint]_ (mandatory, not overridable) + -The API endpoint of your authorization system. At least the `url` must be configured. This mechanism allows templating of the url and makes the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] object, as well as the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`] (see also below) objects available to it. By default, this authorizer will use HTTP `POST` to send the rendered payload to this endpoint. You can override this behavior by configuring `method` as well. Depending on the API requirements of your authorization system, you might need to configure further properties, like headers, etc. +The API endpoint of your authorization system. At least the `url` must be configured. 
This mechanism allows templating of the url and makes the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] object, the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`] object, as well as the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`] (see also below) objects available to it. By default, this authorizer will use HTTP `POST` to send the rendered payload to this endpoint. You can override this behavior by configuring `method` as well. Depending on the API requirements of your authorization system, you might need to configure further properties, like headers, etc. * *`payload`*: _string_ (optional, overridable) + -Your link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_templating" >}}[template] with definitions required to communicate to the authorization endpoint. The template can make use of link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`], link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] and link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. +Your link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_templating" >}}[template] with definitions required to communicate to the authorization endpoint. The template can make use of link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`], link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`], link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] and link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. * *`expressions`*: _link:{{< relref "/docs/configuration/types.adoc#_authorization_expression">}}[Authorization Expression] array_ (optional, overridable) + @@ -133,7 +133,7 @@ Allows caching of the authorization endpoint responses. Defaults to 0s, which me * *`values`* _map of strings_ (optional, overridable) + -A key value map, which is made accessible to the template rendering engine as link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`] object, to render parts of the URL and/or the payload. The actual values in that map can be templated as well with access to the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] and link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. +A key value map, which is made accessible to the template rendering engine as link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`] object, to render parts of the URL and/or the payload. The actual values in that map can be templated as well with access to the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`], the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`], and link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. 
.Configuration of Remote authorizer to communicate with https://www.openpolicyagent.org/[Open Policy Agent] (OPA) ==== @@ -155,7 +155,7 @@ config: user: ${OPA_USER} password: ${OPA_PASSWORD} payload: | - { "input": { "user": {{ quote .Subject.ID }} }, "some_data": {{ quote .Values.whatever }} } + { "input": { "user": {{ quote .Subject.ID }} }, "some_data": {{ quote .Values.whatever }}, "more_data": {{ quote .Outputs.whatever }} } values: namespace: myapi/policy policy: allow_write @@ -167,7 +167,7 @@ config: message: User does not have required permissions ---- -In this case, since an OPA response could look like `{ "result": true }` or `{ "result": false }`, heimdall makes the response also available under `Subject.Attributes["opa"]` as a map, with `"opa"` being the id of the authorizer in this example. +In this case, since an OPA response could look like `{ "result": true }` or `{ "result": false }`, heimdall makes the response also available under `Outputs["opa"]`, with `"opa"` being the id of the authorizer in this example. A specific rule could then use this authorizer in the following ways: diff --git a/docs/content/docs/mechanisms/catalogue.adoc b/docs/content/docs/mechanisms/catalogue.adoc index dfcdf8852..c4434f1dc 100644 --- a/docs/content/docs/mechanisms/catalogue.adoc +++ b/docs/content/docs/mechanisms/catalogue.adoc @@ -107,6 +107,9 @@ mechanisms: - id: jwt_finalizer type: jwt config: + signer: + key_store: + path: /etc/heimdall/signer.pem ttl: 5m claims: | { @@ -123,9 +126,6 @@ mechanisms: - id: default type: default - id: authenticate_with_kratos - if: | - type(Error) in [authentication_error, authorization_error] && - Request.Header("Accept").contains("text/html") type: redirect config: to: http://127.0.0.1:4433/self-service/login/browser?return_to={{ .Request.URL | urlenc }} diff --git a/docs/content/docs/mechanisms/contextualizers.adoc b/docs/content/docs/mechanisms/contextualizers.adoc index e3e62c268..daa08b82e 100644 --- a/docs/content/docs/mechanisms/contextualizers.adoc +++ b/docs/content/docs/mechanisms/contextualizers.adoc @@ -16,7 +16,7 @@ Some of the contextualizers may support or require additional configuration. The == Generic -This mechanism allows you to communicate to any API you want to fetch further information about the subject. Typical scenario is getting specific attributes for later authorization purposes which are not known to the authentication system and thus were not made available in link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject's`] `Attributes` property. If the API responses with a 2xx HTTP response code, the payload is made available in the `Attributes` property of the `Subject`, otherwise, if not overridden, an error is thrown and the execution of the authentication & authorization pipeline stops. To avoid overwriting of existing attributes, this object is however not available on the top level, but under a key named by the `id` of the authorizer (See also the example below). If the `Content-Type` of the response is either ending with `json` or is `application/x-www-form-urlencoded`, the payload is decoded and made available as map, otherwise it is treated as string, but, as written above, is made available as well. +This mechanism allows you to communicate to any API you want to fetch further information about the subject. 
Typical scenario is getting specific attributes for later authorization purposes which are not known to the authentication system and thus were not made available in link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject's`] `Attributes` property. If the API responds with a 2xx HTTP response code, the payload is made available in the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`] object, otherwise, if not overridden, an error is thrown and the execution of the authentication & authorization pipeline stops. To avoid overwriting of existing key value pairs, this object is however not available on the top level, but under a key named by the `id` of the contextualizer (See also the example below). If the `Content-Type` of the response is either ending with `json` or is `application/x-www-form-urlencoded`, the payload is decoded and made available as map, otherwise it is treated as string, but, as written above, is made available as well. To enable the usage of this contextualizer, you have to set the `type` property to `generic`. @@ -24,7 +24,7 @@ Configuration using the `config` property is mandatory. Following properties are * *`endpoint`*: _link:{{< relref "/docs/configuration/types.adoc#_endpoint">}}[Endpoint]_ (mandatory, not overridable) + -The API of the service providing additional attributes about the authenticated user. At least the `url` must be configured. This mechanism allows templating of the url and makes the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] object, as well as the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`] (see also below) objects available to it. By default, this contextualizer will use HTTP `POST` to send the rendered payload to this endpoint. You can override this behavior by configuring `method` as well. Depending on the API requirements of the system, this contextualizer should communicate to, you might need to configure further properties, like headers, etc. +The API of the service providing additional attributes about the authenticated user. At least the `url` must be configured. This mechanism allows templating of the url and makes the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`], the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`] object, as well as the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`] (see also below) objects available to it. By default, this contextualizer will use HTTP `POST` to send the rendered payload to this endpoint. You can override this behavior by configuring `method` as well. Depending on the API requirements of the system, this contextualizer should communicate to, you might need to configure further properties, like headers, etc. * *`forward_headers`*: _string array_ (optional, overridable) + @@ -36,7 +36,7 @@ If the API requires any cookies from the request to heimdall, you can forward th * *`payload`*: _string_ (optional, overridable) + -Your link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_templating" >}}[template] with definitions required to communicate to the endpoint. The template can make use of link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`], link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] and link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects.
+Your link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_templating" >}}[template] with definitions required to communicate to the endpoint. The template can make use of link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`], link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`], the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`], and link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. * *`cache_ttl`*: _link:{{< relref "/docs/configuration/types.adoc#_duration" >}}[Duration]_ (optional, overridable) + @@ -48,7 +48,7 @@ If set to `true`, allows the pipeline to continue with the execution of the next * *`values`* _map of strings_ (optional, overridable) + -A key value map, which is made accessible to the template rendering engine as link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`] object to render parts of the URL and/or the payload. The actual values in that map can be templated as well with access to the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] and link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. +A key value map, which is made accessible to the template rendering engine as link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_values" >}}[`Values`] object to render parts of the URL and/or the payload. The actual values in that map can be templated as well with access to the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`], the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`] and link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. .Contextualizer configuration without payload ==== @@ -84,7 +84,7 @@ config: payload: | { "user_id": {{ quote .Values.user_id }} - "whatever": {{ quote .Values.whatever }} + "whatever": {{ quote .Outputs.whatever }} } ---- @@ -100,7 +100,6 @@ Since the `values` property is not defined but used in the payload, it must be s config: # overriding with rule specifics values: user_id: "{{ .Subject.ID }}" - whatever: "some value" - # other mechanisms ---- ==== diff --git a/docs/content/docs/mechanisms/error_handlers.adoc b/docs/content/docs/mechanisms/error_handlers.adoc index d49901a99..ddf33495b 100644 --- a/docs/content/docs/mechanisms/error_handlers.adoc +++ b/docs/content/docs/mechanisms/error_handlers.adoc @@ -16,7 +16,7 @@ Some of the error handlers may support or require additional configuration. The == Default -This mechanism is always there and is executed if no other error handler mechanism is responsible for the error. Actually, there is no need to explicitly configure it. The only exception is to allow overriding the link:{{< relref "/docs/rules/default_rule.adoc" >}}[default rule's] error handler chain in a specific rule for performance reasons (if configured error handlers in the default rule should not be considered). This mechanism type doesn't have any configuration options. +This error handler is always there and is executed if no other error handler mechanism is responsible for the error. Actually, there is no need to explicitly configure it. 
The only exception is to allow overriding the link:{{< relref "/docs/rules/default_rule.adoc" >}}[default rule's] error handler chain in a specific rule for performance reasons (if configured error handlers in the default rule should not be considered). This mechanism type doesn't have any configuration options. To enable the usage of this mechanism, you have to set the `type` property to `default`. @@ -37,7 +37,7 @@ This error handler mechanism allows redirecting the client to another endpoint, To enable the usage of this mechanism, you have to set the `type` property to `redirect`. -Configuration is mandatory by making use of the `if` and `config` properties. The first defines the condition, which must hold true for this error handler to execute and has access to the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] and the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_error" >}}[`Error`] objects. Latter defines the data to drive the redirect and supports the following properties: +Configuration is mandatory by making use of the `config` property supporting the following settings: * *`to`*: _URL_ (mandatory, not overridable) + @@ -62,9 +62,6 @@ So, e.g. if heimdall was handling the request for `\http://my-service.local/foo` ---- id: authenticate_with_kratos type: redirect -if: | - type(Error) in [authentication_error, authorization_error] && - Request.Header("Accept").contains("text/html") config: to: http://127.0.0.1:4433/self-service/login/browser?return_to={{ .Request.URL | urlenc }} ---- diff --git a/docs/content/docs/mechanisms/evaluation_objects.adoc b/docs/content/docs/mechanisms/evaluation_objects.adoc index 5729214a3..152225d44 100644 --- a/docs/content/docs/mechanisms/evaluation_objects.adoc +++ b/docs/content/docs/mechanisms/evaluation_objects.adoc @@ -20,15 +20,15 @@ Objects represent state in the execution of a particular rule. These are the ent === Subject -This object contains the information about the authenticated subject and has the following attributes: +This object is created by an authenticator which was able to verify the authentication claim available in the request, and contains the information about the authenticated subject. It has the following properties: * *`ID`*: _string_ + -The identifier of the subject. This value is set by the authenticator, which was able to authenticate the subject. +The identifier of the subject. * *`Attributes`*: _map_ + -Contains all attributes, which are known about the subject. The content is initially set by the authenticator, which was able to authenticate the subject. Mechanisms in the following stages can update it, but cannot override any entries. +Contains all attributes, which are known about the subject. Each object of this type can be thought as a JSON object. Here some examples: @@ -72,28 +72,46 @@ This object contains information about the request handled by heimdall and has t + The HTTP method used, like `GET`, `POST`, etc. +[#_url_captures] * *`URL`*: _URL_ + The URL of the matched request. This object has the following properties and methods: -** *`Scheme`*: _string_ +** *`Captures`*: _map_ + -The HTTP scheme part of the url +Allows accessing of the values captured by the named wildcards used in the matching path expression of the rule. + ** *`Host`*: _string_ + -The host part of the url +The host part of the url. + +** *`Hostname()`*: _method_ ++ +This method returns the host name stripping any valid port number if present. 
+ +** *`Port()`*: _method_ ++ +Returns the port part of the `Host`, without the leading colon. If `Host` doesn't contain a valid numeric port, returns an empty string. + ** *`Path`*: _string_ + -The path part of the url +The path part of the url. + +** *`Query()`*: _method_ ++ +The parsed query with each key-value pair being a string to array of strings mapping. + ** *`RawQuery`*: _string_ + The raw query part of the url. + +** *`Scheme`*: _string_ ++ +The HTTP scheme part of the url. + ** *`String()`*: _method_ + This method returns the URL as valid URL string of a form `scheme:host/path?query`. -** *`Query()`*: _method_ -+ -The parsed query with each key-value pair being a string to array of strings mapping. * *`ClientIPAddresses`*: _string array_ + @@ -152,14 +170,29 @@ Request = { Url: { Scheme: "https", Host: "localhost", - Path: "/test", - RawQuery: "baz=zab&baz=bar&foo=bar" + Path: "/test/abc", + RawQuery: "baz=zab&baz=bar&foo=bar", + Captures: { "value": "abc" } }, ClientIP: ["127.0.0.1", "10.10.10.10"] } ---- ==== +=== Outputs + +This object represents a pipeline execution specific key value map. It is used by pipeline steps to store or read results of particular step executions. Mechanism id used by a pipeline step is used as a key and the value is the corresponding result. + +Example: + +[source, javascript] +---- +Outputs = { + "id_1": ["a", "b"], + "id_2": { "foo": "bar", "baz": false } +} +---- + === Payload This object represents the contents of a payload, like the request body or a response body. The contents depend on the MIME-Type of the payload. For `json`, `yaml` or `x-www-form-urlencoded` encoded payload, the object is transformed to a JSON object. Otherwise, it is just a string. @@ -214,7 +247,7 @@ Values = { == Templating -Some mechanisms support templating using https://golang.org/pkg/text/template/[Golang Text Templates]. Templates can act on all objects described above (link:{{< relref "#_subject" >}}[Subject], link:{{< relref "#_request" >}}[Request], link:{{< relref "#_payload" >}}[Payload] and link:{{< relref "#_values" >}}[Values]). Which exactly are supported is mechanism specific. +Some mechanisms support templating using https://golang.org/pkg/text/template/[Golang Text Templates]. Templates can act on all objects described above (link:{{< relref "#_subject" >}}[Subject], link:{{< relref "#_outputs" >}}[Outputs], link:{{< relref "#_request" >}}[Request], link:{{< relref "#_payload" >}}[Payload] and link:{{< relref "#_values" >}}[Values]). Which exactly are supported is mechanism specific. 
To ease the usage, all http://masterminds.github.io/sprig/[sprig] functions, except `env` and `expandenv`, as well as the following functions are available: @@ -241,7 +274,8 @@ Imagine, we have a `POST` request for the URL `\http://foobar.baz/zab?foo=bar`, "request_url": {{ quote .Request.URL }}, "foo_value": {{ index .Request.URL.Query.foo 0 | quote }} "request_method": {{ quote .Request.Method }}, - "x_foo_value": {{ .Request.Header "X-Foo" | quote }} + "x_foo_value": {{ .Request.Header "X-Foo" | quote }}, + "whatever": {{ .Outputs.whatever | quote }} } ---- @@ -257,12 +291,13 @@ This will result in the following JSON object: "request_url": "http://foobar.baz/zab?foo=bar", "foo_value": "bar", "request_method": "POST", - "x_foo_value": "bar" + "x_foo_value": "bar", + "whatever": "some value" } ---- ==== -.Access the last part of the path +.Access to captured path segments ==== Imagine, we have a `POST` request to the URL `\http://foobar.baz/zab/1234`, with `1234` being the identifier of a file, which should be updated with the contents sent in the body of the request, and you would like to control access to the aforesaid object using e.g. OpenFGA. This can be achieved with the following authorizer: @@ -277,7 +312,7 @@ config: { "user": "user:{{ .Subject.ID }}", "relation": "write", - "object": "file:{{ splitList "/" .Request.URL.Path | last }}" + "object": "file:{{ .Request.URL.Captures.id }}" } expressions: - expression: | diff --git a/docs/content/docs/mechanisms/finalizers.adoc b/docs/content/docs/mechanisms/finalizers.adoc index 456a468e6..7719d79a6 100644 --- a/docs/content/docs/mechanisms/finalizers.adoc +++ b/docs/content/docs/mechanisms/finalizers.adoc @@ -31,7 +31,7 @@ type: noop == Header -This finalizer enables transformation of a link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] into HTTP headers. It can also be used to map information from the original link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] into headers expected by the upstream service. +This finalizer enables transformation of a link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] and the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`] objects into HTTP headers. It can also be used to map information from the original link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] into headers expected by the upstream service. To enable the usage of this finalizer, you have to set the `type` property to `header`. @@ -39,7 +39,7 @@ Configuration using the `config` property is mandatory. Following properties are * *`headers`*: _string map_ (mandatory, overridable) + -Enables configuration of arbitrary headers with any values build from available subject and request information (See also link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_templating" >}}[Templating]). +Enables configuration of arbitrary headers with any values build from available information (See also link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_templating" >}}[Templating]). .Header finalizer configuration ==== @@ -57,7 +57,7 @@ config: == Cookie -This finalizer enables transformation of a link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] into cookies. It can also be used to map information from the original link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] into cookies expected by the upstream service. 
+This finalizer enables transformation of a link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] and the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`] objects into cookies. It can also be used to map information from the original link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] into cookies expected by the upstream service. To enable the usage of this finalizer, you have to set the `type` property to `cookie`. @@ -65,7 +65,7 @@ Configuration using the `config` property is mandatory. Following properties are * *`cookies`*: _string map_ (mandatory, overridable) + -Enables configuration of arbitrary cookies with any values build from available subject information (See also link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_templating" >}}[Templating]). +Enables configuration of arbitrary cookies with any values build from available information (See also link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_templating" >}}[Templating]). .Cookie finalizer configuration ==== @@ -82,14 +82,16 @@ config: == JWT -This finalizer enables transformation of the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] object into a token in a https://www.rfc-editor.org/rfc/rfc7519[JWT] format, which is then made available to your upstream service in either the HTTP `Authorization` header with `Bearer` scheme set, or in a custom header. In addition to setting the JWT specific claims, it allows setting custom claims as well. Your upstream service can then verify the signature of the JWT by making use of heimdall's JWKS endpoint to retrieve the required public keys/certificates from. +This finalizer enables transformation of the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[`Subject`] and the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_outputs" >}}[`Outputs`] objects as custom claims into a token in a https://www.rfc-editor.org/rfc/rfc7519[JWT] format, which is then made available to your upstream service in either the HTTP `Authorization` header with `Bearer` scheme set, or in a custom header. Your upstream service can then verify the signature of the JWT by making use of heimdall's JWKS endpoint to retrieve the required public keys/certificates from. To enable the usage of this finalizer, you have to set the `type` property to `jwt`. -NOTE: The usage of this finalizer type requires a configured link:{{< relref "/docs/operations/security.adoc#_signatures" >}}[Signer] as well. At least it is a must in production environments. - Configuration using the `config` property is optional. Following properties are available: +* *`signer`*: _link:{{< relref "/docs/configuration/types.adoc#_signer" >}}[Signer]_ (mandatory, not overridable) ++ +The configuration of the key material used for signature creation purposes, as well as the name used for the `iss` claim. + * *`claims`*: _string_ (optional, overridable) + Your template with custom claims, you would like to add to the JWT (See also link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_templating" >}}[Templating]). 
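Putting the now mandatory `signer` together with the other properties, a minimal sketch of a `jwt` finalizer definition, loosely based on the reference configuration shown earlier in this diff (file path, key id and the claims template are illustrative), might look like this:

[source, yaml]
----
finalizers:
  - id: create_jwt
    type: jwt
    config:
      signer:
        name: heimdall              # used as the value of the "iss" claim
        key_store:
          path: /etc/heimdall/signer.pem
        key_id: foo                 # only needed if the key store holds multiple keys
      ttl: 5m
      claims: |
        {"attrs": {{ .Subject.Attributes | toJson }} }
----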
diff --git a/docs/content/docs/operations/configuration.adoc b/docs/content/docs/operations/configuration.adoc index 494f6d72b..d347bc783 100644 --- a/docs/content/docs/operations/configuration.adoc +++ b/docs/content/docs/operations/configuration.adoc @@ -70,11 +70,12 @@ mechanisms: finalizers: - id: create_jwt type: jwt + config: + signer: + key_store: + path: /etc/heimdall/signer.pem default_rule: - methods: - - GET - - POST execute: - authenticator: anonymous_authenticator - finalizer: create_jwt @@ -100,6 +101,10 @@ mechanisms: finalizers: - id: create_jwt type: jwt + config: + signer: + key_store: + path: ${SIGNER_KEY_STORE_FILE} ---- ==== @@ -133,22 +138,23 @@ HEIMDALLCFG_LOG_LEVEL=info * Array entries must be defined using `\_[_]`, with `IDX` being the index of the array starting with `0` and `_` in brackets being only required, if the value of the configured element has a structure/hierarchy. + -E.g. the `methods` of the link:{{< relref "/docs/rules/default_rule.adoc" >}}[default rule] can be configured in a config file as +E.g. the `trusted_proxies` property of the link:{{< relref "/docs/services/decision.adoc" >}}[decision service] can be configured in a config file as + [source,yaml] ---- -default_rule: - methods: - - GET - - POST +serve: + decision: + trusted_proxies: + - 192.168.1.0/24 + - 192.168.2.0/24 ---- + and using environment variables with + [source,bash] ---- -HEIMDALLCFG_DEFAULT__RULE_METHODS_0=GET -HEIMDALLCFG_DEFAULT__RULE_METHODS_1=POST +HEIMDALLCFG_SERVE_DECISION_TRUSTED__PROXIES_0=192.168.1.0/24 +HEIMDALLCFG_SERVE_DECISION_TRUSTED__PROXIES_1=192.168.2.0/24 ---- + For structured configuration, like the definition of the authenticators in the example above diff --git a/docs/content/docs/operations/observability.adoc b/docs/content/docs/operations/observability.adoc index 8adca24bf..9469e974e 100644 --- a/docs/content/docs/operations/observability.adoc +++ b/docs/content/docs/operations/observability.adoc @@ -345,7 +345,7 @@ You can also disable metrics export by setting the `OTEL_METRICS_EXPORTER` envir All, but custom metrics adhere to the https://opentelemetry.io/docs/specs/otel/metrics/semantic_conventions/[OpenTelementry semantic conventions]. For that reason, only the custom metrics are listed in the table below. ==== Metric: `certificate.expiry` -Number of seconds until a certificate used by a particular service (decision, proxy, management), as well as signer expires. The metric type is UpDownCounter und the unit is s. +Number of seconds until a certificate used by a particular service (decision, proxy, management), or mechanism (e.g. jwt finalizer) expires. The metric type is UpDownCounter and the unit is s. [cols="2,1,5"] |=== @@ -430,7 +430,7 @@ By making use of this property, you can specify the TCP port the heimdall should ==== [source, yaml] ---- -metrics: +profiling: port: 9999 ---- ==== diff --git a/docs/content/docs/operations/security.adoc b/docs/content/docs/operations/security.adoc index 2e4406412..ad9fd68e6 100644 --- a/docs/content/docs/operations/security.adoc +++ b/docs/content/docs/operations/security.adoc @@ -56,87 +56,13 @@ E.g. docker run -t -p 4456:4456 \ -v $PWD:/heimdall/conf \ -v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt:ro \ - dadrus/heimdall:latest serve decision \ + dadrus/heimdall:dev serve decision \ -c /heimdall/conf/heimdall.yaml ---- ==== The verification of TLS server certificates is not the single configuration option.
You should also ensure heimdall's services, you're using, are configured to be available via TLS as well. See link:{{< relref "/docs/configuration/types.adoc#_tls" >}}[TLS Configuration] for all available options. -== Signatures - -When heimdall is used to issue signed objects, like JWTs, to enable upstream services to rely on authentic subject information, it acts as an issuer of such objects and requires corresponding configuration. - -=== Configuration - -The configuration related to the issuance of signed objects can be done using the `signer` property, which resides on the top level of heimdall's configuration and supports the following properties. - -* *`name`*: _string_ (optional) -+ -The name used to specify the issuer. E.g. if a JWT is generated, this value is used to set the `iss` claim. If not set, the value `heimdall` is used. - -* *`key_store`*: _link:{{< relref "/docs/configuration/types.adoc#_key_store" >}}[Key Store]_ (optional) -+ -The key store containing the cryptographic material. If configured, at least one private key and the corresponding certificate must be present. If not configured, heimdall generates an ECDSA P-384 key pair on start up and uses it then. -+ -WARNING: You should always configure a valid key store for production use! - -* *`key_id`*: _string_ (optional) -+ -If the `key_store` contains multiple keys, this property can be used to specify the key to use (see also link:{{< relref "/docs/configuration/types.adoc#_key_id_lookup" >}}[Key-Id Lookup]). If not specified, the first key is used. If specified, but there is no key for the given key id present, an error is raised and heimdall will refuse to start. - -.Possible configuration -==== -Imagine you have a PEM file located in `/opt/heimdall/keystore.pem` with the following contents: - -[source, txt] ----- ------BEGIN EC PRIVATE KEY----- -X-Key-ID: foo - -MIGkAgEBBDBRLr783dIM5NHJnDDMRVBiFSF56xqHle5lZk1ZCyyow9wKZGuF4EWK -jRBISBkE3NSgBwYFK4EEACKhZANiAAQ+oGUOJpVjntIWuanYxpXe6oN5tKhzLhBX -GP1SOXiLhnPNnN2uZu9KwOoBzoZhr/Fxw+sziXmzHJwjluz78VOlFKyopxTfmxRZ -0qq3f/KHWdDtVvmTfT0O/ux9mg6mCJw= ------END EC PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIByjCCAVGgAwIBAgIBATAKBggqhkjOPQQDAzAuMQswCQYDVQQGEwJFVTENMAsG -A1UEChMEVGVzdDEQMA4GA1UEAxMHVGVzdCBDQTAeFw0yMjA4MTUwOTE3MTFaFw0y -MjA4MTUxMDE3MTFaMDAxCzAJBgNVBAYTAkVVMQ0wCwYDVQQKEwRUZXN0MRIwEAYD -VQQDEwlUZXN0IEVFIDEwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQ+oGUOJpVjntIW -uanYxpXe6oN5tKhzLhBXGP1SOXiLhnPNnN2uZu9KwOoBzoZhr/Fxw+sziXmzHJwj -luz78VOlFKyopxTfmxRZ0qq3f/KHWdDtVvmTfT0O/ux9mg6mCJyjQTA/MA4GA1Ud -DwEB/wQEAwIHgDAMBgNVHQ4EBQQDYmFyMB8GA1UdIwQYMBaAFLO77bgPgZMKz11D -BVDUXvtNGeBnMAoGCCqGSM49BAMDA2cAMGQCMFRlx9Bq0MuSh5pDhDTqRq/MnxxD -W7qZg15AXoNnLrR60vV9gHjzkp1UkcU9viRIuAIwU0BjwDncp9z1seqKh+/eJV3f -xstQe2rzUEptWLIiPFoOBWZuw9wJ/Hunjik3a9T/ ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -MIIByjCCAVCgAwIBAgIBATAKBggqhkjOPQQDAzAuMQswCQYDVQQGEwJFVTENMAsG -A1UEChMEVGVzdDEQMA4GA1UEAxMHVGVzdCBDQTAeFw0yMjA4MTUwOTE3MTFaFw0y -MjA4MTYwOTE3MTFaMC4xCzAJBgNVBAYTAkVVMQ0wCwYDVQQKEwRUZXN0MRAwDgYD -VQQDEwdUZXN0IENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEf96tstMNdNoNfYjl -bGY6BvBFTsl9E3hpPnta7SJn6BqIYz6KEohDJ+8DXwUMVb5Ytr/QkEikg966HCY3 -A9TFBUdAs01TV8f2KoAPRQVrh+ccSLLJyACENfZ5VbGSQ0wso0IwQDAOBgNVHQ8B -Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUs7vtuA+BkwrPXUMF -UNRe+00Z4GcwCgYIKoZIzj0EAwMDaAAwZQIxAMPgE/Z+1Dcj+lH7jioE16Hig0HQ -FC4qBx1UU05H05Gs23ECB1hzD2qXikVpaNyuDgIwbogEu42wIwpDa5xdJIZcIhmz -DIuPvEscUDjU3C+1GPxmACcRMPv9QVUEcBAvZkfn ------END CERTIFICATE----- ----- - -Then you can configure 
heimdall to use it like follows: - -[source, yaml] ----- -signer: - name: foobar - key_store: - path: /opt/heimdall/keystore.pem - key_id: foo ----- -==== - === Security Considerations In a typical production scenario, there is a need for proper key and certificate management. This is supported by heimdall in the following way: diff --git a/docs/content/docs/rules/default_rule.adoc b/docs/content/docs/rules/default_rule.adoc index 8ec82ac69..0e6134801 100644 --- a/docs/content/docs/rules/default_rule.adoc +++ b/docs/content/docs/rules/default_rule.adoc @@ -16,28 +16,31 @@ description: Heimdall lets you not only define upstream service specific rules, The configuration of the default rule can be done by making use of the `default_rule` property and configuring the options shown below. -NOTE: The default rule does not support all the properties, which can be configured in an link:{{< relref "regular_rule.adoc" >}}[regular rule]. E.g. it can not be used to forward requests to an upstream service, heimdall is protecting. So, if you operate heimdall in the reverse proxy mode, the default rule should be configured to reject requests. Otherwise, heimdall will respond with an error. +[NOTE] +==== +The default rule does not support all the properties that can be configured in a link:{{< relref "regular_rule.adoc" >}}[regular rule]. + +* It cannot be used to forward requests to an upstream service heimdall is protecting. So, if you operate heimdall in the reverse proxy mode, the default rule should be configured to reject requests. Otherwise, heimdall will respond with an error. +* The default rule also rejects requests with encoded slashes in the path of the URL with `400 Bad Request`. This behavior can be changed on the level of a regular rule. +==== -* *`methods`*: _string array_ (optional) +* *`backtracking_enabled`*: _boolean_ (optional) + -Which HTTP methods (`GET`, `POST`, `PATCH`, etc) are allowed. Expansion using `ALL` and removal by prefixing the method with an `!` is supported as with the regular rules. Defaults to an empty array. If the default rule is defined and the upstream service API specific rule (see also link:{{< relref "regular_rule.adoc#_configuration" >}}[Rule Configuration] does not override it, no methods will be accepted, effectively resulting in `405 Method Not Allowed` response to Heimdall's client for any urls matched by that particular rule. +Enables or disables backtracking while matching the rules globally. Defaults to `false`. * *`execute`*: _link:{{< relref "regular_rule.adoc#_authentication_authorization_pipeline" >}}[Authentication & Authorization Pipeline]_ (mandatory) + Which mechanisms to use for authentication, authorization and finalization stages of the pipeline. At least the authentication stage with at least one link:{{< relref "/docs/mechanisms/authenticators.adoc" >}}[authenticator] must be defined. A specific rule (see also link:{{< relref "regular_rule.adoc" >}}[Regular Rule]) can omit the definition of that stage, if it wants to reuse it from in the default rule. Same is true for other stages (See also link:{{< relref "/docs/concepts/rules.adoc#_default_rule_inheritance" >}}[Rule Inheritance]). -* *`on_error`*: _link:{{< relref "regular_rule.adoc#_error_pipeline" >}}[Error Pipeline]_ (mandatory) +* *`on_error`*: _link:{{< relref "regular_rule.adoc#_error_pipeline" >}}[Error Pipeline]_ (optional) + -Which error handler mechanisms to use if any of the mechanisms, defined in the `execute` property fail.
Allows omitting the definition of error handlers in specific rules. As soon as a specific rule defines at least one error handler mechanism, all error handler mechanisms, defined in the default rule are ignored. +Which error handler mechanisms to use if any of the mechanisms defined in the `execute` property fail. Allows omitting the definition of error handlers in specific rules. As soon as a specific rule defines at least one error handler mechanism, all error handler mechanisms defined in the default rule are ignored. If not specified, the default error handler is used. .Default rule configuration ==== [source, yaml] ---- default_rule: - methods: - - GET - - PATCH execute: - authenticator: session_cookie_from_kratos_authn - authenticator: oauth2_introspect_token_from_keycloak_authn @@ -45,9 +48,11 @@ default_rule: - finalizer: create_jwt on_error: - error_handler: authenticate_with_kratos_eh + if: | + type(Error) == authentication_error && Error.Source == "session_cookie_from_kratos_authn" ---- -This example defines a default rule, which allows HTTP `GET` and `PATCH` requests on any URL (will respond with `405 Method Not Allowed` for any other HTTP method used by a client). The authentication 6 authorization pipeline consists of two authenticators, with `session_cookie_from_kratos_authn` being the first and `oauth2_introspect_token_from_keycloak_authn` being the fallback (if the first one fails), a `deny_all_requests_authz` authorizer and the `create_jwt` finalizer. The error pipeline is configured to execute only the `authenticate_with_kratos_eh` error handler. +This example defines a default rule, with the authentication & authorization pipeline consisting of two authenticators, `session_cookie_from_kratos_authn` being the first and `oauth2_introspect_token_from_keycloak_authn` being the fallback one (if the first one fails), a `deny_all_requests_authz` authorizer and the `create_jwt` finalizer. The error pipeline is configured to execute only the `authenticate_with_kratos_eh` error handler. Obviously, the authentication & authorization pipeline (defined in the `execute` property) of this default rule will always result in an error due to `deny_all_requests_authz`. This way it is thought to provide secure defaults and let the upstream specific (regular) rules override at least the part dealing with authorization. Such an upstream specific rule could then look like follows: @@ -55,11 +60,12 @@ Obviously, the authentication & authorization pipeline (defined in the `execute` ---- id: rule:my-service:protected-api match: - url: http://my-service.local/foo + routes: + - path: /foo execute: - authorizer: allow_all_requests_authz ---- -Take a look at how `methods`, `on_error`, as well as the authenticators and finalizers from the `execute` definition of the default rule are reused. Easy, no? +Take a look at how `on_error`, as well as the authenticators and finalizers from the `execute` definition of the default rule are reused. Easy, no?
==== diff --git a/docs/content/docs/rules/providers.adoc b/docs/content/docs/rules/providers.adoc index cf947351b..5f37f534c 100644 --- a/docs/content/docs/rules/providers.adoc +++ b/docs/content/docs/rules/providers.adoc @@ -44,15 +44,19 @@ WARNING: All environment variables, used in the rule set files must be known in ==== [source, yaml] ---- -version: "1alpha3" +version: "1alpha4" name: my-rule-set rules: - id: rule:1 match: - url: https://my-service1.local/<**> + routes: + - path: /** + hosts: + - type: exact + value: my-service1.local + methods: [ "GET" ] forward_to: host: ${UPSTREAM_HOST:="default-backend:8080"} - methods: [ "GET" ] execute: - authorizer: foobar ---- @@ -99,15 +103,11 @@ Following configuration options are supported: + Whether the configured `endpoints` should be polled for updates. Defaults to `0s` (polling disabled). -* *`endpoints`*: _RuleSetEndpoint array_ (mandatory) +* *`endpoints`*: _link:{{< relref "/docs/configuration/types.adoc#_endpoint" >}}[Endpoint] array_ (mandatory) + -Each entry of that array supports all the properties defined by link:{{< relref "/docs/configuration/types.adoc#_endpoint" >}}[Endpoint], except `method`, which is always `GET`. As with the link:{{< relref "/docs/configuration/types.adoc#_endpoint" >}}[Endpoint] type, at least the `url` must be configured. Following properties are defined in addition: -+ -** *`rule_path_match_prefix`*: _string_ (optional) -+ -This property can be used to create kind of a namespace for the rule sets retrieved from the different endpoints. If set, the provider checks whether the urls specified in all rules retrieved from the referenced endpoint have the defined path prefix. If not, a warning is emitted and the rule set is ignored. This can be used to ensure a rule retrieved from one endpoint does not collide with a rule from another endpoint. +Each entry of that array supports all the properties defined by link:{{< relref "/docs/configuration/types.adoc#_endpoint" >}}[Endpoint], except `method`, which is always `GET`. As with the link:{{< relref "/docs/configuration/types.adoc#_endpoint" >}}[Endpoint] type, at least the `url` must be configured. -NOTE: HTTP caching according to https://www.rfc-editor.org/rfc/rfc7234[RFC 7234] is enabled by default. It can be disabled by setting `http_cache.enabled` to `false`. +NOTE: HTTP caching according to https://www.rfc-editor.org/rfc/rfc7234[RFC 7234] is enabled by default. It can be disabled on the particular endpoint by setting `http_cache.enabled` to `false`. === Examples @@ -128,9 +128,7 @@ http_endpoint: Here, the provider is configured to poll the two defined rule set endpoints for changes every 5 minutes. -The configuration for the first endpoint instructs heimdall to ensure all urls defined in the rules coming from that endpoint must match the defined path prefix. - -The configuration for the second endpoint defines the `rule_path_match_prefix` as well. It also defines a couple of other properties. One to ensure the communication to that endpoint is more resilient by setting the `retry` options and since this endpoint is protected by an API key, it defines the corresponding options as well. +The configuration for both endpoints instructs heimdall to disable HTTP caching. The configuration of the second endpoint uses a couple of additional properties. One to ensure the communication to that endpoint is more resilient by setting the `retry` options and since this endpoint is protected by an API key, it defines the corresponding options as well. 
[source, yaml] ---- @@ -138,9 +136,11 @@ http_endpoint: watch_interval: 5m endpoints: - url: http://foo.bar/ruleset1 - rule_path_match_prefix: /foo/bar + http_cache: + enabled: false - url: http://foo.bar/ruleset2 - rule_path_match_prefix: /bar/foo + http_cache: + enabled: false retry: give_up_after: 5s max_delay: 250ms @@ -183,10 +183,6 @@ The actual url to the bucket or to a specific blob in the bucket. ** *`prefix`*: _string_ (optional) + Indicates that only blobs with a key starting with this prefix should be retrieved -+ -** *`rule_path_match_prefix`*: _string_ (optional) -+ -Creates kind of a namespace for the rule sets retrieved from the blobs. If set, the provider checks whether the urls patterns specified in all rules retrieved from the referenced bucket have the defined path prefix. If that rule is violated, a warning is emitted and the rule set is ignored. This can be used to ensure a rule retrieved from one endpoint does not override a rule from another endpoint. The differentiation which storage is used is based on the URL scheme. These are: @@ -222,17 +218,14 @@ cloud_blob: buckets: - url: gs://my-bucket prefix: service1 - rule_path_match_prefix: /service1 - url: gs://my-bucket prefix: service2 - rule_path_match_prefix: /service2 - url: s3://my-bucket/my-rule-set?region=us-west-1 ---- Here, the provider is configured to poll multiple buckets with rule sets for changes every 2 minutes. The first two bucket reference configurations reference actually the same bucket on Google Cloud Storage, but different blobs based on the configured blob prefix. The first one will let heimdall loading only those blobs, which start with `service1`, the second only those, which start with `service2`. -As `rule_path_match_prefix` are defined for both as well, heimdall will ensure, that rule sets loaded from the corresponding blobs will not overlap in their url matching definitions. The last one instructs heimdall to load rule set from a specific blob, namely a blob named `my-rule-set`, which resides on the `my-bucket` AWS S3 bucket, which is located in the `us-west-1` AWS region. diff --git a/docs/content/docs/rules/regular_rule.adoc b/docs/content/docs/rules/regular_rule.adoc index 60df11ada..168c7b849 100644 --- a/docs/content/docs/rules/regular_rule.adoc +++ b/docs/content/docs/rules/regular_rule.adoc @@ -12,7 +12,7 @@ description: Regular rules allow definition and as such execution of arbitrary l :toc: -In simplest case a regular rule will just reuse mechanisms from a previously defined link:{{< relref "/docs/mechanisms/catalogue.adoc" >}}[catalogue] in its pipelines. In more complex cases a rule can reconfigure parts of used mechanisms. Which parts can be reconfigured, respectively overridden are mechanism specific and described in the mechanism specific documentation. Reconfiguration is always limited to the particular rule pipeline and does not affect other rules. +In the simplest case, a regular rule reuses mechanisms from the previously defined link:{{< relref "/docs/mechanisms/catalogue.adoc" >}}[catalogue] in its pipelines. In more complex scenarios, a rule can reconfigure parts of the mechanisms being used. The specific parts that can be reconfigured or overridden depend on the mechanism itself and are described in the mechanism-specific documentation. Reconfiguration is always limited to the particular rule's pipeline and does not affect other rules. 
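To illustrate the idea (all mechanism ids below are hypothetical), a rule may reference a catalogue mechanism as is, or override parts of its configuration locally, without affecting any other rule:

[source, yaml]
----
execute:
  # reused exactly as defined in the mechanism catalogue
  - authenticator: oauth2_introspect_token_authn
  # same catalogue mechanism, but with a rule-local configuration override
  - authorizer: cel_authz
    config:
      expressions:
        - expression: |
            "admin" in Subject.Attributes.groups
----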
== Configuration @@ -20,110 +20,131 @@ A single regular rule consists of the following properties: * *`id`*: _string_ (mandatory) + -The unique identifier of a rule. It must be unique across all rules loaded by the same link:{{< relref "providers.adoc" >}}[Rule Provider]. To ensure this, it is recommended to let the `id` include the name of your upstream service, as well as its purpose. E.g. `rule:my-service:public-api`. +The unique identifier of the rule. It must be unique across all rules loaded by the same link:{{< relref "providers.adoc" >}}[Rule Provider]. To ensure uniqueness, it's recommended to include the upstream service's name and the rule’s purpose in the id. For example, `rule:my-service:public-api`. * *`match`*: _RuleMatcher_ (mandatory) + -Defines how to match a rule and supports the following properties: +Defines the matching criteria for a rule, with the following properties: -** *`url`*: _string_ (mandatory) +** *`routes`*: _RouteMatcher array_ (mandatory) + -Glob or Regex pattern of the endpoints of your upstream service, which this rule should apply to. Query parameters are ignored. +Specifies route conditions for matching the rule to incoming HTTP requests with each entry having the following properties: -** *`strategy`*: _string_ (optional) +*** *`path`*: _string_ (mandatory) + -Which strategy to use for matching of the value, provided in the `url` property. Can be one of: +The link:{{< relref "#_path_expression" >}}[Path Expression] describing the request path this rule should match. It supports both simple and free (named) wildcards. -*** `regex` - to match `url` expressions by making use of regular expressions. Internally, heimdall makes use of Heimdall uses https://github.com/dlclark/regexp2[dlclark/regexp2] to implement this strategy. Head over to linked resource to get more insights about possible options. +*** *`path_params`*: _PathParameterConditions_ (optional) + -.Regular expressions patterns -==== -* `\https://mydomain.com/` matches `\https://mydomain.com/` and doesn't match `\https://mydomain.com/foo` or `\https://mydomain.com`. -* `://mydomain.com/<.*>` matches `\https://mydomain.com/` and `\http://mydomain.com/foo`. Doesn't match `\https://other-domain.com/` or `\https://mydomain.com`. -* `\http://mydomain.com/<[[:digit:]]+>` matches `\http://mydomain.com/123`, but doesn't match `\http://mydomain/abc`. -* `\http://mydomain.com/<(?!protected).*>` matches `\http://mydomain.com/resource`, but doesn't match `\http://mydomain.com/protected`. -==== +Additional conditions for the values captured by named wildcards in the path expression. Each entry supports the following properties: -*** `glob` - to match `url` expressions by making use of glob expressions. Internally, heimdall makes use of Heimdall uses https://github.com/gobwas/glob[gobwas/glob] to implement this strategy. Head over to linked resource to get more insights about possible options. +**** *`name`*: _string_ (mandatory) + -.Glob patterns -==== -* `\https://mydomain.com/` matches `\https://mydomain.com/man` and does not match `\http://mydomain.com/foo`. -* `\https://mydomain.com/<{foo*,bar*}>` matches `\https://mydomain.com/foo` or `\https://mydomain.com/bar` and doesn't match `\https://mydomain.com/any`. -==== +The name of the wildcard. -* *`allow_encoded_slashes`*: _string_ (optional) +**** *`type`*: _string_ (mandatory) + -Defines how to handle url-encoded slashes in url paths while matching and forwarding the requests. 
Can be set to the one of the following values, defaulting to `off`: +The type of expression used to match the captured wildcard's value. The supported types are: -** *`off`* - Reject requests containing encoded slashes. Means, if the request URL contains an url-encoded slash (`%2F`), the rule will not match it. -** *`on`* - Accept requests using encoded slashes, decoding them and making it transparent for the rules and the upstream url. That is, the `%2F` becomes a `/` and will be treated as such in all places. -** *`no_decode`* - Accept requests using encoded slashes, but not touching them and showing them to the rules and the upstream. That is, the `%2F` just remains as is. +***** `glob`: to use a https://github.com/gobwas/glob[glob expression] to match the captured value (`/` is used as a delimiter, so `*` matches anything until the next `/`). +***** `regex` to use a regular expression to match the captured value. +**** *`value`*: _string_ (mandatory) + -CAUTION: Since the proxy integrating with heimdall, heimdall by itself, and the upstream service, all may treat the url-encoded slashes differently, accepting requests with url-encoded slashes can, depending on your rules, lead to https://cwe.mitre.org/data/definitions/436.html[Interpretation Conflict] vulnerabilities resulting in privilege escalations. +The actual expression based on the given `type`. -* *`methods`*: _string array_ (optional) +** *`backtracking_enabled`*: _boolean_ (optional) + -Which HTTP methods (`GET`, `POST`, `PATCH`, etc) are allowed for the matched URL. If not specified, every request to that URL will result in `405 Method Not Allowed` response from heimdall. If all methods should be allowed, one can use a special `ALL` placeholder. If all, except some specific methods should be allowed, one can specify `ALL` and remove specific methods by adding the `!` sign to the to be removed method. In that case you have to specify the value in braces. See also examples below. +Enables or disables backtracking when a request matches the path expressions but fails to meet additional matching criteria, like `path_params`, `hosts`, etc. This setting is inherited from the default rule and defaults to that rule's setting. When enabled, the system will backtrack to attempt a match with a less specific rule (see link:{{< relref "#_rule_matching_specificity_backtracking" >}}[Rule Matching Specificity & Backtracking] for more details). + +** *`hosts`*: _HostMatcher array_ (optional) ++ +Defines a set of hosts to match against the HTTP Host header. Each entry has the following properties: + +*** *`type`*: _string_ (mandatory) ++ +Specifies the type of expression for matching the host, which can be one of: + +**** `exact` to match the host exactly +**** `glob` to use a https://github.com/gobwas/glob[glob expression] which should be satisfied by the host of the incoming request (`.` is used as a delimiter, which means `*` will match anything until the next `.`). +**** `regex` to use a regular expression which should be satisfied by the host of the incoming request. + +*** *`value`*: _string_ (mandatory) ++ +The actual host expression based on the `type`. + +** *`scheme`*: _string_ (optional) ++ +The expected HTTP scheme. If not specified, both http and https are accepted. + +** *`methods`*: _string array_ (optional) ++ +Specifies the allowed HTTP methods (`GET`, `POST`, `PATCH`, etc). If not specified, all methods are allowed. 
To allow all methods except specific ones, use `ALL` and prefix the methods to exclude with `!`. For example: + -.Methods list which effectively expands to all HTTP methods -==== [source, yaml] ---- +# Methods list which effectively expands to all HTTP methods methods: - ALL ---- -==== + -.Methods list consisting of all HTTP methods without `TRACE` and `OPTIONS` -==== [source, yaml] ---- +# Methods list consisting of all HTTP methods without `TRACE` and `OPTIONS` methods: - ALL - "!TRACE" - "!OPTIONS" ---- -==== + +* *`allow_encoded_slashes`*: _string_ (optional) ++ +Controls how to handle URL-encoded slashes in request paths during matching and forwarding. Options include: + +** *`off`* - Reject requests with encoded slashes (`%2F`). This is the default behavior. +** *`on`* - Accept requests with encoded slashes, decoding them to `/`. +** *`no_decode`* - Accept requests with encoded slashes without touching them. + ++ +CAUTION: Handling URL-encoded slashes may differ across the proxies in front of heimdall, heimdall, and the upstream service. Accepting requests with encoded slashes could, depending on your rules, lead to https://cwe.mitre.org/data/definitions/436.html[Interpretation Conflict] vulnerabilities resulting in privilege escalations. * *`forward_to`*: _RequestForwarder_ (mandatory in Proxy operation mode) + -Defines where to forward the proxied request to. Used only when heimdall is operated in the Proxy operation mode and supports the following properties: +Specifies where to forward proxied requests when heimdall is operating in proxy mode. The following properties are supported: ** *`host`*: _string_ (mandatory) + -Host (and port) to be used for request forwarding. If no `rewrite` property (see below) is specified, all other parts, like scheme, path, etc. of the original url are preserved. E.g. if the original request is `\https://mydomain.com/api/v1/something?foo=bar&bar=baz` and the value of this property is set to `my-backend:8080`, the url used to forward the request to the upstream will be `\https://my-backend:8080/api/v1/something?foo=bar&bar=baz` +Host (and port) for forwarding the request. If no `rewrite` property (see below) is defined, the original URL's scheme, path, and other components are preserved. E.g. if the original request is `\https://mydomain.com/api/v1/something?foo=bar&bar=baz` and the value of this property is set to `my-backend:8080`, the URL used to forward the request to the upstream will be `\https://my-backend:8080/api/v1/something?foo=bar&bar=baz` + -NOTE: The `Host` header is not preserved while forwarding the request. If you need it to be set to the value from the original request, make use of the link:{{< relref "/docs/mechanisms/finalizers.adoc#_header" >}}[header finalizer] in your `execute` pipeline and set it accordingly. +NOTE: The `Host` header is not preserved when forwarding the request. To preserve it, make use of the link:{{< relref "/docs/mechanisms/finalizers.adoc#_header" >}}[header finalizer] in your `execute` pipeline and set it accordingly. ** *`rewrite`*: _OriginalURLRewriter_ (optional) + -Can be used to rewrite further parts of the original url before forwarding the request. If specified at least one of the following supported (middleware) properties must be specified: +Can be used to rewrite further parts of the original URL before forwarding the request.
If specified, at least one of the following supported (middleware) properties must be specified: *** *`scheme`*: _string_ (optional) + -If defined, heimdall will use the specified value for the url scheme part while forwarding the request to the upstream. +Specifies the URL scheme to use for forwarding the request. *** *`strip_path_prefix`*: _string_ (optional) + -If defined, heimdall will strip the specified prefix from the original url path. E.g. if the path of the original url is `/api/v1/something` and the value of this property is set to `/api/v1`, the request to the upstream will have the url path set to `/something`. +This middleware strips the specified prefix from the original URL path before forwarding. E.g. if the path of the original url is `/api/v1/something` and the value of this property is set to `/api/v1`, the request to the upstream will have the url path set to `/something`. *** *`add_path_prefix`*: _string_ (optional) + -This middleware is applied after the execution of the `strip_path_prefix` middleware described above. If defined, heimdall will add the specified path prefix to the path used to forward the request to the upstream service. E.g. if the path of the original url or the pass resulting after the application of the `strip_path_prefix` middleware is `/something` and the value of this property is set to `/my-backend`, the request to the upstream will have the url path set to `/my-backend/something`. +This middleware is applied after the execution of the `strip_path_prefix` middleware described above. If specified, heimdall will add the specified path prefix to the path used to forward the request to the upstream service. E.g. if the path of the original URL or the path resulting after the application of the `strip_path_prefix` middleware is `/something` and the value of this property is set to `/my-backend`, the request to the upstream will have the URL path set to `/my-backend/something`. *** *`strip_query_parameters`*: _string array_ (optional) + -If defined, heimdall will remove the specified query parameters from the original url before forwarding the request to the upstream service. E.g. if the query parameters part of the original url is `foo=bar&bar=baz` and the value of this property is set to `["foo"]`, the query part of the request to the upstream will be set to `bar=baz` +Removes specified query parameters from the original URL before forwarding. E.g. if the query parameters part of the original URL is `foo=bar&bar=baz` and the value of this property is set to `["foo"]`, the query part of the request to the upstream will be set to `bar=baz` * *`execute`*: _link:{{< relref "#_authentication_authorization_pipeline" >}}[Authentication & Authorization Pipeline]_ (mandatory) + -Which mechanisms to use to authenticate, authorize, contextualize (enrich) and finalize the pipeline. +Specifies the mechanisms used for authentication, authorization, contextualization, and finalization. * *`on_error`*: _link:{{< relref "#_error_pipeline" >}}[Error Pipeline]_ (optional) + -Which error handler mechanisms to use if any of the mechanisms, defined in the `execute` property, fails. This property is optional only, if a link:{{< relref "default_rule.adoc" >}}[default rule] has been configured and contains an `on_error` definition. +Specifies error handling mechanisms if the pipeline defined by the `execute` property fails. Defaults to the error pipeline defined in the link:{{< relref "default_rule.adoc" >}}[default rule] if not specified. 
.An example rule ==== @@ -131,16 +152,24 @@ Which error handler mechanisms to use if any of the mechanisms, defined in the ` ---- id: rule:foo:bar match: - url: http://my-service.local/<**> - strategy: glob + routes: + - path: /some/:identifier/followed/by/** + path_params: + - name: identifier + type: glob + value: "[a-z]" + scheme: http + hosts: + - type: exact + value: my-service.local + methods: + - GET + - POST forward_to: host: backend-a:8080 rewrite: scheme: http strip_path_prefix: /api/v1 -methods: - - GET - - POST execute: # the following just demonstrates how to make use of specific # mechanisms in the simplest possible form @@ -162,6 +191,116 @@ on_error: ---- ==== +== Path Expression + +Path expressions are used to match the incoming requests. When specifying these, you can make use of two types of wildcards: + +* free wildcard, which can be defined using `*` and +* single wildcard, which can be defined using `:` + +Both can be named and unnamed, with named wildcards allowing accessing of the matched segments in the pipeline of the rule using the defined name as a key on the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_url_captures" >}}[`Request.URL.Captures`] object. Unnamed free wildcard is defined as `\**` and unnamed single wildcard is defined as `:*`. A named wildcard uses some identifier instead of the `*`, so like `*name` for free wildcard and `:name` for single wildcard. + +The value of the path segment, respectively path segments available via the wildcard name is decoded. E.g. if you define the to be matched path in a rule as `/file/:name`, and the actual path of the request is `/file/%5Bid%5D`, you'll get `[id]` when accessing the captured path segment via the `name` key. Not every path encoded value is decoded though. Decoding of encoded slashes happens only if `allow_encoded_slashes` was set to `on`. + +There are some simple rules, which must be followed while using wildcards: + +- One can use as many single wildcards, as needed in any segment +- A segment must start with `:` or `*` to define a wildcard +- No segments are allowed after a free (named) wildcard +- If a regular segment must start with `:` or `*`, but should not be considered as a wildcard, it must be escaped with `\`. + +Here some path examples: + +- `/apples/and/bananas` - Matches exactly the given path +- `/apples/and/:something` - Matches `/apples/and/bananas`, `/apples/and/oranges` and alike, but not `/apples/and/bananas/andmore` or `/apples/or/bananas`. Since a named single wildcard is used, the actual value of the path segment matched by `:something` can be accessed in the rule pipeline using `something` as a key. +- `/apples/:junction/:something` - Similar to above. But will also match `/apples/or/bananas` in addition to `/apples/and/bananas` and `/apples/and/oranges`. +- `/apples/and/some:thing` - Matches exactly `/apples/and/some:thing` +- `/apples/and/some*\*` - Matches exactly `/apples/and/some**` +- `/apples/**` - Matches any path starting with `/apples/`, like `/apples/and/bananas` but not `/apples/`. 
+- `/apples/*remainingpath` - Same as above, but uses a named free wildcard +- `/apples/**/bananas` - Is invalid, as there is a path segment after a free wildcard +- `/apples/\*remainingpath` - Matches exactly `/apples/*remainingpath` + +Here is an example demonstrating the usage of a single named wildcard: + +[source, yaml] +---- +id: rule:1 +match: + routes: + - path: /files/:uuid/delete + hosts: + - type: exact + value: hosty.mchostface + execute: + - authorizer: openfga_check + config: + payload: | + { + "user": "{{ .Subject.ID }}", + "relation": "can_delete", + "object": "file:{{ .Request.URL.Captures.uuid }}" + } +---- + +== Rule Matching Specificity & Backtracking + +The implementation ensures that rules with more specific path expressions are matched first, regardless of their placement within a rule set. In fact, more specific rules are prioritized even when they are defined across different rule sets. + +When a path expression matches a request, any additional conditions specified in the rule's matching criteria are evaluated. Only if these conditions are met will the rule's pipeline be executed. + +CAUTION: If multiple rules share the same path expression and all their additional conditions match, the first matching rule will be applied. The matching order is determined by the sequence of rules in the rule set. + +If no rule is matched, and backtracking is enabled, the process will backtrack to attempt a match with the next less specific rule. Backtracking will stop when: + +* a less specific rule successfully matches (including evaluation of any additional conditions), or +* a less specific rule fails to match and does not permit backtracking. + +The following examples illustrate these principles: + +Imagine the following set of rules + +[source, yaml] +---- +id: rule1 +match: + routes: + - path: /files/** +execute: + - +---- + +[source, yaml] +---- +id: rule2 +match: + routes: + - path: /files/:team/:name + path_params: + - name: team + type: regex + value: "(team1|team2)" + backtracking_enabled: true +execute: + - +---- + +[source, yaml] +---- +id: rule3 +match: + routes: + - path: /files/team3/:name +execute: + - +---- + +The request to `/files/team1/document.pdf` will be matched by `rule2`, as it is more specific than `rule1`. Consequently, the pipeline for `rule2` will be executed. + +The request to `/files/team3/document.pdf` will be matched by `rule3`, which is more specific than both `rule1` and `rule2`. As a result, the corresponding pipeline will be executed. + +However, even though the request to `/files/team4/document.pdf` matches the path defined in `rule2`, the regular expression `(team1|team2)` used in the `path_params` for the `team` parameter will not match. Since `backtracking_enabled` is set to `true`, backtracking will occur, and the request will be matched by `rule1`, with its pipeline then being executed. + == Authentication & Authorization Pipeline As described in the link:{{< relref "/docs/concepts/pipelines.adoc" >}}[Concepts] section, this pipeline consists of mechanisms, previously configured in the link:{{< relref "/docs/mechanisms/catalogue.adoc" >}}[mechanisms catalogue], organized in stages as described below, with authentication stage (consisting of link:{{< relref "/docs/mechanisms/authenticators.adoc" >}}[authenticators]) being mandatory. 
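For illustration only (the mechanism ids are made up), an `execute` pipeline using all stages could look like this:

[source, yaml]
----
execute:
  # authentication stage (mandatory); multiple authenticators act as fallbacks for each other
  - authenticator: session_cookie_authn
  - authenticator: access_token_authn
  # contextualization stage (optional)
  - contextualizer: subscription_info
  # authorization stage (optional)
  - authorizer: allowed_roles_authz
  # finalization stage (optional)
  - finalizer: create_jwt
----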
@@ -226,21 +365,23 @@ This example uses == Error Pipeline -Compared to the link:{{< relref "#_authentication_authorization_pipeline" >}}[Authentication & Authorization Pipeline], the error pipeline is pretty simple. It is also a list of mechanism references, but all referenced types are link:{{< relref "/docs/mechanisms/error_handlers.adoc" >}}[error handler types]. Thus, each entry in this list must have `error_handler` as key, followed by the `ìd` of the required error handler, previously defined in the link:{{< relref "/docs/mechanisms/catalogue.adoc" >}}[mechanism catalogue]. Error handlers are always executed as fallbacks. So, if the condition of the first error handler does not match, second is selected, if its condition matches, it is executed, otherwise the next one is selected, etc. If none of the conditions of the defined error handlers match, the link:{{< relref "/docs/mechanisms/error_handlers.adoc#_default" >}}[default error handler] is executed. +Compared to the link:{{< relref "#_authentication_authorization_pipeline" >}}[Authentication & Authorization Pipeline], the error pipeline is pretty simple. It is also a list of mechanism references, but all referenced types are link:{{< relref "/docs/mechanisms/error_handlers.adoc" >}}[error handler types]. Thus, each entry in this list must have `error_handler` as key, followed by the `id` of the required error handler previously defined in the link:{{< relref "/docs/mechanisms/catalogue.adoc" >}}[mechanism catalogue]. + +Execution of the error handlers should happen conditionally by making use of a https://github.com/google/cel-spec[CEL] expression in an `if` clause, which has access to the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_error" >}}[`Error`] and the link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_request" >}}[`Request`] objects. Otherwise, the first error handler will be executed and the error pipeline will exit. -As with the authentication & authorization pipeline, partial reconfiguration of the used mechanisms is possible if supported by the corresponding type. Same is true for overrides of the `if` conditions. The overrides are always local to the given rule as well. +As with the authentication & authorization pipeline, partial reconfiguration of the used mechanisms is possible if supported by the corresponding type. The overrides are always local to the given rule as well. .Two error handlers ==== [source, yaml] ---- - error_handler: foo -- error_handler: bar if: # rule specific condition +- error_handler: bar config: # rule specific config ---- ==== -This example uses two error handlers, named `foo` and `bar`. `bar` will only be selected by heimdall if `foo` 's error condition (defined in the link:{{< relref "/docs/mechanisms/catalogue.adoc" >}}[mechanism catalogue]) does not match. `bar` does also override the error condition as required by the given rule. +This example uses two error handlers, named `foo` and `bar`. `bar` will only be executed if `foo` 's error condition does not match. `bar` also overrides the error handler configuration as required by the given rule. diff --git a/docs/content/docs/rules/rule_sets.adoc b/docs/content/docs/rules/rule_sets.adoc index faa0383ae..791386eee 100644 --- a/docs/content/docs/rules/rule_sets.adoc +++ b/docs/content/docs/rules/rule_sets.adoc @@ -27,7 +27,7 @@ Available properties are: * *`version`*: _string_ (mandatory) + -The version schema of the rule set. The current version of heimdall supports only the version `1alpha3`.
+The version schema of the rule set. The current version of heimdall supports only the version `1alpha4`. * *`name`*: _string_ (optional) + @@ -44,19 +44,29 @@ An imaginary rule set file defining two rules could look like shown below. [source, yaml] ---- -version: "1alpha3" +version: "1alpha4" name: my-rule-set rules: - id: rule:1 match: - url: https://my-service1.local/<**> - methods: [ "GET" ] + routes: + - path: /** + methods: [ "GET" ] + scheme: https + hosts: + - type: exact + value: my-service1.local execute: - authorizer: foobar - id: rule:2 match: - url: https://my-service2.local/<**> - methods: [ "GET" ] + routes: + - path: /** + scheme: https + hosts: + - type: exact + value: my-service2.local + methods: [ "GET" ] execute: - authorizer: barfoo ---- @@ -70,7 +80,7 @@ If you operate heimdall in kubernetes, most probably, you would like to make use * *`apiVersion`*: _string_ (mandatory) + -The api version of the custom resource definition, the given rule set is based on. The current version of heimdall supports only `heimdall.dadrus.github.com/v1alpha3` version. +The api version of the custom resource definition, the given rule set is based on. The current version of heimdall supports only `heimdall.dadrus.github.com/v1alpha4` version. * *`kind`*: _string_ (mandatory) + @@ -82,7 +92,7 @@ The metadata, you would like to assign to the rule set, like the name of the rul * *`spec`*: _map_ (mandatory) + -The actual specification of the rule set. Following attributes are possible, respectively required: +The actual specification of the rule set. Following properties are possible, respectively required: ** *`authClassName`*: _string_ (optional) + @@ -108,7 +118,7 @@ $ kubectl apply -f https://raw.githubusercontent.com/dadrus/heimdall/main/charts ==== [source, yaml] ---- -apiVersion: heimdall.dadrus.github.com/v1alpha3 +apiVersion: heimdall.dadrus.github.com/v1alpha4 kind: RuleSet metadata: name: "" @@ -117,7 +127,12 @@ spec: rules: - id: "" match: - url: http://127.0.0.1:9090/foo/<**> + routes: + - path: /foo/** + scheme: https + hosts: + - type: exact + value: 127.0.0.1:9090 execute: - authenticator: foo - authorizer: bar @@ -126,7 +141,7 @@ spec: === Resource Status -In addition to configuration attributes described above, a `RuleSet` resource has a `status` stanza, which provides information about the usage status as soon as a `RuleSet` has been loaded by at least one heimdall instance. +In addition to configuration properties described above, a `RuleSet` resource has a `status` stanza, which provides information about the usage status as soon as a `RuleSet` has been loaded by at least one heimdall instance. By making use of `kubectl get -n rulesets.heimdall.dadrus.github.com` you'll get an overview of deployed `RuleSet` resources in a particular namespace, like e.g. shown below diff --git a/docs/content/guides/authn/_index.adoc b/docs/content/guides/authn/_index.adoc new file mode 100644 index 000000000..fa5ea38e2 --- /dev/null +++ b/docs/content/guides/authn/_index.adoc @@ -0,0 +1,9 @@ +--- +title: "Authentication Protocols & Services" +date: 2024-09-13T12:54:41+02:00 +draft: false +menu: + guides: + weight: 30 +description: The guides in this section cover various authentication protocols and explain how to integrate them with heimdall for first-party authentication. 
+--- \ No newline at end of file diff --git a/docs/content/guides/authn/oidc_first_party_auth.adoc b/docs/content/guides/authn/oidc_first_party_auth.adoc new file mode 100644 index 000000000..a15603a4f --- /dev/null +++ b/docs/content/guides/authn/oidc_first_party_auth.adoc @@ -0,0 +1,407 @@ +--- +title: "First-Party Authentication with OpenID Connect" +date: 2022-11-04T09:00:41+02:00 +draft: false +weight: 31 +menu: + guides: + parent: "Authentication Protocols & Services" +description: This guide will walk you through the process of integrating heimdall with an OpenID Connect provider to implement first-party authentication. +--- + +:toc: + +By the end of this guide, you'll have a functional setup where heimdall uses https://www.keycloak.org/[Keycloak] to authenticate users and route requests based on their authentication status and roles for role-based access control. + +Although this guide uses Keycloak as identity provider (IDP), you can achieve the same results with https://zitadel.com[Zitadel], https://github.com/malach-it/boruta-server[Boruta], or any other OpenID Connect-compatible IDP. + +== Overview + +In this guide, we'll set up a Docker Compose environment where heimdall secures services and controls access to specific endpoints: + +* `/` - This endpoint is open to everyone. +* `/user` - Accessible only to authenticated users. +* `/admin` - Accessible only to users with the `admin` role. + +There are also some further endpoints, which you'll learn about during the setup. + +Technically, the setup includes: + +* https://hub.docker.com/r/containous/whoami/[containous/whoami] - A service that echoes back everything it receives, simulating our main service with endpoints mentioned above. +* https://www.keycloak.org/[Keycloak] - Our identity provider. +* https://oauth2-proxy.github.io/oauth2-proxy/[OAuth2-Proxy] - Handles the Authorization Code Grant flow for the actual user login. +* heimdall - Manages everything to enforce the requirements outlined above. + +== Prerequisites + +To be able to follow this guide you need the following tools installed locally: + +* https://docs.docker.com/install/[Docker], +* https://docs.docker.com/compose/install/[docker-compose], and +* a text editor of your choice. + +== Configure the Base Setup + +. Create a directory for the configuration files we’ll be using (referred to as the root directory in this guide). Inside this root directory, create two additional directories named `rules` and `initdb`. The former will be used for heimdall rules and the latter for DB initialization scripts. + +. 
Create a config file for heimdall named `heimdall-config.yaml` with the following contents in the root directory: ++ +[source, yaml] +---- +log: # <1> + level: debug + +tracing: + enabled: false + +metrics: + enabled: false + +mechanisms: # <2> + authenticators: + - id: deny_all # <3> + type: unauthorized + - id: anon # <4> + type: anonymous + - id: auth # <5> + type: generic + config: + identity_info_endpoint: http://oauth2-proxy:4180/oauth2/userinfo + authentication_data_source: + - cookie: SESSION + forward_cookies: + - SESSION + subject: + id: "user" + + authorizers: + - id: cel # <6> + type: cel + config: + expressions: + - expression: "true == false" + + finalizers: + - id: create_jwt # <7> + type: jwt + config: + signer: + key_store: + path: /etc/heimdall/signer.pem + claims: | + {{- dict "attrs" .Subject.Attributes | toJson -}} + - id: noop # <8> + type: noop + + error_handlers: # <9> + - id: redirect_to_idp + type: redirect + config: + to: http://127.0.0.1:9090/oauth2/start?rd={{ .Request.URL | urlenc }} + - id: redirect_to_error_page + type: redirect + config: + to: https://www.google.com/search?q=access+denied&udm=2 + +default_rule: # <10> + execute: + - authenticator: deny_all + - finalizer: create_jwt + on_error: + - error_handler: redirect_to_error_page + if: | + type(Error) in [authorization_error, authentication_error] && + Request.Header("Accept").contains("text/html") + +providers: # <11> + file_system: + src: /etc/heimdall/rules + watch: true +---- +<1> By default, heimdall emits logs on `error` level, but to better understand its operations, we’re setting the log level to `debug`. This way, you'll see not only the results of rule executions (which is what you would see if we set the log level to `info`), but also detailed information about what’s happening within each rule. We’re also disabling tracing and metrics collection to avoid errors related to the missing OTEL agent, which is used by default. For more details on logging and other observability options, see the link:{{< relref "/docs/operations/observability.adoc#_logging" >}}[Observability] chapter. +<2> We define our link:{{< relref "/docs/mechanisms/catalogue.adoc" >}}[catalogue of mechanisms] for use in our link:{{< relref "/docs/rules/default_rule.adoc" >}}[default rule] and link:{{< relref "/docs/rules/regular_rule.adoc" >}}[upstream service-specific rules]. +<3> These lines define the `unauthorized` authenticator, named `deny_all`, which rejects all requests. +<4> These lines define the `anonymous` authenticator, named `anon`, which allows all requests and creates a subject with the ID set to `anonymous`. More information about subjects and other objects can be found link:{{< relref "/docs/mechanisms/evaluation_objects.adoc#_subject" >}}[here]. +<5> These and the following lines set up the `generic` authenticator, named `auth`. This configuration checks if a request includes a `Cookie` named `SESSION`, and then sends it to the `\http://oauth2-proxy:4180/oauth2/userinfo` endpoint to get user information. If successful, heimdall extracts the user identifier from the `user` property in the response. If there’s an error (e.g. the `SESSION` cookie is not present or the response from the OAuth2-Proxy contains an error), an authentication error is triggered. +<6> These lines define a `cel` authorizer that is configured to always fail. We’ll improve this in our upstream-specific rule. +<7> These lines define the `jwt` finalizer. 
It creates a JWT from the subject object with standard claims, setting the `sub` claim to the subject’s ID. The key for signing the JWT comes from a key store we’ll configure later. +<8> These two lines define the `noop` finalizer, which we’ll use for public endpoints. +<9> Here, we set up two `redirect` error handlers: one redirects to the `/oauth2/start` endpoint with a deep link to the current URL, and the other redirects to Google with the search query "access denied". +<10> With all mechanisms defined, we configure our first rule - the link:{{< relref "/docs/rules/default_rule.adoc" >}}[default rule]. This rule applies if no other rules match the request and serves as a link:{{< relref "/docs/concepts/rules.adoc#_default_rule_inheritance" >}}[base] for defining regular (upstream service-specific) rules as well. It sets up a default link:{{< relref "/docs/concepts/pipelines.adoc#_authentication_authorization_pipeline" >}}[authentication & authorization pipeline] that rejects all requests using the `deny_all` authenticator. This rejection triggers the `redirect_to_error_page` error handler. If a regular rule overrides this authenticator, a JWT is created using the `jwt` finalizer. +<11> The last few lines configure the link:{{< relref "/docs/rules/providers.adoc#_filesystem" >}}[`file_system`] provider, which loads regular rules from the file system and watches for changes. This allows you to modify the rules while testing. + +. Create a file named `signer.pem` and add the following content to it. This file should also be placed in the root directory and will act as our key store, containing the private key referenced in the configuration above. ++ +[source, yaml] +---- +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDALv/dRp6zvm6nmozmB/21viwFCUGBoisHz0v8LSRXGiM5aDywLFmMy +1jPnw29tz36gBwYFK4EEACKhZANiAAQgZkUS7PCh5tEXXvZk0LDQ4Xn4LSK+vKkI +zlCZl+oMgud8gacf4uG5ERgju1xdUyfewsXlwepTnWuwhXM7GdnwY5GOxZTwGn3X +XVwR/5tokqFVrFxt/5c1x7VdccF4nNM= +-----END EC PRIVATE KEY----- +---- ++ +WARNING: Do not use it for purposes beyond this tutorial! + +. Next, we’ll create rules for our main service - the one exposing `/`, `/user` and the `/admin` endpoints. To do this, create a file named `upstream-rules.yaml` in the `rules` directory with the following content: ++ +[source, yaml] +---- +version: "1alpha4" +rules: +- id: upstream:public # <1> + match: + routes: + - path: / + - path: /favicon.ico + forward_to: + host: upstream:8081 + execute: + - authenticator: anon + - finalizer: noop + +- id: upstream:protected # <2> + match: + routes: + - path: /user + - path: /admin + forward_to: + host: upstream:8081 + execute: + - authenticator: auth + - authorizer: cel + if: Request.URL.Path == '/admin' + config: + expressions: + - expression: | + has(Subject.Attributes.groups) && + "role:admin" in Subject.Attributes.groups + message: User is not admin + on_error: + - error_handler: redirect_to_idp + if: | + type(Error) == authentication_error && + Request.Header("Accept").contains("text/html") + - error_handler: redirect_to_error_page + if: | + type(Error) == authorization_error && + Request.Header("Accept").contains("text/html") +---- +<1> This first rule is for the `/` endpoint. It instructs heimdall to pass requests to this endpoint directly through to our upstream service. +<2> The second rule ensures that the `/user` endpoint is only accessible to authenticated users, while the `/admin` endpoint is only accessible to users with the `admin` role configured in our IDP. + +. 
Next, we’ll create a rule for the OAuth2-Proxy, placed behind heimdall and publicly exposing only certain endpoints. Create a new file named `oauth2-proxy-rules.yaml` in the `rules` directory with the following content: ++ +[source, yaml] +---- +version: "1alpha4" +rules: +- id: oauth2-proxy:public + match: + routes: + - path: /oauth2/start + - path: /oauth2/callback + forward_to: + host: oauth2-proxy:4180 + execute: + - authenticator: anon + - finalizer: noop +---- + +. Next, we’ll create a database initialization script to set up a database for Keycloak. In the `initdb` directory, create a file named `initdb.sh` with the following content, and make it executable by running `chmod +x initdb.sh`: ++ +[source, bash] +---- +#!/bin/bash +set -e + +psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL + CREATE USER keycloak WITH PASSWORD 'keycloak'; + CREATE DATABASE keycloak OWNER keycloak; +EOSQL + +---- + +. Now, let’s bring everything together using a Docker Compose file. Create a file named `docker-compose.yaml` in the root directory with the following content: ++ +[source, yaml] +---- +version: '3.7' + +services: + heimdall: # <1> + image: dadrus/heimdall:dev + ports: + - "9090:4455" + command: -c /etc/heimdall/config.yaml serve proxy + volumes: + - ./heimdall-config.yaml:/etc/heimdall/config.yaml:ro + - ./rules:/etc/heimdall/rules:ro + - ./signer.pem:/etc/heimdall/signer.pem:ro + + upstream: # <2> + image: containous/whoami:latest + command: --port=8081 + + oauth2-proxy: # <3> + depends_on: + - keycloak + image: quay.io/oauth2-proxy/oauth2-proxy:v7.6.0-amd64 + command: + - --http-address + - 0.0.0.0:4180 + environment: + OAUTH2_PROXY_CLIENT_ID: placeholder # <4> + OAUTH2_PROXY_CLIENT_SECRET: placeholder + OAUTH2_PROXY_REDIRECT_URL: http://127.0.0.1:9090/oauth2/callback # <5> + OAUTH2_PROXY_PROVIDER: keycloak-oidc + OAUTH2_PROXY_SKIP_PROVIDER_BUTTON: true + OAUTH2_PROXY_COOKIE_SECRET: VerySecure!!!!!! + OAUTH2_PROXY_COOKIE_NAME: SESSION + OAUTH2_PROXY_WHITELIST_DOMAINS: 127.0.0.1:9090 + OAUTH2_PROXY_OIDC_ISSUER_URL: http://keycloak:8080/realms/test # <6> + OAUTH2_PROXY_INSECURE_OIDC_ALLOW_UNVERIFIED_EMAIL: true # <7> + OAUTH2_PROXY_EMAIL_DOMAINS: '*' + OAUTH2_PROXY_OIDC_EXTRA_AUDIENCES: account # <8> + OAUTH2_PROXY_LOGIN_URL: http://127.0.0.1:8080/realms/test/protocol/openid-connect/auth # <9> + OAUTH2_PROXY_OIDC_JWKS_URL: http://keycloak:8080/realms/test/protocol/openid-connect/certs + OAUTH2_PROXY_REDEEM_URL: http://keycloak:8080/realms/test/protocol/openid-connect/token + OAUTH2_PROXY_INSECURE_OIDC_SKIP_ISSUER_VERIFICATION: true + OAUTH2_PROXY_SKIP_OIDC_DISCOVERY: true + + keycloak: # <10> + image: quay.io/keycloak/keycloak:25.0.4 + command: [ "start-dev", "--http-port", "8080" ] + ports: + - "8080:8080" + environment: + KC_HOSTNAME: 127.0.0.1 + KC_HOSTNAME_PORT: 8080 + KC_HOSTNAME_STRICT_BACKCHANNEL: "true" + KEYCLOAK_ADMIN: admin + KEYCLOAK_ADMIN_PASSWORD: admin + KC_HEALTH_ENABLED: "true" + KC_LOG_LEVEL: info + KC_DB_URL_HOST: postgresql + KC_DB: postgres + KC_DB_USERNAME: keycloak + KC_DB_PASSWORD: keycloak + depends_on: + - postgresql + + postgresql: # <11> + image: postgres:13.11 + volumes: + - type: volume + source: postgres-db + target: /var/lib/postgresql/data + read_only: false + - ./initdb:/docker-entrypoint-initdb.d + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + +volumes: + postgres-db: +---- +<1> Here, we configure heimdall to use the previously defined configuration. +<2> This section sets up the main service that we’ll be protecting. 
+<3> This section defines the OAuth2-Proxy configuration, which heimdall will use to handle the Authorization Code Grant flow and manage the authentication session with a `SESSION` cookie. +<4> The client ID and client secret values are placeholders. We will configure these in Keycloak. +<5> Since the redirect URI, exposed as `/oauth2/callback`, is behind heimdall, we use the publicly accessible endpoint here. +<6> The URL of the issuer that OAuth2-Proxy will use. We’ll create a corresponding realm in Keycloak to match this configuration. +<7> The following two lines are required as we don't want Keycloak to verify the email addresses of the users we'll be creating, and we want to allow any email domain. +<8> To avoid creating a mapper to let Keycloak set a proper `aud` claim value, we allow the usage of the `account` audience set by Keycloak by default. +<9> These lines are only required as Keycloak is part of our docker compose setup, and we have to use different domain names while communicating with it. +<10> This section sets up Keycloak. +<11> Finally, this section configures our database. + +== Create a Realm and a Client in Keycloak + +With the above configuration in place, follow these steps to start Keycloak and the database, initialize both, and create the OAuth2-Proxy client: + +. In the root directory, run `docker-compose up postgresql keycloak`. Wait until the database is initialized and Keycloak has started. +. Open your browser and go to `\http://127.0.0.1:8080`. Log in using the admin credentials (both the username and password are set to `admin` in our setup). +. Create a Realm named `test`. For detailed instructions, refer to the Keycloak documentation on https://www.keycloak.org/docs/latest/server_admin/index.html#proc-creating-a-realm_server_administration_guide[creating a realm]. +. Within the `test` realm, create an OpenID Client. Follow the Keycloak documentation on https://www.keycloak.org/docs/latest/server_admin/index.html#proc-creating-oidc-client_server_administration_guide[creating an OIDC client]. Enable "Client authentication" and "Standard Flow", set `\http://127.0.0.1:9090/oauth2/callback` as the "Valid Redirect URI" and `\http://127.0.0.1:9090/` as the "Home URL" and "Valid post logout redirect URIs", and note the "Client ID" and "Client Secret" (the latter can be found under the "Credentials" tab after completing the client creation wizard); we will use these to complete the OAuth2-Proxy configuration in our Docker Compose file. +. Stop the docker compose setup with `CTRL-C`. + +== Update OAuth2-Proxy Configuration + +We can now finalize the configuration and use the proper client ID and secret for OAuth2-Proxy. + +. Update the `OAUTH2_PROXY_CLIENT_ID` and `OAUTH2_PROXY_CLIENT_SECRET` in the configuration of the OAuth2-Proxy in the docker compose file with the "Client ID" and "Client Secret" values from Keycloak. + +== Use the Setup + +We now have almost everything set up. The final step is to create a few users, including at least one with the `admin` role assigned. + +. In the root directory, run `docker-compose up`. Wait until all services are up and running. +. Open your browser and navigate to `\http://127.0.0.1:8080`. Log in using the admin credentials (both username and password are set to `admin`). +. Select the `test` realm and create an `admin` group with a role named `admin` assigned to it.
For guidance, refer to the Keycloak documentation on creating https://www.keycloak.org/docs/latest/server_admin/index.html#proc-managing-groups_server_administration_guide[Groups] and https://www.keycloak.org/docs/latest/server_admin/index.html#proc-creating-realm-roles_server_administration_guide[Roles]. +. Create several users following the Keycloak documentation on https://www.keycloak.org/docs/latest/server_admin/index.html#proc-creating-user_server_administration_guide[managing users], and assign some of them to the `admin` group. Disable email verification during user creation to avoid sending verification emails to potentially non-existent addresses. + +Now, let's test the setup: + +. Navigate to `\http://127.0.0.1:9090/`. You should see some text similar to the one shown below. This text is the response from our upstream service, which echoes back everything the browser sends in its request. ++ +[source, text] +---- +Hostname: 39f1815dd8ac +IP: 127.0.0.1 +IP: ::1 +IP: 172.31.0.3 +RemoteAddr: 172.31.0.2:37908 +GET / HTTP/1.1 +Host: upstream:8081 +User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:130.0) Gecko/20100101 Firefox/130.0 +Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/png,image/svg+xml,*/*;q=0.8 +Accept-Encoding: gzip, deflate, br, zstd +Accept-Language: de,en-US;q=0.7,en;q=0.3 +Dnt: 1 +Forwarded: for=172.31.0.1;host=127.0.0.1:9090;proto=http +Priority: u=0, i +Sec-Fetch-Dest: document +Sec-Fetch-Mode: navigate +Sec-Fetch-Site: none +Sec-Fetch-User: ?1 +Upgrade-Insecure-Requests: 1 +X-Trp-Catalog: de +---- +. Attempt to access the `\http://127.0.0.1:9090/user` endpoint. You should be redirected to the Keycloak login page. Log in with any of the users you configured. After logging in, you should be redirected back to `\http://127.0.0.1:9090/user`, where you’ll see some text similar to the one shown below. This indicates that the request hit the `/user` endpoint of our upstream service. You should also see a JWT token in the `Authorization` header, which is the result of the JWT finalizer we configured. 
++ +[source, text] +---- +Hostname: 39f1815dd8ac +IP: 127.0.0.1 +IP: ::1 +IP: 172.31.0.3 +RemoteAddr: 172.31.0.2:37908 +GET /user HTTP/1.1 +Host: upstream:8081 +User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:130.0) Gecko/20100101 Firefox/130.0 +Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/png,image/svg+xml,*/*;q=0.8 +Accept-Encoding: gzip, deflate, br, zstd +Accept-Language: de,en-US;q=0.7,en;q=0.3 +Authorization: Bearer eyJhbGciOiJFUzM4NCIsImtpZCI6ImIzNDA3N2ZlNWI5NDczYzBjMmY3NDNmYWQ0MmY3ZDU0YWM3ZTFkN2EiLCJ0eXAiOiJKV1QifQ.eyJhdHRycyI6eyJlbWFpbCI6InRlc3QxQGV4YW1wbGUuY29tIiwiZ3JvdXBzIjpbInJvbGU6ZGVmYXVsdC1yb2xlcy10ZXN0Iiwicm9sZTpvZmZsaW5lX2FjY2VzcyIsInJvbGU6YWRtaW4iLCJyb2xlOnVtYV9hdXRob3JpemF0aW9uIiwicm9sZTphY2NvdW50Om1hbmFnZS1hY2NvdW50Iiwicm9sZTphY2NvdW50Om1hbmFnZS1hY2NvdW50LWxpbmtzIiwicm9sZTphY2NvdW50OnZpZXctcHJvZmlsZSJdLCJwcmVmZXJyZWRVc2VybmFtZSI6InRlc3QxIiwidXNlciI6IjQ5ZWE3Mjk3LWI0NzgtNDcxNC05NjM2LWQ5ZTk3ZmVkZmNhOSJ9LCJleHAiOjE3MjY0NjkyMTAsImlhdCI6MTcyNjQ2ODkxMCwiaXNzIjoiaGVpbWRhbGwiLCJqdGkiOiJkODg3Y2Q3MC1iYzhlLTQ2MTItODYzNS1lYTYwYjU1ZmU3MzciLCJuYmYiOjE3MjY0Njg5MTAsInN1YiI6IjQ5ZWE3Mjk3LWI0NzgtNDcxNC05NjM2LWQ5ZTk3ZmVkZmNhOSJ9.JLS__gH0wEHwB07DV9Rcrm9mo1xfXpqHoC8pbZ523KHV7QO3n2jrauiB4fVggB5DPe4tTUrp8X1e4nePXPniJyACyC7gmoBX5PJTbUPlalsw0WKOfYOcYXjwJDakId5r +Cookie: SESSION=S77dk6NGQyyreWQ1enWRSCSP...wBsNTlidPgTBahs= +Dnt: 1 +Forwarded: for=172.31.0.1;host=127.0.0.1:9090;proto=http +Priority: u=0, i +Sec-Fetch-Dest: document +Sec-Fetch-Mode: navigate +Sec-Fetch-Site: none +Sec-Fetch-User: ?1 +Upgrade-Insecure-Requests: 1 +X-Trp-Catalog: de +---- +. Try accessing the `\http://127.0.0.1:9090/admin` endpoint with both a user not in the `admin` group and a user from the `admin` group. The user not in the `admin` group should see the "access denied" page, while the user from the `admin` group should be able to access the endpoint and see the echoed response from our upstream service. ++ +NOTE: To "logout" the user, just delete the cookies for `\http://127.0.0.1:9090` using the Web-Developer Tools of your browser. +. Attempts to access any not exposed endpoints, like `\http://127.0.0.1:9090/foo` will always result in the "access denied" page. + +== Cleanup + +Just stop the environment with `CTRL-C` and delete the created files. If you started docker compose in the background, tear the environment down with `docker-compose down`. + diff --git a/docs/content/guides/authz/opa.adoc b/docs/content/guides/authz/opa.adoc index c45c3e75d..a115fd099 100644 --- a/docs/content/guides/authz/opa.adoc +++ b/docs/content/guides/authz/opa.adoc @@ -103,5 +103,5 @@ config: { "input": { "user": {{ quote .Subject.ID }} } } ---- -Upon successful execution of the corresponding request, the response from the OPA endpoint will be stored in the `Subject.Attributes["billing_contextualizer"]` field. That way, you can use that information in a link:{{< relref "/docs/mechanisms/finalizers.adoc" >}}[Finalizer] to forward the group membership to the billing service API. +Upon successful execution of the corresponding request, the response from the OPA endpoint will be stored in the `Outputs["billing_contextualizer"]` field. That way, you can use that information in a link:{{< relref "/docs/mechanisms/finalizers.adoc" >}}[Finalizer] to forward the group membership to the billing service API. 
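For illustration only, a minimal sketch of such a finalizer is shown below. It is not part of the guide above: the finalizer id `forward_billing_groups`, the header name, and the assumption that the OPA policy exposes the group membership under `result` are made up for this example - adjust them to your policy's actual response structure.

[source, yaml]
----
finalizers:
  - id: forward_billing_groups  # hypothetical id, not defined in the guide above
    type: header
    config:
      headers:
        # forwards the (assumed) group membership from the stored OPA response to the upstream
        X-Billing-Groups: '{{ toJson .Outputs.billing_contextualizer.result }}'
----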
diff --git a/docs/content/guides/authz/openfga.adoc b/docs/content/guides/authz/openfga.adoc index beb22c663..8573aef67 100644 --- a/docs/content/guides/authz/openfga.adoc +++ b/docs/content/guides/authz/openfga.adoc @@ -44,13 +44,14 @@ version: '3.7' services: heimdall: # <1> - image: dadrus/heimdall:latest + image: dadrus/heimdall:dev container_name: heimdall ports: - "9090:4455" volumes: - ./heimdall-config.yaml:/etc/heimdall/config.yaml:ro - ./rules:/etc/heimdall/rules:ro + - ./signer.pem:/etc/heimdall/signer.pem:ro command: -c /etc/heimdall/config.yaml serve proxy upstream: # <2> @@ -122,6 +123,10 @@ mechanisms: finalizers: - id: create_jwt # <9> type: jwt + config: + signer: + key_store: + path: /etc/heimdall/signer.pem providers: file_system: # <10> @@ -139,9 +144,23 @@ NOTE: We use a very simple `link:{{< relref "/docs/configuration/types.adoc#_end <6> Here we define and configure a `link:{{< relref "/docs/mechanisms/contextualizers.adoc#_generic" >}}[generic]` contextualizer named `openfga_list`. <7> As with the authorization mechanism, defined above, here we configure the endpoint to list the allowed objects. <8> The payload configuration used while communicating to the configured endpoint. -<9> The following two lines define the `link:{{< relref "/docs/mechanisms/finalizers.adoc#_jwt" >}}[jwt]` finalizer. Without any configuration, as used here, it will create a jwt out of the subject object with standard claims and set the `sub` claim to the value of subject's ID. +<9> The following two lines define the `link:{{< relref "/docs/mechanisms/finalizers.adoc#_jwt" >}}[jwt]` finalizer. With the given configuration, it will create a jwt out of the subject object with standard claims and set the `sub` claim to the value of subject's ID. <10> The last few lines of the configure the `link:{{< relref "/docs/rules/providers.adoc#_filesystem" >}}[file_system]` provider, which allows loading of regular rules from the file system. +. Create a file, named `signer.pem` with the following content. This is our key store with a private key, you've seen in the configuration above. ++ +[source, yaml] +---- +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDALv/dRp6zvm6nmozmB/21viwFCUGBoisHz0v8LSRXGiM5aDywLFmMy +1jPnw29tz36gBwYFK4EEACKhZANiAAQgZkUS7PCh5tEXXvZk0LDQ4Xn4LSK+vKkI +zlCZl+oMgud8gacf4uG5ERgju1xdUyfewsXlwepTnWuwhXM7GdnwY5GOxZTwGn3X +XVwR/5tokqFVrFxt/5c1x7VdccF4nNM= +-----END EC PRIVATE KEY----- +---- ++ +WARNING: Do not use it for purposes beyond this tutorial! + . Configure NGINX to expose a static endpoint serving a JWKS document under the `.well-known` path, so heimdall is able to verify the JWT, we're going to use. Create a file named `idp.nginx` with the following content: + [source, bash] @@ -241,17 +260,15 @@ Note or write down the value of `authorization_model_id`. 
+ [source, yaml] ---- -version: "1alpha3" +version: "1alpha4" rules: - id: access_document # <1> match: - url: http://<**>/document/<**> # <2> + routes: + - path: /document/:id # <2> + methods: [ GET, POST, DELETE ] forward_to: # <3> host: upstream:8081 - methods: - - GET - - POST - - DELETE execute: - authenticator: jwt_auth # <4> - authorizer: openfga_check # <5> @@ -266,16 +283,16 @@ rules: {{- else -}} unknown {{- end -}} object: > - document:{{- splitList "/" .Request.URL.Path | last -}} # <9> + document:{{- .Request.URL.Captures.id -}} # <9> - finalizer: create_jwt # <10> - id: list_documents # <11> match: - url: http://<**>/documents # <12> + routes: + - path: /documents # <12> + methods: [ GET ] # <14> forward_to: # <13> host: upstream:8081 - methods: - - GET # <14> execute: # <15> - authenticator: jwt_auth - contextualizer: openfga_list @@ -287,7 +304,7 @@ rules: - finalizer: create_jwt config: claims: | - {{ toJson .Subject.Attributes.openfga_list }} # <16> + {{ toJson .Outputs.openfga_list }} # <16> ---- <1> Our rule set consists of two rules. The first one has the id `access_document` <2> This rule should match urls of the following form `/document/`, with id being the identifier of a document. @@ -297,7 +314,7 @@ rules: <6> Replace the value here with the store id, you've received in step 6 <7> Replace the value here with the authorization model id, you've received in step 7 <8> Here, we set the relation depending on the used HTTP request method -<9> Our object reference. We use the last URL path fragment as the id of the document +<9> Our object reference. We use the value captured by the wildcard named `id`. <10> Reference to the previously configured finalizer to create a JWT to be forwarded to our upstream service <11> This is our second rule. It has the id `list_documents`. 
<12> And matches any request of the form `/documents` @@ -380,7 +397,7 @@ Host: upstream:8081 User-Agent: curl/8.2.1 Accept: */* Accept-Encoding: gzip -Authorization: Bearer eyJhbGciOiJFUzM4NCIsImtpZCI6ImRiMzliZGI3ZmIyNWMyNTgxMTI4ZDdlMzc0M2Y2MjkxY2E5YzBkZDIiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE3MTE5Nzc1NDEsImlhdCI6MTcxMTk3NzI0MSwiaXNzIjoiaGVpbWRhbGwiLCJqdGkiOiJjNGMyZWMxZC02MTMxLTQ2NWYtYjYwZC01ZTYwZDJhMGNiMTgiLCJuYmYiOjE3MTE5NzcyNDEsInN1YiI6ImFubmUifQ.x03GYY2yPvItoYDY-YWlnrVlI1NUZs81Zr1yGHnR0sDPiCamzzWX2YcQFZkXCXO8EkwPLesvjZeISCs0RjuCT85UnQF8mavh2Q2j1By9zGfYobOVsaoSrIA8anR4I1hL +Authorization: Bearer eyJhbGciOiJFUzM4NCIsImtpZCI6ImIzNDA3N2ZlNWI5NDczYzBjMmY3NDNmYWQ0MmY3ZDU0YWM3ZTFkN2EiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE3MTg2OTQ5MzAsImlhdCI6MTcxODY5NDYzMCwiaXNzIjoiaGVpbWRhbGwiLCJqdGkiOiJiNzgyZGE4YS1mMDFlLTRmYmUtYTlkZC04MzdiYzYzYzlhODUiLCJuYmYiOjE3MTg2OTQ2MzAsInN1YiI6ImFubmUifQ.xANlIPmRWdMraL_j0i-0cK4NVhqopzgSc5_u0m4Hyg4VAFQ3ZHuuap1ZD9hs8ZkBQGin9-vPsBeVrQr40OfAev7WKyNVPpIpmFBAU8fX15kXgVXox29kgBAcAM2b2W-w Forwarded: for=172.19.0.1;host=127.0.0.1:9090;proto=http ---- @@ -405,7 +422,7 @@ Host: upstream:8081 User-Agent: curl/8.2.1 Accept: */* Accept-Encoding: gzip -Authorization: Bearer eyJhbGciOiJFUzM4NCIsImtpZCI6ImRiMzliZGI3ZmIyNWMyNTgxMTI4ZDdlMzc0M2Y2MjkxY2E5YzBkZDIiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE3MTE5Nzc1OTksImlhdCI6MTcxMTk3NzI5OSwiaXNzIjoiaGVpbWRhbGwiLCJqdGkiOiI5OTBkOTM5Ny1mMjAwLTQ4ODgtOGE0Ny0zZjZmM2Q2YmNmMmIiLCJuYmYiOjE3MTE5NzcyOTksIm9iamVjdHMiOlsiZG9jdW1lbnQ6MTIzNCJdLCJzdWIiOiJhbm5lIn0.NA2PBmdLNICDjbauIeZwKrFglNKPaddKkNOww4vUCcmWwhQX0cyKMtUILN4cypgR7Bfu_Hr9VOIgbau2IbVIYe51hEwH5jvWCUbInXDnLkdfwAdDWGpAVGtNHcMj3CRG +Authorization: Bearer eyJhbGciOiJFUzM4NCIsImtpZCI6ImIzNDA3N2ZlNWI5NDczYzBjMmY3NDNmYWQ0MmY3ZDU0YWM3ZTFkN2EiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE3MTg2OTUwODEsImlhdCI6MTcxODY5NDc4MSwiaXNzIjoiaGVpbWRhbGwiLCJqdGkiOiJiNWRhMDg2OC0yNTFhLTRhZmEtODk4ZS1hZThlYzdkZjMyZDEiLCJuYmYiOjE3MTg2OTQ3ODEsIm9iamVjdHMiOlsiZG9jdW1lbnQ6MTIzNCJdLCJzdWIiOiJhbm5lIn0.GY-4oi75KV8jQz5SgMzVMG_-CcCSi9XpmRE934Uq-A326MBwTcFuHysSYmWNz85wwG5zti2Jijn1T8Vm2fpTVEgEE6qltB9caVQlVNGDyF3uAVdpq9NRgHDcru3-15oB Forwarded: for=172.19.0.1;host=127.0.0.1:9090;proto=http ---- diff --git a/docs/content/guides/proxies/envoy_gateway.adoc b/docs/content/guides/proxies/envoy_gateway.adoc new file mode 100644 index 000000000..f02181666 --- /dev/null +++ b/docs/content/guides/proxies/envoy_gateway.adoc @@ -0,0 +1,128 @@ +--- +title: "Envoy Gateway Integration" +date: 2024-05-02T13:02:43+02:00 +draft: false +weight: 13 +menu: + guides: + parent: "API Gateways & Proxies" +description: This guide explains how to integrate heimdall with Envoy Gateway. +--- + +:toc: + +https://gateway.envoyproxy.io[Envoy Gateway] is an open source project for managing https://www.envoyproxy.io/[Envoy Proxy] as a Kubernetes-based application gateway by making use of the https://gateway-api.sigs.k8s.io/[Gateway API] resources. + +== Prerequisites + +* A kubernetes cluster +* Deployed Envoy Gateway (See https://gateway.envoyproxy.io/v1.0.1/install/[here] for installation options) +* Deployed https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.GatewayClass[`GatewayClass`] resource that matches Envoy Gateway's configured `controllerName` (typically `gateway.envoyproxy.io/gatewayclass-controller`), as well as a deployed https://gateway-api.sigs.k8s.io/api-types/gateway[`Gateway`] resource. +* heimdall installed and operated in link:{{< relref "/docs/concepts/operating_modes.adoc#_decision_mode" >}}[Decision Operation Mode]. 
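The prerequisites above reference a `GatewayClass` and a `Gateway` resource without showing them. A minimal sketch of what these could look like is given below; the resource names (`eg`), the namespace and the plain HTTP listener are illustrative only and match the `Gateway` referenced in the examples that follow.

[source, yaml]
----
apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
metadata:
  name: eg
spec:
  # must match the controllerName Envoy Gateway is configured with
  controllerName: gateway.envoyproxy.io/gatewayclass-controller
---
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: eg
  namespace: heimdall
spec:
  gatewayClassName: eg
  listeners:
    - name: http
      protocol: HTTP
      port: 80
----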
+ + +== Integration Options + +Technically, the integration happens the same way as with link:{{< relref "/guides/proxies/envoy.adoc" >}}[Envoy] itself by making use of the https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/http/ext_authz/v3/ext_authz.proto.html[External Authorization] filter, and can be done in two ways: + +* either via HTTP +* or via gRPC (recommended) + +In both cases, the filter calls an external gRPC or HTTP service (here heimdall) to check whether an incoming HTTP request is authorized or not. If heimdall responds with `2xx`, the request is forwarded to the upstream service; otherwise, the response from heimdall is returned to the caller. + +In the case of Envoy Gateway, this configuration happens via a https://gateway.envoyproxy.io/contributions/design/security-policy/[`SecurityPolicy`] custom resource, which can be linked to a https://gateway-api.sigs.k8s.io/api-types/gateway[`Gateway`], https://gateway-api.sigs.k8s.io/api-types/httproute[`HTTPRoute`], or a https://gateway-api.sigs.k8s.io/api-types/grpcroute[`GRPCRoute`] resource. + +NOTE: As of today, there is a limitation in the implementation of Envoy Gateway - it does not allow cross-namespace references of external auth services (see also https://github.com/envoyproxy/gateway/issues/3322[envoyproxy/gateway#3322]). That means the https://gateway-api.sigs.k8s.io/api-types/httproute[`HTTPRoute`], the https://gateway-api.sigs.k8s.io/api-types/gateway[`Gateway`] resource and heimdall must be deployed in the same namespace. + +== Global Configuration + +To integrate heimdall with the gateway globally, so that each and every request is forwarded to heimdall for authentication and authorization purposes first, create a https://gateway.envoyproxy.io/contributions/design/security-policy/[`SecurityPolicy`] as shown below in the namespace the https://gateway-api.sigs.k8s.io/api-types/gateway[`Gateway`] resource is deployed into. + +[source, yaml] +---- +apiVersion: gateway.envoyproxy.io/v1alpha1 +kind: SecurityPolicy +metadata: + name: ext-auth-heimdall # <1> + namespace: heimdall # <2> +spec: + targetRef: # <3> + group: gateway.networking.k8s.io + kind: Gateway + name: eg + namespace: heimdall + extAuth: + grpc: + backendRef: # <4> + name: heimdall + port: 4456 + namespace: heimdall +---- +<1> The name of the `SecurityPolicy`. You can change it to any other value if you like. +<2> The namespace for the policy. It must be the same namespace the `Gateway` resource and heimdall are deployed into. So change it to your namespace. +<3> Defines the `Gateway` resource this policy should be applied to. Change the `name` property to the name of your `Gateway` resource and the `namespace` property to the proper namespace (same as in 2). +<4> Defines the reference to the heimdall `Service` using the gRPC protocol. Change the `name` and the `namespace` to the proper values of your setup. + +== Route-level Configuration + +The integration on the route level happens similarly to the link:{{< relref "#_global_configuration" >}}[global integration]. The difference is that the https://gateway.envoyproxy.io/contributions/design/security-policy/[`SecurityPolicy`] is applied to an https://gateway-api.sigs.k8s.io/api-types/httproute[`HTTPRoute`] as shown below and not the https://gateway-api.sigs.k8s.io/api-types/gateway[`Gateway`] resource.
+ +[source, yaml] +---- +apiVersion: gateway.envoyproxy.io/v1alpha1 +kind: SecurityPolicy +metadata: + name: ext-auth-example # <1> + namespace: heimdall # <2> +spec: + targetRef: # <3> + group: gateway.networking.k8s.io + kind: HTTPRoute + name: heimdall + extAuth: + grpc: + backendRef: # <4> + name: heimdall + port: 4456 + namespace: heimdall +---- +<1> The name of the `SecurityPolicy`. You can change it to any other value if you like. +<2> The namespace for the policy. It must be the same namespace the `HTTPRoute` resource is deployed into, so the namespace, your application is deployed to. So change it to your namespace. +<3> Defines the `HTTPRoute` resource, this policy should be applied to. Change the `name` property to the name of your `HTTPRoute` resource and the `namespace` property to the proper namespace (same as in 2) +<4> Defines the reference to the heimdall `Service` using the gRPC protocol. Change the `name` and the `namespace` to the proper values of your setup. + +== Security Considerations + +The configuration options shown above are highly insecure, as the communication from the gateway to heimdall happens over plain HTTP. Therefore, it is highly recommended to enable TLS. This can be achieved by enabling TLS for heimdall and attaching a https://gateway-api.sigs.k8s.io/api-types/backendtlspolicy/[`BackendTLSPolicy`] resource shown below to heimdall's https://kubernetes.io/docs/concepts/services-networking/service/[`Service`]. + +[source, yaml] +---- +apiVersion: gateway.networking.k8s.io/v1alpha2 +kind: BackendTLSPolicy +metadata: + name: heimdall-btls + namespace: heimdall # <1> +spec: + targetRef: # <2> + group: '' + kind: Service + namespace: heimdall + name: heimdall + sectionName: "4456" + tls: # <3> + caCertRefs: + - name: demo-ca # <4> + group: '' + kind: ConfigMap + hostname: heimdall # <5> +---- +<1> Change it to the namespace in which heimdall is deployed +<2> The reference to heimdall's `Service`. Change the `name` and the `namespace` to the proper values. +<3> Here we configure the reference to the `ConfigMap` with the certificate of the CA, used to issue a TLS server authentication certificate for heimdall, as well as the hostname used by heimdall (and present in the SAN extension of heimdall's TLS certificate). The `ConfigMap` must be in the same namespace as the `BackendTLSPolicy`. +<4> The name of the `ConfigMap`. Change it to the proper value. +<5> The expected hostname used by heimdall. Change it to the proper value. + +== Additional Resources + +* A fully working example with Envoy Gateway is also available on https://github.com/dadrus/heimdall/tree/main/examples[GitHub]. +* You can find the official external authentication guide for Envoy Gateway https://gateway.envoyproxy.io/v1.0.1/tasks/security/ext-auth/[here]. It contains a fully working setup with a demo application. +* https://gateway.envoyproxy.io/v1.0.1/tasks/security/secure-gateways/[Secure Gateways] is a highly recommended read as well. 
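As a side note to the Security Considerations section above: the `BackendTLSPolicy` shown there references a `ConfigMap` named `demo-ca`. A minimal sketch of such a `ConfigMap` is given below; the name and namespace are taken from that example, the certificate content is a placeholder, and `ca.crt` is the data key Gateway API implementations typically expect for CA certificates.

[source, yaml]
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: demo-ca
  namespace: heimdall
data:
  # the CA certificate used to issue heimdall's TLS server authentication certificate
  ca.crt: |
    -----BEGIN CERTIFICATE-----
    ... your CA certificate ...
    -----END CERTIFICATE-----
----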
diff --git a/docs/content/guides/proxies/haproxy.adoc b/docs/content/guides/proxies/haproxy.adoc index 904155a91..23b9c62cf 100644 --- a/docs/content/guides/proxies/haproxy.adoc +++ b/docs/content/guides/proxies/haproxy.adoc @@ -39,7 +39,7 @@ data: auth-url: "https://..svc.cluster.local:" # <1> auth-headers-succeed: "authorization" # <2> headers: | # <3> - X-Forwarded-Uri: %[baseq] + X-Forwarded-Uri: %[pathq] X-Forwarded-Method: %[method] X-Forwarded-Host: %[req.hdr(host)] ---- @@ -63,7 +63,7 @@ annotations: haproxy-ingress.github.io/auth-url: "https://..svc.cluster.local:" haproxy-ingress.github.io/auth-headers-succeed: "authorization" haproxy-ingress.github.io/headers: | - X-Forwarded-Uri: %[baseq] + X-Forwarded-Uri: %[pathq] X-Forwarded-Method: %[method] X-Forwarded-Host: %[req.hdr(host)] ---- diff --git a/docs/content/guides/proxies/nginx.adoc b/docs/content/guides/proxies/nginx.adoc index 5719fc3a2..91e9439f6 100644 --- a/docs/content/guides/proxies/nginx.adoc +++ b/docs/content/guides/proxies/nginx.adoc @@ -43,8 +43,6 @@ location @error401 { * If there is no matching rule on heimdall side, heimdall responds with `404 Not Found`, which, as said above will be treated by NGINX as error. To avoid such situations, you can define a link:{{< relref "/docs/rules/default_rule.adoc" >}}[default rule], which is anyway recommended to have secure defaults -* If a heimdall rule is matched, but is configured to not allow a particular HTTP method, `405 Method Not Allowed` response code is returned. That will result in `500` returned by NGINX due to the reasons written above. To overcome that, you can configure heimdall to respond with another HTTP response code using the `respond` property on the level of the link:{{< relref "/docs/services/decision.adoc" >}}[decision service] configuration. - == Vanilla NGINX Since NGINX is highly configurable and heimdall supports different integration options, you can use any of the configuration examples given below. All of these enable heimdall to build the URL of the protected backend server for rule matching purposes. @@ -126,24 +124,23 @@ location = /_auth { == NGINX Ingress Controller -Even one can configure an external auth server globally with vanilla NGINX, there is no way to achieve that with the https://kubernetes.github.io/ingress-nginx/[NGINX Ingress Controller]. Only route based configuration/integration is possible. +=== Global Configuration -=== Using `X-Forwarded-*` headers +==== Using `X-Forwarded-*` headers -To integrate heimdall with the NGINX Ingress Controller you can make use of the `nginx.ingress.kubernetes.io/auth-url`, `nginx.ingress.kubernetes.io/auth-response-headers` and the `nginx.ingress.kubernetes.io/auth-snippet` annotation as shown in the example below. This will result in an NGINX configuration corresponding to the integration option, described in the link:{{< relref "#_second_option" >}}[Forward all information in `X-Forwarded-*` headers] section. +NOTE: The configuration used in the example below requires proper configuration of `trusted_proxies` on heimdall side. -NOTE: The configuration used in the example below requires proper configuration of `trusted_proxies`. +Global configuration can be achieved by setting the following properties in controller `ConfigMap`. If you install the NGINX controller via the helm chart, you can add these properties under the `controller.config` property of your helm `values.yaml` file. 
[source, yaml] ---- -nginx.ingress.kubernetes.io/auth-url: "http://..svc.cluster.local:" # <1> -nginx.ingress.kubernetes.io/auth-response-headers: Authorization # <2> -nginx.ingress.kubernetes.io/auth-snippet: | # <3> +global-auth-url: "http://..svc.cluster.local:" # <1> +global-auth-response-headers: Authorization # <2> +global-auth-snippet: | # <3> proxy_set_header X-Forwarded-Method $request_method; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Host $http_host; proxy_set_header X-Forwarded-Uri $request_uri; -# other annotations required ---- <1> Configures the controller to use heimdall's decision service endpoint with ``, `` and `` depending on your configuration. <2> Let NGINX forward the `Authorization` header set by heimdall to the upstream service upon successful response. This configuration depends on @@ -152,7 +149,58 @@ your link:{{< relref "/docs/mechanisms/contextualizers.adoc" >}}[Contextualizers + NOTE: Without that, heimdall will not be able extracting relevant information from the NGINX request as it does not support NGINX proprietary `X-Original-Method` and `X-Original-Uri` used by it for the same purposes. -=== Alternative Configuration +With that in place, you can simply use the standard https://kubernetes.io/docs/concepts/services-networking/ingress/[`Ingress`] resource, and the NGINX Ingress Controller will ensure that each request is analyzed by heimdall first. + +This will result in an NGINX configuration corresponding to the integration option described in the link:{{< relref "#_second_option" >}}[Forward all information in `X-Forwarded-*` headers] section. + +==== Alternative Configuration + +Alternatively, if you don't want to configure `trusted_proxies` and do not rely on the used HTTP scheme, host and port in your rules, you can also add the `location-snippet` and the `server-snippet` shown below to the `ConfigMap` of the NGINX Ingress Controller. + +This example is an exact copy of the configuration used in the very first link:{{< relref "#_first_option" >}}[integration option] described above. + +[source, yaml] +---- +location-snippet: | + auth_request /_auth; + auth_request_set $auth_cookie $upstream_http_set_cookie; + add_header Set-Cookie $auth_cookie; + auth_request_set $auth_header $upstream_http_authorization; + proxy_set_header 'Authorization' $auth_header; + proxy_set_header Proxy ""; +server-snippet: | + location = /_auth { + internal; + access_log off; + proxy_method $request_method; + proxy_pass http://..svc.cluster.local:$request_uri; + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + proxy_set_header Host $http_host; + } +---- + +As with the previous integration option, you can add these properties under the `controller.config` property of your helm `values.yaml` file if you install the NGINX Ingress Controller via helm. + +=== Integration on `Ingress` Resource Level + +==== Using `X-Forwarded-*` headers + +One option to integrate heimdall with the NGINX Ingress Controller on the `Ingress` resource level is to make use of the `nginx.ingress.kubernetes.io/auth-url`, `nginx.ingress.kubernetes.io/auth-response-headers` and the `nginx.ingress.kubernetes.io/auth-snippet` annotations as shown in the example below. This approach requires proper configuration of `trusted_proxies` on the heimdall side.
On NGINX Ingress Controller side you must allow the usage of `nginx.ingress.kubernetes.io/auth-snippet` (See also https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#allow-snippet-annotations[here]). + +[source, yaml] +---- +nginx.ingress.kubernetes.io/auth-url: "http://..svc.cluster.local:" +nginx.ingress.kubernetes.io/auth-response-headers: Authorization +nginx.ingress.kubernetes.io/auth-snippet: | + proxy_set_header X-Forwarded-Method $request_method; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $http_host; + proxy_set_header X-Forwarded-Uri $request_uri; +# other annotations required +---- + +==== Alternative Configuration Alternatively, if you don't want configuring `trusted_proxies` and do not rely on the used HTTP scheme, host and port in your rules, you can also use the `nginx.ingress.kubernetes.io/configuration-snippet` and `nginx.ingress.kubernetes.io/server-snippet` annotations and use the configuration shown below. diff --git a/docs/content/guides/proxies/traefik.adoc b/docs/content/guides/proxies/traefik.adoc index 81cc91a32..7c51c9983 100644 --- a/docs/content/guides/proxies/traefik.adoc +++ b/docs/content/guides/proxies/traefik.adoc @@ -15,7 +15,7 @@ https://doc.traefik.io/traefik/[Traefik Proxy] is a modern HTTP proxy and load b == Prerequisites -* Integration with Envoy proxy requires heimdall being operated in link:{{< relref "/docs/concepts/operating_modes.adoc#_decision_mode" >}}[Decision Operation Mode]. +* Integration with traefik requires heimdall being operated in link:{{< relref "/docs/concepts/operating_modes.adoc#_decision_mode" >}}[Decision Operation Mode]. [CAUTION] ==== @@ -31,12 +31,14 @@ To let Traefik forward all incoming requests to heimdall, there is a need * to configure the https://doc.traefik.io/traefik/middlewares/http/forwardauth/[ForwardAuth] middleware, and * to add it to the list of https://doc.traefik.io/traefik/routing/entrypoints/#middlewares[middlewares] that are prepended by default to the list of middlewares of each router associated to a named entry point. -Both is shown in the snippet below +=== Regular Deployment + +If you are using Traefik outside of kubernetes, the above can be achieved by the following static configuration [source, yaml] ---- entryPoints: - http: + web: address: ":8080" middlewares: # <1> - heimdall @@ -56,6 +58,51 @@ http: <4> Configures this middleware to forward requests to a service available under "heimdall" DNS name <5> Configures this middleware to forward the `Authorization` header from heimdall's response to the upstream service +=== Kubernetes Deployment + +If you are using Traefik as https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/[Ingress Controller] or as https://gateway-api.sigs.k8s.io/[Gateway API] implementation in your kubernetes cluster, the required configuration is slightly different. The configuration of the entry point(s) stays the same, but the middleware needs to be deployed as a custom resource. + +Here an example for a https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#kind-middleware[`Middleware`] custom resource: + +[source, yaml] +---- +apiVersion: traefik.io/v1alpha1 +kind: Middleware +metadata: # <1> + name: heimdall + namespace: heimdall +spec: + forwardAuth: # <2> + address: "http://heimdall.heimdall.svc.cluster.local:4456" # <3> + authResponseHeaders: # <4> + - Authorization +---- +<1> The name and the namespace of the middleware. 
Both are set to `heimdall` here +<2> The type of the middleware, which is of type https://doc.traefik.io/traefik/middlewares/http/forwardauth/[`forwardAuth`] +<3> Configures this middleware to forward requests to the heimdall service. Here, the corresponding `Service` is named `heimdall` and is also located in the namespace named `heimdall`. +<4> Configures this middleware to forward the `Authorization` header from heimdall's response to the upstream service + +How to add this middleware to the default middleware list of a particular endpoint depends on the method used to install Traefik. If helm is used, you can configure that list by making use of the following `values.yaml` file: + +[source, yaml] +---- +providers: + kubernetesCRD: + enabled: true # <1> + +ports: + web: # <2> + middlewares: + - heimdall-heimdall@kubernetescrd # <3> + websecure: # <4> + middlewares: + - heimdall-heimdall@kubernetescrd +---- +<1> To let traefik load `Middleware` resources, like defined above, traefik's `kubernetesCRD` provider must be enabled. Typically, it is enabled by default. +<2> Traefik's helm chart defines two entry points `web` for HTTP traffic and `websecure` for HTTPS traffic. Here we configure the `web` endpoint to use our middleware +<3> Reference to the `Middleware` resource, defined above. The general structure is `-@`. Since our middleware resource is loaded by the `kubernetescrd` provider, resides in the `heimdall` namespace, and is named `heimdall`, the reference `heimdall-heimdall@kubernetescrd` is used. +<4> Here we configure the `websecure` endpoint, which, as written above, is configured via helm chart for HTTPS traffic. The actual configuration is identical to the configuration for the `web` endpoint. + == Route-based Configuration with Docker The integration option, described here makes use of the https://doc.traefik.io/traefik/providers/docker/[Docker Provider] for configuration discovery. @@ -84,7 +131,7 @@ services: - traefik.http.middlewares.heimdall.forwardauth.authResponseHeaders=Authorization # <4> heimdall: - image: dadrus/heimdall:latest + image: dadrus/heimdall:dev # further config upstream: @@ -103,7 +150,40 @@ services: If you have Traefik as Ingress Controller in your Kubernetes cluster, you can simply integrate heimdall globally as descibed in link:{{< relref "#_global_configuration" >}}[Global Configuration] chapter above and make use of the standard https://kubernetes.io/docs/concepts/services-networking/ingress/[Ingress resource]. -There is also an option to have a route based configuration. In that case, you'll have to use Traefik proprietary https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#kind-middleware[`Middleware`] and https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#kind-ingressroute[`IngressRoute`] custom resources to define and use the https://doc.traefik.io/traefik/middlewares/http/forwardauth/[ForwardAuth] middleware. +If you are using traefik's proprietary https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#kind-ingressroute[`IngressRoute`] custom resource instead of kubernetes standard https://kubernetes.io/docs/concepts/services-networking/ingress/[`Ingress`] one, you can also reference the https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#kind-middleware[`Middleware`] resource locally. This option is shown in the snippet below. 
+ +[source, yaml] +---- +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: # <1> + name: demo-app + namespace: demo +spec: + entryPoints: + - web # <2> + routes: + - kind: Rule + match: Host(`demo-app.local`) && PathPrefix(`/`) + middlewares: # <3> + - name: heimdall + namespace: heimdall + services: # <4> + - kind: Service + name: demo-app + namespace: demo + port: app-port +---- +<1> `metadata`, like name and the namespace of the `IngressRoute` resource +<2> The traefik entry points to attach this resource to. Here only `web` entry point is referenced +<3> List of the middlewares to be applied. Here the `Middleware` named `heimdall` in the namespace `heimdall` is referenced. ++ +NOTE: By default, `IngressRoute` resources are not allowed to reference resources in namespaces different from the own namespace. If your `Middleware` resource, like also shown here, is deployed in another namespace, you have to allow that. If traefik is installed via helm, it can be achieved by setting `providers.kubernetesCRD.allowCrossNamespace` to `true` (See also https://doc.traefik.io/traefik/providers/kubernetes-crd/#allowcrossnamespace[here]). +<4> The reference to the `Service`, the requests should be forwarded to. + +== Traefik as Gateway API implementation + +If you have Traefik as https://gateway-api.sigs.k8s.io/[Gateway API] implementation in your Kubernetes cluster, you can simply integrate heimdall globally as descibed in link:{{< relref "#_kubernetes_deployment" >}}[Global Configuration] chapter above and make use of the standard https://gateway-api.sigs.k8s.io/api-types/httproute[`HTTPRoute`] resource. == Additional Resources diff --git a/docs/openapi/specification.yaml b/docs/openapi/specification.yaml index 56bdfd4fd..ff38b5bbf 100644 --- a/docs/openapi/specification.yaml +++ b/docs/openapi/specification.yaml @@ -439,22 +439,22 @@ paths: "uid": "ce409862-eae0-4704-b7d5-46634efdaf9b", "kind": { "group": "heimdall.dadrus.github.com", - "version": "v1alpha3", + "version": "v1alpha4", "kind": "RuleSet" }, "resource": { "group": "heimdall.dadrus.github.com", - "version": "v1alpha3", + "version": "v1alpha4", "resource": "rulesets" }, "requestKind": { "group": "heimdall.dadrus.github.com", - "version": "v1alpha3", + "version": "v1alpha4", "kind": "RuleSet" }, "requestResource": { "group": "heimdall.dadrus.github.com", - "version": "v1alpha3", + "version": "v1alpha4", "resource": "rulesets" }, "name": "echo-app-rules", @@ -468,11 +468,11 @@ paths: ] }, "object": { - "apiVersion": "heimdall.dadrus.github.com/v1alpha3", + "apiVersion": "heimdall.dadrus.github.com/v1alpha4", "kind": "RuleSet", "metadata": { "annotations": { - "kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"heimdall.dadrus.github.com/v1alpha3\",\"kind\":\"RuleSet\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/name\":\"echo-app\"},\"name\":\"echo-app-rules\",\"namespace\":\"quickstarts\"},\"spec\":{\"rules\":[{\"execute\":[{\"authorizer\":\"allow_all_requests\"},{\"finalizer\":\"noop_finalizer\"}],\"forward_to\":{\"host\":\"echo-app.quickstarts.svc.cluster.local:8080\"},\"id\":\"public-access\",\"match\":{\"url\":\"\\u003c**\\u003e://\\u003c**\\u003e/pub/\\u003c**\\u003e\"}},{\"execute\":[{\"authorizer\":\"allow_all_requests\"}],\"forward_to\":{\"host\":\"echo-app.quickstarts.svc.cluster.local:8080\"},\"id\":\"anonymous-access\",\"match\":{\"url\":\"\\u003c**\\u003e://\\u003c**\\u003e/anon/\\u003c**\\u003e\"}},{\"execute\":[{\"authenticator\":\"deny_authenticator\"}],\"forward_to\":{\"host\":\"echo-app.quickstarts.svc.cluster.local:8080\"},\"id\":\"redirect\",\"match\":{\"url\":\"\\u003c**\\u003e://\\u003c**\\u003e/redir/\\u003c**\\u003e\"}}]}}\n" + "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"heimdall.dadrus.github.com/v1alpha4\",\"kind\":\"RuleSet\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/name\":\"echo-app\"},\"name\":\"echo-app-rules\",\"namespace\":\"quickstarts\"},\"spec\":{\"rules\":[{\"execute\":[{\"authorizer\":\"allow_all_requests\"},{\"finalizer\":\"noop_finalizer\"}],\"forward_to\":{\"host\":\"echo-app.quickstarts.svc.cluster.local:8080\"},\"id\":\"public-access\",\"match\":{\"url\":\"\\u003c**\\u003e://\\u003c**\\u003e/pub/\\u003c**\\u003e\"}},{\"execute\":[{\"authorizer\":\"allow_all_requests\"}],\"forward_to\":{\"host\":\"echo-app.quickstarts.svc.cluster.local:8080\"},\"id\":\"anonymous-access\",\"match\":{\"url\":\"\\u003c**\\u003e://\\u003c**\\u003e/anon/\\u003c**\\u003e\"}},{\"execute\":[{\"authenticator\":\"deny_authenticator\"}],\"forward_to\":{\"host\":\"echo-app.quickstarts.svc.cluster.local:8080\"},\"id\":\"redirect\",\"match\":{\"url\":\"\\u003c**\\u003e://\\u003c**\\u003e/redir/\\u003c**\\u003e\"}}]}}\n" }, "creationTimestamp": "2023-10-25T17:13:37Z", "generation": 1, @@ -481,7 +481,7 @@ paths: }, "managedFields": [ { - "apiVersion": "heimdall.dadrus.github.com/v1alpha3", + "apiVersion": "heimdall.dadrus.github.com/v1alpha4", "fieldsType": "FieldsV1", "fieldsV1": { "f:metadata": { @@ -526,8 +526,7 @@ paths: }, "id": "public-access", "match": { - "strategy": "glob", - "url": "<**>://<**>/pub/<**>" + "path": "/pub/**" } }, { @@ -541,8 +540,7 @@ paths: }, "id": "anonymous-access", "match": { - "strategy": "glob", - "url": "<**>://<**>/anon/<**>" + "path": "/anon/**" } }, { @@ -556,8 +554,7 @@ paths: }, "id": "redirect", "match": { - "strategy": "glob", - "url": "<**>://<**>/redir/<**>" + "path": "/redir/**" } } ] diff --git a/docs/osv-scanner.toml b/docs/osv-scanner.toml new file mode 100644 index 000000000..5ce86d050 --- /dev/null +++ b/docs/osv-scanner.toml @@ -0,0 +1,5 @@ +[[PackageOverrides]] +name = "ws" +ecosystem = "npm" +ignore = true +reason = "The entire docs directory is there to generat static html content. 
Although some of the dependencies may have vulnerabilities, like the ws package, they have no effect as neither inbound nor outbound connections are made during the generation of the content, respectively there is no server, which would use that functionalit, when the generated html documentation is hosted somewhere" diff --git a/docs/versions/data.json b/docs/versions/data.json index 6cc3fada3..031e7de96 100644 --- a/docs/versions/data.json +++ b/docs/versions/data.json @@ -102,5 +102,9 @@ { "version": "v0.14.5-alpha", "path": "/heimdall/v0.14.5-alpha" + }, + { + "version": "v0.15.0", + "path": "/heimdall/v0.15.0" } ] diff --git a/example_config.yaml b/example_config.yaml index 4d859f586..74c6924ab 100644 --- a/example_config.yaml +++ b/example_config.yaml @@ -169,22 +169,20 @@ mechanisms: type: default - id: authenticate_with_kratos type: redirect - if: | - Error.Source == "kratos_session_authenticator" && - type(Error) == authentication_error && - Request.Header("Accept").contains("*/*") config: to: http://127.0.0.1:4433/self-service/login/browser?origin={{ .Request.URL | urlenc }} default_rule: - methods: - - GET - - POST + backtracking_enabled: false execute: - authenticator: anonymous_authenticator - finalizer: jwt on_error: - error_handler: authenticate_with_kratos + if: | + Error.Source == "kratos_session_authenticator" && + type(Error) == authentication_error && + Request.Header("Accept").contains("*/*") providers: file_system: @@ -195,8 +193,8 @@ providers: watch_interval: 5m endpoints: - url: http://foo.bar/rules.yaml - rule_path_match_prefix: /foo - enable_http_cache: false + http_cache: + enabled: false - url: http://bar.foo/rules.yaml headers: bla: bla @@ -215,10 +213,8 @@ providers: buckets: - url: gs://my-bucket prefix: service1 - rule_path_match_prefix: /service1 - url: gs://my-bucket prefix: service2 - rule_path_match_prefix: /service2 - url: s3://my-bucket/my-rule-set kubernetes: diff --git a/example_rules.yaml b/example_rules.yaml index 5dfe52936..159853bd4 100644 --- a/example_rules.yaml +++ b/example_rules.yaml @@ -1,15 +1,24 @@ -version: "1alpha3" +version: "1alpha4" name: test-rule-set rules: - id: rule:foo match: - url: http://foo.bar/<**> - strategy: glob + routes: + - path: /foo/:bar/** + path_params: + - name: bar + type: glob + value: "*baz" + backtracking_enabled: false + methods: + - GET + - POST + hosts: + - type: exact + value: foo.bar + scheme: http forward_to: host: bar.foo -# methods: # reuses default -# - GET -# - POST execute: - authenticator: unauthorized_authenticator - authenticator: jwt_authenticator diff --git a/examples/README.md b/examples/README.md index ffaa97ef2..014474768 100644 --- a/examples/README.md +++ b/examples/README.md @@ -6,4 +6,6 @@ Those examples, which are based on docker compose are located in the `docker-com To be able to run the docker compose examples, you'll need Docker and docker-compose installed. -To be able to run the Kubernetes based examples, you'll need just, kubectl, kustomize, helm and a k8s cluster. Latter can also be created locally using kind. The examples are indeed using it. \ No newline at end of file +To be able to run the Kubernetes based examples, you'll need just, kubectl, kustomize, helm and a k8s cluster. Latter can also be created locally using kind. The examples are indeed using it. 
+ +**Note:** The main branch may have breaking changes (see pending release PRs for details under https://github.com/dadrus/heimdall/pulls) which would make the usage of the referenced heimdall images impossible (even though the configuration files and rules reflect the latest changes). In such situations you'll have to use the `dev` image, build a heimdall image by yourself and update the setups to use it, or switch to a tagged (released) version. \ No newline at end of file diff --git a/examples/docker-compose/quickstarts/README.md b/examples/docker-compose/quickstarts/README.md index abce2ea23..a8668531f 100644 --- a/examples/docker-compose/quickstarts/README.md +++ b/examples/docker-compose/quickstarts/README.md @@ -2,6 +2,9 @@ This directory contains examples described in the getting started section of the documentation. The demonstration of the decision operation mode is done via integration with some reverse proxies. +**Note:** The main branch may have breaking changes (see pending release PRs for details under https://github.com/dadrus/heimdall/pulls) which would make the usage of the referenced heimdall images impossible (even though the configuration files and rules reflect the latest changes). In such situations you'll have to use the `dev` image, build a heimdall image by yourself and update the setups to use it, or switch to a tagged (released) version. + + # Proxy Mode Quickstart In that setup heimdall is not integrated with any other reverse proxy. @@ -18,8 +21,8 @@ In that setup heimdall is not integrated with any other reverse proxy. curl -v http://127.0.0.1:9090/public curl -v http://127.0.0.1:9090/private curl -v http://127.0.0.1:9090/user - curl -H "Authorization: Bearer eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleS0xIiwidHlwIjoiSldUIn0.eyJleHAiOjIwMjUxMDA3NTEsImlhdCI6MTcwOTc0MDc1MSwiaXNzIjoiZGVtb19pc3N1ZXIiLCJqdGkiOiIzZmFmNDkxOS0wZjUwLTQ3NGItOGExMy0yOTYzMjEzNThlOTMiLCJuYmYiOjE3MDk3NDA3NTEsInJvbGUiOiJ1c2VyIiwic3ViIjoiMiJ9.W5xCpwsFShS0RpOtrm9vrV2dN6K8pRr5gQnt0kluzLE6oNWFzf7Oot-0YLCPa64Z3XPd7cfGcBiSjrzKZSAj4g" 127.0.0.1:9090/user - curl -H "Authorization: Bearer eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleS0xIiwidHlwIjoiSldUIn0.eyJleHAiOjIwMjUxMDA3NTEsImlhdCI6MTcwOTc0MDc1MSwiaXNzIjoiZGVtb19pc3N1ZXIiLCJqdGkiOiI0NjExZDM5Yy00MzI1LTRhMWYtYjdkOC1iMmYxMTE3NDEyYzAiLCJuYmYiOjE3MDk3NDA3NTEsInJvbGUiOiJhZG1pbiIsInN1YiI6IjEifQ.mZZ_UqC8RVzEKBPZbPs4eP-MkXLK22Q27ZJ34UwJiioFdaYXqYJ4ZsatP0TbpKeNyF83mkrrCGL_pWLFTho7Gg" 127.0.0.1:9090/admin + curl -v -H "Authorization: Bearer eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleS0xIiwidHlwIjoiSldUIn0.eyJleHAiOjIwMjUxMDA3NTEsImlhdCI6MTcwOTc0MDc1MSwiaXNzIjoiZGVtb19pc3N1ZXIiLCJqdGkiOiIzZmFmNDkxOS0wZjUwLTQ3NGItOGExMy0yOTYzMjEzNThlOTMiLCJuYmYiOjE3MDk3NDA3NTEsInJvbGUiOiJ1c2VyIiwic3ViIjoiMiJ9.W5xCpwsFShS0RpOtrm9vrV2dN6K8pRr5gQnt0kluzLE6oNWFzf7Oot-0YLCPa64Z3XPd7cfGcBiSjrzKZSAj4g" 127.0.0.1:9090/user + curl -v -H "Authorization: Bearer eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleS0xIiwidHlwIjoiSldUIn0.eyJleHAiOjIwMjUxMDA3NTEsImlhdCI6MTcwOTc0MDc1MSwiaXNzIjoiZGVtb19pc3N1ZXIiLCJqdGkiOiI0NjExZDM5Yy00MzI1LTRhMWYtYjdkOC1iMmYxMTE3NDEyYzAiLCJuYmYiOjE3MDk3NDA3NTEsInJvbGUiOiJhZG1pbiIsInN1YiI6IjEifQ.mZZ_UqC8RVzEKBPZbPs4eP-MkXLK22Q27ZJ34UwJiioFdaYXqYJ4ZsatP0TbpKeNyF83mkrrCGL_pWLFTho7Gg" 127.0.0.1:9090/admin ``` Check the responses @@ -40,8 +43,8 @@ In that setup heimdall is integrated with Traefik. 
All requests are sent to trae curl -v http://127.0.0.1:9090/public curl -v http://127.0.0.1:9090/private curl -v http://127.0.0.1:9090/user - curl -H "Authorization: Bearer eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleS0xIiwidHlwIjoiSldUIn0.eyJleHAiOjIwMjUxMDA3NTEsImlhdCI6MTcwOTc0MDc1MSwiaXNzIjoiZGVtb19pc3N1ZXIiLCJqdGkiOiIzZmFmNDkxOS0wZjUwLTQ3NGItOGExMy0yOTYzMjEzNThlOTMiLCJuYmYiOjE3MDk3NDA3NTEsInJvbGUiOiJ1c2VyIiwic3ViIjoiMiJ9.W5xCpwsFShS0RpOtrm9vrV2dN6K8pRr5gQnt0kluzLE6oNWFzf7Oot-0YLCPa64Z3XPd7cfGcBiSjrzKZSAj4g" 127.0.0.1:9090/user - curl -H "Authorization: Bearer eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleS0xIiwidHlwIjoiSldUIn0.eyJleHAiOjIwMjUxMDA3NTEsImlhdCI6MTcwOTc0MDc1MSwiaXNzIjoiZGVtb19pc3N1ZXIiLCJqdGkiOiI0NjExZDM5Yy00MzI1LTRhMWYtYjdkOC1iMmYxMTE3NDEyYzAiLCJuYmYiOjE3MDk3NDA3NTEsInJvbGUiOiJhZG1pbiIsInN1YiI6IjEifQ.mZZ_UqC8RVzEKBPZbPs4eP-MkXLK22Q27ZJ34UwJiioFdaYXqYJ4ZsatP0TbpKeNyF83mkrrCGL_pWLFTho7Gg" 127.0.0.1:9090/admin + curl -v -H "Authorization: Bearer eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleS0xIiwidHlwIjoiSldUIn0.eyJleHAiOjIwMjUxMDA3NTEsImlhdCI6MTcwOTc0MDc1MSwiaXNzIjoiZGVtb19pc3N1ZXIiLCJqdGkiOiIzZmFmNDkxOS0wZjUwLTQ3NGItOGExMy0yOTYzMjEzNThlOTMiLCJuYmYiOjE3MDk3NDA3NTEsInJvbGUiOiJ1c2VyIiwic3ViIjoiMiJ9.W5xCpwsFShS0RpOtrm9vrV2dN6K8pRr5gQnt0kluzLE6oNWFzf7Oot-0YLCPa64Z3XPd7cfGcBiSjrzKZSAj4g" 127.0.0.1:9090/user + curl -v -H "Authorization: Bearer eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleS0xIiwidHlwIjoiSldUIn0.eyJleHAiOjIwMjUxMDA3NTEsImlhdCI6MTcwOTc0MDc1MSwiaXNzIjoiZGVtb19pc3N1ZXIiLCJqdGkiOiI0NjExZDM5Yy00MzI1LTRhMWYtYjdkOC1iMmYxMTE3NDEyYzAiLCJuYmYiOjE3MDk3NDA3NTEsInJvbGUiOiJhZG1pbiIsInN1YiI6IjEifQ.mZZ_UqC8RVzEKBPZbPs4eP-MkXLK22Q27ZJ34UwJiioFdaYXqYJ4ZsatP0TbpKeNyF83mkrrCGL_pWLFTho7Gg" 127.0.0.1:9090/admin ``` Check the responses diff --git a/examples/docker-compose/quickstarts/docker-compose-traefik.yaml b/examples/docker-compose/quickstarts/docker-compose-traefik.yaml index 6f9c5bfc4..fdb59a602 100644 --- a/examples/docker-compose/quickstarts/docker-compose-traefik.yaml +++ b/examples/docker-compose/quickstarts/docker-compose-traefik.yaml @@ -2,7 +2,7 @@ version: '3.7' services: proxy: - image: traefik:2.11.0 + image: traefik:3.0.0 ports: - "9090:9090" command: > diff --git a/examples/docker-compose/quickstarts/docker-compose.yaml b/examples/docker-compose/quickstarts/docker-compose.yaml index 7318ce39c..6a699dd2c 100644 --- a/examples/docker-compose/quickstarts/docker-compose.yaml +++ b/examples/docker-compose/quickstarts/docker-compose.yaml @@ -1,11 +1,22 @@ version: '3.7' services: + heimdall-init: + image: finalgene/openssh + command: /tmp/generate_keys.sh + volumes: + - ./generate_keys.sh:/tmp/generate_keys.sh:ro + - heimdall-keys:/etc/heimdall/keys + heimdall: - image: dadrus/heimdall:latest + image: dadrus/heimdall:0.15.0 + depends_on: + heimdall-init: + condition: service_started volumes: - ./heimdall-config.yaml:/etc/heimdall/config.yaml:ro - ./upstream-rules.yaml:/etc/heimdall/rules.yaml:ro + - heimdall-keys:/etc/heimdall/keys:ro upstream: image: containous/whoami:latest @@ -23,3 +34,6 @@ services: volumes: - ./policy.rego:/etc/opa/policies/policy.rego:ro +volumes: + heimdall-keys: + diff --git a/examples/docker-compose/quickstarts/generate_keys.sh b/examples/docker-compose/quickstarts/generate_keys.sh new file mode 100755 index 000000000..549577597 --- /dev/null +++ b/examples/docker-compose/quickstarts/generate_keys.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env sh + +openssl ecparam -name prime256v1 -genkey -noout -out /etc/heimdall/keys/signer.pem +chown -R 10001:10001 /etc/heimdall/keys \ No newline at end of file diff 
--git a/examples/docker-compose/quickstarts/heimdall-config.yaml b/examples/docker-compose/quickstarts/heimdall-config.yaml index a651f7020..0ec56160e 100644 --- a/examples/docker-compose/quickstarts/heimdall-config.yaml +++ b/examples/docker-compose/quickstarts/heimdall-config.yaml @@ -37,13 +37,14 @@ mechanisms: finalizers: - id: create_jwt type: jwt + config: + signer: + key_store: + path: /etc/heimdall/keys/signer.pem - id: noop type: noop default_rule: - methods: - - GET - - POST execute: - authenticator: deny_all - finalizer: create_jwt diff --git a/examples/docker-compose/quickstarts/upstream-rules.yaml b/examples/docker-compose/quickstarts/upstream-rules.yaml index 68b9ed363..739fe8b75 100644 --- a/examples/docker-compose/quickstarts/upstream-rules.yaml +++ b/examples/docker-compose/quickstarts/upstream-rules.yaml @@ -1,8 +1,10 @@ -version: "1alpha3" +version: "1alpha4" rules: - id: demo:public match: - url: http://<**>/public + routes: + - path: /public + methods: [ GET, POST ] forward_to: host: upstream:8081 execute: @@ -11,7 +13,13 @@ rules: - id: demo:protected match: - url: http://<**>/<{user,admin}> + routes: + - path: /:user + path_params: + - type: regex + name: user + value: (user|admin) + methods: [ GET, POST ] forward_to: host: upstream:8081 execute: diff --git a/examples/kubernetes/Justfile b/examples/kubernetes/Justfile index 03dfdd3a9..2bdfed743 100644 --- a/examples/kubernetes/Justfile +++ b/examples/kubernetes/Justfile @@ -3,25 +3,30 @@ prometheus_version := '44.2.1' loki_version := '2.8.9' tempo_version := '0.16.8' phlare_version := '0.1.2' -nginx_version := '9.7.7' -contour_version := '17.0.0' -emissary_version := '8.7.2' -haproxy_version := '0.14.4' -metallb_version := '0.13.10' -certmanager_version := '1.12.3' +nginx_version := '4.10.1' +contour_version := '17.0.12' +emissary_version := '8.9.1' +haproxy_version := '0.14.6' +envoy_gw_version := 'v1.0.1' +traefik_version := '28.0.0' +metallb_version := '0.14.5' +certmanager_version := '1.14.5' +trustmanager_version := '0.9.2' cluster_name := 'demo-cluster' -default_ingress_controller := "contour" +default_router := "contour" setup-charts: helm repo add bitnami https://charts.bitnami.com/bitnami + helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx helm repo add prometheus-community https://prometheus-community.github.io/helm-charts helm repo add grafana https://grafana.github.io/helm-charts helm repo add metallb https://metallb.github.io/metallb helm repo add jetstack https://charts.jetstack.io helm repo add dadrus https://dadrus.github.io/heimdall/charts helm repo add datawire https://app.getambassador.io - helm repo add haproxy-ingress https://haproxy-ingress.github.io/charts + helm repo add haproxy https://haproxy-ingress.github.io/charts + helm repo add traefik https://traefik.github.io/charts helm repo update ## Installs Grafana @@ -88,16 +93,20 @@ install-heimdall-pod-monitor: install-observability-stack: install-grafana install-prometheus install-loki install-tempo install-phlare install-dashboards install-heimdall-pod-monitor -install-nginx-ingress-controller: - helm upgrade --install nginx-ingress-controller bitnami/nginx-ingress-controller \ - -n nginx-ingress-controller --create-namespace \ +install-nginx-ingress-controller global_ext_auth="true": + #!/usr/bin/env bash + + valuesFile=$({{global_ext_auth}} && echo global-helm-values.yaml || echo helm-values.yaml) + + helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \ + -n nginx-controller --create-namespace \ --version 
{{nginx_version}} \ - --set metrics.enabled=true \ + -f nginx/${valuesFile} \ --wait install-contour-ingress-controller: - helm upgrade --install contour-ingress-controller bitnami/contour \ - -n contour-ingress-controller --create-namespace \ + helm upgrade --install contour-controller bitnami/contour \ + -n contour-controller --create-namespace \ --version {{contour_version}} \ -f contour/helm-values.yaml # used only to configure a global auth server @@ -107,35 +116,44 @@ install-emissary-ingress-controller: kubectl apply -f https://app.getambassador.io/yaml/emissary/${app_version}/emissary-crds.yaml kubectl wait --timeout=90s --for=condition=available deployment emissary-apiext -n emissary-system - helm upgrade --install emissary-ingress datawire/emissary-ingress \ - -n emissary-ingress-controller --create-namespace \ + helm upgrade --install emissary datawire/emissary-ingress \ + -n emissary-controller --create-namespace \ --version {{emissary_version}} - kubectl -n emissary-ingress-controller wait --for condition=available --timeout=90s deploy -lapp.kubernetes.io/instance=emissary-ingress + kubectl -n emissary-controller wait --for condition=available --timeout=90s deploy -lapp.kubernetes.io/instance=emissary - kubectl apply -f - <://<**>/pub/<**> + routes: + - path: /pub/** forward_to: # only required for proxy operation mode host: echo-app.quickstarts.svc.cluster.local:8080 execute: - authorizer: allow_all_requests - id: anonymous-access match: - url: <**>://<**>/anon/<**> + routes: + - path: /anon/** forward_to: # only required for proxy operation mode host: echo-app.quickstarts.svc.cluster.local:8080 execute: @@ -24,7 +26,8 @@ spec: - finalizer: create_jwt - id: redirect match: - url: <**>://<**>/redir/<**> + routes: + - path: /redir/** forward_to: # only required for proxy operation mode host: echo-app.quickstarts.svc.cluster.local:8080 execute: diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/contour-ingress/http-proxy.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/contour/http-proxy.yaml similarity index 100% rename from examples/kubernetes/quickstarts/demo-app/overlays/contour-ingress/http-proxy.yaml rename to examples/kubernetes/quickstarts/demo-app/overlays/contour/http-proxy.yaml diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/contour-ingress/kustomization.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/contour/kustomization.yaml similarity index 100% rename from examples/kubernetes/quickstarts/demo-app/overlays/contour-ingress/kustomization.yaml rename to examples/kubernetes/quickstarts/demo-app/overlays/contour/kustomization.yaml diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/emissary-ingress/host.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/emissary/host.yaml similarity index 100% rename from examples/kubernetes/quickstarts/demo-app/overlays/emissary-ingress/host.yaml rename to examples/kubernetes/quickstarts/demo-app/overlays/emissary/host.yaml diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/emissary-ingress/kustomization.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/emissary/kustomization.yaml similarity index 100% rename from examples/kubernetes/quickstarts/demo-app/overlays/emissary-ingress/kustomization.yaml rename to examples/kubernetes/quickstarts/demo-app/overlays/emissary/kustomization.yaml diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/emissary-ingress/mapping.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/emissary/mapping.yaml 
similarity index 100%
rename from examples/kubernetes/quickstarts/demo-app/overlays/emissary-ingress/mapping.yaml
rename to examples/kubernetes/quickstarts/demo-app/overlays/emissary/mapping.yaml
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/envoygw/http_route.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/envoygw/http_route.yaml
new file mode 100644
index 000000000..eb46f81b1
--- /dev/null
+++ b/examples/kubernetes/quickstarts/demo-app/overlays/envoygw/http_route.yaml
@@ -0,0 +1,22 @@
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: echo-app
+  namespace: quickstarts
+spec:
+  parentRefs:
+    - name: eg
+      namespace: heimdall
+  hostnames:
+    - "echo-app.local"
+  rules:
+    - backendRefs:
+        - group: ""
+          kind: Service
+          name: echo-app
+          port: 8080
+          weight: 1
+      matches:
+        - path:
+            type: PathPrefix
+            value: /
\ No newline at end of file
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/envoygw/kustomization.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/envoygw/kustomization.yaml
new file mode 100644
index 000000000..d3588afd6
--- /dev/null
+++ b/examples/kubernetes/quickstarts/demo-app/overlays/envoygw/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ../../base
+  - http_route.yaml
\ No newline at end of file
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/haproxy-ingress/ingress.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/haproxy/ingress.yaml
similarity index 96%
rename from examples/kubernetes/quickstarts/demo-app/overlays/haproxy-ingress/ingress.yaml
rename to examples/kubernetes/quickstarts/demo-app/overlays/haproxy/ingress.yaml
index 46cdc1725..16fd58d53 100644
--- a/examples/kubernetes/quickstarts/demo-app/overlays/haproxy-ingress/ingress.yaml
+++ b/examples/kubernetes/quickstarts/demo-app/overlays/haproxy/ingress.yaml
@@ -10,7 +10,7 @@ metadata:
     haproxy-ingress.github.io/auth-url: "https://heimdall.heimdall.svc.cluster.local:4456"
     haproxy-ingress.github.io/auth-headers-succeed: "authorization"
     haproxy-ingress.github.io/headers: |
-      X-Forwarded-Uri: %[baseq]
+      X-Forwarded-Uri: %[pathq]
       X-Forwarded-Method: %[method]
       X-Forwarded-Host: %[req.hdr(host)]
 spec:
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/haproxy-ingress/kustomization.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/haproxy/kustomization.yaml
similarity index 100%
rename from examples/kubernetes/quickstarts/demo-app/overlays/haproxy-ingress/kustomization.yaml
rename to examples/kubernetes/quickstarts/demo-app/overlays/haproxy/kustomization.yaml
diff --git a/examples/kubernetes/quickstarts/proxy-demo/ingress.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/nginx-global/ingress.yaml
similarity index 100%
rename from examples/kubernetes/quickstarts/proxy-demo/ingress.yaml
rename to examples/kubernetes/quickstarts/demo-app/overlays/nginx-global/ingress.yaml
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/nginx-ingress/kustomization.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/nginx-global/kustomization.yaml
similarity index 100%
rename from examples/kubernetes/quickstarts/demo-app/overlays/nginx-ingress/kustomization.yaml
rename to examples/kubernetes/quickstarts/demo-app/overlays/nginx-global/kustomization.yaml
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/nginx-ingress/ingress.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/nginx-route-based/ingress.yaml
similarity index 100%
rename from examples/kubernetes/quickstarts/demo-app/overlays/nginx-ingress/ingress.yaml
rename to examples/kubernetes/quickstarts/demo-app/overlays/nginx-route-based/ingress.yaml
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/nginx-route-based/kustomization.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/nginx-route-based/kustomization.yaml
new file mode 100644
index 000000000..c9af21949
--- /dev/null
+++ b/examples/kubernetes/quickstarts/demo-app/overlays/nginx-route-based/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ../../base
+  - ingress.yaml
\ No newline at end of file
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/traefik-gw/http_route.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-gw/http_route.yaml
new file mode 100644
index 000000000..53d7ff87b
--- /dev/null
+++ b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-gw/http_route.yaml
@@ -0,0 +1,22 @@
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: echo-app
+  namespace: quickstarts
+spec:
+  parentRefs:
+    - name: traefik-gateway
+      namespace: traefik
+  hostnames:
+    - "echo-app.local"
+  rules:
+    - backendRefs:
+        - group: ""
+          kind: Service
+          name: echo-app
+          port: 8080
+          weight: 1
+      matches:
+        - path:
+            type: PathPrefix
+            value: /
\ No newline at end of file
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/traefik-gw/kustomization.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-gw/kustomization.yaml
new file mode 100644
index 000000000..d3588afd6
--- /dev/null
+++ b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-gw/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ../../base
+  - http_route.yaml
\ No newline at end of file
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress-route/ingress-route.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress-route/ingress-route.yaml
new file mode 100644
index 000000000..6e70d8c37
--- /dev/null
+++ b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress-route/ingress-route.yaml
@@ -0,0 +1,23 @@
+apiVersion: traefik.io/v1alpha1
+kind: IngressRoute
+metadata:
+  name: echo-app
+  namespace: quickstarts
+  labels:
+    app.kubernetes.io/name: echo-app
+spec:
+  entryPoints:
+    - websecure
+  routes:
+    - kind: Rule
+      match: Host(`echo-app.local`) && PathPrefix(`/`)
+      middlewares:
+        - name: heimdall
+          namespace: heimdall
+      services:
+        - kind: Service
+          name: echo-app
+          namespace: quickstarts
+          port: app-port
+  tls:
+    secretName: echo-app
\ No newline at end of file
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress-route/kustomization.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress-route/kustomization.yaml
new file mode 100644
index 000000000..a00b3bce9
--- /dev/null
+++ b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress-route/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ../../base
+  - ingress-route.yaml
\ No newline at end of file
diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress/ingress.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress/ingress.yaml
new file mode 100644
index 000000000..041e0f5a8
--- /dev/null
+++ b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress/ingress.yaml
@@ -0,0 +1,24 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: echo-app + namespace: quickstarts + labels: + app.kubernetes.io/name: echo-app +spec: + ingressClassName: "traefik" + tls: + - hosts: + - echo-app.local + secretName: echo-app + rules: + - host: echo-app.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: echo-app + port: + number: 8080 \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress/kustomization.yaml b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress/kustomization.yaml new file mode 100644 index 000000000..c9af21949 --- /dev/null +++ b/examples/kubernetes/quickstarts/demo-app/overlays/traefik-ingress/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../base + - ingress.yaml \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/heimdall/backend-tls-policy.yaml b/examples/kubernetes/quickstarts/heimdall/backend-tls-policy.yaml new file mode 100644 index 000000000..4e6fcc785 --- /dev/null +++ b/examples/kubernetes/quickstarts/heimdall/backend-tls-policy.yaml @@ -0,0 +1,18 @@ +apiVersion: gateway.networking.k8s.io/v1alpha2 +kind: BackendTLSPolicy +metadata: + name: heimdall-btls + namespace: heimdall +spec: + targetRef: + group: '' + kind: Service + namespace: heimdall + name: heimdall + sectionName: "4456" + tls: + caCertRefs: + - name: demo-ca + group: '' + kind: ConfigMap + hostname: heimdall \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/heimdall/certificate.yaml b/examples/kubernetes/quickstarts/heimdall/certificate.yaml index f02019738..bc1633e6f 100644 --- a/examples/kubernetes/quickstarts/heimdall/certificate.yaml +++ b/examples/kubernetes/quickstarts/heimdall/certificate.yaml @@ -8,7 +8,7 @@ spec: additionalOutputFormats: - type: CombinedPEM issuerRef: - name: selfsigned + name: demo-ca-issuer kind: ClusterIssuer duration: 720h # 30d renewBefore: 360h # 15d @@ -27,7 +27,19 @@ spec: - heimdall.heimdall - heimdall.heimdall.svc - heimdall.heimdall.svc.cluster.local - +--- +apiVersion: trust.cert-manager.io/v1alpha1 +kind: Bundle +metadata: + name: demo-ca +spec: + sources: + - secret: + name: heimdall-tls + key: ca.crt + target: + configMap: + key: ca.crt --- apiVersion: cert-manager.io/v1 kind: Certificate @@ -39,7 +51,7 @@ spec: additionalOutputFormats: - type: CombinedPEM issuerRef: - name: selfsigned + name: demo-ca-issuer kind: ClusterIssuer duration: 720h # 30d renewBefore: 360h # 15d @@ -54,4 +66,4 @@ spec: usages: - "digital signature" dnsNames: - - heimdall \ No newline at end of file + - heimdall diff --git a/examples/kubernetes/quickstarts/heimdall/config.yaml b/examples/kubernetes/quickstarts/heimdall/config.yaml index be481829e..3939adf2f 100644 --- a/examples/kubernetes/quickstarts/heimdall/config.yaml +++ b/examples/kubernetes/quickstarts/heimdall/config.yaml @@ -10,10 +10,6 @@ serve: trusted_proxies: - 0.0.0.0/0 -signer: - key_store: - path: /etc/heimdall/certs/jwt/tls-combined.pem - mechanisms: authenticators: - id: anonymous_authenticator @@ -28,21 +24,23 @@ mechanisms: finalizers: - id: create_jwt type: jwt + config: + signer: + key_store: + path: /etc/heimdall/certs/jwt/tls-combined.pem error_handlers: - id: redirect type: redirect - if: type(Error) == authentication_error config: to: http://foo.bar?origin={{ .Request.URL | urlenc }} + default_rule: - methods: - - GET - - POST execute: - 
authenticator: anonymous_authenticator - authorizer: deny_all_requests on_error: - - error_handler: redirect + - if: type(Error) == authentication_error + error_handler: redirect providers: kubernetes: diff --git a/examples/kubernetes/quickstarts/heimdall/envoygw-security-policy.yaml b/examples/kubernetes/quickstarts/heimdall/envoygw-security-policy.yaml new file mode 100644 index 000000000..d229d61ea --- /dev/null +++ b/examples/kubernetes/quickstarts/heimdall/envoygw-security-policy.yaml @@ -0,0 +1,19 @@ +apiVersion: gateway.envoyproxy.io/v1alpha1 +kind: SecurityPolicy +metadata: + name: ext-auth-heimdall + namespace: heimdall +spec: + targetRef: + group: gateway.networking.k8s.io + kind: Gateway + name: eg + namespace: heimdall + extAuth: + grpc: + backendRef: + name: heimdall + port: 4456 + namespace: heimdall +--- + diff --git a/examples/kubernetes/quickstarts/heimdall/heimdall-middleware.yaml b/examples/kubernetes/quickstarts/heimdall/heimdall-middleware.yaml new file mode 100644 index 000000000..de7fa32bf --- /dev/null +++ b/examples/kubernetes/quickstarts/heimdall/heimdall-middleware.yaml @@ -0,0 +1,12 @@ +apiVersion: traefik.io/v1alpha1 +kind: Middleware +metadata: + name: heimdall + namespace: heimdall +spec: + forwardAuth: + address: "https://heimdall.heimdall.svc.cluster.local:4456" + tls: + caSecret: heimdall-tls + authResponseHeaders: + - Authorization \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/heimdall/helm-values.yaml b/examples/kubernetes/quickstarts/heimdall/helm-values.yaml index 95710d3b2..7a3719c21 100644 --- a/examples/kubernetes/quickstarts/heimdall/helm-values.yaml +++ b/examples/kubernetes/quickstarts/heimdall/helm-values.yaml @@ -26,3 +26,6 @@ env: OTEL_EXPORTER_PROMETHEUS_HOST: "0.0.0.0" OTEL_EXPORTER_OTLP_PROTOCOL: "grpc" OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "http://tempo.monitoring:4317" + +image: + tag: 0.15.0 diff --git a/examples/kubernetes/quickstarts/proxy-demo/namespace.yaml b/examples/kubernetes/quickstarts/heimdall/namespace.yaml similarity index 67% rename from examples/kubernetes/quickstarts/proxy-demo/namespace.yaml rename to examples/kubernetes/quickstarts/heimdall/namespace.yaml index eb34edde7..49d28c231 100644 --- a/examples/kubernetes/quickstarts/proxy-demo/namespace.yaml +++ b/examples/kubernetes/quickstarts/heimdall/namespace.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: quickstarts + name: heimdall \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/heimdall1/certificate.yaml b/examples/kubernetes/quickstarts/heimdall1/certificate.yaml deleted file mode 100644 index 5b31c7cf3..000000000 --- a/examples/kubernetes/quickstarts/heimdall1/certificate.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: heimdall-tls - namespace: heimdall1 -spec: - secretName: heimdall-tls - additionalOutputFormats: - - type: CombinedPEM - issuerRef: - name: selfsigned - kind: ClusterIssuer - duration: 720h # 30d - renewBefore: 360h # 15d - subject: - organizations: - - heimdall1 - isCA: false - privateKey: - algorithm: RSA - encoding: PKCS1 - size: 2048 - usages: - - "server auth" - dnsNames: - - heimdall1 - - heimdall1.heimdall1 - - heimdall1.heimdall1.svc - - heimdall1.heimdall1.svc.cluster.local - ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: heimdall-jwt - namespace: heimdall1 -spec: - secretName: heimdall-jwt - additionalOutputFormats: - - type: CombinedPEM - issuerRef: - name: selfsigned - kind: 
ClusterIssuer - duration: 720h # 30d - renewBefore: 360h # 15d - subject: - organizations: - - heimdall - isCA: false - privateKey: - algorithm: RSA - encoding: PKCS1 - size: 2048 - usages: - - "digital signature" - dnsNames: - - heimdall1 \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/heimdall1/config.yaml b/examples/kubernetes/quickstarts/heimdall1/config.yaml deleted file mode 100644 index 4b6a57c9b..000000000 --- a/examples/kubernetes/quickstarts/heimdall1/config.yaml +++ /dev/null @@ -1,53 +0,0 @@ -log: - level: trace - -serve: - decision: - tls: - min_version: TLS1.2 - key_store: - path: /etc/heimdall/certs/ssl/tls-combined.pem - trusted_proxies: - - 0.0.0.0/0 - -signer: - key_store: - path: /etc/heimdall/certs/jwt/tls-combined.pem - -mechanisms: - authenticators: - - id: anonymous_authenticator - type: anonymous - - id: deny_authenticator - type: unauthorized - authorizers: - - id: deny_all_requests - type: deny - - id: allow_all_requests - type: allow - finalizers: - - id: create_jwt - type: jwt - error_handlers: - - id: redirect - type: redirect - if: type(Error) == authentication_error - config: - to: http://foo.bar?origin={{ .Request.URL | urlenc }} -default_rule: - methods: - - GET - - POST - execute: - - authenticator: anonymous_authenticator - - authorizer: deny_all_requests - on_error: - - error_handler: redirect - -providers: - kubernetes: - auth_class: bar - tls: - min_version: TLS1.2 - key_store: - path: /etc/heimdall/certs/ssl/tls-combined.pem \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/heimdall1/contour-extension-service.yaml b/examples/kubernetes/quickstarts/heimdall1/contour-extension-service.yaml deleted file mode 100644 index 3748bad6d..000000000 --- a/examples/kubernetes/quickstarts/heimdall1/contour-extension-service.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: projectcontour.io/v1alpha1 -kind: ExtensionService -metadata: - name: heimdall - namespace: heimdall -spec: - protocol: h2 - services: - - name: heimdall - port: 4456 \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/heimdall1/emissary-auth-service.yaml b/examples/kubernetes/quickstarts/heimdall1/emissary-auth-service.yaml deleted file mode 100644 index 9106e2b72..000000000 --- a/examples/kubernetes/quickstarts/heimdall1/emissary-auth-service.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: getambassador.io/v3alpha1 -kind: AuthService -metadata: - name: heimdall - namespace: heimdall -spec: - auth_service: "https://heimdall.heimdall.svc.cluster.local:4456" - proto: grpc - protocol_version: v3 diff --git a/examples/kubernetes/quickstarts/heimdall1/helm-values.yaml b/examples/kubernetes/quickstarts/heimdall1/helm-values.yaml deleted file mode 100644 index 0375cf117..000000000 --- a/examples/kubernetes/quickstarts/heimdall1/helm-values.yaml +++ /dev/null @@ -1,32 +0,0 @@ -deployment: - replicaCount: 2 - autoscaling: - enabled: false - volumes: - - name: tls-cert-volume - secret: - secretName: heimdall-tls - - name: jwt-cert-volume - secret: - secretName: heimdall-jwt - volumeMounts: - - name: tls-cert-volume - readOnly: true - mountPath: "/etc/heimdall/certs/ssl" - - name: jwt-cert-volume - readOnly: true - mountPath: "/etc/heimdall/certs/jwt" - -admissionController: - annotations: - cert-manager.io/inject-ca-from: heimdall1/heimdall-tls - -env: - OTEL_METRICS_EXPORTER: "prometheus" - OTEL_EXPORTER_PROMETHEUS_HOST: "0.0.0.0" - OTEL_EXPORTER_OTLP_PROTOCOL: "grpc" - OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "http://tempo.monitoring:4317" 
- -image: - repository: heimdall - tag: local diff --git a/examples/kubernetes/quickstarts/heimdall1/pod_monitor.yaml b/examples/kubernetes/quickstarts/heimdall1/pod_monitor.yaml deleted file mode 100644 index e695cf8da..000000000 --- a/examples/kubernetes/quickstarts/heimdall1/pod_monitor.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: PodMonitor -metadata: - name: heimdall-pod-monitor - namespace: heimdall - labels: - release: prometheus - app.kubernetes.io/instance: heimdall - app.kubernetes.io/name: heimdall - app.kubernetes.io/part-of: heimdall -spec: - selector: - matchLabels: - app.kubernetes.io/instance: heimdall - app.kubernetes.io/name: heimdall - podMetricsEndpoints: - - path: /metrics - port: http-metrics - scheme: http - interval: 30s - jobLabel: heimdall-pod-monitor - namespaceSelector: - matchNames: - - heimdall diff --git a/examples/kubernetes/quickstarts/proxy-demo/deployment.yaml b/examples/kubernetes/quickstarts/proxy-demo/deployment.yaml deleted file mode 100644 index 37faa1b12..000000000 --- a/examples/kubernetes/quickstarts/proxy-demo/deployment.yaml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: echo-app - namespace: quickstarts - labels: - app.kubernetes.io/name: echo-app -spec: - selector: - matchLabels: - app.kubernetes.io/name: echo-app - template: - metadata: - labels: - app.kubernetes.io/name: echo-app - spec: - automountServiceAccountToken: false - containers: - - name: heimdall - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - image: "heimdall:local" - args: [ "-c", "/heimdall/heimdall.yaml", "serve", "proxy2" ] - ports: - - name: http-port - protocol: TCP - containerPort: 4455 - volumeMounts: - - name: config - mountPath: /heimdall/heimdall.yaml - subPath: heimdall.yaml - readOnly: true - - name: rules - mountPath: /heimdall/rules.yaml - subPath: rules.yaml - readOnly: true - - name: echo-app - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - image: "containous/whoami:latest" - args: [ "--port", "8080", "--name", "echo-app" ] - - volumes: - - name: config - configMap: - name: heimdall-config - items: - - key: heimdall.yaml - path: heimdall.yaml - - name: rules - configMap: - name: heimdall-rules - items: - - key: rules.yaml - path: rules.yaml - diff --git a/examples/kubernetes/quickstarts/proxy-demo/heimdall-config.yaml b/examples/kubernetes/quickstarts/proxy-demo/heimdall-config.yaml deleted file mode 100644 index 53cfbe86c..000000000 --- a/examples/kubernetes/quickstarts/proxy-demo/heimdall-config.yaml +++ /dev/null @@ -1,52 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: heimdall-config - namespace: quickstarts - labels: - app.kubernetes.io/name: echo-app -immutable: true -data: - heimdall.yaml: | - log: - level: debug - - serve: - proxy: - trusted_proxies: - - 0.0.0.0/0 - - mechanisms: - authenticators: - - id: anonymous_authenticator - type: anonymous - - id: deny_authenticator - type: unauthorized - authorizers: - - id: deny_all_requests - type: deny - - id: allow_all_requests - type: allow - finalizers: - - id: create_jwt - type: jwt - error_handlers: - - id: redirect - type: redirect - if: type(Error) == authentication_error - config: - to: http://foo.bar?origin={{ .Request.URL | urlenc }} - - default_rule: - methods: - - GET - - POST - execute: - - authenticator: anonymous_authenticator - - authorizer: deny_all_requests - 
on_error: - - error_handler: redirect - - providers: - file_system: - src: /heimdall/rules.yaml \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/proxy-demo/heimdall-rules.yaml b/examples/kubernetes/quickstarts/proxy-demo/heimdall-rules.yaml deleted file mode 100644 index 076866291..000000000 --- a/examples/kubernetes/quickstarts/proxy-demo/heimdall-rules.yaml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: heimdall-rules - namespace: quickstarts - labels: - app.kubernetes.io/name: echo-app -immutable: true -data: - rules.yaml: | - version: "1alpha3" - rules: - - id: public-access - match: - url: <**>://<**>/pub/<**> - forward_to: - host: localhost:8080 - rewrite: - scheme: http - execute: - - authorizer: allow_all_requests - - - id: anonymous-access - match: - url: <**>://<**>/anon/<**> - forward_to: - host: localhost:8080 - rewrite: - scheme: http - execute: - - authorizer: allow_all_requests - - finalizer: create_jwt - - - id: redirect - match: - url: <**>://<**>/redir/<**> - forward_to: - host: localhost:8080 - rewrite: - scheme: http - execute: - - authenticator: deny_authenticator \ No newline at end of file diff --git a/examples/kubernetes/quickstarts/proxy-demo/kustomization.yaml b/examples/kubernetes/quickstarts/proxy-demo/kustomization.yaml deleted file mode 100644 index 9543649bf..000000000 --- a/examples/kubernetes/quickstarts/proxy-demo/kustomization.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - namespace.yaml - - heimdall-config.yaml - - heimdall-rules.yaml - - deployment.yaml - - service.yaml - - ingress.yaml diff --git a/examples/kubernetes/quickstarts/proxy-demo/service.yaml b/examples/kubernetes/quickstarts/proxy-demo/service.yaml deleted file mode 100644 index 88bdc4dc0..000000000 --- a/examples/kubernetes/quickstarts/proxy-demo/service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: echo-app - namespace: quickstarts - labels: - app.kubernetes.io/name: echo-app -spec: - ports: - - name: app-port - port: 8080 - targetPort: http-port - selector: - app.kubernetes.io/name: echo-app \ No newline at end of file diff --git a/examples/kubernetes/traefik/certificate.yaml b/examples/kubernetes/traefik/certificate.yaml new file mode 100644 index 000000000..e3f45c25e --- /dev/null +++ b/examples/kubernetes/traefik/certificate.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: traefik +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: traefik-tls + namespace: traefik +spec: + dnsNames: + - echo-app + - echo-app.default + - echo-app.default.svc + - echo-app.default.svc.cluster.local + - echo-app.local + ipAddresses: + - 172.18.255.200 + secretName: traefik-tls + issuerRef: + name: demo-ca-issuer + kind: ClusterIssuer \ No newline at end of file diff --git a/examples/kubernetes/traefik/global-mw-helm-values.yaml b/examples/kubernetes/traefik/global-mw-helm-values.yaml new file mode 100644 index 000000000..57bc22450 --- /dev/null +++ b/examples/kubernetes/traefik/global-mw-helm-values.yaml @@ -0,0 +1,31 @@ +logs: + general: + level: DEBUG + access: + enabled: true + +experimental: + kubernetesGateway: + enabled: true + namespacePolicy: All + certificate: + group: "" + kind: Secret + name: traefik-tls + +providers: + kubernetesCRD: + enabled: true + allowCrossNamespace: true + +ports: + traefik: + port: 9000 + expose: + default: true + web: + middlewares: + - 
heimdall-heimdall@kubernetescrd + websecure: + middlewares: + - heimdall-heimdall@kubernetescrd diff --git a/examples/kubernetes/traefik/helm-values.yaml b/examples/kubernetes/traefik/helm-values.yaml new file mode 100644 index 000000000..3e414d20b --- /dev/null +++ b/examples/kubernetes/traefik/helm-values.yaml @@ -0,0 +1,27 @@ +logs: + general: + level: DEBUG + access: + enabled: true + +experimental: + kubernetesGateway: + enabled: true + namespacePolicy: All + certificate: + group: core + kind: Secret + name: traefik-tls + +providers: + kubernetesCRD: + enabled: true + # without that the middleware must be deployed into the same namespace as the resource + # referencing it (IngressRoute) + allowCrossNamespace: true + +ports: + traefik: + port: 9000 + expose: + default: true diff --git a/go.mod b/go.mod index def0fd167..de6f38574 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.23.1 require ( github.com/Masterminds/sprig/v3 v3.3.0 github.com/alicebob/miniredis/v2 v2.33.0 + github.com/dadrus/httpsig v0.0.0-20240829181839-e878daf50fa2 github.com/dlclark/regexp2 v1.11.4 github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 github.com/elnormous/contenttype v1.0.4 @@ -39,7 +40,6 @@ require ( github.com/knadh/koanf/providers/rawbytes v0.1.0 github.com/knadh/koanf/providers/structs v0.1.0 github.com/knadh/koanf/v2 v2.1.1 - github.com/ory/ladon v1.3.0 github.com/pkg/errors v0.9.1 github.com/pquerna/cachecontrol v0.2.0 github.com/prometheus/client_golang v1.20.3 @@ -85,7 +85,6 @@ require ( k8s.io/api v0.31.1 k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.31.1 - k8s.io/klog/v2 v2.130.1 ) require ( @@ -132,6 +131,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dunglas/httpsfv v1.0.2 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect @@ -217,6 +217,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect diff --git a/go.sum b/go.sum index 5f9aaa6b1..b6a26c4de 100644 --- a/go.sum +++ b/go.sum @@ -100,6 +100,8 @@ github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnTh github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/dadrus/httpsig v0.0.0-20240829181839-e878daf50fa2 h1:Suc3X/EXDMTdXdIB1YdKbFMHR4XINZpJ0iI/zRH7VHI= +github.com/dadrus/httpsig v0.0.0-20240829181839-e878daf50fa2/go.mod h1:P31eM5Rh3dqq9FLr1QASaZsk8/8qIiKKUYFKjBC/yYc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -108,6 +110,8 @@ github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yA github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= +github.com/dunglas/httpsfv v1.0.2 h1:iERDp/YAfnojSDJ7PW3dj1AReJz4MrwbECSSE59JWL0= +github.com/dunglas/httpsfv v1.0.2/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg= github.com/elnormous/contenttype v1.0.4 h1:FjmVNkvQOGqSX70yvocph7keC8DtmJaLzTTq6ZOQCI8= github.com/elnormous/contenttype v1.0.4/go.mod h1:5KTOW8m1kdX1dLMiUJeN9szzR2xkngiv2K+RVZwWBbI= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= @@ -311,8 +315,6 @@ github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= -github.com/ory/ladon v1.3.0 h1:35Rc3O8d+mhFWxzmKs6Qj/ETQEHGEI5BmWQf8wtqFHk= -github.com/ory/ladon v1.3.0/go.mod h1:DyhUMpMSmkC2xWjXsCcfuueCO2jkWrjAYu2RfeXD8/c= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/internal/cache/factory.go b/internal/cache/factory.go index df34a64be..7da20d76a 100644 --- a/internal/cache/factory.go +++ b/internal/cache/factory.go @@ -16,14 +16,17 @@ package cache -import "github.com/dadrus/heimdall/internal/watcher" +import ( + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + "github.com/dadrus/heimdall/internal/watcher" +) type Factory interface { - Create(conf map[string]any, cw watcher.Watcher) (Cache, error) + Create(conf map[string]any, cw watcher.Watcher, co certificate.Observer) (Cache, error) } -type FactoryFunc func(conf map[string]any, cw watcher.Watcher) (Cache, error) +type FactoryFunc func(conf map[string]any, cw watcher.Watcher, co certificate.Observer) (Cache, error) -func (f FactoryFunc) Create(conf map[string]any, cw watcher.Watcher) (Cache, error) { - return f(conf, cw) +func (f FactoryFunc) Create(conf map[string]any, cw watcher.Watcher, co certificate.Observer) (Cache, error) { + return f(conf, cw, co) } diff --git a/internal/cache/factory_registry.go b/internal/cache/factory_registry.go index 4258d7d17..f5836ec18 100644 --- a/internal/cache/factory_registry.go +++ b/internal/cache/factory_registry.go @@ -21,6 +21,7 @@ import ( "sync" "github.com/dadrus/heimdall/internal/cache/noop" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" ) @@ -44,7 +45,7 @@ func Register(typ string, factory 
Factory) { factories[typ] = factory } -func Create(typ string, config map[string]any, cw watcher.Watcher) (Cache, error) { +func Create(typ string, config map[string]any, cw watcher.Watcher, co certificate.Observer) (Cache, error) { if typ == "noop" { return &noop.Cache{}, nil } @@ -57,5 +58,5 @@ func Create(typ string, config map[string]any, cw watcher.Watcher) (Cache, error return nil, errorchain.NewWithMessagef(ErrUnsupportedCacheType, "'%s'", typ) } - return factory.Create(config, cw) + return factory.Create(config, cw, co) } diff --git a/internal/cache/memory/cache.go b/internal/cache/memory/cache.go index 36605faa7..c2c7a5449 100644 --- a/internal/cache/memory/cache.go +++ b/internal/cache/memory/cache.go @@ -24,6 +24,7 @@ import ( "github.com/jellydator/ttlcache/v3" "github.com/dadrus/heimdall/internal/cache" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/watcher" ) @@ -34,7 +35,7 @@ func init() { // nolint: gochecknoinits cache.Register("in-memory", cache.FactoryFunc(NewCache)) } -func NewCache(_ map[string]any, _ watcher.Watcher) (cache.Cache, error) { +func NewCache(_ map[string]any, _ watcher.Watcher, _ certificate.Observer) (cache.Cache, error) { return &Cache{c: ttlcache.New[string, []byte](ttlcache.WithDisableTouchOnHit[string, []byte]())}, nil } diff --git a/internal/cache/memory/cache_test.go b/internal/cache/memory/cache_test.go index 6c21b71f2..c59d4b0de 100644 --- a/internal/cache/memory/cache_test.go +++ b/internal/cache/memory/cache_test.go @@ -86,7 +86,7 @@ func TestMemoryCacheUsage(t *testing.T) { } { t.Run("case="+tc.uc, func(t *testing.T) { // GIVEN - cache, _ := NewCache(nil, nil) + cache, _ := NewCache(nil, nil, nil) // WHEN tc.configureCache(t, cache) @@ -102,7 +102,7 @@ func TestMemoryCacheUsage(t *testing.T) { func TestMemoryCacheExpiration(t *testing.T) { t.Parallel() - cache, _ := NewCache(nil, nil) + cache, _ := NewCache(nil, nil, nil) cache.Set(context.TODO(), "baz", []byte("bar"), 1*time.Second) hits := 0 diff --git a/internal/cache/module/module.go b/internal/cache/module/module.go index c4159ac7d..e6b3e2dc7 100644 --- a/internal/cache/module/module.go +++ b/internal/cache/module/module.go @@ -26,6 +26,7 @@ import ( _ "github.com/dadrus/heimdall/internal/cache/memory" // to register the memory cache _ "github.com/dadrus/heimdall/internal/cache/redis" // to register the redis cache "github.com/dadrus/heimdall/internal/config" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/watcher" ) @@ -38,8 +39,13 @@ var Module = fx.Provide( ), ) -func newCache(conf *config.Configuration, logger zerolog.Logger, cw watcher.Watcher) (cache.Cache, error) { - cch, err := cache.Create(conf.Cache.Type, conf.Cache.Config, cw) +func newCache( + conf *config.Configuration, + logger zerolog.Logger, + cw watcher.Watcher, + co certificate.Observer, +) (cache.Cache, error) { + cch, err := cache.Create(conf.Cache.Type, conf.Cache.Config, cw, co) if err != nil { logger.Error().Err(err).Str("_type", conf.Cache.Type).Msg("Failed creating cache instance") diff --git a/internal/cache/module/module_test.go b/internal/cache/module/module_test.go index 20e3aede0..63322e0f4 100644 --- a/internal/cache/module/module_test.go +++ b/internal/cache/module/module_test.go @@ -138,7 +138,7 @@ func TestNewCache(t *testing.T) { } { t.Run("case="+tc.uc, func(t *testing.T) { // WHEN - cch, err := newCache(tc.conf, log.Logger, 
nil) + cch, err := newCache(tc.conf, log.Logger, nil, nil) // THEN tc.assert(t, err, cch) diff --git a/internal/cache/redis/cache_test.go b/internal/cache/redis/cache_test.go index c51bef289..5f5bd3864 100644 --- a/internal/cache/redis/cache_test.go +++ b/internal/cache/redis/cache_test.go @@ -36,7 +36,7 @@ func TestCacheUsage(t *testing.T) { "address": db.Addr(), "client_cache": map[string]any{"disabled": true}, "tls": map[string]any{"disabled": true}, - }, nil) + }, nil, nil) require.NoError(t, err) cch.Start(context.TODO()) diff --git a/internal/cache/redis/cluster.go b/internal/cache/redis/cluster.go index f99b3c0d9..ecf6bfb8a 100644 --- a/internal/cache/redis/cluster.go +++ b/internal/cache/redis/cluster.go @@ -20,6 +20,7 @@ import ( "time" "github.com/dadrus/heimdall/internal/cache" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/watcher" ) @@ -28,7 +29,7 @@ func init() { // nolint: gochecknoinits cache.Register("redis-cluster", cache.FactoryFunc(NewClusterCache)) } -func NewClusterCache(conf map[string]any, cw watcher.Watcher) (cache.Cache, error) { +func NewClusterCache(conf map[string]any, cw watcher.Watcher, co certificate.Observer) (cache.Cache, error) { type Config struct { baseConfig `mapstructure:",squash"` @@ -44,7 +45,7 @@ func NewClusterCache(conf map[string]any, cw watcher.Watcher) (cache.Cache, erro return nil, err } - opts, err := cfg.clientOptions(cw) + opts, err := cfg.clientOptions("redis-cluster", cw, co) if err != nil { return nil, err } diff --git a/internal/cache/redis/cluster_test.go b/internal/cache/redis/cluster_test.go index ef8227a8f..6d5ef4472 100644 --- a/internal/cache/redis/cluster_test.go +++ b/internal/cache/redis/cluster_test.go @@ -240,7 +240,7 @@ func TestNewClusterCache(t *testing.T) { require.NoError(t, err) // WHEN - cch, err := NewClusterCache(conf, nil) + cch, err := NewClusterCache(conf, nil, nil) if err == nil { defer cch.Stop(context.TODO()) } diff --git a/internal/cache/redis/config.go b/internal/cache/redis/config.go index 6bc997c7f..9484b46ba 100644 --- a/internal/cache/redis/config.go +++ b/internal/cache/redis/config.go @@ -31,6 +31,7 @@ import ( "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" "github.com/dadrus/heimdall/internal/x/tlsx" @@ -138,7 +139,11 @@ type baseConfig struct { TLS tlsConfig `mapstructure:"tls"` } -func (c baseConfig) clientOptions(cw watcher.Watcher) (rueidis.ClientOption, error) { +func (c baseConfig) clientOptions( + name string, + cw watcher.Watcher, + co certificate.Observer, +) (rueidis.ClientOption, error) { var ( tlsCfg *tls.Config err error @@ -148,6 +153,7 @@ func (c baseConfig) clientOptions(cw watcher.Watcher) (rueidis.ClientOption, err tlsCfg, err = tlsx.ToTLSConfig(&c.TLS.TLS, tlsx.WithClientAuthentication(len(c.TLS.KeyStore.Path) != 0), tlsx.WithSecretsWatcher(cw), + tlsx.WithCertificateObserver(name, co), ) if err != nil { return rueidis.ClientOption{}, err diff --git a/internal/cache/redis/sentinel.go b/internal/cache/redis/sentinel.go index 0548b9480..762be5b54 100644 --- a/internal/cache/redis/sentinel.go +++ b/internal/cache/redis/sentinel.go @@ -22,6 +22,7 @@ import ( "github.com/redis/rueidis" "github.com/dadrus/heimdall/internal/cache" + 
"github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/watcher" ) @@ -30,7 +31,7 @@ func init() { // nolint: gochecknoinits cache.Register("redis-sentinel", cache.FactoryFunc(NewSentinelCache)) } -func NewSentinelCache(conf map[string]any, cw watcher.Watcher) (cache.Cache, error) { +func NewSentinelCache(conf map[string]any, cw watcher.Watcher, co certificate.Observer) (cache.Cache, error) { type Config struct { baseConfig `mapstructure:",squash"` @@ -48,7 +49,7 @@ func NewSentinelCache(conf map[string]any, cw watcher.Watcher) (cache.Cache, err return nil, err } - opts, err := cfg.clientOptions(cw) + opts, err := cfg.clientOptions("redis-sentinel", cw, co) if err != nil { return nil, err } diff --git a/internal/cache/redis/sentinel_test.go b/internal/cache/redis/sentinel_test.go index 874168f9d..63037c55b 100644 --- a/internal/cache/redis/sentinel_test.go +++ b/internal/cache/redis/sentinel_test.go @@ -183,7 +183,7 @@ func TestNewSentinelCache(t *testing.T) { require.NoError(t, err) // WHEN - cch, err := NewSentinelCache(conf, nil) + cch, err := NewSentinelCache(conf, nil, nil) if err == nil { defer cch.Stop(context.TODO()) } diff --git a/internal/cache/redis/standalone.go b/internal/cache/redis/standalone.go index ddc61e512..f41724b49 100644 --- a/internal/cache/redis/standalone.go +++ b/internal/cache/redis/standalone.go @@ -20,6 +20,7 @@ import ( "time" "github.com/dadrus/heimdall/internal/cache" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/watcher" ) @@ -28,7 +29,7 @@ func init() { // nolint: gochecknoinits cache.Register("redis", cache.FactoryFunc(NewStandaloneCache)) } -func NewStandaloneCache(conf map[string]any, cw watcher.Watcher) (cache.Cache, error) { +func NewStandaloneCache(conf map[string]any, cw watcher.Watcher, co certificate.Observer) (cache.Cache, error) { type Config struct { baseConfig `mapstructure:",squash"` @@ -45,7 +46,7 @@ func NewStandaloneCache(conf map[string]any, cw watcher.Watcher) (cache.Cache, e return nil, err } - opts, err := cfg.clientOptions(cw) + opts, err := cfg.clientOptions("redis", cw, co) if err != nil { return nil, err } diff --git a/internal/cache/redis/standalone_test.go b/internal/cache/redis/standalone_test.go index bda91b86e..890b4f73f 100644 --- a/internal/cache/redis/standalone_test.go +++ b/internal/cache/redis/standalone_test.go @@ -345,7 +345,7 @@ func TestNewStandaloneCache(t *testing.T) { require.NoError(t, err) // WHEN - cch, err := NewStandaloneCache(conf, wm) + cch, err := NewStandaloneCache(conf, wm, nil) if err == nil { defer cch.Stop(context.TODO()) } diff --git a/internal/config/configuration.go b/internal/config/configuration.go index 0310d2acb..875e76b27 100644 --- a/internal/config/configuration.go +++ b/internal/config/configuration.go @@ -30,7 +30,6 @@ type Configuration struct { //nolint:musttag Tracing TracingConfig `koanf:"tracing"` Metrics MetricsConfig `koanf:"metrics"` Profiling ProfilingConfig `koanf:"profiling"` - Signer SignerConfig `koanf:"signer"` Cache CacheConfig `koanf:"cache"` Prototypes *MechanismPrototypes `koanf:"mechanisms,omitempty"` Default *DefaultRule `koanf:"default_rule,omitempty"` diff --git a/internal/config/default_configuration.go b/internal/config/default_configuration.go index 59217c244..6e6df0cfe 100644 --- a/internal/config/default_configuration.go +++ b/internal/config/default_configuration.go @@ -105,9 +105,6 @@ func defaultConfig() Configuration { 
Port: defaultProfilingServicePort, Host: loopbackIP, }, - Signer: SignerConfig{ - Name: "heimdall", - }, Prototypes: &MechanismPrototypes{}, } } diff --git a/internal/config/default_rule.go b/internal/config/default_rule.go index e905435bd..d7bf87e42 100644 --- a/internal/config/default_rule.go +++ b/internal/config/default_rule.go @@ -17,7 +17,7 @@ package config type DefaultRule struct { - Methods []string `koanf:"methods"` - Execute []MechanismConfig `koanf:"execute"` - ErrorHandler []MechanismConfig `koanf:"on_error"` + BacktrackingEnabled bool `koanf:"backtracking_enabled"` + Execute []MechanismConfig `koanf:"execute"` + ErrorHandler []MechanismConfig `koanf:"on_error"` } diff --git a/internal/config/serve.go b/internal/config/serve.go index b93b59f5c..434fb150a 100644 --- a/internal/config/serve.go +++ b/internal/config/serve.go @@ -80,7 +80,6 @@ type RespondConfig struct { ArgumentError ResponseOverride `koanf:"argument_error"` AuthenticationError ResponseOverride `koanf:"authentication_error"` AuthorizationError ResponseOverride `koanf:"authorization_error"` - BadMethodError ResponseOverride `koanf:"method_error"` CommunicationError ResponseOverride `koanf:"communication_error"` InternalError ResponseOverride `koanf:"internal_error"` NoRuleError ResponseOverride `koanf:"no_rule_error"` diff --git a/internal/config/test_data/test_config.yaml b/internal/config/test_data/test_config.yaml index 26060a256..42c6a0596 100644 --- a/internal/config/test_data/test_config.yaml +++ b/internal/config/test_data/test_config.yaml @@ -28,8 +28,6 @@ serve: code: 404 authorization_error: code: 404 - method_error: - code: 400 communication_error: code: 502 internal_error: @@ -163,13 +161,6 @@ profiling: host: 0.0.0.0 port: 6060 -signer: - name: foobar - key_store: - path: /opt/heimdall/keystore.pem - password: VeryInsecure! - key_id: foo - mechanisms: authenticators: - id: anonymous_authenticator @@ -423,12 +414,24 @@ mechanisms: - id: jwt type: jwt config: + signer: + name: foobar + key_store: + path: /opt/heimdall/keystore.pem + password: VeryInsecure! + key_id: foo ttl: 5m claims: | {"user": {{ quote .Subject.ID }} } - id: jwt_with_custom_header type: jwt config: + signer: + name: foobar + key_store: + path: /opt/heimdall/keystore.pem + password: VeryInsecure! 
+ key_id: foo ttl: 5m header: name: Foo @@ -464,22 +467,17 @@ mechanisms: type: default - id: authenticate_with_kratos type: redirect - if: | - ((type(Error) == authentication_error && Error.Source == "kratos_session_authenticator") || - type(Error) == authorization_error) && - Request.Header("Accept").contains("*/*") config: to: http://127.0.0.1:4433/self-service/login/browser?return_to={{ .Request.URL | urlenc }} default_rule: - methods: - - GET - - POST + backtracking_enabled: false execute: - authenticator: anonymous_authenticator - finalizer: jwt on_error: - error_handler: authenticate_with_kratos + if: type(Error) == authentication_error providers: file_system: @@ -491,8 +489,8 @@ providers: watch_interval: 5m endpoints: - url: http://foo.bar/rules.yaml - rule_path_match_prefix: /foo - enable_http_cache: true + http_cache: + enabled: true - url: http://bar.foo/rules.yaml headers: bla: bla @@ -511,10 +509,8 @@ providers: buckets: - url: gs://my-bucket prefix: service1 - rule_path_match_prefix: /service1 - url: gs://my-bucket prefix: service2 - rule_path_match_prefix: /service2 - url: s3://my-bucket/my-rule-set kubernetes: diff --git a/internal/handler/decision/module.go b/internal/handler/decision/module.go index e845b0eac..0516379e9 100644 --- a/internal/handler/decision/module.go +++ b/internal/handler/decision/module.go @@ -25,7 +25,6 @@ import ( "github.com/dadrus/heimdall/internal/cache" "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/handler/fxlcm" - "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/watcher" ) @@ -43,7 +42,6 @@ func newLifecycleManager( logger zerolog.Logger, cch cache.Cache, exec rule.Executor, - signer heimdall.JWTSigner, cw watcher.Watcher, ) *fxlcm.LifecycleManager { cfg := conf.Serve.Decision @@ -51,7 +49,7 @@ func newLifecycleManager( return &fxlcm.LifecycleManager{ ServiceName: "Decision", ServiceAddress: cfg.Address(), - Server: newService(conf, cch, logger, exec, signer), + Server: newService(conf, cch, logger, exec), Logger: logger, TLSConf: cfg.TLS, FileWatcher: cw, diff --git a/internal/handler/decision/request_context.go b/internal/handler/decision/request_context.go index a7ebed00d..cf0c0cde0 100644 --- a/internal/handler/decision/request_context.go +++ b/internal/handler/decision/request_context.go @@ -22,17 +22,15 @@ import ( "github.com/rs/zerolog" "github.com/dadrus/heimdall/internal/handler/requestcontext" - "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/rule" ) func newContextFactory( - signer heimdall.JWTSigner, responseCode int, ) requestcontext.ContextFactory { return requestcontext.FactoryFunc(func(rw http.ResponseWriter, req *http.Request) requestcontext.Context { return &requestContext{ - RequestContext: requestcontext.New(signer, req), + RequestContext: requestcontext.New(req), responseCode: responseCode, rw: rw, } diff --git a/internal/handler/decision/request_context_test.go b/internal/handler/decision/request_context_test.go index 96e3cfd53..208e2874a 100644 --- a/internal/handler/decision/request_context_test.go +++ b/internal/handler/decision/request_context_test.go @@ -134,7 +134,7 @@ func TestRequestContextFinalize(t *testing.T) { req, err := http.NewRequestWithContext(context.TODO(), http.MethodPost, "http://heimdall.local/foo", nil) require.NoError(t, err) - reqCtx := newContextFactory(nil, 
tc.code).Create(rw, req) + reqCtx := newContextFactory(tc.code).Create(rw, req) tc.setup(t, reqCtx) // WHEN diff --git a/internal/handler/decision/service.go b/internal/handler/decision/service.go index f9322a3d0..a049baaf6 100644 --- a/internal/handler/decision/service.go +++ b/internal/handler/decision/service.go @@ -36,7 +36,6 @@ import ( "github.com/dadrus/heimdall/internal/handler/middleware/http/recovery" "github.com/dadrus/heimdall/internal/handler/middleware/http/trustedproxy" "github.com/dadrus/heimdall/internal/handler/service" - "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/httpx" @@ -48,7 +47,6 @@ func newService( cch cache.Cache, log zerolog.Logger, exec rule.Executor, - signer heimdall.JWTSigner, ) *http.Server { cfg := conf.Serve.Decision eh := errorhandler.New( @@ -57,7 +55,6 @@ func newService( errorhandler.WithAuthenticationErrorCode(cfg.Respond.With.AuthenticationError.Code), errorhandler.WithAuthorizationErrorCode(cfg.Respond.With.AuthorizationError.Code), errorhandler.WithCommunicationErrorCode(cfg.Respond.With.CommunicationError.Code), - errorhandler.WithMethodErrorCode(cfg.Respond.With.BadMethodError.Code), errorhandler.WithNoRuleErrorCode(cfg.Respond.With.NoRuleError.Code), errorhandler.WithInternalServerErrorCode(cfg.Respond.With.InternalError.Code), ) @@ -87,7 +84,7 @@ func newService( otelmetrics.WithServerName(cfg.Address()), ), cachemiddleware.New(cch), - ).Then(service.NewHandler(newContextFactory(signer, acceptedCode), exec, eh)) + ).Then(service.NewHandler(newContextFactory(acceptedCode), exec, eh)) return &http.Server{ Handler: hc, diff --git a/internal/handler/decision/service_test.go b/internal/handler/decision/service_test.go index fc2861f0b..0f3bf5c38 100644 --- a/internal/handler/decision/service_test.go +++ b/internal/handler/decision/service_test.go @@ -96,13 +96,13 @@ func TestHandleDecisionEndpointRequest(t *testing.T) { configureMocks: func(t *testing.T, exec *mocks4.ExecutorMock) { t.Helper() - exec.EXPECT().Execute(mock.Anything).Return(nil, heimdall.ErrMethodNotAllowed) + exec.EXPECT().Execute(mock.Anything).Return(nil, heimdall.ErrNoRuleFound) }, assertResponse: func(t *testing.T, err error, response *http.Response) { t.Helper() require.NoError(t, err) - assert.Equal(t, http.StatusMethodNotAllowed, response.StatusCode) + assert.Equal(t, http.StatusNotFound, response.StatusCode) data, err := io.ReadAll(response.Body) require.NoError(t, err) @@ -554,7 +554,7 @@ func TestHandleDecisionEndpointRequest(t *testing.T) { srvConf.Host = "127.0.0.1" srvConf.Port = port - listener, err := listener.New("tcp", srvConf.Address(), srvConf.TLS, nil) + listener, err := listener.New("tcp", "test", srvConf.Address(), srvConf.TLS, nil, nil) require.NoError(t, err) conf := &config.Configuration{Serve: config.ServeConfig{Decision: srvConf}} @@ -565,7 +565,7 @@ func TestHandleDecisionEndpointRequest(t *testing.T) { client := &http.Client{Transport: &http.Transport{}} - decision := newService(conf, cch, log.Logger, exec, nil) + decision := newService(conf, cch, log.Logger, exec) defer decision.Shutdown(context.Background()) go func() { diff --git a/internal/handler/envoyextauth/grpcv3/handler.go b/internal/handler/envoyextauth/grpcv3/handler.go index 66a5c6538..2219a2cc5 100644 --- a/internal/handler/envoyextauth/grpcv3/handler.go +++ b/internal/handler/envoyextauth/grpcv3/handler.go 
@@ -21,17 +21,15 @@ import ( envoy_auth "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3" - "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/rule" ) type Handler struct { e rule.Executor - s heimdall.JWTSigner } func (h *Handler) Check(ctx context.Context, req *envoy_auth.CheckRequest) (*envoy_auth.CheckResponse, error) { - reqCtx := NewRequestContext(ctx, req, h.s) + reqCtx := NewRequestContext(ctx, req) _, err := h.e.Execute(reqCtx) if err != nil { diff --git a/internal/handler/envoyextauth/grpcv3/handler_test.go b/internal/handler/envoyextauth/grpcv3/handler_test.go index eabcdc98f..f41280fc6 100644 --- a/internal/handler/envoyextauth/grpcv3/handler_test.go +++ b/internal/handler/envoyextauth/grpcv3/handler_test.go @@ -70,17 +70,17 @@ func TestHandleDecisionEndpointRequest(t *testing.T) { configureMocks: func(t *testing.T, exec *mocks2.ExecutorMock) { t.Helper() - exec.EXPECT().Execute(mock.Anything).Return(nil, heimdall.ErrMethodNotAllowed) + exec.EXPECT().Execute(mock.Anything).Return(nil, heimdall.ErrNoRuleFound) }, assertResponse: func(t *testing.T, err error, response *envoy_auth.CheckResponse) { t.Helper() require.NoError(t, err) - assert.Equal(t, int32(codes.InvalidArgument), response.GetStatus().GetCode()) + assert.Equal(t, int32(codes.NotFound), response.GetStatus().GetCode()) deniedResponse := response.GetDeniedResponse() require.NotNil(t, deniedResponse) - assert.Equal(t, typev3.StatusCode(http.StatusMethodNotAllowed), deniedResponse.GetStatus().GetCode()) + assert.Equal(t, typev3.StatusCode(http.StatusNotFound), deniedResponse.GetStatus().GetCode()) assert.Empty(t, deniedResponse.GetBody()) assert.Empty(t, deniedResponse.GetHeaders()) }, @@ -205,7 +205,7 @@ func TestHandleDecisionEndpointRequest(t *testing.T) { tc.configureMocks(t, exec) - srv := newService(conf, cch, log.Logger, exec, nil) + srv := newService(conf, cch, log.Logger, exec) defer srv.Stop() diff --git a/internal/handler/envoyextauth/grpcv3/module.go b/internal/handler/envoyextauth/grpcv3/module.go index 49138c1a7..83216d60d 100644 --- a/internal/handler/envoyextauth/grpcv3/module.go +++ b/internal/handler/envoyextauth/grpcv3/module.go @@ -25,7 +25,6 @@ import ( "github.com/dadrus/heimdall/internal/cache" "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/handler/fxlcm" - "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/watcher" ) @@ -42,7 +41,6 @@ func newLifecycleManager( conf *config.Configuration, logger zerolog.Logger, exec rule.Executor, - signer heimdall.JWTSigner, cch cache.Cache, cw watcher.Watcher, ) *fxlcm.LifecycleManager { @@ -52,7 +50,7 @@ func newLifecycleManager( ServiceName: "Decision Envoy ExtAuth", ServiceAddress: cfg.Address(), Server: &adapter{ - s: newService(conf, cch, logger, exec, signer), + s: newService(conf, cch, logger, exec), }, Logger: logger, TLSConf: cfg.TLS, diff --git a/internal/handler/envoyextauth/grpcv3/request_context.go b/internal/handler/envoyextauth/grpcv3/request_context.go index b3d784d75..727cf2754 100644 --- a/internal/handler/envoyextauth/grpcv3/request_context.go +++ b/internal/handler/envoyextauth/grpcv3/request_context.go @@ -45,13 +45,13 @@ type RequestContext struct { reqRawBody []byte upstreamHeaders http.Header upstreamCookies map[string]string - jwtSigner heimdall.JWTSigner err error savedBody any + outputs 
map[string]any } -func NewRequestContext(ctx context.Context, req *envoy_auth.CheckRequest, signer heimdall.JWTSigner) *RequestContext { +func NewRequestContext(ctx context.Context, req *envoy_auth.CheckRequest) *RequestContext { var clientIPs []string if rmd, ok := metadata.FromIncomingContext(ctx); ok { @@ -75,7 +75,6 @@ func NewRequestContext(ctx context.Context, req *envoy_auth.CheckRequest, signer }, reqBody: req.GetAttributes().GetRequest().GetHttp().GetBody(), reqRawBody: req.GetAttributes().GetRequest().GetHttp().GetRawBody(), - jwtSigner: signer, upstreamHeaders: make(http.Header), upstreamCookies: make(map[string]string), } @@ -95,7 +94,7 @@ func (r *RequestContext) Request() *heimdall.Request { return &heimdall.Request{ RequestFunctions: r, Method: r.reqMethod, - URL: r.reqURL, + URL: &heimdall.URL{URL: *r.reqURL}, ClientIPAddresses: r.ips, } } @@ -144,7 +143,14 @@ func (r *RequestContext) AppContext() context.Context { return r.ctx func (r *RequestContext) SetPipelineError(err error) { r.err = err } func (r *RequestContext) AddHeaderForUpstream(name, value string) { r.upstreamHeaders.Add(name, value) } func (r *RequestContext) AddCookieForUpstream(name, value string) { r.upstreamCookies[name] = value } -func (r *RequestContext) Signer() heimdall.JWTSigner { return r.jwtSigner } + +func (r *RequestContext) Outputs() map[string]any { + if r.outputs == nil { + r.outputs = make(map[string]any) + } + + return r.outputs +} func (r *RequestContext) Finalize() (*envoy_auth.CheckResponse, error) { if r.err != nil { diff --git a/internal/handler/envoyextauth/grpcv3/request_context_test.go b/internal/handler/envoyextauth/grpcv3/request_context_test.go index 71fc15808..8d9347daf 100644 --- a/internal/handler/envoyextauth/grpcv3/request_context_test.go +++ b/internal/handler/envoyextauth/grpcv3/request_context_test.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc/metadata" "github.com/dadrus/heimdall/internal/heimdall" - "github.com/dadrus/heimdall/internal/heimdall/mocks" ) func TestNewRequestContext(t *testing.T) { @@ -69,7 +68,6 @@ func TestNewRequestContext(t *testing.T) { md, ), checkReq, - mocks.NewJWTSignerMock(t), ) // THEN @@ -87,7 +85,6 @@ func TestNewRequestContext(t *testing.T) { require.Equal(t, "baz", ctx.Request().Cookie("foo")) require.Empty(t, ctx.Request().Cookie("baz")) require.NotNil(t, ctx.AppContext()) - require.NotNil(t, ctx.Signer()) assert.Equal(t, []string{"127.0.0.1", "192.168.1.1"}, ctx.Request().ClientIPAddresses) } @@ -237,7 +234,7 @@ func TestFinalizeRequestContext(t *testing.T) { }, }, } - ctx := NewRequestContext(context.Background(), checkReq, nil) + ctx := NewRequestContext(context.Background(), checkReq) tc.updateContext(t, ctx) @@ -315,7 +312,6 @@ func TestRequestContextBody(t *testing.T) { }, }, }, - nil, ) // WHEN diff --git a/internal/handler/envoyextauth/grpcv3/service.go b/internal/handler/envoyextauth/grpcv3/service.go index ba9449378..be04e89f6 100644 --- a/internal/handler/envoyextauth/grpcv3/service.go +++ b/internal/handler/envoyextauth/grpcv3/service.go @@ -33,7 +33,6 @@ import ( "github.com/dadrus/heimdall/internal/handler/middleware/grpc/errorhandler" loggermiddleware "github.com/dadrus/heimdall/internal/handler/middleware/grpc/logger" "github.com/dadrus/heimdall/internal/handler/middleware/grpc/otelmetrics" - "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/rule" ) @@ -42,7 +41,6 @@ func newService( cch cache.Cache, logger zerolog.Logger, 
exec rule.Executor, - signer heimdall.JWTSigner, ) *grpc.Server { service := conf.Serve.Decision accessLogger := accesslogmiddleware.New(logger) @@ -72,7 +70,6 @@ func newService( errorhandler.WithAuthenticationErrorCode(service.Respond.With.AuthenticationError.Code), errorhandler.WithAuthorizationErrorCode(service.Respond.With.AuthorizationError.Code), errorhandler.WithCommunicationErrorCode(service.Respond.With.CommunicationError.Code), - errorhandler.WithMethodErrorCode(service.Respond.With.BadMethodError.Code), errorhandler.WithNoRuleErrorCode(service.Respond.With.NoRuleError.Code), errorhandler.WithInternalServerErrorCode(service.Respond.With.InternalError.Code), ), @@ -99,7 +96,7 @@ func newService( grpc.ChainStreamInterceptor(streamInterceptors...), ) - envoy_auth.RegisterAuthorizationServer(srv, &Handler{e: exec, s: signer}) + envoy_auth.RegisterAuthorizationServer(srv, &Handler{e: exec}) return srv } diff --git a/internal/handler/fxlcm/lifecycle_manager.go b/internal/handler/fxlcm/lifecycle_manager.go index 3b049de16..15bb1384d 100644 --- a/internal/handler/fxlcm/lifecycle_manager.go +++ b/internal/handler/fxlcm/lifecycle_manager.go @@ -26,6 +26,7 @@ import ( "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/handler/listener" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/watcher" ) @@ -37,16 +38,17 @@ type Server interface { } type LifecycleManager struct { - ServiceName string - ServiceAddress string - Server Server - Logger zerolog.Logger - TLSConf *config.TLS - FileWatcher watcher.Watcher + ServiceName string + ServiceAddress string + Server Server + Logger zerolog.Logger + TLSConf *config.TLS + FileWatcher watcher.Watcher + CertificateObserver certificate.Observer } func (m *LifecycleManager) Start(_ context.Context) error { - ln, err := listener.New("tcp", m.ServiceAddress, m.TLSConf, m.FileWatcher) + ln, err := listener.New("tcp", m.ServiceName, m.ServiceAddress, m.TLSConf, m.FileWatcher, m.CertificateObserver) if err != nil { m.Logger.Fatal().Err(err).Str("_service", m.ServiceName).Msg("Could not create listener") diff --git a/internal/handler/listener/listener.go b/internal/handler/listener/listener.go index d07ada08d..84b7b2095 100644 --- a/internal/handler/listener/listener.go +++ b/internal/handler/listener/listener.go @@ -23,6 +23,7 @@ import ( "time" "github.com/dadrus/heimdall/internal/config" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/tlsx" ) @@ -89,7 +90,12 @@ func (l *listener) Accept() (net.Conn, error) { return &conn{Conn: con}, nil } -func New(network, address string, tlsConf *config.TLS, cw watcher.Watcher) (net.Listener, error) { +func New( + network, name, address string, + tlsConf *config.TLS, + cw watcher.Watcher, + co certificate.Observer, +) (net.Listener, error) { listnr, err := net.Listen(network, address) if err != nil { return nil, err @@ -98,16 +104,23 @@ func New(network, address string, tlsConf *config.TLS, cw watcher.Watcher) (net. 
listnr = &listener{Listener: listnr} if tlsConf != nil { - return newTLSListener(tlsConf, listnr, cw) + return newTLSListener(name, tlsConf, listnr, cw, co) } return listnr, nil } -func newTLSListener(tlsConf *config.TLS, listener net.Listener, cw watcher.Watcher) (net.Listener, error) { +func newTLSListener( + name string, + tlsConf *config.TLS, + listener net.Listener, + cw watcher.Watcher, + co certificate.Observer, +) (net.Listener, error) { cfg, err := tlsx.ToTLSConfig(tlsConf, tlsx.WithServerAuthentication(true), tlsx.WithSecretsWatcher(cw), + tlsx.WithCertificateObserver(name, co), ) if err != nil { return nil, err diff --git a/internal/handler/listener/listener_test.go b/internal/handler/listener/listener_test.go index d81308551..2f6e3da35 100644 --- a/internal/handler/listener/listener_test.go +++ b/internal/handler/listener/listener_test.go @@ -178,7 +178,7 @@ func TestNewListener(t *testing.T) { tc.serviceConf.Port = port // WHEN - ln, err := New(tc.network, tc.serviceConf.Address(), tc.serviceConf.TLS, nil) + ln, err := New(tc.network, "test", tc.serviceConf.Address(), tc.serviceConf.TLS, nil, nil) // THEN defer func() { diff --git a/internal/handler/management/handler.go b/internal/handler/management/handler.go index 35b87ece8..414332426 100644 --- a/internal/handler/management/handler.go +++ b/internal/handler/management/handler.go @@ -27,13 +27,13 @@ import ( "github.com/dadrus/heimdall/internal/handler/middleware/http/errorhandler" "github.com/dadrus/heimdall/internal/handler/middleware/http/methodfilter" - "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/keyholder" ) -func newManagementHandler(signer heimdall.JWTSigner, eh errorhandler.ErrorHandler) http.Handler { +func newManagementHandler(khr keyholder.Registry, eh errorhandler.ErrorHandler) http.Handler { mh := &handler{ - s: signer, - eh: eh, + khr: khr, + eh: eh, } mux := http.NewServeMux() @@ -49,14 +49,14 @@ func newManagementHandler(signer heimdall.JWTSigner, eh errorhandler.ErrorHandle } type handler struct { - s heimdall.JWTSigner - eh errorhandler.ErrorHandler + khr keyholder.Registry + eh errorhandler.ErrorHandler } // jwks implements an endpoint returning JWKS objects according to // https://datatracker.ietf.org/doc/html/rfc7517 func (h *handler) jwks(rw http.ResponseWriter, req *http.Request) { - res, err := json.Marshal(jose.JSONWebKeySet{Keys: h.s.Keys()}) + res, err := json.Marshal(jose.JSONWebKeySet{Keys: h.khr.Keys()}) if err != nil { zerolog.Ctx(req.Context()).Error().Err(err).Msg("Failed to marshal json web key set object") h.eh.HandleError(rw, req, err) diff --git a/internal/handler/management/module.go b/internal/handler/management/module.go index deb0f460e..428dd6e6f 100644 --- a/internal/handler/management/module.go +++ b/internal/handler/management/module.go @@ -24,7 +24,7 @@ import ( "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/handler/fxlcm" - "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/keyholder" "github.com/dadrus/heimdall/internal/watcher" ) @@ -39,7 +39,7 @@ var Module = fx.Invoke( // nolint: gochecknoglobals func newLifecycleManager( conf *config.Configuration, logger zerolog.Logger, - signer heimdall.JWTSigner, + khr keyholder.Registry, cw watcher.Watcher, ) *fxlcm.LifecycleManager { cfg := conf.Serve.Management @@ -47,7 +47,7 @@ func newLifecycleManager( return &fxlcm.LifecycleManager{ ServiceName: 
"Management", ServiceAddress: cfg.Address(), - Server: newService(conf, logger, signer), + Server: newService(conf, logger, khr), Logger: logger, TLSConf: cfg.TLS, FileWatcher: cw, diff --git a/internal/handler/management/service.go b/internal/handler/management/service.go index 10f5862be..a2dede863 100644 --- a/internal/handler/management/service.go +++ b/internal/handler/management/service.go @@ -34,7 +34,7 @@ import ( "github.com/dadrus/heimdall/internal/handler/middleware/http/otelmetrics" "github.com/dadrus/heimdall/internal/handler/middleware/http/passthrough" "github.com/dadrus/heimdall/internal/handler/middleware/http/recovery" - "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/keyholder" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/httpx" "github.com/dadrus/heimdall/internal/x/loggeradapter" @@ -43,7 +43,7 @@ import ( func newService( conf *config.Configuration, log zerolog.Logger, - signer heimdall.JWTSigner, + khr keyholder.Registry, ) *http.Server { cfg := conf.Serve.Management eh := errorhandler2.New() @@ -82,7 +82,7 @@ func newService( }, func() func(http.Handler) http.Handler { return passthrough.New }, ), - ).Then(newManagementHandler(signer, eh)) + ).Then(newManagementHandler(khr, eh)) return &http.Server{ Handler: hc, diff --git a/internal/handler/management/service_test.go b/internal/handler/management/service_test.go index 0ef9f3107..889ee77c1 100644 --- a/internal/handler/management/service_test.go +++ b/internal/handler/management/service_test.go @@ -35,7 +35,7 @@ import ( "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/handler/listener" - "github.com/dadrus/heimdall/internal/heimdall/mocks" + "github.com/dadrus/heimdall/internal/keyholder/mocks" "github.com/dadrus/heimdall/internal/keystore" "github.com/dadrus/heimdall/internal/x/pkix/pemx" "github.com/dadrus/heimdall/internal/x/testsupport" @@ -48,10 +48,10 @@ type ServiceTestSuite struct { ee1 *testsupport.EndEntity ee2 *testsupport.EndEntity - srv *http.Server - ks keystore.KeyStore - signer *mocks.JWTSignerMock - addr string + srv *http.Server + ks keystore.KeyStore + addr string + khr *mocks.RegistryMock } func (suite *ServiceTestSuite) SetupSuite() { @@ -123,12 +123,12 @@ func (suite *ServiceTestSuite) SetupTest() { Metrics: config.MetricsConfig{Enabled: true}, } - listener, err := listener.New("tcp", conf.Serve.Management.Address(), conf.Serve.Management.TLS, nil) + listener, err := listener.New("tcp", "test", conf.Serve.Management.Address(), conf.Serve.Management.TLS, nil, nil) suite.Require().NoError(err) suite.addr = "http://" + listener.Addr().String() - suite.signer = mocks.NewJWTSignerMock(suite.T()) - suite.srv = newService(conf, log.Logger, suite.signer) + suite.khr = mocks.NewRegistryMock(suite.T()) + suite.srv = newService(conf, log.Logger, suite.khr) go func() { suite.srv.Serve(listener) @@ -152,7 +152,7 @@ func (suite *ServiceTestSuite) TestJWKSRequestWithoutEtagUsage() { keys[idx] = entry.JWK() } - suite.signer.EXPECT().Keys().Return(keys) + suite.khr.EXPECT().Keys().Return(keys) // WHEN client := &http.Client{Transport: &http.Transport{}} @@ -216,7 +216,7 @@ func (suite *ServiceTestSuite) TestJWKSRequestWithEtagUsage() { keys[idx] = entry.JWK() } - suite.signer.EXPECT().Keys().Return(keys) + suite.khr.EXPECT().Keys().Return(keys) client := &http.Client{Transport: 
&http.Transport{}} req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, suite.addr+"/.well-known/jwks", nil) diff --git a/internal/handler/middleware/grpc/errorhandler/defaults.go b/internal/handler/middleware/grpc/errorhandler/defaults.go index 39f6f56ba..846d4a32b 100644 --- a/internal/handler/middleware/grpc/errorhandler/defaults.go +++ b/internal/handler/middleware/grpc/errorhandler/defaults.go @@ -27,7 +27,6 @@ var defaultOptions = opts{ //nolint:gochecknoglobals authorizationError: responseWith(codes.PermissionDenied, http.StatusForbidden), communicationError: responseWith(codes.DeadlineExceeded, http.StatusBadGateway), preconditionError: responseWith(codes.InvalidArgument, http.StatusBadRequest), - badMethodError: responseWith(codes.InvalidArgument, http.StatusMethodNotAllowed), noRuleError: responseWith(codes.NotFound, http.StatusNotFound), internalError: responseWith(codes.Internal, http.StatusInternalServerError), } diff --git a/internal/handler/middleware/grpc/errorhandler/interceptor.go b/internal/handler/middleware/grpc/errorhandler/interceptor.go index c9081f615..023735977 100644 --- a/internal/handler/middleware/grpc/errorhandler/interceptor.go +++ b/internal/handler/middleware/grpc/errorhandler/interceptor.go @@ -67,8 +67,6 @@ func (h *interceptor) intercept( return h.communicationError(err, h.verboseErrors, acceptType(req)) case errors.Is(err, heimdall.ErrArgument): return h.preconditionError(err, h.verboseErrors, acceptType(req)) - case errors.Is(err, heimdall.ErrMethodNotAllowed): - return h.badMethodError(err, h.verboseErrors, acceptType(req)) case errors.Is(err, heimdall.ErrNoRuleFound): return h.noRuleError(err, h.verboseErrors, acceptType(req)) case errors.Is(err, &heimdall.RedirectError{}): diff --git a/internal/handler/middleware/grpc/errorhandler/interceptor_test.go b/internal/handler/middleware/grpc/errorhandler/interceptor_test.go index 87d73f3be..c782d26e2 100644 --- a/internal/handler/middleware/grpc/errorhandler/interceptor_test.go +++ b/internal/handler/middleware/grpc/errorhandler/interceptor_test.go @@ -164,28 +164,6 @@ func TestErrorInterceptor(t *testing.T) { expHTTPCode: http.StatusBadRequest, expBody: "
argument error
", }, - { - uc: "method error default", - interceptor: New(), - err: heimdall.ErrMethodNotAllowed, - expGRPCCode: codes.InvalidArgument, - expHTTPCode: http.StatusMethodNotAllowed, - }, - { - uc: "method error overridden", - interceptor: New(WithMethodErrorCode(http.StatusContinue)), - err: heimdall.ErrMethodNotAllowed, - expGRPCCode: codes.InvalidArgument, - expHTTPCode: http.StatusContinue, - }, - { - uc: "method error verbose", - interceptor: New(WithVerboseErrors(true)), - err: heimdall.ErrMethodNotAllowed, - expGRPCCode: codes.InvalidArgument, - expHTTPCode: http.StatusMethodNotAllowed, - expBody: "
method not allowed
", - }, { uc: "no rule error default", interceptor: New(), diff --git a/internal/handler/middleware/grpc/errorhandler/options.go b/internal/handler/middleware/grpc/errorhandler/options.go index 4567a45f1..8eac5ca6a 100644 --- a/internal/handler/middleware/grpc/errorhandler/options.go +++ b/internal/handler/middleware/grpc/errorhandler/options.go @@ -24,7 +24,6 @@ type opts struct { authorizationError func(err error, verbose bool, mimeType string) (any, error) communicationError func(err error, verbose bool, mimeType string) (any, error) preconditionError func(err error, verbose bool, mimeType string) (any, error) - badMethodError func(err error, verbose bool, mimeType string) (any, error) noRuleError func(err error, verbose bool, mimeType string) (any, error) internalError func(err error, verbose bool, mimeType string) (any, error) } @@ -71,14 +70,6 @@ func WithInternalServerErrorCode(code int) Option { } } -func WithMethodErrorCode(code int) Option { - return func(o *opts) { - if code > 0 { - o.badMethodError = responseWith(codes.InvalidArgument, code) - } - } -} - func WithNoRuleErrorCode(code int) Option { return func(o *opts) { if code > 0 { diff --git a/internal/handler/middleware/grpc/otelmetrics/interceptor.go b/internal/handler/middleware/grpc/otelmetrics/interceptor.go index 3dabb3f8b..22c537f31 100644 --- a/internal/handler/middleware/grpc/otelmetrics/interceptor.go +++ b/internal/handler/middleware/grpc/otelmetrics/interceptor.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "google.golang.org/grpc" "google.golang.org/grpc/peer" @@ -131,10 +131,10 @@ func peerAttr(addr string) []attribute.KeyValue { } if ip := net.ParseIP(host); ip != nil { - return []attribute.KeyValue{semconv.NetSockPeerAddr(host), semconv.NetSockPeerPort(port)} // nolint: staticcheck + return []attribute.KeyValue{semconv.NetworkPeerAddress(host), semconv.NetworkPeerPort(port)} } - return []attribute.KeyValue{semconv.NetPeerName(host), semconv.NetPeerPort(port)} // nolint: staticcheck + return []attribute.KeyValue{semconv.ClientAddress(host), semconv.ClientPort(port)} } func parseFullMethod(fullMethod string) (string, []attribute.KeyValue) { @@ -187,7 +187,7 @@ func serverAttr(addr string) []attribute.KeyValue { } return []attribute.KeyValue{ - attribute.Key("server.address").String(host), - attribute.Key("server.port").Int(port), + semconv.ServerAddress(host), + semconv.ServerPort(port), } } diff --git a/internal/handler/middleware/grpc/otelmetrics/interceptor_test.go b/internal/handler/middleware/grpc/otelmetrics/interceptor_test.go index 7b7947f12..f652f0ddb 100644 --- a/internal/handler/middleware/grpc/otelmetrics/interceptor_test.go +++ b/internal/handler/middleware/grpc/otelmetrics/interceptor_test.go @@ -31,7 +31,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" rpc_status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -101,8 +101,8 @@ func TestHandlerObserveKnownRequests(t *testing.T) { assert.Equal(t, "heimdall.local", attributeValue(activeRequests.DataPoints[0].Attributes, "server.address").AsString()) assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue("server.port")) - 
assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetSockPeerAddrKey)) - assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetSockPeerPortKey)) + assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetworkPeerAddressKey)) + assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetworkPeerPortKey)) }, }, { @@ -152,8 +152,8 @@ func TestHandlerObserveKnownRequests(t *testing.T) { assert.Equal(t, "heimdall.local", attributeValue(activeRequests.DataPoints[0].Attributes, "server.address").AsString()) assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue("server.port")) - assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetSockPeerAddrKey)) - assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetSockPeerPortKey)) + assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetworkPeerAddressKey)) + assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetworkPeerPortKey)) }, }, { @@ -197,8 +197,8 @@ func TestHandlerObserveKnownRequests(t *testing.T) { assert.Equal(t, "heimdall.local", attributeValue(activeRequests.DataPoints[0].Attributes, "server.address").AsString()) assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue("server.port")) - assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetSockPeerAddrKey)) - assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetSockPeerPortKey)) + assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetworkPeerAddressKey)) + assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetworkPeerPortKey)) }, }, } { @@ -347,6 +347,6 @@ func TestHandlerObserveUnknownRequests(t *testing.T) { assert.Equal(t, "127.0.0.1", attributeValue(activeRequests.DataPoints[0].Attributes, "server.address").AsString()) assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue("server.port")) - assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetSockPeerAddrKey)) - assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetSockPeerPortKey)) + assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetworkPeerAddressKey)) + assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue(semconv.NetworkPeerAddressKey)) } diff --git a/internal/handler/middleware/http/errorhandler/defaults.go b/internal/handler/middleware/http/errorhandler/defaults.go index 493441432..06981c425 100644 --- a/internal/handler/middleware/http/errorhandler/defaults.go +++ b/internal/handler/middleware/http/errorhandler/defaults.go @@ -26,7 +26,6 @@ func defaultOptions() *opts { defaults.onAuthorizationError = errorWriter(defaults, http.StatusForbidden) defaults.onCommunicationError = errorWriter(defaults, http.StatusBadGateway) defaults.onPreconditionError = errorWriter(defaults, http.StatusBadRequest) - defaults.onBadMethodError = errorWriter(defaults, http.StatusMethodNotAllowed) defaults.onNoRuleError = errorWriter(defaults, http.StatusNotFound) defaults.onInternalError = errorWriter(defaults, http.StatusInternalServerError) diff --git a/internal/handler/middleware/http/errorhandler/error_handler.go b/internal/handler/middleware/http/errorhandler/error_handler.go index 0fc932814..0bdfa920f 100644 --- a/internal/handler/middleware/http/errorhandler/error_handler.go +++ b/internal/handler/middleware/http/errorhandler/error_handler.go @@ -58,8 +58,6 @@ func (h *errorHandler) HandleError(rw 
http.ResponseWriter, req *http.Request, er h.onCommunicationError(rw, req, err) case errors.Is(err, heimdall.ErrArgument): h.onPreconditionError(rw, req, err) - case errors.Is(err, heimdall.ErrMethodNotAllowed): - h.onBadMethodError(rw, req, err) case errors.Is(err, heimdall.ErrNoRuleFound): h.onNoRuleError(rw, req, err) case errors.Is(err, &heimdall.RedirectError{}): diff --git a/internal/handler/middleware/http/errorhandler/error_handler_test.go b/internal/handler/middleware/http/errorhandler/error_handler_test.go index 7b49d562c..f394858e8 100644 --- a/internal/handler/middleware/http/errorhandler/error_handler_test.go +++ b/internal/handler/middleware/http/errorhandler/error_handler_test.go @@ -136,25 +136,6 @@ func TestHandlerHandle(t *testing.T) { expCode: http.StatusBadRequest, expBody: "
argument error
", }, - { - uc: "method error default", - handler: New(), - err: errorchain.New(heimdall.ErrMethodNotAllowed), - expCode: http.StatusMethodNotAllowed, - }, - { - uc: "method error overridden", - handler: New(WithMethodErrorCode(http.StatusContinue)), - err: errorchain.New(heimdall.ErrMethodNotAllowed), - expCode: http.StatusContinue, - }, - { - uc: "method error verbose without mime type", - handler: New(WithVerboseErrors(true)), - err: errorchain.New(heimdall.ErrMethodNotAllowed), - expCode: http.StatusMethodNotAllowed, - expBody: "
method not allowed
", - }, { uc: "no rule error default", handler: New(), diff --git a/internal/handler/middleware/http/errorhandler/options.go b/internal/handler/middleware/http/errorhandler/options.go index 34c031af9..0d6d5ad39 100644 --- a/internal/handler/middleware/http/errorhandler/options.go +++ b/internal/handler/middleware/http/errorhandler/options.go @@ -26,7 +26,6 @@ type opts struct { onAuthorizationError func(rw http.ResponseWriter, req *http.Request, err error) onCommunicationError func(rw http.ResponseWriter, req *http.Request, err error) onPreconditionError func(rw http.ResponseWriter, req *http.Request, err error) - onBadMethodError func(rw http.ResponseWriter, req *http.Request, err error) onNoRuleError func(rw http.ResponseWriter, req *http.Request, err error) onInternalError func(rw http.ResponseWriter, req *http.Request, err error) } @@ -73,14 +72,6 @@ func WithInternalServerErrorCode(code int) Option { } } -func WithMethodErrorCode(code int) Option { - return func(o *opts) { - if code != 0 { - o.onBadMethodError = errorWriter(o, code) - } - } -} - func WithNoRuleErrorCode(code int) Option { return func(o *opts) { if code != 0 { diff --git a/internal/handler/middleware/http/otelmetrics/handler.go b/internal/handler/middleware/http/otelmetrics/handler.go index 4f1314236..6ebe0b82b 100644 --- a/internal/handler/middleware/http/otelmetrics/handler.go +++ b/internal/handler/middleware/http/otelmetrics/handler.go @@ -18,12 +18,13 @@ package otelmetrics import ( "net/http" + "strconv" "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/httpx" @@ -104,14 +105,13 @@ func serverRequestMetrics(server string, req *http.Request) []attribute.KeyValue attrs := make([]attribute.KeyValue, 0, attrsCount) attrs = append(attrs, methodMetric(req.Method)) - attrs = append(attrs, x.IfThenElse(req.TLS != nil, - semconv.HTTPSchemeKey.String("https"), // nolint: staticcheck - semconv.HTTPSchemeKey.String("http"))) // nolint: staticcheck - attrs = append(attrs, flavor(req.Proto)) - attrs = append(attrs, semconv.NetHostNameKey.String(host)) // nolint: staticcheck + attrs = append(attrs, x.IfThenElse(req.TLS != nil, semconv.URLScheme("https"), semconv.URLScheme("http"))) + attrs = append(attrs, semconv.NetworkProtocolName("http")) + attrs = append(attrs, semconv.NetworkProtocolVersion(strconv.Itoa(req.ProtoMajor)+"."+strconv.Itoa(req.ProtoMinor))) + attrs = append(attrs, semconv.ServerAddress(host)) if hostPort > 0 { - attrs = append(attrs, semconv.NetHostPortKey.Int(hostPort)) // nolint: staticcheck + attrs = append(attrs, semconv.ServerPort(hostPort)) } return attrs @@ -133,25 +133,10 @@ func methodMetric(method string) attribute.KeyValue { method = "_OTHER" } - return semconv.HTTPMethodKey.String(method) + return semconv.HTTPRequestMethodKey.String(method) } -func flavor(proto string) attribute.KeyValue { - switch proto { - case "HTTP/1.0": - return semconv.HTTPFlavorHTTP10 // nolint: staticcheck - case "HTTP/1.1": - return semconv.HTTPFlavorHTTP11 // nolint: staticcheck - case "HTTP/2": - return semconv.HTTPFlavorHTTP20 // nolint: staticcheck - case "HTTP/3": - return semconv.HTTPFlavorHTTP30 // nolint: staticcheck - default: - return semconv.HTTPFlavorKey.String(proto) // nolint: staticcheck - } -} - -func 
requiredHTTPPort(https bool, port int) int { // nolint:revive +func requiredHTTPPort(https bool, port int) int { if https { if port > 0 && port != 443 { return port diff --git a/internal/handler/middleware/http/otelmetrics/handler_test.go b/internal/handler/middleware/http/otelmetrics/handler_test.go index 8ab3908ca..8c43637ad 100644 --- a/internal/handler/middleware/http/otelmetrics/handler_test.go +++ b/internal/handler/middleware/http/otelmetrics/handler_test.go @@ -82,20 +82,22 @@ func TestHandlerExecution(t *testing.T) { assert.False(t, activeRequests.IsMonotonic) require.Len(t, activeRequests.DataPoints, 1) require.InDelta(t, float64(0), activeRequests.DataPoints[0].Value, 0.00) - require.Equal(t, 7, activeRequests.DataPoints[0].Attributes.Len()) + require.Equal(t, 8, activeRequests.DataPoints[0].Attributes.Len()) assert.Equal(t, "foobar", attributeValue(activeRequests.DataPoints[0].Attributes, "service.subsystem").AsString()) assert.Equal(t, "zab", attributeValue(activeRequests.DataPoints[0].Attributes, "baz").AsString()) + assert.Equal(t, "http", + attributeValue(activeRequests.DataPoints[0].Attributes, "network.protocol.name").AsString()) assert.Equal(t, "1.1", - attributeValue(activeRequests.DataPoints[0].Attributes, "http.flavor").AsString()) + attributeValue(activeRequests.DataPoints[0].Attributes, "network.protocol.version").AsString()) assert.Equal(t, http.MethodGet, - attributeValue(activeRequests.DataPoints[0].Attributes, "http.method").AsString()) + attributeValue(activeRequests.DataPoints[0].Attributes, "http.request.method").AsString()) assert.Equal(t, "http", - attributeValue(activeRequests.DataPoints[0].Attributes, "http.scheme").AsString()) + attributeValue(activeRequests.DataPoints[0].Attributes, "url.scheme").AsString()) assert.Equal(t, "127.0.0.1", - attributeValue(activeRequests.DataPoints[0].Attributes, "net.host.name").AsString()) - assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue("net.host.port")) + attributeValue(activeRequests.DataPoints[0].Attributes, "server.address").AsString()) + assert.True(t, activeRequests.DataPoints[0].Attributes.HasValue("server.port")) }, }, } { diff --git a/internal/handler/proxy/module.go b/internal/handler/proxy/module.go index 9b80b9e37..8c218b563 100644 --- a/internal/handler/proxy/module.go +++ b/internal/handler/proxy/module.go @@ -25,7 +25,6 @@ import ( "github.com/dadrus/heimdall/internal/cache" "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/handler/fxlcm" - "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/watcher" ) @@ -43,7 +42,6 @@ func newLifecycleManager( logger zerolog.Logger, cch cache.Cache, executor rule.Executor, - signer heimdall.JWTSigner, cw watcher.Watcher, ) *fxlcm.LifecycleManager { cfg := conf.Serve.Proxy @@ -51,7 +49,7 @@ func newLifecycleManager( return &fxlcm.LifecycleManager{ ServiceName: "Proxy", ServiceAddress: cfg.Address(), - Server: newService(conf, cch, logger, executor, signer), + Server: newService(conf, cch, logger, executor), Logger: logger, TLSConf: cfg.TLS, FileWatcher: cw, diff --git a/internal/handler/proxy/request_context.go b/internal/handler/proxy/request_context.go index 75a69bf8f..f4b8331e1 100644 --- a/internal/handler/proxy/request_context.go +++ b/internal/handler/proxy/request_context.go @@ -46,7 +46,6 @@ type requestContext struct { } func newContextFactory( - signer heimdall.JWTSigner, cfg 
config.ServiceConfig, tlsCfg *tls.Config, ) requestcontext.ContextFactory { @@ -72,7 +71,7 @@ func newContextFactory( return requestcontext.FactoryFunc(func(rw http.ResponseWriter, req *http.Request) requestcontext.Context { return &requestContext{ - RequestContext: requestcontext.New(signer, req), + RequestContext: requestcontext.New(req), transport: transport, rw: rw, req: req, diff --git a/internal/handler/proxy/request_context_test.go b/internal/handler/proxy/request_context_test.go index 46c12db00..0985f81dd 100644 --- a/internal/handler/proxy/request_context_test.go +++ b/internal/handler/proxy/request_context_test.go @@ -309,7 +309,7 @@ func TestRequestContextFinalize(t *testing.T) { Write: 100 * time.Millisecond, Idle: 1 * time.Second, } - ctx := newContextFactory(nil, config.ServiceConfig{Timeout: timeouts}, nil).Create(rw, req) + ctx := newContextFactory(config.ServiceConfig{Timeout: timeouts}, nil).Create(rw, req) backend := tc.setup(t, ctx, targetURL) diff --git a/internal/handler/proxy/service.go b/internal/handler/proxy/service.go index 8c90cf428..02f01a64d 100644 --- a/internal/handler/proxy/service.go +++ b/internal/handler/proxy/service.go @@ -41,7 +41,6 @@ import ( "github.com/dadrus/heimdall/internal/handler/middleware/http/recovery" "github.com/dadrus/heimdall/internal/handler/middleware/http/trustedproxy" "github.com/dadrus/heimdall/internal/handler/service" - "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/httpx" @@ -86,7 +85,6 @@ func newService( cch cache.Cache, log zerolog.Logger, exec rule.Executor, - signer heimdall.JWTSigner, ) *http.Server { der := &deadlineResetter{} cfg := conf.Serve.Proxy @@ -96,7 +94,6 @@ func newService( errorhandler.WithAuthenticationErrorCode(cfg.Respond.With.AuthenticationError.Code), errorhandler.WithAuthorizationErrorCode(cfg.Respond.With.AuthorizationError.Code), errorhandler.WithCommunicationErrorCode(cfg.Respond.With.CommunicationError.Code), - errorhandler.WithMethodErrorCode(cfg.Respond.With.BadMethodError.Code), errorhandler.WithNoRuleErrorCode(cfg.Respond.With.NoRuleError.Code), errorhandler.WithInternalServerErrorCode(cfg.Respond.With.InternalError.Code), ) @@ -141,7 +138,7 @@ func newService( func() func(http.Handler) http.Handler { return passthrough.New }, ), cachemiddleware.New(cch), - ).Then(service.NewHandler(newContextFactory(signer, cfg, tlsClientConfig), exec, eh)) + ).Then(service.NewHandler(newContextFactory(cfg, tlsClientConfig), exec, eh)) return &http.Server{ Handler: hc, diff --git a/internal/handler/proxy/service_test.go b/internal/handler/proxy/service_test.go index 621c6b8e3..7b0904f14 100644 --- a/internal/handler/proxy/service_test.go +++ b/internal/handler/proxy/service_test.go @@ -159,7 +159,7 @@ func TestProxyService(t *testing.T) { configureMocks: func(t *testing.T, exec *mocks4.ExecutorMock, _ *url.URL) { t.Helper() - exec.EXPECT().Execute(mock.Anything).Return(nil, heimdall.ErrMethodNotAllowed) + exec.EXPECT().Execute(mock.Anything).Return(nil, heimdall.ErrNoRuleFound) }, assertResponse: func(t *testing.T, err error, upstreamCalled bool, resp *http.Response) { t.Helper() @@ -167,7 +167,7 @@ func TestProxyService(t *testing.T) { require.False(t, upstreamCalled) require.NoError(t, err) - assert.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode) + assert.Equal(t, http.StatusNotFound, resp.StatusCode) data, err := 
io.ReadAll(resp.Body) require.NoError(t, err) @@ -928,7 +928,7 @@ func TestProxyService(t *testing.T) { proxyConf.Host = "127.0.0.1" proxyConf.Port = port - listener, err := listener.New("tcp", proxyConf.Address(), proxyConf.TLS, nil) + listener, err := listener.New("tcp", "test", proxyConf.Address(), proxyConf.TLS, nil, nil) require.NoError(t, err) conf := &config.Configuration{ @@ -942,7 +942,7 @@ func TestProxyService(t *testing.T) { client := createClient(t) - proxy := newService(conf, cch, log.Logger, exec, nil) + proxy := newService(conf, cch, log.Logger, exec) defer proxy.Shutdown(context.Background()) @@ -1046,11 +1046,11 @@ func TestWebSocketSupport(t *testing.T) { }, } - proxy := newService(conf, mocks.NewCacheMock(t), log.Logger, exec, nil) + proxy := newService(conf, mocks.NewCacheMock(t), log.Logger, exec) defer proxy.Shutdown(context.Background()) - listener, err := listener.New("tcp", conf.Serve.Proxy.Address(), conf.Serve.Proxy.TLS, nil) + listener, err := listener.New("tcp", "test", conf.Serve.Proxy.Address(), conf.Serve.Proxy.TLS, nil, nil) require.NoError(t, err) go func() { @@ -1146,11 +1146,11 @@ func TestServerSentEventsSupport(t *testing.T) { }, } - proxy := newService(conf, mocks.NewCacheMock(t), log.Logger, exec, nil) + proxy := newService(conf, mocks.NewCacheMock(t), log.Logger, exec) defer proxy.Shutdown(context.Background()) - listener, err := listener.New("tcp", conf.Serve.Proxy.Address(), conf.Serve.Proxy.TLS, nil) + listener, err := listener.New("tcp", "test", conf.Serve.Proxy.Address(), conf.Serve.Proxy.TLS, nil, nil) require.NoError(t, err) go func() { diff --git a/internal/handler/requestcontext/mocks/context.go b/internal/handler/requestcontext/mocks/context.go index 7b457f403..6b70fac67 100644 --- a/internal/handler/requestcontext/mocks/context.go +++ b/internal/handler/requestcontext/mocks/context.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. 
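// Editor's sketch (not part of the change set): the OTEL changes above migrate from
// semconv v1.24.0 to v1.26.0. The new attribute constructors map onto the old keys roughly
// as follows; the values and the assumed imports (net/http, otel/attribute, semconv v1.26.0)
// are illustrative only.
func exampleServerAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.HTTPRequestMethodKey.String(http.MethodGet), // was semconv.HTTPMethodKey ("http.method")
		semconv.URLScheme("https"),                          // was semconv.HTTPSchemeKey ("http.scheme")
		semconv.ServerAddress("heimdall.local"),             // was semconv.NetHostNameKey ("net.host.name")
		semconv.ServerPort(4456),                            // was semconv.NetHostPortKey ("net.host.port")
		semconv.NetworkProtocolName("http"),                 // together with the version, replaces "http.flavor"
		semconv.NetworkProtocolVersion("1.1"),
	}
}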
package mocks @@ -96,6 +96,10 @@ func (_c *ContextMock_AddHeaderForUpstream_Call) RunAndReturn(run func(string, s func (_m *ContextMock) AppContext() context.Context { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for AppContext") + } + var r0 context.Context if rf, ok := ret.Get(0).(func() context.Context); ok { r0 = rf() @@ -135,13 +139,17 @@ func (_c *ContextMock_AppContext_Call) RunAndReturn(run func() context.Context) return _c } -// Finalize provides a mock function with given fields: _a0 -func (_m *ContextMock) Finalize(_a0 rule.Backend) error { - ret := _m.Called(_a0) +// Finalize provides a mock function with given fields: backend +func (_m *ContextMock) Finalize(backend rule.Backend) error { + ret := _m.Called(backend) + + if len(ret) == 0 { + panic("no return value specified for Finalize") + } var r0 error if rf, ok := ret.Get(0).(func(rule.Backend) error); ok { - r0 = rf(_a0) + r0 = rf(backend) } else { r0 = ret.Error(0) } @@ -155,12 +163,12 @@ type ContextMock_Finalize_Call struct { } // Finalize is a helper method to define mock.On call -// - _a0 rule.Backend -func (_e *ContextMock_Expecter) Finalize(_a0 interface{}) *ContextMock_Finalize_Call { - return &ContextMock_Finalize_Call{Call: _e.mock.On("Finalize", _a0)} +// - backend rule.Backend +func (_e *ContextMock_Expecter) Finalize(backend interface{}) *ContextMock_Finalize_Call { + return &ContextMock_Finalize_Call{Call: _e.mock.On("Finalize", backend)} } -func (_c *ContextMock_Finalize_Call) Run(run func(_a0 rule.Backend)) *ContextMock_Finalize_Call { +func (_c *ContextMock_Finalize_Call) Run(run func(backend rule.Backend)) *ContextMock_Finalize_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(rule.Backend)) }) @@ -177,10 +185,61 @@ func (_c *ContextMock_Finalize_Call) RunAndReturn(run func(rule.Backend) error) return _c } +// Outputs provides a mock function with given fields: +func (_m *ContextMock) Outputs() map[string]interface{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Outputs") + } + + var r0 map[string]interface{} + if rf, ok := ret.Get(0).(func() map[string]interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]interface{}) + } + } + + return r0 +} + +// ContextMock_Outputs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Outputs' +type ContextMock_Outputs_Call struct { + *mock.Call +} + +// Outputs is a helper method to define mock.On call +func (_e *ContextMock_Expecter) Outputs() *ContextMock_Outputs_Call { + return &ContextMock_Outputs_Call{Call: _e.mock.On("Outputs")} +} + +func (_c *ContextMock_Outputs_Call) Run(run func()) *ContextMock_Outputs_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ContextMock_Outputs_Call) Return(_a0 map[string]interface{}) *ContextMock_Outputs_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ContextMock_Outputs_Call) RunAndReturn(run func() map[string]interface{}) *ContextMock_Outputs_Call { + _c.Call.Return(run) + return _c +} + // Request provides a mock function with given fields: func (_m *ContextMock) Request() *heimdall.Request { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Request") + } + var r0 *heimdall.Request if rf, ok := ret.Get(0).(func() *heimdall.Request); ok { r0 = rf() @@ -253,56 +312,12 @@ func (_c *ContextMock_SetPipelineError_Call) RunAndReturn(run func(error)) *Cont return _c } -// Signer provides a mock 
function with given fields: -func (_m *ContextMock) Signer() heimdall.JWTSigner { - ret := _m.Called() - - var r0 heimdall.JWTSigner - if rf, ok := ret.Get(0).(func() heimdall.JWTSigner); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(heimdall.JWTSigner) - } - } - - return r0 -} - -// ContextMock_Signer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Signer' -type ContextMock_Signer_Call struct { - *mock.Call -} - -// Signer is a helper method to define mock.On call -func (_e *ContextMock_Expecter) Signer() *ContextMock_Signer_Call { - return &ContextMock_Signer_Call{Call: _e.mock.On("Signer")} -} - -func (_c *ContextMock_Signer_Call) Run(run func()) *ContextMock_Signer_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *ContextMock_Signer_Call) Return(_a0 heimdall.JWTSigner) *ContextMock_Signer_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ContextMock_Signer_Call) RunAndReturn(run func() heimdall.JWTSigner) *ContextMock_Signer_Call { - _c.Call.Return(run) - return _c -} - -type mockConstructorTestingTNewContextMock interface { +// NewContextMock creates a new instance of ContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewContextMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewContextMock creates a new instance of ContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewContextMock(t mockConstructorTestingTNewContextMock) *ContextMock { +}) *ContextMock { mock := &ContextMock{} mock.Mock.Test(t) diff --git a/internal/handler/requestcontext/request_context.go b/internal/handler/requestcontext/request_context.go index 420dcc3d8..c0ca86fea 100644 --- a/internal/handler/requestcontext/request_context.go +++ b/internal/handler/requestcontext/request_context.go @@ -36,7 +36,6 @@ type RequestContext struct { reqURL *url.URL upstreamHeaders http.Header upstreamCookies map[string]string - jwtSigner heimdall.JWTSigner req *http.Request err error @@ -45,11 +44,11 @@ type RequestContext struct { savedBody any hmdlReq *heimdall.Request headers map[string]string + outputs map[string]any } -func New(signer heimdall.JWTSigner, req *http.Request) *RequestContext { +func New(req *http.Request) *RequestContext { return &RequestContext{ - jwtSigner: signer, reqMethod: extractMethod(req), reqURL: extractURL(req), upstreamHeaders: make(http.Header), @@ -132,7 +131,7 @@ func (r *RequestContext) Request() *heimdall.Request { r.hmdlReq = &heimdall.Request{ RequestFunctions: r, Method: r.reqMethod, - URL: r.reqURL, + URL: &heimdall.URL{URL: *r.reqURL}, ClientIPAddresses: r.requestClientIPs(), } } @@ -174,4 +173,10 @@ func (r *RequestContext) UpstreamCookies() map[string]string { return r.ups func (r *RequestContext) AppContext() context.Context { return r.req.Context() } func (r *RequestContext) SetPipelineError(err error) { r.err = err } func (r *RequestContext) PipelineError() error { return r.err } -func (r *RequestContext) Signer() heimdall.JWTSigner { return r.jwtSigner } +func (r *RequestContext) Outputs() map[string]any { + if r.outputs == nil { + r.outputs = make(map[string]any) + } + + return r.outputs +} diff --git a/internal/handler/requestcontext/request_context_test.go b/internal/handler/requestcontext/request_context_test.go index 707e75994..fc5d46da6 100644 --- 
a/internal/handler/requestcontext/request_context_test.go +++ b/internal/handler/requestcontext/request_context_test.go @@ -122,7 +122,7 @@ func TestRequestContextHeaders(t *testing.T) { req.Header.Set("X-Foo-Bar", "foo") req.Header.Add("X-Foo-Bar", "bar") - ctx := New(nil, req) + ctx := New(req) // WHEN headers := ctx.Request().Headers() @@ -142,7 +142,7 @@ func TestRequestContextHeader(t *testing.T) { req.Header.Add("X-Foo-Bar", "bar") req.Host = "bar.foo" - ctx := New(nil, req) + ctx := New(req) // WHEN xFooBarValue := ctx.Request().Header("X-Foo-Bar") @@ -162,7 +162,7 @@ func TestRequestContextCookie(t *testing.T) { req := httptest.NewRequest(http.MethodHead, "https://foo.bar/test", nil) req.Header.Set("Cookie", "foo=bar; bar=baz") - ctx := New(nil, req) + ctx := New(req) // WHEN value1 := ctx.Request().Cookie("bar") @@ -230,7 +230,7 @@ func TestRequestContextBody(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "https://foo.bar/test", tc.body) req.Header.Set("Content-Type", tc.ct) - ctx := New(nil, req) + ctx := New(req) // WHEN data := ctx.Request().Body() diff --git a/internal/heimdall/context.go b/internal/heimdall/context.go index fc81e66a7..dc2461fd9 100644 --- a/internal/heimdall/context.go +++ b/internal/heimdall/context.go @@ -33,7 +33,7 @@ type Context interface { SetPipelineError(err error) - Signer() JWTSigner + Outputs() map[string]any } //go:generate mockery --name RequestFunctions --structname RequestFunctionsMock @@ -45,10 +45,16 @@ type RequestFunctions interface { Body() any } +type URL struct { + url.URL + + Captures map[string]string +} + type Request struct { RequestFunctions Method string - URL *url.URL + URL *URL ClientIPAddresses []string } diff --git a/internal/heimdall/errors.go b/internal/heimdall/errors.go index e09e30972..ef1569c19 100644 --- a/internal/heimdall/errors.go +++ b/internal/heimdall/errors.go @@ -29,7 +29,6 @@ var ( ErrCommunicationTimeout = errors.New("communication timeout error") ErrConfiguration = errors.New("configuration error") ErrInternal = errors.New("internal error") - ErrMethodNotAllowed = errors.New("method not allowed") ErrNoRuleFound = errors.New("no rule found") ) diff --git a/internal/heimdall/jwt_signer.go b/internal/heimdall/jwt_signer.go deleted file mode 100644 index 3cd196f45..000000000 --- a/internal/heimdall/jwt_signer.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
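// Editor's sketch (identifiers and values below are illustrative, not from the change set):
// the reworked contract in internal/heimdall/context.go above drops Signer() from the
// Context interface, adds Outputs() as a shared key/value store for the pipeline, and wraps
// the request URL in heimdall.URL so path captures travel together with the parsed URL.
func annotate(ctx heimdall.Context) {
	// values stored here become visible to the steps executed later in the pipeline
	ctx.Outputs()["tenant"] = "example-tenant"
}

func captureOf(req *heimdall.Request, name string) string {
	// req.URL is now *heimdall.URL; the embedded url.URL keeps its usual fields, while
	// Captures exposes the values extracted by the rule's path matcher
	return req.URL.Captures[name]
}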
-// -// SPDX-License-Identifier: Apache-2.0 - -package heimdall - -import ( - "time" - - "github.com/go-jose/go-jose/v4" -) - -//go:generate mockery --name JWTSigner --structname JWTSignerMock - -type JWTSigner interface { - Sign(sub string, ttl time.Duration, claims map[string]any) (string, error) - Hash() []byte - Keys() []jose.JSONWebKey -} diff --git a/internal/heimdall/mocks/context.go b/internal/heimdall/mocks/context.go index 4fe9c7575..442864dd4 100644 --- a/internal/heimdall/mocks/context.go +++ b/internal/heimdall/mocks/context.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks @@ -94,6 +94,10 @@ func (_c *ContextMock_AddHeaderForUpstream_Call) RunAndReturn(run func(string, s func (_m *ContextMock) AppContext() context.Context { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for AppContext") + } + var r0 context.Context if rf, ok := ret.Get(0).(func() context.Context); ok { r0 = rf() @@ -133,10 +137,61 @@ func (_c *ContextMock_AppContext_Call) RunAndReturn(run func() context.Context) return _c } +// Outputs provides a mock function with given fields: +func (_m *ContextMock) Outputs() map[string]interface{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Outputs") + } + + var r0 map[string]interface{} + if rf, ok := ret.Get(0).(func() map[string]interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]interface{}) + } + } + + return r0 +} + +// ContextMock_Outputs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Outputs' +type ContextMock_Outputs_Call struct { + *mock.Call +} + +// Outputs is a helper method to define mock.On call +func (_e *ContextMock_Expecter) Outputs() *ContextMock_Outputs_Call { + return &ContextMock_Outputs_Call{Call: _e.mock.On("Outputs")} +} + +func (_c *ContextMock_Outputs_Call) Run(run func()) *ContextMock_Outputs_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ContextMock_Outputs_Call) Return(_a0 map[string]interface{}) *ContextMock_Outputs_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ContextMock_Outputs_Call) RunAndReturn(run func() map[string]interface{}) *ContextMock_Outputs_Call { + _c.Call.Return(run) + return _c +} + // Request provides a mock function with given fields: func (_m *ContextMock) Request() *heimdall.Request { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Request") + } + var r0 *heimdall.Request if rf, ok := ret.Get(0).(func() *heimdall.Request); ok { r0 = rf() @@ -209,56 +264,12 @@ func (_c *ContextMock_SetPipelineError_Call) RunAndReturn(run func(error)) *Cont return _c } -// Signer provides a mock function with given fields: -func (_m *ContextMock) Signer() heimdall.JWTSigner { - ret := _m.Called() - - var r0 heimdall.JWTSigner - if rf, ok := ret.Get(0).(func() heimdall.JWTSigner); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(heimdall.JWTSigner) - } - } - - return r0 -} - -// ContextMock_Signer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Signer' -type ContextMock_Signer_Call struct { - *mock.Call -} - -// Signer is a helper method to define mock.On call -func (_e *ContextMock_Expecter) Signer() *ContextMock_Signer_Call { - return &ContextMock_Signer_Call{Call: _e.mock.On("Signer")} -} - -func (_c *ContextMock_Signer_Call) Run(run 
func()) *ContextMock_Signer_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *ContextMock_Signer_Call) Return(_a0 heimdall.JWTSigner) *ContextMock_Signer_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ContextMock_Signer_Call) RunAndReturn(run func() heimdall.JWTSigner) *ContextMock_Signer_Call { - _c.Call.Return(run) - return _c -} - -type mockConstructorTestingTNewContextMock interface { +// NewContextMock creates a new instance of ContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewContextMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewContextMock creates a new instance of ContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewContextMock(t mockConstructorTestingTNewContextMock) *ContextMock { +}) *ContextMock { mock := &ContextMock{} mock.Mock.Test(t) diff --git a/internal/heimdall/mocks/jwt_signer.go b/internal/heimdall/mocks/jwt_signer.go deleted file mode 100644 index 3e7fac346..000000000 --- a/internal/heimdall/mocks/jwt_signer.go +++ /dev/null @@ -1,178 +0,0 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. - -package mocks - -import ( - jose "github.com/go-jose/go-jose/v4" - mock "github.com/stretchr/testify/mock" - - time "time" -) - -// JWTSignerMock is an autogenerated mock type for the JWTSigner type -type JWTSignerMock struct { - mock.Mock -} - -type JWTSignerMock_Expecter struct { - mock *mock.Mock -} - -func (_m *JWTSignerMock) EXPECT() *JWTSignerMock_Expecter { - return &JWTSignerMock_Expecter{mock: &_m.Mock} -} - -// Hash provides a mock function with given fields: -func (_m *JWTSignerMock) Hash() []byte { - ret := _m.Called() - - var r0 []byte - if rf, ok := ret.Get(0).(func() []byte); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - return r0 -} - -// JWTSignerMock_Hash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Hash' -type JWTSignerMock_Hash_Call struct { - *mock.Call -} - -// Hash is a helper method to define mock.On call -func (_e *JWTSignerMock_Expecter) Hash() *JWTSignerMock_Hash_Call { - return &JWTSignerMock_Hash_Call{Call: _e.mock.On("Hash")} -} - -func (_c *JWTSignerMock_Hash_Call) Run(run func()) *JWTSignerMock_Hash_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *JWTSignerMock_Hash_Call) Return(_a0 []byte) *JWTSignerMock_Hash_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *JWTSignerMock_Hash_Call) RunAndReturn(run func() []byte) *JWTSignerMock_Hash_Call { - _c.Call.Return(run) - return _c -} - -// Keys provides a mock function with given fields: -func (_m *JWTSignerMock) Keys() []jose.JSONWebKey { - ret := _m.Called() - - var r0 []jose.JSONWebKey - if rf, ok := ret.Get(0).(func() []jose.JSONWebKey); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]jose.JSONWebKey) - } - } - - return r0 -} - -// JWTSignerMock_Keys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Keys' -type JWTSignerMock_Keys_Call struct { - *mock.Call -} - -// Keys is a helper method to define mock.On call -func (_e *JWTSignerMock_Expecter) Keys() *JWTSignerMock_Keys_Call { - return &JWTSignerMock_Keys_Call{Call: _e.mock.On("Keys")} -} - -func (_c *JWTSignerMock_Keys_Call) Run(run func()) 
*JWTSignerMock_Keys_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *JWTSignerMock_Keys_Call) Return(_a0 []jose.JSONWebKey) *JWTSignerMock_Keys_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *JWTSignerMock_Keys_Call) RunAndReturn(run func() []jose.JSONWebKey) *JWTSignerMock_Keys_Call { - _c.Call.Return(run) - return _c -} - -// Sign provides a mock function with given fields: sub, ttl, claims -func (_m *JWTSignerMock) Sign(sub string, ttl time.Duration, claims map[string]interface{}) (string, error) { - ret := _m.Called(sub, ttl, claims) - - var r0 string - var r1 error - if rf, ok := ret.Get(0).(func(string, time.Duration, map[string]interface{}) (string, error)); ok { - return rf(sub, ttl, claims) - } - if rf, ok := ret.Get(0).(func(string, time.Duration, map[string]interface{}) string); ok { - r0 = rf(sub, ttl, claims) - } else { - r0 = ret.Get(0).(string) - } - - if rf, ok := ret.Get(1).(func(string, time.Duration, map[string]interface{}) error); ok { - r1 = rf(sub, ttl, claims) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// JWTSignerMock_Sign_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sign' -type JWTSignerMock_Sign_Call struct { - *mock.Call -} - -// Sign is a helper method to define mock.On call -// - sub string -// - ttl time.Duration -// - claims map[string]interface{} -func (_e *JWTSignerMock_Expecter) Sign(sub interface{}, ttl interface{}, claims interface{}) *JWTSignerMock_Sign_Call { - return &JWTSignerMock_Sign_Call{Call: _e.mock.On("Sign", sub, ttl, claims)} -} - -func (_c *JWTSignerMock_Sign_Call) Run(run func(sub string, ttl time.Duration, claims map[string]interface{})) *JWTSignerMock_Sign_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string), args[1].(time.Duration), args[2].(map[string]interface{})) - }) - return _c -} - -func (_c *JWTSignerMock_Sign_Call) Return(_a0 string, _a1 error) *JWTSignerMock_Sign_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *JWTSignerMock_Sign_Call) RunAndReturn(run func(string, time.Duration, map[string]interface{}) (string, error)) *JWTSignerMock_Sign_Call { - _c.Call.Return(run) - return _c -} - -type mockConstructorTestingTNewJWTSignerMock interface { - mock.TestingT - Cleanup(func()) -} - -// NewJWTSignerMock creates a new instance of JWTSignerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewJWTSignerMock(t mockConstructorTestingTNewJWTSignerMock) *JWTSignerMock { - mock := &JWTSignerMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/internal/heimdall/mocks/request_functions.go b/internal/heimdall/mocks/request_functions.go index 65e29a008..0b59f78ec 100644 --- a/internal/heimdall/mocks/request_functions.go +++ b/internal/heimdall/mocks/request_functions.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. 
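// Editor's note (sketch, illustrative values): the mocks regenerated with mockery v2.42.1
// above now guard every call with `if len(ret) == 0 { panic(...) }`, so an expectation
// without a configured return value panics instead of silently yielding zero values.
// Expectations on methods with results therefore always pair EXPECT() with Return:
func exampleExpectation(t *testing.T) {
	ctx := mocks.NewContextMock(t)
	ctx.EXPECT().Outputs().Return(map[string]any{"tenant": "example-tenant"})

	_ = ctx.Outputs() // without the Return above, this call would panic
}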
package mocks @@ -21,6 +21,10 @@ func (_m *RequestFunctionsMock) EXPECT() *RequestFunctionsMock_Expecter { func (_m *RequestFunctionsMock) Body() interface{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Body") + } + var r0 interface{} if rf, ok := ret.Get(0).(func() interface{}); ok { r0 = rf() @@ -64,6 +68,10 @@ func (_c *RequestFunctionsMock_Body_Call) RunAndReturn(run func() interface{}) * func (_m *RequestFunctionsMock) Cookie(name string) string { ret := _m.Called(name) + if len(ret) == 0 { + panic("no return value specified for Cookie") + } + var r0 string if rf, ok := ret.Get(0).(func(string) string); ok { r0 = rf(name) @@ -106,6 +114,10 @@ func (_c *RequestFunctionsMock_Cookie_Call) RunAndReturn(run func(string) string func (_m *RequestFunctionsMock) Header(name string) string { ret := _m.Called(name) + if len(ret) == 0 { + panic("no return value specified for Header") + } + var r0 string if rf, ok := ret.Get(0).(func(string) string); ok { r0 = rf(name) @@ -148,6 +160,10 @@ func (_c *RequestFunctionsMock_Header_Call) RunAndReturn(run func(string) string func (_m *RequestFunctionsMock) Headers() map[string]string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Headers") + } + var r0 map[string]string if rf, ok := ret.Get(0).(func() map[string]string); ok { r0 = rf() @@ -187,13 +203,12 @@ func (_c *RequestFunctionsMock_Headers_Call) RunAndReturn(run func() map[string] return _c } -type mockConstructorTestingTNewRequestFunctionsMock interface { +// NewRequestFunctionsMock creates a new instance of RequestFunctionsMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRequestFunctionsMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewRequestFunctionsMock creates a new instance of RequestFunctionsMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRequestFunctionsMock(t mockConstructorTestingTNewRequestFunctionsMock) *RequestFunctionsMock { +}) *RequestFunctionsMock { mock := &RequestFunctionsMock{} mock.Mock.Test(t) diff --git a/internal/httpcache/round_tripper_test.go b/internal/httpcache/round_tripper_test.go index d16680327..717048c6e 100644 --- a/internal/httpcache/round_tripper_test.go +++ b/internal/httpcache/round_tripper_test.go @@ -73,7 +73,7 @@ func TestRoundTripperRoundTrip(t *testing.T) { }, } - cch, err := memory.NewCache(nil, nil) + cch, err := memory.NewCache(nil, nil, nil) require.NoError(t, err) ctx := cache.WithContext(context.Background(), cch) diff --git a/internal/keyholder/mocks/registry.go b/internal/keyholder/mocks/registry.go new file mode 100644 index 000000000..2fd3fd946 --- /dev/null +++ b/internal/keyholder/mocks/registry.go @@ -0,0 +1,116 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. 
+ +package mocks + +import ( + keyholder "github.com/dadrus/heimdall/internal/keyholder" + jose "github.com/go-jose/go-jose/v4" + mock "github.com/stretchr/testify/mock" +) + +// RegistryMock is an autogenerated mock type for the Registry type +type RegistryMock struct { + mock.Mock +} + +type RegistryMock_Expecter struct { + mock *mock.Mock +} + +func (_m *RegistryMock) EXPECT() *RegistryMock_Expecter { + return &RegistryMock_Expecter{mock: &_m.Mock} +} + +// AddKeyHolder provides a mock function with given fields: kh +func (_m *RegistryMock) AddKeyHolder(kh keyholder.KeyHolder) { + _m.Called(kh) +} + +// RegistryMock_AddKeyHolder_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddKeyHolder' +type RegistryMock_AddKeyHolder_Call struct { + *mock.Call +} + +// AddKeyHolder is a helper method to define mock.On call +// - kh keyholder.KeyHolder +func (_e *RegistryMock_Expecter) AddKeyHolder(kh interface{}) *RegistryMock_AddKeyHolder_Call { + return &RegistryMock_AddKeyHolder_Call{Call: _e.mock.On("AddKeyHolder", kh)} +} + +func (_c *RegistryMock_AddKeyHolder_Call) Run(run func(kh keyholder.KeyHolder)) *RegistryMock_AddKeyHolder_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(keyholder.KeyHolder)) + }) + return _c +} + +func (_c *RegistryMock_AddKeyHolder_Call) Return() *RegistryMock_AddKeyHolder_Call { + _c.Call.Return() + return _c +} + +func (_c *RegistryMock_AddKeyHolder_Call) RunAndReturn(run func(keyholder.KeyHolder)) *RegistryMock_AddKeyHolder_Call { + _c.Call.Return(run) + return _c +} + +// Keys provides a mock function with given fields: +func (_m *RegistryMock) Keys() []jose.JSONWebKey { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Keys") + } + + var r0 []jose.JSONWebKey + if rf, ok := ret.Get(0).(func() []jose.JSONWebKey); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]jose.JSONWebKey) + } + } + + return r0 +} + +// RegistryMock_Keys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Keys' +type RegistryMock_Keys_Call struct { + *mock.Call +} + +// Keys is a helper method to define mock.On call +func (_e *RegistryMock_Expecter) Keys() *RegistryMock_Keys_Call { + return &RegistryMock_Keys_Call{Call: _e.mock.On("Keys")} +} + +func (_c *RegistryMock_Keys_Call) Run(run func()) *RegistryMock_Keys_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *RegistryMock_Keys_Call) Return(_a0 []jose.JSONWebKey) *RegistryMock_Keys_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RegistryMock_Keys_Call) RunAndReturn(run func() []jose.JSONWebKey) *RegistryMock_Keys_Call { + _c.Call.Return(run) + return _c +} + +// NewRegistryMock creates a new instance of RegistryMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRegistryMock(t interface { + mock.TestingT + Cleanup(func()) +}) *RegistryMock { + mock := &RegistryMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/keyholder/module.go b/internal/keyholder/module.go new file mode 100644 index 000000000..466c87aa8 --- /dev/null +++ b/internal/keyholder/module.go @@ -0,0 +1,7 @@ +package keyholder + +import "go.uber.org/fx" + +var Module = fx.Options( // nolint: gochecknoglobals + fx.Provide(newRegistry), +) diff --git a/internal/keyholder/registry.go b/internal/keyholder/registry.go new file mode 100644 index 000000000..974b03899 --- /dev/null +++ b/internal/keyholder/registry.go @@ -0,0 +1,36 @@ +package keyholder + +import "github.com/go-jose/go-jose/v4" + +type KeyHolder interface { + Keys() []jose.JSONWebKey +} + +//go:generate mockery --name Registry --structname RegistryMock + +type Registry interface { + AddKeyHolder(kh KeyHolder) + Keys() []jose.JSONWebKey +} + +func newRegistry() Registry { + return ®istry{} +} + +type registry struct { + keyHolders []KeyHolder +} + +func (r *registry) AddKeyHolder(kh KeyHolder) { + r.keyHolders = append(r.keyHolders, kh) +} + +func (r *registry) Keys() []jose.JSONWebKey { + var keys []jose.JSONWebKey + + for _, holder := range r.keyHolders { + keys = append(keys, holder.Keys()...) + } + + return keys +} diff --git a/internal/keyholder/registry_test.go b/internal/keyholder/registry_test.go new file mode 100644 index 000000000..703502e03 --- /dev/null +++ b/internal/keyholder/registry_test.go @@ -0,0 +1,84 @@ +package keyholder + +import ( + "testing" + + "github.com/go-jose/go-jose/v4" + "github.com/stretchr/testify/assert" +) + +type testKeyHolder []jose.JSONWebKey + +func (t testKeyHolder) Keys() []jose.JSONWebKey { return t } + +func TestRegistryKeys(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + keyHolder []KeyHolder + assert func(t *testing.T, keys []jose.JSONWebKey) + }{ + { + uc: "no key holders", + assert: func(t *testing.T, keys []jose.JSONWebKey) { + t.Helper() + + assert.Empty(t, keys) + }, + }, + { + uc: "key holder without keys", + keyHolder: []KeyHolder{testKeyHolder{}}, + assert: func(t *testing.T, keys []jose.JSONWebKey) { + t.Helper() + + assert.Empty(t, keys) + }, + }, + { + uc: "key holder with one key", + keyHolder: []KeyHolder{testKeyHolder{{KeyID: "test-1"}}}, + assert: func(t *testing.T, keys []jose.JSONWebKey) { + t.Helper() + + assert.Equal(t, []jose.JSONWebKey{{KeyID: "test-1"}}, keys) + }, + }, + { + uc: "key holder with multiple keys", + keyHolder: []KeyHolder{testKeyHolder{{KeyID: "test-1"}, {KeyID: "test-2"}}}, + assert: func(t *testing.T, keys []jose.JSONWebKey) { + t.Helper() + + assert.Equal(t, []jose.JSONWebKey{{KeyID: "test-1"}, {KeyID: "test-2"}}, keys) + }, + }, + { + uc: "multiple key holders, one with single key, one with multiple keys and one without keys", + keyHolder: []KeyHolder{ + testKeyHolder{{KeyID: "test-1"}, {KeyID: "test-2"}}, + testKeyHolder{}, + testKeyHolder{{KeyID: "test-3"}}, + }, + assert: func(t *testing.T, keys []jose.JSONWebKey) { + t.Helper() + + assert.Equal(t, []jose.JSONWebKey{{KeyID: "test-1"}, {KeyID: "test-2"}, {KeyID: "test-3"}}, keys) + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + // GIVEN + reg := newRegistry() + + // WHEN + for _, kh := range tc.keyHolder { + reg.AddKeyHolder(kh) + } + + // THEN + tc.assert(t, reg.Keys()) + }) + } +} diff --git a/internal/module.go b/internal/module.go index 
39becb6ef..f4730ad59 100644 --- a/internal/module.go +++ b/internal/module.go @@ -25,11 +25,11 @@ import ( "github.com/dadrus/heimdall/internal/handler/management" "github.com/dadrus/heimdall/internal/handler/metrics" "github.com/dadrus/heimdall/internal/handler/profiling" + "github.com/dadrus/heimdall/internal/keyholder" "github.com/dadrus/heimdall/internal/logging" "github.com/dadrus/heimdall/internal/otel" "github.com/dadrus/heimdall/internal/rules" "github.com/dadrus/heimdall/internal/rules/mechanisms" - "github.com/dadrus/heimdall/internal/signer" "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/version" ) @@ -38,12 +38,12 @@ var Module = fx.Options( //nolint:gochecknoglobals config.Module, logging.Module, watcher.Module, + keyholder.Module, fx.Invoke(func(logger zerolog.Logger) { logger.Info().Str("_version", version.Version).Msg("Starting heimdall") }), otel.Module, cache.Module, - signer.Module, mechanisms.Module, rules.Module, management.Module, diff --git a/internal/otel/metrics/certificate/config.go b/internal/otel/metrics/certificate/config.go deleted file mode 100644 index 0b48bc82a..000000000 --- a/internal/otel/metrics/certificate/config.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2023 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package certificate - -import ( - "crypto/x509" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric" - - "github.com/dadrus/heimdall/internal/keystore" -) - -type config struct { - provider metric.MeterProvider - services []*service - monitorEECertsOnly bool -} - -type ( - Option func(conf *config) - CertGetter func(ks keystore.KeyStore) []*x509.Certificate -) - -func WithMeterProvider(provider metric.MeterProvider) Option { - return func(conf *config) { - if provider != nil { - conf.provider = provider - } - } -} - -func WithKeyID(keyID string) CertGetter { - return func(ks keystore.KeyStore) []*x509.Certificate { - entry, err := ks.GetKey(keyID) - if err != nil { - return nil - } - - return entry.CertChain - } -} - -func WithFirstEntry() CertGetter { - return func(ks keystore.KeyStore) []*x509.Certificate { - entries := ks.Entries() - if len(entries) == 0 { - return nil - } - - return entries[0].CertChain - } -} - -func WithServiceCertificates(serviceName string, certs []*x509.Certificate) Option { - return func(conf *config) { - if len(certs) != 0 { - conf.services = append(conf.services, &service{ - name: serviceName, - certificates: certs, - }) - } - } -} - -func WithServiceKeyStore(serviceName string, ks keystore.KeyStore, certGetter CertGetter) Option { - if ks != nil { - return WithServiceCertificates(serviceName, certGetter(ks)) - } - - return func(_ *config) {} -} - -func WithEndEntityMonitoringOnly(flag bool) Option { - return func(conf *config) { - conf.monitorEECertsOnly = flag - } -} - -func newConfig(opts ...Option) *config { - conf := config{ - provider: otel.GetMeterProvider(), - } - - for _, opt := range opts { - opt(&conf) - } - - return &conf -} diff --git a/internal/otel/metrics/certificate/expiration_observer.go b/internal/otel/metrics/certificate/expiration_observer.go deleted file mode 100644 index 62fcaf83f..000000000 --- a/internal/otel/metrics/certificate/expiration_observer.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2023 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package certificate - -import ( - "context" - "crypto/x509" - "strings" - "sync" - "time" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - - "github.com/dadrus/heimdall/version" -) - -const ( - serviceAttrKey = attribute.Key("service") - issuerAttrKey = attribute.Key("issuer") - serialNrAttrKey = attribute.Key("serial_nr") - subjectAttrKey = attribute.Key("subject") - dnsNameAttrKey = attribute.Key("dns_names") -) - -type service struct { - name string - certificates []*x509.Certificate -} - -type expirationObserver struct { - meter metric.Meter - services []*service - monitorEECertsOnly bool -} - -// Start initializes reporting of host metrics using the supplied config. -func Start(opts ...Option) error { - conf := newConfig(opts...) 
- if conf.provider == nil { - conf.provider = otel.GetMeterProvider() - } - - eo := &expirationObserver{ - meter: conf.provider.Meter( - "github.com/dadrus/heimdall/internal/otel/metrics/certificate", - metric.WithInstrumentationVersion(version.Version), - ), - services: conf.services, - monitorEECertsOnly: conf.monitorEECertsOnly, - } - - return eo.register() -} - -func (eo *expirationObserver) register() error { - var ( - err error - expirationCounter metric.Float64ObservableUpDownCounter - - // lock prevents a race between batch observer and instrument registration. - lock sync.Mutex - ) - - lock.Lock() - defer lock.Unlock() - - expirationCounter, err = eo.meter.Float64ObservableUpDownCounter( - "certificate.expiry", - metric.WithDescription("Number of seconds until certificate expires"), - metric.WithUnit("s"), - ) - if err != nil { - return err - } - - _, err = eo.meter.RegisterCallback( - func(_ context.Context, observer metric.Observer) error { - lock.Lock() - defer lock.Unlock() - - for _, srv := range eo.services { - if eo.monitorEECertsOnly { - eo.observeCertificate(observer, expirationCounter, srv.certificates[0], srv.name) - } else { - for _, cert := range srv.certificates { - eo.observeCertificate(observer, expirationCounter, cert, srv.name) - } - } - } - - return nil - }, - expirationCounter, - ) - - return err -} - -func (eo *expirationObserver) observeCertificate( - observer metric.Observer, - counter metric.Float64ObservableUpDownCounter, - cert *x509.Certificate, - srvName string, -) { - observer.ObserveFloat64( - counter, - time.Until(cert.NotAfter).Seconds(), - metric.WithAttributes( - serviceAttrKey.String(srvName), - issuerAttrKey.String(cert.Issuer.String()), - serialNrAttrKey.String(cert.SerialNumber.String()), - subjectAttrKey.String(cert.Subject.String()), - dnsNameAttrKey.String(strings.Join(cert.DNSNames, ",")), - ), - ) -} diff --git a/internal/otel/metrics/certificate/mocks/observer.go b/internal/otel/metrics/certificate/mocks/observer.go new file mode 100644 index 000000000..800a455cb --- /dev/null +++ b/internal/otel/metrics/certificate/mocks/observer.go @@ -0,0 +1,113 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. 
+ +package mocks + +import ( + certificate "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + mock "github.com/stretchr/testify/mock" +) + +// ObserverMock is an autogenerated mock type for the Observer type +type ObserverMock struct { + mock.Mock +} + +type ObserverMock_Expecter struct { + mock *mock.Mock +} + +func (_m *ObserverMock) EXPECT() *ObserverMock_Expecter { + return &ObserverMock_Expecter{mock: &_m.Mock} +} + +// Add provides a mock function with given fields: sup +func (_m *ObserverMock) Add(sup certificate.Supplier) { + _m.Called(sup) +} + +// ObserverMock_Add_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Add' +type ObserverMock_Add_Call struct { + *mock.Call +} + +// Add is a helper method to define mock.On call +// - sup certificate.Supplier +func (_e *ObserverMock_Expecter) Add(sup interface{}) *ObserverMock_Add_Call { + return &ObserverMock_Add_Call{Call: _e.mock.On("Add", sup)} +} + +func (_c *ObserverMock_Add_Call) Run(run func(sup certificate.Supplier)) *ObserverMock_Add_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(certificate.Supplier)) + }) + return _c +} + +func (_c *ObserverMock_Add_Call) Return() *ObserverMock_Add_Call { + _c.Call.Return() + return _c +} + +func (_c *ObserverMock_Add_Call) RunAndReturn(run func(certificate.Supplier)) *ObserverMock_Add_Call { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given fields: +func (_m *ObserverMock) Start() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ObserverMock_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type ObserverMock_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +func (_e *ObserverMock_Expecter) Start() *ObserverMock_Start_Call { + return &ObserverMock_Start_Call{Call: _e.mock.On("Start")} +} + +func (_c *ObserverMock_Start_Call) Run(run func()) *ObserverMock_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ObserverMock_Start_Call) Return(_a0 error) *ObserverMock_Start_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ObserverMock_Start_Call) RunAndReturn(run func() error) *ObserverMock_Start_Call { + _c.Call.Return(run) + return _c +} + +// NewObserverMock creates a new instance of ObserverMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewObserverMock(t interface { + mock.TestingT + Cleanup(func()) +}) *ObserverMock { + mock := &ObserverMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/otel/metrics/certificate/observer.go b/internal/otel/metrics/certificate/observer.go new file mode 100644 index 000000000..5ab968094 --- /dev/null +++ b/internal/otel/metrics/certificate/observer.go @@ -0,0 +1,110 @@ +// Copyright 2023 Dimitrij Drus +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +package certificate + +import ( + "context" + "strings" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/dadrus/heimdall/version" +) + +const ( + serviceAttrKey = attribute.Key("service") + issuerAttrKey = attribute.Key("issuer") + serialNrAttrKey = attribute.Key("serial_nr") + subjectAttrKey = attribute.Key("subject") + dnsNameAttrKey = attribute.Key("dns_names") +) + +//go:generate mockery --name Observer --structname ObserverMock + +type Observer interface { + Add(sup Supplier) + Start() error +} + +type observer struct { + meter metric.Meter + suppliers []Supplier + mut sync.RWMutex +} + +func NewObserver() Observer { + provider := otel.GetMeterProvider() + + return &observer{ + meter: provider.Meter( + "github.com/dadrus/heimdall/internal/otel/metrics/certificate", + metric.WithInstrumentationVersion(version.Version), + ), + } +} + +// Start registers the certificate expiry metric and a callback observing the certificates of all registered suppliers. +func (eo *observer) Start() error { + expirationCounter, err := eo.meter.Float64ObservableUpDownCounter( + "certificate.expiry", + metric.WithDescription("Number of seconds until certificate expires"), + metric.WithUnit("s"), + ) + if err != nil { + return err + } + + _, err = eo.meter.RegisterCallback( + func(_ context.Context, observer metric.Observer) error { + eo.mut.RLock() + defer eo.mut.RUnlock() + + for _, sup := range eo.suppliers { + certs := sup.Certificates() + for _, cert := range certs { + observer.ObserveFloat64( + expirationCounter, + time.Until(cert.NotAfter).Seconds(), + metric.WithAttributes( + serviceAttrKey.String(sup.Name()), + issuerAttrKey.String(cert.Issuer.String()), + serialNrAttrKey.String(cert.SerialNumber.String()), + subjectAttrKey.String(cert.Subject.String()), + dnsNameAttrKey.String(strings.Join(cert.DNSNames, ",")), + ), + ) + } + } + + return nil + }, + expirationCounter, + ) + + return err +} + +func (eo *observer) Add(sup Supplier) { + eo.mut.Lock() + defer eo.mut.Unlock() + + eo.suppliers = append(eo.suppliers, sup) +} diff --git a/internal/otel/metrics/certificate/expiration_observer_test.go b/internal/otel/metrics/certificate/observer_test.go similarity index 50% rename from internal/otel/metrics/certificate/expiration_observer_test.go rename to internal/otel/metrics/certificate/observer_test.go index acbe06027..66feda724 100644 --- a/internal/otel/metrics/certificate/expiration_observer_test.go +++ b/internal/otel/metrics/certificate/observer_test.go @@ -29,13 +29,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" - "github.com/dadrus/heimdall/internal/keystore" - "github.com/dadrus/heimdall/internal/x/pkix/pemx" "github.com/dadrus/heimdall/internal/x/testsupport" ) @@ -47,30 +46,63 @@ func
attributeValue(set attribute.Set, key attribute.Key) attribute.Value { return attribute.Value{} } -func dataPointForCert(cert *x509.Certificate, dps []metricdata.DataPoint[float64]) metricdata.DataPoint[float64] { +func dataPointForCert(cert *x509.Certificate, dps []metricdata.DataPoint[float64]) []metricdata.DataPoint[float64] { + var data []metricdata.DataPoint[float64] + for _, dp := range dps { if cert.Subject.String() == attributeValue(dp.Attributes, subjectAttrKey).AsString() { - return dp + data = append(data, dp) } } - return metricdata.DataPoint[float64]{} + return data } func checkMetric(t *testing.T, dp []metricdata.DataPoint[float64], service string, cert *x509.Certificate) { t.Helper() data := dataPointForCert(cert, dp) + require.GreaterOrEqual(t, len(data), 1) + + names := make([]string, len(data)) + + for idx, entry := range data { + assert.LessOrEqual(t, entry.Value-time.Until(cert.NotAfter).Seconds(), 1.0) + + attributes := entry.Attributes + require.Equal(t, 5, attributes.Len()) + assert.Equal(t, strings.Join(cert.DNSNames, ","), attributeValue(attributes, dnsNameAttrKey).AsString()) + assert.Equal(t, cert.Issuer.String(), attributeValue(attributes, issuerAttrKey).AsString()) + assert.Equal(t, cert.SerialNumber.String(), attributeValue(attributes, serialNrAttrKey).AsString()) + assert.Equal(t, cert.Subject.String(), attributeValue(attributes, subjectAttrKey).AsString()) + + names[idx] = attributeValue(attributes, serviceAttrKey).AsString() + } + + assert.Contains(t, names, service) +} + +type staticCertificateSupplier struct { + name string + certs []*x509.Certificate +} - assert.LessOrEqual(t, data.Value-time.Until(cert.NotAfter).Seconds(), 1.0) +func (s *staticCertificateSupplier) Name() string { return s.name } +func (s *staticCertificateSupplier) Certificates() []*x509.Certificate { return s.certs } - attributes := data.Attributes - require.Equal(t, 5, attributes.Len()) - assert.Equal(t, strings.Join(cert.DNSNames, ","), attributeValue(attributes, dnsNameAttrKey).AsString()) - assert.Equal(t, cert.Issuer.String(), attributeValue(attributes, issuerAttrKey).AsString()) - assert.Equal(t, cert.SerialNumber.String(), attributeValue(attributes, serialNrAttrKey).AsString()) - assert.Equal(t, service, attributeValue(attributes, serviceAttrKey).AsString()) - assert.Equal(t, cert.Subject.String(), attributeValue(attributes, subjectAttrKey).AsString()) +type dynamicCertificateSupplier struct { + name string + certs []*x509.Certificate + idx int +} + +func (s *dynamicCertificateSupplier) Name() string { return s.name } +func (s *dynamicCertificateSupplier) Certificates() []*x509.Certificate { + nextIdx := (s.idx + 1) % len(s.certs) + cert := s.certs[s.idx] + s.idx = nextIdx + + return []*x509.Certificate{cert} } func TestCertificateExpirationCollector(t *testing.T) { @@ -122,56 +154,25 @@ func TestCertificateExpirationCollector(t *testing.T) { testsupport.WithKeyUsage(x509.KeyUsageDigitalSignature)) require.NoError(t, err) - ksPEMBytes, err := pemx.BuildPEM( - pemx.WithECDSAPrivateKey(ee2PrivKey), - pemx.WithX509Certificate(ee2cert), - pemx.WithX509Certificate(intCA1Cert), - pemx.WithX509Certificate(rootCA1.Certificate), - pemx.WithECDSAPrivateKey(ee1PrivKey, pemx.WithHeader("X-Key-ID", "key1")), - pemx.WithX509Certificate(ee1cert), - pemx.WithX509Certificate(intCA1Cert), - pemx.WithX509Certificate(rootCA1.Certificate), - ) - require.NoError(t, err) - - ks, err := keystore.NewKeyStoreFromPEMBytes(ksPEMBytes, "") - require.NoError(t, err) - for _, tc := range []struct { - uc 
string - opts []Option - assert func(t *testing.T, rm *metricdata.ResourceMetrics) + uc string + suppliers []Supplier + assert func(t *testing.T, rm *metricdata.ResourceMetrics, call int) }{ { - uc: "with nil key store", - opts: []Option{ - WithServiceKeyStore("foo", nil, WithKeyID("key1")), - WithEndEntityMonitoringOnly(false), - }, - assert: func(t *testing.T, rm *metricdata.ResourceMetrics) { - t.Helper() - - assert.Empty(t, rm.ScopeMetrics) - }, - }, - { - uc: "with unknown key id", - opts: []Option{ - WithServiceKeyStore("foo", ks, WithKeyID("foo")), - WithEndEntityMonitoringOnly(false), - }, - assert: func(t *testing.T, rm *metricdata.ResourceMetrics) { + uc: "without suppliers", + assert: func(t *testing.T, rm *metricdata.ResourceMetrics, _ int) { t.Helper() assert.Empty(t, rm.ScopeMetrics) }, }, { - uc: "for single service from single certificate", - opts: []Option{ - WithServiceCertificates("foo", []*x509.Certificate{rootCA1.Certificate}), + uc: "with single supplier providing only static ee certificate", + suppliers: []Supplier{ + &staticCertificateSupplier{name: "test", certs: []*x509.Certificate{ee1cert}}, }, - assert: func(t *testing.T, rm *metricdata.ResourceMetrics) { + assert: func(t *testing.T, rm *metricdata.ResourceMetrics, _ int) { t.Helper() require.Len(t, rm.ScopeMetrics, 1) @@ -188,16 +189,15 @@ func TestCertificateExpirationCollector(t *testing.T) { assert.False(t, data.IsMonotonic) assert.Len(t, data.DataPoints, 1) - checkMetric(t, data.DataPoints, "foo", rootCA1.Certificate) + checkMetric(t, data.DataPoints, "test", ee1cert) }, }, { - uc: "for single service for all certificates from existing key store entry specified by key id", - opts: []Option{ - WithServiceKeyStore("foo", ks, WithKeyID("key1")), - WithEndEntityMonitoringOnly(false), + uc: "with single supplier providing the entire chain statically", + suppliers: []Supplier{ + &staticCertificateSupplier{name: "test", certs: []*x509.Certificate{ee1cert, intCA1Cert, rootCA1.Certificate}}, }, - assert: func(t *testing.T, rm *metricdata.ResourceMetrics) { + assert: func(t *testing.T, rm *metricdata.ResourceMetrics, _ int) { t.Helper() require.Len(t, rm.ScopeMetrics, 1) @@ -214,21 +214,18 @@ func TestCertificateExpirationCollector(t *testing.T) { assert.False(t, data.IsMonotonic) assert.Len(t, data.DataPoints, 3) - // first certificate in the chain - checkMetric(t, data.DataPoints, "foo", ee1cert) - // second certificate in the chain - checkMetric(t, data.DataPoints, "foo", intCA1Cert) - // third certificate in the chain - checkMetric(t, data.DataPoints, "foo", rootCA1.Certificate) + checkMetric(t, data.DataPoints, "test", rootCA1.Certificate) + checkMetric(t, data.DataPoints, "test", intCA1Cert) + checkMetric(t, data.DataPoints, "test", ee1cert) }, }, { - uc: "for single service for all certificates from existing key store for the first", - opts: []Option{ - WithServiceKeyStore("foo", ks, WithFirstEntry()), - WithEndEntityMonitoringOnly(false), + uc: "with multiple suppliers providing the entire chain statically", + suppliers: []Supplier{ + &staticCertificateSupplier{name: "test-1", certs: []*x509.Certificate{ee1cert, intCA1Cert, rootCA1.Certificate}}, + &staticCertificateSupplier{name: "test-2", certs: []*x509.Certificate{ee2cert, intCA1Cert, rootCA1.Certificate}}, }, - assert: func(t *testing.T, rm *metricdata.ResourceMetrics) { + assert: func(t *testing.T, rm *metricdata.ResourceMetrics, _ int) { t.Helper() require.Len(t, rm.ScopeMetrics, 1) @@ -243,51 +240,23 @@ func TestCertificateExpirationCollector(t 
*testing.T) { data := metrics.Data.(metricdata.Sum[float64]) // nolint: forcetypeassert assert.False(t, data.IsMonotonic) - assert.Len(t, data.DataPoints, 3) - - // first certificate in the chain - checkMetric(t, data.DataPoints, "foo", ee2cert) - // second certificate in the chain - checkMetric(t, data.DataPoints, "foo", intCA1Cert) - // third certificate in the chain - checkMetric(t, data.DataPoints, "foo", rootCA1.Certificate) - }, - }, - { - uc: "for the ee certificate of a single service from existing key store entry specified by key id", - opts: []Option{ - WithServiceKeyStore("foo", ks, WithKeyID("key1")), - WithEndEntityMonitoringOnly(true), - }, - assert: func(t *testing.T, rm *metricdata.ResourceMetrics) { - t.Helper() - - require.Len(t, rm.ScopeMetrics, 1) - - sm := rm.ScopeMetrics[0] - require.Len(t, sm.Metrics, 1) - - metrics := sm.Metrics[0] - assert.Equal(t, "certificate.expiry", metrics.Name) - assert.Equal(t, "s", metrics.Unit) - assert.Equal(t, "Number of seconds until certificate expires", metrics.Description) + assert.Len(t, data.DataPoints, 6) - data := metrics.Data.(metricdata.Sum[float64]) // nolint: forcetypeassert - assert.False(t, data.IsMonotonic) - assert.Len(t, data.DataPoints, 1) + checkMetric(t, data.DataPoints, "test-1", rootCA1.Certificate) + checkMetric(t, data.DataPoints, "test-1", intCA1Cert) + checkMetric(t, data.DataPoints, "test-1", ee1cert) - // first certificate in the chain - checkMetric(t, data.DataPoints, "foo", ee1cert) + checkMetric(t, data.DataPoints, "test-2", rootCA1.Certificate) + checkMetric(t, data.DataPoints, "test-2", intCA1Cert) + checkMetric(t, data.DataPoints, "test-2", ee2cert) }, }, { - uc: "for ee certificates of multiple services from existing key store", - opts: []Option{ - WithServiceKeyStore("foo", ks, WithKeyID("key1")), - WithServiceKeyStore("bar", ks, WithFirstEntry()), - WithEndEntityMonitoringOnly(true), + uc: "with supplier providing a certificate dynamically", + suppliers: []Supplier{ + &dynamicCertificateSupplier{name: "test", certs: []*x509.Certificate{ee1cert, ee2cert}}, }, - assert: func(t *testing.T, rm *metricdata.ResourceMetrics) { + assert: func(t *testing.T, rm *metricdata.ResourceMetrics, call int) { t.Helper() require.Len(t, rm.ScopeMetrics, 1) @@ -302,37 +271,13 @@ func TestCertificateExpirationCollector(t *testing.T) { data := metrics.Data.(metricdata.Sum[float64]) // nolint: forcetypeassert assert.False(t, data.IsMonotonic) - assert.Len(t, data.DataPoints, 2) - - // service 1 - checkMetric(t, data.DataPoints, "foo", ee1cert) - // service 2 - checkMetric(t, data.DataPoints, "bar", ee2cert) - }, - }, - { - uc: "for all certificates of multiple services from existing key store", - opts: []Option{ - WithServiceKeyStore("foo", ks, WithKeyID("key1")), - WithServiceKeyStore("bar", ks, WithFirstEntry()), - WithEndEntityMonitoringOnly(false), - }, - assert: func(t *testing.T, rm *metricdata.ResourceMetrics) { - t.Helper() - - require.Len(t, rm.ScopeMetrics, 1) - - sm := rm.ScopeMetrics[0] - require.Len(t, sm.Metrics, 1) - - metrics := sm.Metrics[0] - assert.Equal(t, "certificate.expiry", metrics.Name) - assert.Equal(t, "s", metrics.Unit) - assert.Equal(t, "Number of seconds until certificate expires", metrics.Description) + assert.Len(t, data.DataPoints, 1) - data := metrics.Data.(metricdata.Sum[float64]) // nolint: forcetypeassert - assert.False(t, data.IsMonotonic) - assert.Len(t, data.DataPoints, 6) + if call == 1 { + checkMetric(t, data.DataPoints, "test", ee1cert) + } else { + checkMetric(t, data.DataPoints, 
"test", ee2cert) + } }, }, } { @@ -340,23 +285,34 @@ func TestCertificateExpirationCollector(t *testing.T) { // GIVEN exp := metric.NewManualReader() - meterProvider := metric.NewMeterProvider( + otel.SetMeterProvider(metric.NewMeterProvider( metric.WithResource(resource.Default()), metric.WithReader(exp), - ) + )) - // WHEN - err = Start(append(tc.opts, WithMeterProvider(meterProvider))...) + obs := NewObserver() + for _, supplier := range tc.suppliers { + obs.Add(supplier) + } - // THEN + err = obs.Start() require.NoError(t, err) - var rm metricdata.ResourceMetrics - err = exp.Collect(context.TODO(), &rm) + var rm1, rm2 metricdata.ResourceMetrics + // WHEN + err = exp.Collect(context.TODO(), &rm1) require.NoError(t, err) - tc.assert(t, &rm) + // THEN + tc.assert(t, &rm1, 1) + + // WHEN + err = exp.Collect(context.TODO(), &rm2) + require.NoError(t, err) + + // THEN + tc.assert(t, &rm2, 2) }) } } diff --git a/internal/otel/metrics/certificate/supplier.go b/internal/otel/metrics/certificate/supplier.go new file mode 100644 index 000000000..a8fceb184 --- /dev/null +++ b/internal/otel/metrics/certificate/supplier.go @@ -0,0 +1,8 @@ +package certificate + +import "crypto/x509" + +type Supplier interface { + Name() string + Certificates() []*x509.Certificate +} diff --git a/internal/otel/metrics/module.go b/internal/otel/metrics/module.go index 1136b419e..9136e566c 100644 --- a/internal/otel/metrics/module.go +++ b/internal/otel/metrics/module.go @@ -21,88 +21,16 @@ import ( "go.opentelemetry.io/contrib/instrumentation/runtime" "go.uber.org/fx" - "github.com/dadrus/heimdall/internal/config" - "github.com/dadrus/heimdall/internal/keystore" "github.com/dadrus/heimdall/internal/otel/metrics/certificate" - "github.com/dadrus/heimdall/internal/x" ) var Module = fx.Options( // nolint: gochecknoglobals fx.Invoke(runtime.Start), fx.Invoke(host.Start), - fx.Invoke(monitorCertificateExpiry), -) - -func monitorCertificateExpiry(conf *config.Configuration) error { - var ( - decisionSrvKS keystore.KeyStore - proxySrvKS keystore.KeyStore - managementSrvKS keystore.KeyStore - signerKS keystore.KeyStore - - decisionSrvKeyID string - proxySrvKeyID string - managementSrvKeyID string - signerKeyID string - ) - - dSrvTLSConf := conf.Serve.Decision.TLS - pSrvTLSConf := conf.Serve.Proxy.TLS - mSrvTLSConf := conf.Serve.Management.TLS - - if dSrvTLSConf != nil { - decisionSrvKS, _ = keystore.NewKeyStoreFromPEMFile(dSrvTLSConf.KeyStore.Path, dSrvTLSConf.KeyStore.Password) - decisionSrvKeyID = dSrvTLSConf.KeyID - } - - if pSrvTLSConf != nil { - proxySrvKS, _ = keystore.NewKeyStoreFromPEMFile(pSrvTLSConf.KeyStore.Path, pSrvTLSConf.KeyStore.Password) - proxySrvKeyID = pSrvTLSConf.KeyID - } - - if mSrvTLSConf != nil { - managementSrvKS, _ = keystore.NewKeyStoreFromPEMFile(mSrvTLSConf.KeyStore.Path, mSrvTLSConf.KeyStore.Password) - managementSrvKeyID = mSrvTLSConf.KeyID - } - - signerKS, _ = keystore.NewKeyStoreFromPEMFile( - conf.Signer.KeyStore.Path, - conf.Signer.KeyStore.Password, - ) - signerKeyID = conf.Signer.KeyID - - return certificate.Start( - certificate.WithServiceKeyStore( - "decision", decisionSrvKS, - x.IfThenElse(len(decisionSrvKeyID) != 0, - certificate.WithKeyID(decisionSrvKeyID), - certificate.WithFirstEntry(), - ), - ), - certificate.WithServiceKeyStore( - "proxy", - proxySrvKS, - x.IfThenElse(len(proxySrvKeyID) != 0, - certificate.WithKeyID(proxySrvKeyID), - certificate.WithFirstEntry(), - ), + fx.Provide( + fx.Annotate( + certificate.NewObserver, + fx.OnStart(func(co 
certificate.Observer) error { return co.Start() }), ), - certificate.WithServiceKeyStore( - "management", - managementSrvKS, - x.IfThenElse(len(managementSrvKeyID) != 0, - certificate.WithKeyID(managementSrvKeyID), - certificate.WithFirstEntry(), - ), - ), - certificate.WithServiceKeyStore( - "signer", - signerKS, - x.IfThenElse(len(signerKeyID) != 0, - certificate.WithKeyID(signerKeyID), - certificate.WithFirstEntry(), - ), - ), - certificate.WithEndEntityMonitoringOnly(false), - ) -} + ), +) diff --git a/internal/otel/resource.go b/internal/otel/resource.go index 828c46009..795c9f32c 100644 --- a/internal/otel/resource.go +++ b/internal/otel/resource.go @@ -18,7 +18,7 @@ package otel import ( "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "github.com/dadrus/heimdall/version" ) diff --git a/internal/rules/cel_execution_condition.go b/internal/rules/cel_execution_condition.go index 6b650afc2..49c439f6f 100644 --- a/internal/rules/cel_execution_condition.go +++ b/internal/rules/cel_execution_condition.go @@ -31,14 +31,20 @@ type celExecutionCondition struct { e *cellib.CompiledExpression } -func (c *celExecutionCondition) CanExecute(ctx heimdall.Context, sub *subject.Subject) (bool, error) { - obj := map[string]any{"Request": ctx.Request()} +func (c *celExecutionCondition) CanExecuteOnSubject(ctx heimdall.Context, sub *subject.Subject) (bool, error) { + if err := c.e.Eval(map[string]any{"Request": ctx.Request(), "Subject": sub}); err != nil { + if errors.Is(err, &cellib.EvalError{}) { + return false, nil + } - if sub != nil { - obj["Subject"] = sub + return false, err } - if err := c.e.Eval(obj); err != nil { + return true, nil +} + +func (c *celExecutionCondition) CanExecuteOnError(ctx heimdall.Context, cause error) (bool, error) { + if err := c.e.Eval(map[string]any{"Request": ctx.Request(), "Error": cellib.WrapError(cause)}); err != nil { if errors.Is(err, &cellib.EvalError{}) { return false, nil } diff --git a/internal/rules/cel_execution_condition_test.go b/internal/rules/cel_execution_condition_test.go index 95218a554..8bb24f47f 100644 --- a/internal/rules/cel_execution_condition_test.go +++ b/internal/rules/cel_execution_condition_test.go @@ -27,6 +27,7 @@ import ( "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/heimdall/mocks" "github.com/dadrus/heimdall/internal/rules/mechanisms/subject" + "github.com/dadrus/heimdall/internal/x/errorchain" ) func TestNewCelExecutionCondition(t *testing.T) { @@ -58,7 +59,7 @@ func TestNewCelExecutionCondition(t *testing.T) { } } -func TestCelExecutionConditionCanExecute(t *testing.T) { +func TestCelExecutionConditionCanExecuteOnSubject(t *testing.T) { t.Parallel() sub := &subject.Subject{ @@ -104,12 +105,12 @@ func TestCelExecutionConditionCanExecute(t *testing.T) { ctx.EXPECT().Request().Return(&heimdall.Request{ Method: http.MethodGet, - URL: &url.URL{ + URL: &heimdall.URL{URL: url.URL{ Scheme: "http", Host: "localhost", Path: "/test", RawQuery: "foo=bar&baz=zab", - }, + }}, ClientIPAddresses: []string{"127.0.0.1", "10.10.10.10"}, }) @@ -117,7 +118,67 @@ func TestCelExecutionConditionCanExecute(t *testing.T) { require.NoError(t, err) // WHEN - can, err := condition.CanExecute(ctx, sub) + can, err := condition.CanExecuteOnSubject(ctx, sub) + + // THEN + require.NoError(t, err) + assert.Equal(t, tc.expected, can) + }) + } +} + +type testIdentifier string + +func (tid 
testIdentifier) ID() string { return string(tid) } + +func TestCelExecutionConditionCanExecuteOnError(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + expression string + expected bool + }{ + { + uc: "complex expression evaluating to true", + expression: `type(Error) in [communication_error, authorization_error] && + Error.Source == "foobar" && + "bar" in Request.URL.Query().foo`, + expected: true, + }, + { + uc: "simple expression evaluating to false", + expression: `type(Error) == internal_error && Request.Method == "GET"`, + expected: false, + }, + { + uc: "simple expression evaluating to true", + expression: `type(Error) == authorization_error && Request.Method == "GET"`, + expected: true, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + // GIVEN + ctx := mocks.NewContextMock(t) + + ctx.EXPECT().Request().Return(&heimdall.Request{ + Method: http.MethodGet, + URL: &heimdall.URL{URL: url.URL{ + Scheme: "http", + Host: "localhost", + Path: "/test", + RawQuery: "foo=bar&baz=zab", + }}, + ClientIPAddresses: []string{"127.0.0.1", "10.10.10.10"}, + }) + + condition, err := newCelExecutionCondition(tc.expression) + require.NoError(t, err) + + // WHEN + can, err := condition.CanExecuteOnError(ctx, errorchain. + NewWithMessage(heimdall.ErrCommunication, "test"). + CausedBy(heimdall.ErrAuthorization).WithErrorContext(testIdentifier("foobar"))) // THEN require.NoError(t, err) diff --git a/internal/rules/composite_error_handler.go b/internal/rules/composite_error_handler.go index 66170909d..0e412a0b1 100644 --- a/internal/rules/composite_error_handler.go +++ b/internal/rules/composite_error_handler.go @@ -17,6 +17,8 @@ package rules import ( + "errors" + "github.com/rs/zerolog" "github.com/dadrus/heimdall/internal/heimdall" @@ -28,16 +30,21 @@ func (eh compositeErrorHandler) Execute(ctx heimdall.Context, exErr error) error logger := zerolog.Ctx(ctx.AppContext()) logger.Debug().Msg("Handling pipeline error") - for _, eh := range eh { - if eh.CanExecute(ctx, exErr) { - err := eh.Execute(ctx, exErr) - if err != nil { - logger.Error().Err(err).Msg("Failed to execute error handler") + for _, handler := range eh { + if err := handler.Execute(ctx, exErr); err != nil { + if errors.Is(err, errErrorHandlerNotApplicable) { + continue } + logger.Error().Err(err).Msg("Failed to execute error handler") + return err } + + return nil } + logger.Warn().Msg("No applicable error handler found") + return exErr } diff --git a/internal/rules/composite_error_handler_test.go b/internal/rules/composite_error_handler_test.go index 6d0b260b5..9a092e41c 100644 --- a/internal/rules/composite_error_handler_test.go +++ b/internal/rules/composite_error_handler_test.go @@ -35,10 +35,9 @@ func TestCompositeErrorHandlerExecutionWithFallback(t *testing.T) { ctx.EXPECT().AppContext().Return(context.Background()) eh1 := rulemocks.NewErrorHandlerMock(t) - eh1.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(false) + eh1.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(errErrorHandlerNotApplicable) eh2 := rulemocks.NewErrorHandlerMock(t) - eh2.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(true) eh2.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(nil) eh := compositeErrorHandler{eh1, eh2} @@ -58,7 +57,6 @@ func TestCompositeErrorHandlerExecutionWithoutFallback(t *testing.T) { ctx.EXPECT().AppContext().Return(context.Background()) eh1 := rulemocks.NewErrorHandlerMock(t) - eh1.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(true) 
eh1.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(nil) eh2 := rulemocks.NewErrorHandlerMock(t) @@ -80,10 +78,10 @@ func TestCompositeErrorHandlerExecutionWithNoApplicableErrorHandler(t *testing.T ctx.EXPECT().AppContext().Return(context.Background()) eh1 := rulemocks.NewErrorHandlerMock(t) - eh1.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(false) + eh1.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(errErrorHandlerNotApplicable) eh2 := rulemocks.NewErrorHandlerMock(t) - eh2.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(false) + eh2.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(errErrorHandlerNotApplicable) eh := compositeErrorHandler{eh1, eh2} diff --git a/internal/rules/conditional_error_handler.go b/internal/rules/conditional_error_handler.go new file mode 100644 index 000000000..4d5ec823a --- /dev/null +++ b/internal/rules/conditional_error_handler.go @@ -0,0 +1,50 @@ +// Copyright 2023 Dimitrij Drus +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +package rules + +import ( + "errors" + + "github.com/rs/zerolog" + + "github.com/dadrus/heimdall/internal/heimdall" +) + +var errErrorHandlerNotApplicable = errors.New("error handler not applicable") + +type conditionalErrorHandler struct { + h errorHandler + c executionCondition +} + +func (h *conditionalErrorHandler) Execute(ctx heimdall.Context, causeErr error) error { + logger := zerolog.Ctx(ctx.AppContext()) + + logger.Debug().Str("_id", h.h.ID()).Msg("Checking error handler execution condition") + + if canExecute, err := h.c.CanExecuteOnError(ctx, causeErr); err != nil { + return err + } else if canExecute { + return h.h.Execute(ctx, causeErr) + } + + logger.Debug().Str("_id", h.h.ID()).Msg("Error handler not applicable") + + return errErrorHandlerNotApplicable +} + +func (h *conditionalErrorHandler) ID() string { return h.h.ID() } diff --git a/internal/rules/conditional_error_handler_test.go b/internal/rules/conditional_error_handler_test.go new file mode 100644 index 000000000..8a88de57e --- /dev/null +++ b/internal/rules/conditional_error_handler_test.go @@ -0,0 +1,118 @@ +// Copyright 2022-2024 Dimitrij Drus +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package rules + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/dadrus/heimdall/internal/heimdall/mocks" + rulemocks "github.com/dadrus/heimdall/internal/rules/mocks" +) + +func TestConditionalErrorHandlerExecute(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + configureMocks func(t *testing.T, c *rulemocks.ExecutionConditionMock, h *rulemocks.ErrorHandlerMock) + assert func(t *testing.T, err error) + }{ + { + uc: "executes if can", + configureMocks: func(t *testing.T, c *rulemocks.ExecutionConditionMock, h *rulemocks.ErrorHandlerMock) { + t.Helper() + + c.EXPECT().CanExecuteOnError(mock.Anything, mock.Anything).Return(true, nil) + h.EXPECT().Execute(mock.Anything, mock.Anything).Return(nil) + h.EXPECT().ID().Return("test") + }, + assert: func(t *testing.T, err error) { + t.Helper() + + require.NoError(t, err) + }, + }, + { + uc: "does not execute if can not", + configureMocks: func(t *testing.T, c *rulemocks.ExecutionConditionMock, h *rulemocks.ErrorHandlerMock) { + t.Helper() + + c.EXPECT().CanExecuteOnError(mock.Anything, mock.Anything).Return(false, nil) + h.EXPECT().ID().Return("test") + }, + assert: func(t *testing.T, err error) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, errErrorHandlerNotApplicable) + }, + }, + { + uc: "does not execute if can check fails", + configureMocks: func(t *testing.T, c *rulemocks.ExecutionConditionMock, h *rulemocks.ErrorHandlerMock) { + t.Helper() + + c.EXPECT().CanExecuteOnError(mock.Anything, mock.Anything). + Return(true, errors.New("some error")) + h.EXPECT().ID().Return("test") + }, + assert: func(t *testing.T, err error) { + t.Helper() + + require.Error(t, err) + require.ErrorContains(t, err, "some error") + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + // GIVEN + condition := rulemocks.NewExecutionConditionMock(t) + handler := rulemocks.NewErrorHandlerMock(t) + decorator := conditionalErrorHandler{c: condition, h: handler} + + ctx := mocks.NewContextMock(t) + ctx.EXPECT().AppContext().Return(context.Background()) + + tc.configureMocks(t, condition, handler) + + // WHEN + err := decorator.Execute(ctx, errors.New("test error")) + + // THEN + tc.assert(t, err) + }) + } +} + +func TestConditionalErrorHandlerID(t *testing.T) { + t.Parallel() + + condition := rulemocks.NewExecutionConditionMock(t) + handler := rulemocks.NewErrorHandlerMock(t) + handler.EXPECT().ID().Return("test") + + eh := conditionalErrorHandler{c: condition, h: handler} + + id := eh.ID() + assert.Equal(t, "test", id) +} diff --git a/internal/rules/conditional_subject_handler.go b/internal/rules/conditional_subject_handler.go index 267c60fd8..74cb89c4d 100644 --- a/internal/rules/conditional_subject_handler.go +++ b/internal/rules/conditional_subject_handler.go @@ -44,7 +44,7 @@ func (h *conditionalSubjectHandler) Execute(ctx heimdall.Context, sub *subject.S } } - if canExecute, err := h.c.CanExecute(ctx, sub); err != nil { + if canExecute, err := h.c.CanExecuteOnSubject(ctx, sub); err != nil { return err } else if canExecute { return h.h.Execute(ctx, sub) diff --git a/internal/rules/conditional_subject_handler_test.go b/internal/rules/conditional_subject_handler_test.go index 881c2a3e5..c38a32a52 100644 --- a/internal/rules/conditional_subject_handler_test.go +++ 
b/internal/rules/conditional_subject_handler_test.go @@ -42,7 +42,7 @@ func TestConditionalSubjectHandlerExecute(t *testing.T) { configureMocks: func(t *testing.T, c *rulemocks.ExecutionConditionMock, h *rulemocks.SubjectHandlerMock) { t.Helper() - c.EXPECT().CanExecute(mock.Anything, mock.Anything).Return(true, nil) + c.EXPECT().CanExecuteOnSubject(mock.Anything, mock.Anything).Return(true, nil) h.EXPECT().Execute(mock.Anything, mock.Anything).Return(nil) h.EXPECT().ID().Return("test") }, @@ -57,7 +57,7 @@ func TestConditionalSubjectHandlerExecute(t *testing.T) { configureMocks: func(t *testing.T, c *rulemocks.ExecutionConditionMock, h *rulemocks.SubjectHandlerMock) { t.Helper() - c.EXPECT().CanExecute(mock.Anything, mock.Anything).Return(false, nil) + c.EXPECT().CanExecuteOnSubject(mock.Anything, mock.Anything).Return(false, nil) h.EXPECT().ID().Return("test") }, assert: func(t *testing.T, err error) { @@ -71,7 +71,7 @@ func TestConditionalSubjectHandlerExecute(t *testing.T) { configureMocks: func(t *testing.T, c *rulemocks.ExecutionConditionMock, h *rulemocks.SubjectHandlerMock) { t.Helper() - c.EXPECT().CanExecute(mock.Anything, mock.Anything). + c.EXPECT().CanExecuteOnSubject(mock.Anything, mock.Anything). Return(true, testsupport.ErrTestPurpose) h.EXPECT().ID().Return("test") }, @@ -118,3 +118,16 @@ func TestConditionalSubjectHandlerContinueOnError(t *testing.T) { // THEN assert.True(t, ok) } + +func TestConditionalSubjectHandlerID(t *testing.T) { + t.Parallel() + + condition := rulemocks.NewExecutionConditionMock(t) + handler := rulemocks.NewSubjectHandlerMock(t) + handler.EXPECT().ID().Return("test") + + eh := conditionalSubjectHandler{c: condition, h: handler} + + id := eh.ID() + assert.Equal(t, "test", id) +} diff --git a/internal/rules/config/backend.go b/internal/rules/config/backend.go index 424a428a1..4ca0df1e6 100644 --- a/internal/rules/config/backend.go +++ b/internal/rules/config/backend.go @@ -23,32 +23,28 @@ import ( ) type Backend struct { - Host string `json:"host" yaml:"host"` - URLRewriter *URLRewriter `json:"rewrite" yaml:"rewrite"` + Host string `json:"host" yaml:"host" validate:"required"` //nolint:tagalign + URLRewriter *URLRewriter `json:"rewrite" yaml:"rewrite" validate:"omitnil"` //nolint:tagalign } -func (f *Backend) CreateURL(value *url.URL) *url.URL { +func (b *Backend) CreateURL(value *url.URL) *url.URL { upstreamURL := &url.URL{ Scheme: value.Scheme, - Host: f.Host, + Host: b.Host, Path: value.Path, RawPath: value.RawPath, RawQuery: value.RawQuery, } - if f.URLRewriter != nil { - f.URLRewriter.Rewrite(upstreamURL) + if b.URLRewriter != nil { + b.URLRewriter.Rewrite(upstreamURL) } return upstreamURL } -func (f *Backend) DeepCopyInto(out *Backend) { - if f == nil { - return - } - - jsonStr, _ := json.Marshal(f) +func (b *Backend) DeepCopyInto(out *Backend) { + jsonStr, _ := json.Marshal(b) // we cannot do anything with an error here as // the interface implemented here doesn't support diff --git a/internal/rules/config/decoder.go b/internal/rules/config/decoder.go index 36a38391c..752c031ec 100644 --- a/internal/rules/config/decoder.go +++ b/internal/rules/config/decoder.go @@ -28,7 +28,6 @@ func DecodeConfig(input any, output any) error { dec, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( - matcherDecodeHookFunc, mapstructure.StringToTimeDurationHookFunc(), ), Result: output, diff --git a/internal/config/signer.go b/internal/rules/config/encoded_slash_handling.go similarity index 69% rename 
from internal/config/signer.go rename to internal/rules/config/encoded_slash_handling.go index 3a00053a0..ed7f138e6 100644 --- a/internal/config/signer.go +++ b/internal/rules/config/encoded_slash_handling.go @@ -1,4 +1,4 @@ -// Copyright 2022 Dimitrij Drus +// Copyright 2023 Dimitrij Drus // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,8 +16,10 @@ package config -type SignerConfig struct { - Name string `koanf:"name"` - KeyStore KeyStore `koanf:"key_store"` - KeyID string `koanf:"key_id"` -} +type EncodedSlashesHandling string + +const ( + EncodedSlashesOff EncodedSlashesHandling = "off" + EncodedSlashesOn EncodedSlashesHandling = "on" + EncodedSlashesOnNoDecode EncodedSlashesHandling = "no_decode" +) diff --git a/internal/rules/config/mapstructure_decoder.go b/internal/rules/config/mapstructure_decoder.go deleted file mode 100644 index a8ff0bd02..000000000 --- a/internal/rules/config/mapstructure_decoder.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "errors" - "fmt" - "reflect" - - "github.com/dadrus/heimdall/internal/x" -) - -var ( - ErrURLMissing = errors.New("url property not present") - ErrURLType = errors.New("bad url type") - ErrStrategyType = errors.New("bad strategy type") - ErrUnsupportedStrategy = errors.New("unsupported strategy") -) - -func matcherDecodeHookFunc(from reflect.Type, to reflect.Type, data any) (any, error) { - if to != reflect.TypeOf(Matcher{}) { - return data, nil - } - - if from.Kind() != reflect.String && from.Kind() != reflect.Map { - return data, nil - } - - if from.Kind() == reflect.String { - // nolint: forcetypeassert - // already checked above - return Matcher{URL: data.(string), Strategy: "glob"}, nil - } - - // nolint: forcetypeassert - // already checked above - values := data.(map[string]any) - - var strategyValue string - - URL, urlPresent := values["url"] - if !urlPresent { - return nil, ErrURLMissing - } - - urlValue, ok := URL.(string) - if !ok { - return nil, ErrURLType - } - - strategy, strategyPresent := values["strategy"] - if strategyPresent { - strategyValue, ok = strategy.(string) - if !ok { - return nil, ErrStrategyType - } - - if strategyValue != "glob" && strategyValue != "regex" { - return nil, fmt.Errorf("%w: %s", ErrUnsupportedStrategy, strategyValue) - } - } - - return Matcher{ - URL: urlValue, - Strategy: x.IfThenElse(strategyPresent, strategyValue, "glob"), - }, nil -} diff --git a/internal/rules/config/mapstructure_decoder_test.go b/internal/rules/config/mapstructure_decoder_test.go deleted file mode 100644 index 6af3d069c..000000000 --- a/internal/rules/config/mapstructure_decoder_test.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/dadrus/heimdall/internal/x/testsupport" -) - -func TestMatcherDecodeHookFunc(t *testing.T) { - t.Parallel() - - type Typ struct { - Matcher Matcher `json:"match"` - } - - for _, tc := range []struct { - uc string - config []byte - assert func(t *testing.T, err error, matcher *Matcher) - }{ - { - uc: "specified as string", - config: []byte(`match: foo.bar`), - assert: func(t *testing.T, err error, matcher *Matcher) { - t.Helper() - - require.NoError(t, err) - assert.Equal(t, "foo.bar", matcher.URL) - assert.Equal(t, "glob", matcher.Strategy) - }, - }, - { - uc: "specified as structured type without url", - config: []byte(` -match: - strategy: foo -`), - assert: func(t *testing.T, err error, _ *Matcher) { - t.Helper() - - require.Error(t, err) - assert.Contains(t, err.Error(), ErrURLMissing.Error()) - }, - }, - { - uc: "specified as structured type with bad url type", - config: []byte(` -match: - url: 1 -`), - assert: func(t *testing.T, err error, _ *Matcher) { - t.Helper() - - require.Error(t, err) - assert.Contains(t, err.Error(), ErrURLType.Error()) - }, - }, - { - uc: "specified as structured type with bad strategy type", - config: []byte(` -match: - url: foo.bar - strategy: true -`), - assert: func(t *testing.T, err error, _ *Matcher) { - t.Helper() - - require.Error(t, err) - assert.Contains(t, err.Error(), ErrStrategyType.Error()) - }, - }, - { - uc: "specified as structured type with unsupported strategy", - config: []byte(` -match: - url: foo.bar - strategy: foo -`), - assert: func(t *testing.T, err error, _ *Matcher) { - t.Helper() - - require.Error(t, err) - assert.Contains(t, err.Error(), ErrUnsupportedStrategy.Error()) - }, - }, - { - uc: "specified as structured type without strategy specified", - config: []byte(` -match: - url: foo.bar -`), - assert: func(t *testing.T, err error, matcher *Matcher) { - t.Helper() - - require.NoError(t, err) - assert.Equal(t, "foo.bar", matcher.URL) - assert.Equal(t, "glob", matcher.Strategy) - }, - }, - { - uc: "specified as structured type with glob strategy specified", - config: []byte(` -match: - url: foo.bar - strategy: glob -`), - assert: func(t *testing.T, err error, matcher *Matcher) { - t.Helper() - - require.NoError(t, err) - assert.Equal(t, "foo.bar", matcher.URL) - assert.Equal(t, "glob", matcher.Strategy) - }, - }, - { - uc: "specified as structured type with regex strategy specified", - config: []byte(` -match: - url: foo.bar - strategy: regex -`), - assert: func(t *testing.T, err error, matcher *Matcher) { - t.Helper() - - require.NoError(t, err) - assert.Equal(t, "foo.bar", matcher.URL) - assert.Equal(t, "regex", matcher.Strategy) - }, - }, - } { - t.Run(tc.uc, func(t *testing.T) { - // GIVEN - raw, err := testsupport.DecodeTestConfig(tc.config) - require.NoError(t, err) - - var typ Typ - - // WHEN - err = DecodeConfig(raw, &typ) - - // THEN - tc.assert(t, err, &typ.Matcher) - }) - } -} diff 
--git a/internal/rules/config/matcher.go b/internal/rules/config/matcher.go index 92f0549da..5c2f80938 100644 --- a/internal/rules/config/matcher.go +++ b/internal/rules/config/matcher.go @@ -16,31 +16,53 @@ package config -import ( - "github.com/goccy/go-json" - - "github.com/dadrus/heimdall/internal/x/stringx" -) +import "slices" type Matcher struct { - URL string `json:"url" yaml:"url"` - Strategy string `json:"strategy" yaml:"strategy"` + Routes []Route `json:"routes" yaml:"routes" validate:"required,dive"` //nolint:lll,tagalign + BacktrackingEnabled *bool `json:"backtracking_enabled" yaml:"backtracking_enabled"` //nolint:lll,tagalign + Scheme string `json:"scheme" yaml:"scheme" validate:"omitempty,oneof=http https"` //nolint:lll,tagalign + Methods []string `json:"methods" yaml:"methods" validate:"omitempty,dive,required"` //nolint:lll,tagalign + Hosts []HostMatcher `json:"hosts" yaml:"hosts" validate:"omitempty,dive,required"` //nolint:lll,tagalign } -func (m *Matcher) UnmarshalJSON(data []byte) error { - if data[0] == '"' { - // data contains just the url matching value - m.URL = stringx.ToString(data[1 : len(data)-1]) - m.Strategy = "glob" +type Route struct { + Path string `json:"path" yaml:"path" validate:"required"` //nolint:lll,tagalign + PathParams []ParameterMatcher `json:"path_params" yaml:"path_params" validate:"omitempty,dive,required"` //nolint:lll,tagalign +} - return nil - } +func (r *Route) DeepCopyInto(out *Route) { + *out = *r - var rawData map[string]any + out.PathParams = slices.Clone(r.PathParams) +} + +type ParameterMatcher struct { + Name string `json:"name" yaml:"name" validate:"required,ne=*"` //nolint:tagalign + Value string `json:"value" yaml:"value" validate:"required"` //nolint:tagalign + Type string `json:"type" yaml:"type" validate:"required,oneof=exact glob regex"` //nolint:tagalign +} - if err := json.Unmarshal(data, &rawData); err != nil { - return err +type HostMatcher struct { + Value string `json:"value" yaml:"value" validate:"required"` //nolint:tagalign + Type string `json:"type" yaml:"type" validate:"required,oneof=exact glob regex"` //nolint:tagalign +} + +func (m *Matcher) DeepCopyInto(out *Matcher) { + var withBacktracking *bool + + if m.BacktrackingEnabled != nil { + value := *m.BacktrackingEnabled + withBacktracking = &value } - return DecodeConfig(rawData, m) + out.Scheme = m.Scheme + out.BacktrackingEnabled = withBacktracking + out.Methods = slices.Clone(m.Methods) + out.Hosts = slices.Clone(m.Hosts) + + out.Routes = make([]Route, len(m.Routes)) + for i, route := range m.Routes { + route.DeepCopyInto(&out.Routes[i]) + } } diff --git a/internal/rules/config/matcher_test.go b/internal/rules/config/matcher_test.go index b3ccd652d..a2b0c7f2c 100644 --- a/internal/rules/config/matcher_test.go +++ b/internal/rules/config/matcher_test.go @@ -19,86 +19,71 @@ package config import ( "testing" - "github.com/goccy/go-json" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -func TestMatcherUnmarshalJSON(t *testing.T) { +func TestMatcherDeepCopyInto(t *testing.T) { t.Parallel() - type Typ struct { - Matcher Matcher `json:"match"` - } + trueValue := true for _, tc := range []struct { - uc string - config []byte - assert func(t *testing.T, err error, matcher *Matcher) + uc string + in *Matcher }{ { - uc: "specified as string", - config: []byte(`{ "match": "foo.bar" }`), - assert: func(t *testing.T, err error, matcher *Matcher) { - t.Helper() - - require.NoError(t, err) - assert.Equal(t, 
"foo.bar", matcher.URL) - assert.Equal(t, "glob", matcher.Strategy) + uc: "single route defining only a path", + in: &Matcher{ + Routes: []Route{{Path: "/foo/bar"}}, }, }, { - uc: "specified as structured type with invalid json structure", - config: []byte(`{ -"match": { - strategy: foo -} -}`), - assert: func(t *testing.T, err error, _ *Matcher) { - t.Helper() - - require.Error(t, err) - assert.Contains(t, err.Error(), "invalid character") + uc: "single route defining path and some path parameters", + in: &Matcher{ + Routes: []Route{ + { + Path: "/:foo/:bar", + PathParams: []ParameterMatcher{ + {Name: "foo", Value: "bar", Type: "glob"}, + {Name: "bar", Value: "baz", Type: "regex"}, + }, + }, + }, }, }, { - uc: "specified as structured type without url", - config: []byte(`{ -"match": { - "strategy": "foo" -} -}`), - assert: func(t *testing.T, err error, _ *Matcher) { - t.Helper() - - require.Error(t, err) - assert.Contains(t, err.Error(), ErrURLMissing.Error()) - }, - }, - { - uc: "specified as structured type without strategy specified", - config: []byte(`{ -"match": { - "url": "foo.bar" -} -}`), - assert: func(t *testing.T, err error, matcher *Matcher) { - t.Helper() - - require.NoError(t, err) - assert.Equal(t, "foo.bar", matcher.URL) - assert.Equal(t, "glob", matcher.Strategy) + uc: "multiple routes and additional constraints", + in: &Matcher{ + Routes: []Route{ + { + Path: "/:foo/:bar", + PathParams: []ParameterMatcher{ + {Name: "foo", Value: "bar", Type: "glob"}, + {Name: "bar", Value: "baz", Type: "regex"}, + }, + }, + { + Path: "/some/static/path", + }, + }, + BacktrackingEnabled: &trueValue, + Scheme: "https", + Hosts: []HostMatcher{ + { + Value: "*example.com", + Type: "glob", + }, + }, + Methods: []string{"GET", "POST"}, }, }, } { t.Run(tc.uc, func(t *testing.T) { - var typ Typ + out := new(Matcher) - // WHEN - err := json.Unmarshal(tc.config, &typ) + tc.in.DeepCopyInto(out) - // THEN - tc.assert(t, err, &typ.Matcher) + assert.Equal(t, tc.in, out) }) } } diff --git a/internal/rules/config/parser_test.go b/internal/rules/config/parser_test.go index ed2256733..4e2995f26 100644 --- a/internal/rules/config/parser_test.go +++ b/internal/rules/config/parser_test.go @@ -53,62 +53,216 @@ func TestParseRules(t *testing.T) { assert: func(t *testing.T, err error, ruleSet *RuleSet) { t.Helper() + require.Error(t, err) require.ErrorIs(t, err, ErrEmptyRuleSet) require.Nil(t, ruleSet) }, }, { - uc: "JSON content type and not empty contents", + uc: "Empty JSON content", + contentType: "application/json", + assert: func(t *testing.T, err error, ruleSet *RuleSet) { + t.Helper() + + require.ErrorIs(t, err, ErrEmptyRuleSet) + require.Nil(t, ruleSet) + }, + }, + { + uc: "JSON rule set without rules", contentType: "application/json", content: []byte(`{ "version": "1", "name": "foo", -"rules": [{"id": "bar"}] +"rules": [] }`), assert: func(t *testing.T, err error, ruleSet *RuleSet) { t.Helper() - require.NoError(t, err) - require.NotNil(t, ruleSet) - assert.Equal(t, "1", ruleSet.Version) - assert.Equal(t, "foo", ruleSet.Name) - assert.Len(t, ruleSet.Rules, 1) + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.Contains(t, err.Error(), "'rules' must contain more than 0 items") + require.Nil(t, ruleSet) }, }, { - uc: "JSON content type with validation error", + uc: "JSON rule set with a rule without required elements", contentType: "application/json", content: []byte(`{ "version": "1", "name": "foo", -"rules": [{"id": "bar", "allow_encoded_slashes": "foo"}] +"rules": 
[{"forward_to": {"host":"foo.bar"}}] }`), assert: func(t *testing.T, err error, ruleSet *RuleSet) { t.Helper() + require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.Contains(t, err.Error(), "'rules'[0].'id' is a required field") + require.Contains(t, err.Error(), "'rules'[0].'match' is a required field") + require.Contains(t, err.Error(), "'rules'[0].'execute' must contain more than 0 items") require.Nil(t, ruleSet) }, }, { - uc: "JSON content type and empty contents", + uc: "JSON rule set with a rule which match definition does not contain required fields", contentType: "application/json", + content: []byte(`{ +"version": "1", +"name": "foo", +"rules": [ + { + "id": "foo", + "match": { + "hosts":[{ "value": "*.foo.bar", "type": "glob" }] + }, + "execute": [{"authenticator":"test"}]}] +}`), assert: func(t *testing.T, err error, ruleSet *RuleSet) { t.Helper() - require.ErrorIs(t, err, ErrEmptyRuleSet) + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "'rules'[0].'match'.'routes' is a required field") + require.Nil(t, ruleSet) + }, + }, + { + uc: "JSON rule set with a rule which match definition contains unsupported scheme", + contentType: "application/json", + content: []byte(`{ +"version": "1", +"name": "foo", +"rules": [ + { + "id": "foo", + "match":{ + "routes": [{ "path":"/foo/bar" }], + "scheme":"foo", + "methods":["ALL"] + }, + "execute": [{"authenticator":"test"}] + }] +}`), + assert: func(t *testing.T, err error, ruleSet *RuleSet) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.Contains(t, err.Error(), "'rules'[0].'match'.'scheme' must be one of [http https]") + require.Nil(t, ruleSet) + }, + }, + { + uc: "JSON rule set with a rule with forward_to without host", + contentType: "application/json", + content: []byte(`{ +"version": "1", +"name": "foo", +"rules": [ + { + "id": "foo", + "match":{ + "routes": [{ "path":"/foo/bar" }] + }, + "execute": [{"authenticator":"test"}], + "forward_to": { "rewrite": {"scheme": "http"}} + }] +}`), + assert: func(t *testing.T, err error, ruleSet *RuleSet) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.Contains(t, err.Error(), "'rules'[0].'forward_to'.'host' is a required field") + require.Nil(t, ruleSet) + }, + }, + { + uc: "JSON rule set with invalid allow_encoded_slashes settings", + contentType: "application/json", + content: []byte(`{ +"version": "1", +"name": "foo", +"rules": [ + { + "id": "foo", + "match":{ + "routes": [{ "path":"/foo/bar" }] + }, + "allow_encoded_slashes": "foo", + "execute": [{"authenticator":"test"}] + }] +}`), + assert: func(t *testing.T, err error, ruleSet *RuleSet) { + t.Helper() + + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.Contains(t, err.Error(), "'rules'[0].'allow_encoded_slashes' must be one of [off on no_decode]") require.Nil(t, ruleSet) }, }, { - uc: "YAML content type and not empty contents", + uc: "Valid JSON rule set", + contentType: "application/json", + content: []byte(`{ +"version": "1", +"name": "foo", +"rules": [ + { + "id": "foo", + "match":{ + "routes": [{ "path":"/foo/bar" }], + "methods": ["ALL"], + "backtracking_enabled": true, + "hosts":[{ "value": "*.foo.bar", "type": "glob" }], + "scheme": "https" + }, + "execute": [{"authenticator":"test"}] + }] +}`), + assert: func(t *testing.T, err error, ruleSet *RuleSet) { + t.Helper() + + require.NoError(t, err) + require.NotNil(t, ruleSet) + 
assert.Equal(t, "1", ruleSet.Version) + assert.Equal(t, "foo", ruleSet.Name) + assert.Len(t, ruleSet.Rules, 1) + + rul := ruleSet.Rules[0] + require.NotNil(t, rul) + assert.Equal(t, "foo", rul.ID) + assert.Len(t, rul.Matcher.Routes, 1) + assert.Equal(t, "/foo/bar", rul.Matcher.Routes[0].Path) + assert.ElementsMatch(t, []string{"ALL"}, rul.Matcher.Methods) + assert.True(t, *rul.Matcher.BacktrackingEnabled) + assert.Len(t, rul.Execute, 1) + assert.Equal(t, "test", rul.Execute[0]["authenticator"]) + }, + }, + { + uc: "Valid YAML rule set", contentType: "application/yaml", content: []byte(` version: "1" name: foo rules: - id: bar - allow_encoded_slashes: off + match: + routes: + - path: /foo/:bar + path_params: + - name: bar + type: glob + value: "*foo" + methods: + - GET + forward_to: + host: test + allow_encoded_slashes: no_decode + execute: + - authenticator: test `), assert: func(t *testing.T, err error, ruleSet *RuleSet) { t.Helper() @@ -118,10 +272,48 @@ rules: assert.Equal(t, "1", ruleSet.Version) assert.Equal(t, "foo", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) + rul := ruleSet.Rules[0] + require.NotNil(t, rul) + assert.Equal(t, "bar", rul.ID) + assert.Len(t, rul.Matcher.Routes, 1) + assert.Equal(t, "/foo/:bar", rul.Matcher.Routes[0].Path) + assert.Len(t, rul.Matcher.Routes[0].PathParams, 1) + assert.ElementsMatch(t, []string{"GET"}, rul.Matcher.Methods) + assert.Equal(t, "test", rul.Backend.Host) + assert.Equal(t, EncodedSlashesOnNoDecode, rul.EncodedSlashesHandling) + assert.Len(t, rul.Execute, 1) + assert.Equal(t, "test", rul.Execute[0]["authenticator"]) + }, + }, + { + uc: "YAML content type and validation error due to missing properties", + contentType: "application/yaml", + content: []byte(` +version: "1" +name: foo +rules: +- id: bar + match: + routes: + - path: /foo/:* + path_params: + - name: "*" + type: glob + value: "*foo" + execute: + - authenticator: test +`), + assert: func(t *testing.T, err error, ruleSet *RuleSet) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "'rules'[0].'match'.'routes'[0].'path_params'[0].'name' should not be equal to *") + require.Nil(t, ruleSet) }, }, { - uc: "YAML content type and validation error", + uc: "YAML content type and validation error due bad path params name", contentType: "application/yaml", content: []byte(` version: "1" @@ -133,7 +325,11 @@ rules: assert: func(t *testing.T, err error, ruleSet *RuleSet) { t.Helper() + require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "'rules'[0].'allow_encoded_slashes' must be one of [off on no_decode]") + require.ErrorContains(t, err, "'rules'[0].'match' is a required field") + require.ErrorContains(t, err, "'rules'[0].'execute' must contain more than 0 items") require.Nil(t, ruleSet) }, }, @@ -191,6 +387,11 @@ version: "1" name: foo rules: - id: bar + match: + routes: + - path: foo + execute: + - authenticator: test `), assert: func(t *testing.T, err error, ruleSet *RuleSet) { t.Helper() @@ -200,7 +401,12 @@ rules: assert.Equal(t, "1", ruleSet.Version) assert.Equal(t, "foo", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) - assert.Equal(t, "bar", ruleSet.Rules[0].ID) + + rul := ruleSet.Rules[0] + require.NotNil(t, rul) + assert.Equal(t, "bar", rul.ID) + assert.Len(t, rul.Matcher.Routes, 1) + assert.Equal(t, "foo", rul.Matcher.Routes[0].Path) }, }, { @@ -229,6 +435,17 @@ version: "1" name: ${FOO} rules: - id: bar + match: + routes: + - path: /foo/:bar + path_params: + 
- name: bar + type: glob + value: "[a-z]" + methods: + - GET + execute: + - authenticator: test `), assert: func(t *testing.T, err error, ruleSet *RuleSet) { t.Helper() @@ -238,7 +455,17 @@ rules: assert.Equal(t, "1", ruleSet.Version) assert.Equal(t, "bar", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) - assert.Equal(t, "bar", ruleSet.Rules[0].ID) + + rul := ruleSet.Rules[0] + require.NotNil(t, rul) + assert.Equal(t, "bar", rul.ID) + assert.Len(t, rul.Matcher.Routes, 1) + assert.Equal(t, "/foo/:bar", rul.Matcher.Routes[0].Path) + assert.Len(t, rul.Matcher.Routes[0].PathParams, 1) + assert.Equal(t, "bar", rul.Matcher.Routes[0].PathParams[0].Name) + assert.Equal(t, "glob", rul.Matcher.Routes[0].PathParams[0].Type) + assert.Equal(t, "[a-z]", rul.Matcher.Routes[0].PathParams[0].Value) + assert.Equal(t, "GET", rul.Matcher.Methods[0]) }, }, { @@ -248,6 +475,11 @@ version: "1" name: ${FOO} rules: - id: bar + match: + routes: + - path: foo + execute: + - authenticator: test `), assert: func(t *testing.T, err error, ruleSet *RuleSet) { t.Helper() @@ -257,7 +489,12 @@ rules: assert.Equal(t, "1", ruleSet.Version) assert.Equal(t, "${FOO}", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) - assert.Equal(t, "bar", ruleSet.Rules[0].ID) + + rul := ruleSet.Rules[0] + require.NotNil(t, rul) + assert.Equal(t, "bar", rul.ID) + assert.Len(t, rul.Matcher.Routes, 1) + assert.Equal(t, "foo", rul.Matcher.Routes[0].Path) }, }, } { diff --git a/internal/rules/config/rule.go b/internal/rules/config/rule.go index 8648fdaf7..ed1e9b10e 100644 --- a/internal/rules/config/rule.go +++ b/internal/rules/config/rule.go @@ -17,46 +17,50 @@ package config import ( - "github.com/dadrus/heimdall/internal/config" -) + "crypto" + "fmt" -type EncodedSlashesHandling string + "github.com/goccy/go-json" -const ( - EncodedSlashesOff EncodedSlashesHandling = "off" - EncodedSlashesOn EncodedSlashesHandling = "on" - EncodedSlashesNoDecode EncodedSlashesHandling = "no_decode" + "github.com/dadrus/heimdall/internal/config" + "github.com/dadrus/heimdall/internal/heimdall" ) type Rule struct { - ID string `json:"id" yaml:"id"` + ID string `json:"id" yaml:"id" validate:"required"` //nolint:lll,tagalign EncodedSlashesHandling EncodedSlashesHandling `json:"allow_encoded_slashes" yaml:"allow_encoded_slashes" validate:"omitempty,oneof=off on no_decode"` //nolint:lll,tagalign - RuleMatcher Matcher `json:"match" yaml:"match"` - Backend *Backend `json:"forward_to" yaml:"forward_to"` - Methods []string `json:"methods" yaml:"methods"` - Execute []config.MechanismConfig `json:"execute" yaml:"execute"` + Matcher Matcher `json:"match" yaml:"match" validate:"required"` //nolint:lll,tagalign + Backend *Backend `json:"forward_to" yaml:"forward_to" validate:"omitnil"` //nolint:lll,tagalign + Execute []config.MechanismConfig `json:"execute" yaml:"execute" validate:"gt=0,dive,required"` //nolint:lll,tagalign ErrorHandler []config.MechanismConfig `json:"on_error" yaml:"on_error"` } -func (in *Rule) DeepCopyInto(out *Rule) { - *out = *in - out.RuleMatcher = in.RuleMatcher +func (r *Rule) Hash() ([]byte, error) { + rawRuleConfig, err := json.Marshal(r) + if err != nil { + return nil, fmt.Errorf("%w: failed to create hash", heimdall.ErrInternal) + } - if in.Backend != nil { - in, out := in.Backend, out.Backend + md := crypto.SHA256.New() + md.Write(rawRuleConfig) - in.DeepCopyInto(out) - } + return md.Sum(nil), nil +} + +func (r *Rule) DeepCopyInto(out *Rule) { + *out = *r - if in.Methods != nil { - in, out := &in.Methods, &out.Methods + 
inm, outm := &r.Matcher, &out.Matcher + inm.DeepCopyInto(outm) - *out = make([]string, len(*in)) - copy(*out, *in) + if r.Backend != nil { + in, out := r.Backend, out.Backend + + in.DeepCopyInto(out) } - if in.Execute != nil { - in, out := &in.Execute, &out.Execute + if r.Execute != nil { + in, out := &r.Execute, &out.Execute *out = make([]config.MechanismConfig, len(*in)) for i := range *in { @@ -64,8 +68,8 @@ func (in *Rule) DeepCopyInto(out *Rule) { } } - if in.ErrorHandler != nil { - in, out := &in.ErrorHandler, &out.ErrorHandler + if r.ErrorHandler != nil { + in, out := &r.ErrorHandler, &out.ErrorHandler *out = make([]config.MechanismConfig, len(*in)) for i := range *in { @@ -74,13 +78,13 @@ func (in *Rule) DeepCopyInto(out *Rule) { } } -func (in *Rule) DeepCopy() *Rule { - if in == nil { +func (r *Rule) DeepCopy() *Rule { + if r == nil { return nil } out := new(Rule) - in.DeepCopyInto(out) + r.DeepCopyInto(out) return out } diff --git a/internal/rules/config/rule_set.go b/internal/rules/config/rule_set.go index 8e1b33524..d52f73d47 100644 --- a/internal/rules/config/rule_set.go +++ b/internal/rules/config/rule_set.go @@ -17,11 +17,7 @@ package config import ( - "strings" "time" - - "github.com/dadrus/heimdall/internal/heimdall" - "github.com/dadrus/heimdall/internal/x/errorchain" ) type MetaData struct { @@ -33,23 +29,7 @@ type MetaData struct { type RuleSet struct { MetaData - Version string `json:"version" yaml:"version"` + Version string `json:"version" yaml:"version" validate:"required"` //nolint:tagalign Name string `json:"name" yaml:"name"` - Rules []Rule `json:"rules" validate:"dive" yaml:"rules"` -} - -func (rs RuleSet) VerifyPathPrefix(prefix string) error { - for _, rule := range rs.Rules { - if strings.HasPrefix(rule.RuleMatcher.URL, "/") && - // only path is specified - !strings.HasPrefix(rule.RuleMatcher.URL, prefix) || - // patterns are specified before the path - // There should be a better way to check it - !strings.Contains(rule.RuleMatcher.URL, prefix) { - return errorchain.NewWithMessage(heimdall.ErrConfiguration, - "path prefix validation failed for rule ID=%s") - } - } - - return nil + Rules []Rule `json:"rules" yaml:"rules" validate:"gt=0,dive,required"` //nolint:tagalign } diff --git a/internal/rules/config/rule_set_test.go b/internal/rules/config/rule_set_test.go deleted file mode 100644 index 03002dfb3..000000000 --- a/internal/rules/config/rule_set_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2023 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestRuleSetConfigurationVerifyPathPrefixPathPrefixVerify(t *testing.T) { - t.Parallel() - - for _, tc := range []struct { - uc string - prefix string - url string - fail bool - }{ - {uc: "path only and without required prefix", prefix: "/foo/bar", url: "/bar/foo/moo", fail: true}, - {uc: "path only with required prefix", prefix: "/foo/bar", url: "/foo/bar/moo", fail: false}, - {uc: "full url and without required prefix", prefix: "/foo/bar", url: "https://<**>/bar/foo/moo", fail: true}, - {uc: "full url with required prefix", prefix: "/foo/bar", url: "https://<**>/foo/bar/moo", fail: false}, - } { - t.Run(tc.uc, func(t *testing.T) { - // GIVEN - rs := RuleSet{ - Rules: []Rule{{RuleMatcher: Matcher{URL: tc.url}}}, - } - - // WHEN - err := rs.VerifyPathPrefix(tc.prefix) - - if tc.fail { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } -} diff --git a/internal/rules/config/rule_test.go b/internal/rules/config/rule_test.go index 42d653976..c51a03ea8 100644 --- a/internal/rules/config/rule_test.go +++ b/internal/rules/config/rule_test.go @@ -28,14 +28,35 @@ import ( func TestRuleConfigDeepCopyInto(t *testing.T) { t.Parallel() + trueValue := true + // GIVEN var out Rule in := Rule{ ID: "foo", - RuleMatcher: Matcher{ - URL: "bar", - Strategy: "glob", + Matcher: Matcher{ + Routes: []Route{ + { + Path: "/:foo/*something", + PathParams: []ParameterMatcher{ + {Name: "foo", Value: "bar", Type: "glob"}, + {Name: "something", Value: ".*\\.css", Type: "regex"}, + }, + }, + { + Path: "/some/static/path", + }, + }, + BacktrackingEnabled: &trueValue, + Scheme: "https", + Hosts: []HostMatcher{ + { + Value: "**.example.com", + Type: "glob", + }, + }, + Methods: []string{"GET", "PATCH"}, }, Backend: &Backend{ Host: "baz", @@ -46,7 +67,6 @@ func TestRuleConfigDeepCopyInto(t *testing.T) { QueryParamsToRemove: []string{"baz"}, }, }, - Methods: []string{"GET", "PATCH"}, Execute: []config.MechanismConfig{{"foo": "bar"}}, ErrorHandler: []config.MechanismConfig{{"bar": "foo"}}, } @@ -55,24 +75,36 @@ func TestRuleConfigDeepCopyInto(t *testing.T) { in.DeepCopyInto(&out) // THEN - assert.Equal(t, in.ID, out.ID) - assert.Equal(t, in.RuleMatcher.URL, out.RuleMatcher.URL) - assert.Equal(t, in.Backend, out.Backend) - assert.Equal(t, in.RuleMatcher.Strategy, out.RuleMatcher.Strategy) - assert.Equal(t, in.Methods, out.Methods) - assert.Equal(t, in.Execute, out.Execute) - assert.Equal(t, in.ErrorHandler, out.ErrorHandler) + assert.Equal(t, in, out) } func TestRuleConfigDeepCopy(t *testing.T) { t.Parallel() // GIVEN - in := Rule{ + in := &Rule{ ID: "foo", - RuleMatcher: Matcher{ - URL: "bar", - Strategy: "glob", + Matcher: Matcher{ + Routes: []Route{ + { + Path: "/:foo/*something", + PathParams: []ParameterMatcher{ + {Name: "foo", Value: "bar", Type: "glob"}, + {Name: "something", Value: ".*\\.css", Type: "regex"}, + }, + }, + { + Path: "/some/static/path", + }, + }, + Scheme: "https", + Hosts: []HostMatcher{ + { + Value: "**.example.com", + Type: "glob", + }, + }, + Methods: []string{"GET", "PATCH"}, }, Backend: &Backend{ Host: "baz", @@ -83,7 +115,6 @@ func TestRuleConfigDeepCopy(t *testing.T) { QueryParamsToRemove: []string{"baz"}, }, }, - Methods: []string{"GET", "PATCH"}, Execute: []config.MechanismConfig{{"foo": "bar"}}, ErrorHandler: []config.MechanismConfig{{"bar": "foo"}}, } @@ -96,11 +127,5 @@ func TestRuleConfigDeepCopy(t *testing.T) { 
require.NotSame(t, &in, out) // but same contents - assert.Equal(t, in.ID, out.ID) - assert.Equal(t, in.RuleMatcher.URL, out.RuleMatcher.URL) - assert.Equal(t, in.Backend, out.Backend) - assert.Equal(t, in.RuleMatcher.Strategy, out.RuleMatcher.Strategy) - assert.Equal(t, in.Methods, out.Methods) - assert.Equal(t, in.Execute, out.Execute) - assert.Equal(t, in.ErrorHandler, out.ErrorHandler) + assert.Equal(t, in, out) } diff --git a/internal/rules/config/version.go b/internal/rules/config/version.go index 96168624e..9cbd87d6b 100644 --- a/internal/rules/config/version.go +++ b/internal/rules/config/version.go @@ -16,4 +16,4 @@ package config -const CurrentRuleSetVersion = "1alpha3" +const CurrentRuleSetVersion = "1alpha4" diff --git a/internal/rules/default_execution_condition.go b/internal/rules/default_execution_condition.go index 3759be5bf..acf1c67cc 100644 --- a/internal/rules/default_execution_condition.go +++ b/internal/rules/default_execution_condition.go @@ -23,6 +23,10 @@ import ( type defaultExecutionCondition struct{} -func (c defaultExecutionCondition) CanExecute(_ heimdall.Context, _ *subject.Subject) (bool, error) { +func (c defaultExecutionCondition) CanExecuteOnSubject(_ heimdall.Context, _ *subject.Subject) (bool, error) { + return true, nil +} + +func (c defaultExecutionCondition) CanExecuteOnError(_ heimdall.Context, _ error) (bool, error) { return true, nil } diff --git a/internal/rules/endpoint/authstrategy/http_message_signatures.go b/internal/rules/endpoint/authstrategy/http_message_signatures.go new file mode 100644 index 000000000..e2d366538 --- /dev/null +++ b/internal/rules/endpoint/authstrategy/http_message_signatures.go @@ -0,0 +1,248 @@ +// Copyright 2024 Dimitrij Drus +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package authstrategy + +import ( + "context" + "crypto/sha256" + "crypto/x509" + "encoding/binary" + "fmt" + "net/http" + "sync" + "time" + + "github.com/dadrus/httpsig" + "github.com/go-jose/go-jose/v4" + "github.com/rs/zerolog" + + "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/keystore" + "github.com/dadrus/heimdall/internal/x" + "github.com/dadrus/heimdall/internal/x/errorchain" + "github.com/dadrus/heimdall/internal/x/pkix" + "github.com/dadrus/heimdall/internal/x/stringx" +) + +type KeyStore struct { + Path string `mapstructure:"path" validate:"required"` + Password string `mapstructure:"password"` +} + +type SignerConfig struct { + Name string `mapstructure:"name"` + KeyStore KeyStore `mapstructure:"key_store" validate:"required"` + KeyID string `mapstructure:"key_id"` +} + +type HTTPMessageSignatures struct { + Signer SignerConfig `mapstructure:"signer" validate:"required"` + Components []string `mapstructure:"components" validate:"gt=0,dive,required"` + TTL *time.Duration `mapstructure:"ttl"` + Label string `mapstructure:"label"` + + mut sync.RWMutex + // used to allow downloading the keys for signature verification purposes + // since the http message signatures rfc does not define a format for key transport + // JWK is used here. + pubKeys []jose.JSONWebKey + // used to monitor the expiration of configured certificates + certChain []*x509.Certificate + signer httpsig.Signer +} + +func (s *HTTPMessageSignatures) OnChanged(logger zerolog.Logger) { + err := s.init() + if err != nil { + logger.Warn().Err(err). + Str("_file", s.Signer.KeyStore.Path). + Msg("Signer key store reload failed") + } else { + logger.Info(). + Str("_file", s.Signer.KeyStore.Path). + Msg("Signer key store reloaded") + } +} + +func (s *HTTPMessageSignatures) init() error { + ks, err := keystore.NewKeyStoreFromPEMFile(s.Signer.KeyStore.Path, s.Signer.KeyStore.Password) + if err != nil { + return errorchain.NewWithMessage(heimdall.ErrConfiguration, + "failed loading keystore for http_message_signatures strategy").CausedBy(err) + } + + var kse *keystore.Entry + + if len(s.Signer.KeyID) == 0 { + kse, err = ks.Entries()[0], nil + } else { + kse, err = ks.GetKey(s.Signer.KeyID) + } + + if err != nil { + return errorchain.NewWithMessage(heimdall.ErrConfiguration, + "failed retrieving key from key store for http_message_signatures strategy").CausedBy(err) + } + + if len(kse.CertChain) != 0 { + opts := []pkix.ValidationOption{ + pkix.WithKeyUsage(x509.KeyUsageDigitalSignature), + pkix.WithRootCACertificates([]*x509.Certificate{kse.CertChain[len(kse.CertChain)-1]}), + pkix.WithCurrentTime(time.Now()), + } + + if len(kse.CertChain) > 2 { //nolint: mnd + opts = append(opts, pkix.WithIntermediateCACertificates(kse.CertChain[1:len(kse.CertChain)-1])) + } + + if err = pkix.ValidateCertificate(kse.CertChain[0], opts...); err != nil { + return errorchain.NewWithMessage(heimdall.ErrConfiguration, + "certificate for http_message_signatures strategy cannot be used for signing purposes"). 
+ CausedBy(err) + } + } + + keys := make([]jose.JSONWebKey, len(ks.Entries())) + for idx, entry := range ks.Entries() { + keys[idx] = entry.JWK() + } + + signer, err := httpsig.NewSigner( + toHTTPSigKey(kse), + httpsig.WithComponents(s.Components...), + httpsig.WithTag(x.IfThenElse(len(s.Signer.Name) != 0, s.Signer.Name, "heimdall")), + httpsig.WithLabel(s.Label), + httpsig.WithTTL(x.IfThenElseExec(s.TTL != nil, + func() time.Duration { return *s.TTL }, + func() time.Duration { return 1 * time.Minute }, + )), + ) + if err != nil { + return errorchain.NewWithMessage(heimdall.ErrConfiguration, + "failed to configure http_message_signatures strategy").CausedBy(err) + } + + s.mut.Lock() + defer s.mut.Unlock() + + s.signer = signer + s.pubKeys = keys + s.certChain = kse.CertChain + + return nil +} + +func (s *HTTPMessageSignatures) Apply(ctx context.Context, req *http.Request) error { + logger := zerolog.Ctx(ctx) + logger.Debug().Msg("Applying http_message_signatures strategy to authenticate request") + + s.mut.RLock() + defer s.mut.RUnlock() + + header, err := s.signer.Sign(httpsig.MessageFromRequest(req)) + if err != nil { + return err + } + + // set the updated headers + req.Header = header + + return nil +} + +func (s *HTTPMessageSignatures) Keys() []jose.JSONWebKey { + s.mut.RLock() + defer s.mut.RUnlock() + + return s.pubKeys +} + +func (s *HTTPMessageSignatures) Hash() []byte { + const int64BytesCount = 8 + + hash := sha256.New() + hash.Write(stringx.ToBytes(s.Label)) + + for _, component := range s.Components { + hash.Write(stringx.ToBytes(component)) + } + + if s.TTL != nil { + ttlBytes := make([]byte, int64BytesCount) + binary.LittleEndian.PutUint64(ttlBytes, uint64(*s.TTL)) + + hash.Write(ttlBytes) + } + + hash.Write(stringx.ToBytes(s.Signer.Name)) + hash.Write(stringx.ToBytes(s.Signer.KeyID)) + + return hash.Sum(nil) +} + +func (s *HTTPMessageSignatures) Name() string { return "http message signer" } +func (s *HTTPMessageSignatures) Certificates() []*x509.Certificate { + s.mut.RLock() + defer s.mut.RUnlock() + + return s.certChain +} + +func toHTTPSigKey(entry *keystore.Entry) httpsig.Key { + var httpSigAlg httpsig.SignatureAlgorithm + + switch entry.Alg { + case keystore.AlgRSA: + httpSigAlg = getRSAAlgorithm(entry.KeySize) + case keystore.AlgECDSA: + httpSigAlg = getECDSAAlgorithm(entry.KeySize) + default: + panic("unsupported key algorithm: " + entry.Alg) + } + + return httpsig.Key{ + Algorithm: httpSigAlg, + KeyID: entry.KeyID, + Key: entry.PrivateKey, + } +} + +func getECDSAAlgorithm(keySize int) httpsig.SignatureAlgorithm { + switch keySize { + case 256: //nolint: mnd + return httpsig.EcdsaP256Sha256 + case 384: //nolint: mnd + return httpsig.EcdsaP384Sha384 + case 512: //nolint: mnd + return httpsig.EcdsaP521Sha512 + default: + panic(fmt.Sprintf("unsupported ECDSA key size: %d", keySize)) + } +} + +func getRSAAlgorithm(keySize int) httpsig.SignatureAlgorithm { + switch keySize { + case 2048: //nolint: mnd + return httpsig.RsaPssSha256 + case 3072: //nolint: mnd + return httpsig.RsaPssSha384 + case 4096: //nolint: mnd + return httpsig.RsaPssSha512 + default: + panic(fmt.Sprintf("unsupported RSA key size: %d", keySize)) + } +} diff --git a/internal/rules/endpoint/authstrategy/http_message_signatures_test.go b/internal/rules/endpoint/authstrategy/http_message_signatures_test.go new file mode 100644 index 000000000..d63135edf --- /dev/null +++ b/internal/rules/endpoint/authstrategy/http_message_signatures_test.go @@ -0,0 +1,472 @@ +// Copyright 2024 Dimitrij Drus +// +// Licensed 
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +package authstrategy + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/dadrus/httpsig" + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/keystore" + "github.com/dadrus/heimdall/internal/x/pkix/pemx" + "github.com/dadrus/heimdall/internal/x/testsupport" +) + +func TestToHTTPSigKey(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + kse *keystore.Entry + expAlg httpsig.SignatureAlgorithm + }{ + { + expAlg: httpsig.RsaPssSha256, + kse: &keystore.Entry{KeyID: "foo", Alg: keystore.AlgRSA, KeySize: 2048, PrivateKey: &rsa.PrivateKey{}}, + }, + { + expAlg: httpsig.RsaPssSha384, + kse: &keystore.Entry{KeyID: "foo", Alg: keystore.AlgRSA, KeySize: 3072, PrivateKey: &rsa.PrivateKey{}}, + }, + { + expAlg: httpsig.RsaPssSha512, + kse: &keystore.Entry{KeyID: "foo", Alg: keystore.AlgRSA, KeySize: 4096, PrivateKey: &rsa.PrivateKey{}}, + }, + { + expAlg: httpsig.EcdsaP256Sha256, + kse: &keystore.Entry{KeyID: "foo", Alg: keystore.AlgECDSA, KeySize: 256, PrivateKey: &ecdsa.PrivateKey{}}, + }, + { + expAlg: httpsig.EcdsaP384Sha384, + kse: &keystore.Entry{KeyID: "foo", Alg: keystore.AlgECDSA, KeySize: 384, PrivateKey: &ecdsa.PrivateKey{}}, + }, + { + expAlg: httpsig.EcdsaP521Sha512, + kse: &keystore.Entry{KeyID: "foo", Alg: keystore.AlgECDSA, KeySize: 512, PrivateKey: &ecdsa.PrivateKey{}}, + }, + } { + t.Run(string(tc.expAlg), func(t *testing.T) { + key := toHTTPSigKey(tc.kse) + + assert.Equal(t, tc.expAlg, key.Algorithm) + assert.Equal(t, tc.kse.KeyID, key.KeyID) + assert.Equal(t, tc.kse.PrivateKey, key.Key) + }) + } +} + +func TestHTTPMessageSignaturesInit(t *testing.T) { + t.Parallel() + + rootCA, err := testsupport.NewRootCA("Test Root CA 1", time.Hour*24) + require.NoError(t, err) + + // INT CA + intCAPrivKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + intCACert, err := rootCA.IssueCertificate( + testsupport.WithSubject(pkix.Name{ + CommonName: "Test Int CA 1", + Organization: []string{"Test"}, + Country: []string{"EU"}, + }), + testsupport.WithIsCA(), + testsupport.WithValidity(time.Now(), time.Hour*24), + testsupport.WithSubjectPubKey(&intCAPrivKey.PublicKey, x509.ECDSAWithSHA384)) + require.NoError(t, err) + + intCA := testsupport.NewCA(intCAPrivKey, intCACert) + + // EE CERTS + ee1PrivKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + ee1cert, err := intCA.IssueCertificate( + testsupport.WithSubject(pkix.Name{ + CommonName: "Test EE 1", + Organization: []string{"Test"}, + Country: []string{"EU"}, + }), + testsupport.WithValidity(time.Now(), 
time.Hour*24), + testsupport.WithSubjectPubKey(&ee1PrivKey.PublicKey, x509.ECDSAWithSHA384), + testsupport.WithKeyUsage(x509.KeyUsageDigitalSignature)) + require.NoError(t, err) + + ee2PrivKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + ee2cert, err := intCA.IssueCertificate( + testsupport.WithSubject(pkix.Name{ + CommonName: "Test EE 2", + Organization: []string{"Test"}, + Country: []string{"EU"}, + }), + testsupport.WithValidity(time.Now(), time.Hour*24), + testsupport.WithSubjectPubKey(&ee2PrivKey.PublicKey, x509.ECDSAWithSHA384)) + require.NoError(t, err) + + pemBytes, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(ee1PrivKey, pemx.WithHeader("X-Key-ID", "key1")), + pemx.WithX509Certificate(ee1cert), + pemx.WithECDSAPrivateKey(ee2PrivKey, pemx.WithHeader("X-Key-ID", "key2")), + pemx.WithX509Certificate(ee2cert), + pemx.WithX509Certificate(intCACert), + pemx.WithX509Certificate(rootCA.Certificate), + ) + require.NoError(t, err) + + testDir := t.TempDir() + trustStorePath := filepath.Join(testDir, "keystore.pem") + + err = os.WriteFile(trustStorePath, pemBytes, 0o600) + require.NoError(t, err) + + for _, tc := range []struct { + uc string + conf *HTTPMessageSignatures + assert func(t *testing.T, err error, conf *HTTPMessageSignatures) + }{ + { + uc: "failed loading keystore", + conf: &HTTPMessageSignatures{}, + assert: func(t *testing.T, err error, _ *HTTPMessageSignatures) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "failed loading keystore") + }, + }, + { + uc: "no key for given key id", + conf: &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: trustStorePath}, KeyID: "foo"}, + }, + assert: func(t *testing.T, err error, _ *HTTPMessageSignatures) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "failed retrieving key from key store") + }, + }, + { + uc: "certificate cannot be used for signing", + conf: &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: trustStorePath}, KeyID: "key2"}, + }, + assert: func(t *testing.T, err error, _ *HTTPMessageSignatures) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "cannot be used for signing purposes") + }, + }, + { + uc: "bad signer configuration", + conf: &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: trustStorePath}}, + Components: []string{"@foo"}, + }, + assert: func(t *testing.T, err error, _ *HTTPMessageSignatures) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "failed to configure") + }, + }, + { + uc: "successful configuration with default ttl", + conf: &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: trustStorePath}, KeyID: "key1"}, + Components: []string{"@method"}, + }, + assert: func(t *testing.T, err error, conf *HTTPMessageSignatures) { + t.Helper() + + require.NoError(t, err) + + assert.NotNil(t, conf.signer) + assert.NotEmpty(t, conf.Certificates()) + assert.NotEmpty(t, conf.Keys()) + assert.Equal(t, "http message signer", conf.Name()) + }, + }, + { + uc: "successful configuration with custom ttl", + conf: &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: trustStorePath}, KeyID: "key1"}, + Components: []string{"@method"}, + TTL: func() *time.Duration { + ttl := 1 * 
time.Hour + + return &ttl + }(), + }, + assert: func(t *testing.T, err error, conf *HTTPMessageSignatures) { + t.Helper() + + require.NoError(t, err) + + assert.NotNil(t, conf.signer) + assert.NotEmpty(t, conf.Certificates()) + assert.NotEmpty(t, conf.Keys()) + assert.Equal(t, "http message signer", conf.Name()) + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + err := tc.conf.init() + + tc.assert(t, err, tc.conf) + }) + } +} + +func TestHTTPMessageSignaturesHash(t *testing.T) { + t.Parallel() + + ttl := 1 * time.Hour + conf1 := &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: "/path/to/keystore.pem"}, KeyID: "key1"}, + Components: []string{"@method"}, + TTL: &ttl, + } + conf2 := &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: "/path/to/keystore.pem"}, KeyID: "key1", Name: "foo"}, + Components: []string{"@status"}, + TTL: &ttl, + Label: "test", + } + + hash1 := conf1.Hash() + hash2 := conf2.Hash() + + assert.NotEmpty(t, hash1) + assert.NotEmpty(t, hash2) + assert.NotEqual(t, hash1, hash2) + assert.Equal(t, hash1, conf1.Hash()) + assert.Equal(t, hash2, conf2.Hash()) +} + +func TestHTTPMessageSignaturesApply(t *testing.T) { + t.Parallel() + + privKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + cb := testsupport.NewCertificateBuilder( + testsupport.WithValidity(time.Now(), 15*time.Second), + testsupport.WithSerialNumber(big.NewInt(1)), + testsupport.WithSubject(pkix.Name{ + CommonName: "Test", + Organization: []string{"Test"}, + Country: []string{"EU"}, + }), + testsupport.WithSubjectPubKey(&privKey.PublicKey, x509.ECDSAWithSHA384), + testsupport.WithSelfSigned(), + testsupport.WithSignaturePrivKey(privKey), + testsupport.WithKeyUsage(x509.KeyUsageDigitalSignature), + ) + + cert, err := cb.Build() + require.NoError(t, err) + + pemBytes, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(privKey, pemx.WithHeader("X-Key-ID", "test")), + pemx.WithX509Certificate(cert), + ) + require.NoError(t, err) + + testDir := t.TempDir() + trustStorePath := filepath.Join(testDir, "keystore.pem") + + err = os.WriteFile(trustStorePath, pemBytes, 0o600) + require.NoError(t, err) + + for _, tc := range []struct { + uc string + conf *HTTPMessageSignatures + assert func(t *testing.T, err error, req *http.Request) + }{ + { + uc: "fails", + conf: &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: trustStorePath}}, + Components: []string{"x-some-header"}, + }, + assert: func(t *testing.T, err error, req *http.Request) { + t.Helper() + + require.Error(t, err) + require.ErrorContains(t, err, "x-some-header") + assert.Empty(t, req.Header.Get("Signature")) + assert.Empty(t, req.Header.Get("Signature-Input")) + }, + }, + { + uc: "successful", + conf: &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: trustStorePath}}, + Components: []string{"@method", "content-digest"}, + }, + assert: func(t *testing.T, err error, req *http.Request) { + t.Helper() + + require.NoError(t, err) + assert.NotEmpty(t, req.Header.Get("Signature")) + sigInput := req.Header.Get("Signature-Input") + assert.Contains(t, sigInput, `("@method" "content-digest")`) + assert.Contains(t, sigInput, `created=`) + assert.Contains(t, sigInput, `expires=`) + assert.Contains(t, sigInput, `keyid="test"`) + assert.Contains(t, sigInput, `alg="ecdsa-p384-sha384"`) + assert.Contains(t, sigInput, `nonce=`) + assert.Contains(t, sigInput, `tag="heimdall"`) + contentDigest := req.Header.Get("Content-Digest") + assert.Contains(t, 
contentDigest, "sha-256=:X48E9qOokqqrvdts8nOJRJN3OWDUoyWxBf7kbu9DBPE=:") + assert.Contains(t, contentDigest, "sha-512=:WZDPaVn/7XgHaAy8pmojAkGWoRx2UFChF41A2svX+TaPm+AbwAgBWnrIiYllu7BNNyealdVLvRwEmTHWXvJwew==:") + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + err := tc.conf.init() + require.NoError(t, err) + + req, err := http.NewRequestWithContext( + context.Background(), + http.MethodGet, + "http//example.com/test", + strings.NewReader(`{"hello": "world"}`), + ) + require.NoError(t, err) + + err = tc.conf.Apply(context.Background(), req) + + tc.assert(t, err, req) + }) + } +} + +func TestHTTPMessageSignaturesOnChanged(t *testing.T) { + t.Parallel() + + // GIVEN + testDir := t.TempDir() + + privKey1, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + privKey2, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + cert1, err := testsupport.NewCertificateBuilder(testsupport.WithValidity(time.Now(), 10*time.Hour), + testsupport.WithSerialNumber(big.NewInt(1)), + testsupport.WithSubject(pkix.Name{ + CommonName: "test cert 1", + Organization: []string{"Test"}, + Country: []string{"EU"}, + }), + testsupport.WithSubjectPubKey(&privKey1.PublicKey, x509.ECDSAWithSHA384), + testsupport.WithSelfSigned(), + testsupport.WithKeyUsage(x509.KeyUsageDigitalSignature), + testsupport.WithSignaturePrivKey(privKey1)). + Build() + require.NoError(t, err) + + cert2, err := testsupport.NewCertificateBuilder(testsupport.WithValidity(time.Now(), 10*time.Hour), + testsupport.WithSerialNumber(big.NewInt(1)), + testsupport.WithSubject(pkix.Name{ + CommonName: "test cert 1", + Organization: []string{"Test"}, + Country: []string{"EU"}, + }), + testsupport.WithSubjectPubKey(&privKey2.PublicKey, x509.ECDSAWithSHA384), + testsupport.WithSelfSigned(), + testsupport.WithKeyUsage(x509.KeyUsageDigitalSignature), + testsupport.WithSignaturePrivKey(privKey2)). 
+ Build() + require.NoError(t, err) + + pemBytes1, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(privKey1, pemx.WithHeader("X-Key-ID", "key1")), + pemx.WithX509Certificate(cert1), + ) + require.NoError(t, err) + + pemBytes2, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(privKey2, pemx.WithHeader("X-Key-ID", "key1")), + pemx.WithX509Certificate(cert2), + ) + require.NoError(t, err) + + pemFile, err := os.Create(filepath.Join(testDir, "keystore.pem")) + require.NoError(t, err) + + _, err = pemFile.Write(pemBytes1) + require.NoError(t, err) + + conf := &HTTPMessageSignatures{ + Signer: SignerConfig{KeyStore: KeyStore{Path: pemFile.Name()}, KeyID: "key1"}, + Components: []string{"@method"}, + } + err = conf.init() + require.NoError(t, err) + + require.Equal(t, cert1, conf.certChain[0]) + require.Equal(t, &privKey1.PublicKey, conf.pubKeys[0].Key) + + // WHEN + _, err = pemFile.Seek(0, 0) + require.NoError(t, err) + + _, err = pemFile.Write(pemBytes2) + require.NoError(t, err) + + conf.OnChanged(log.Logger) + + // THEN + require.Equal(t, cert2, conf.certChain[0]) + require.Equal(t, &privKey2.PublicKey, conf.pubKeys[0].Key) + + // WHEN + err = os.Truncate(pemFile.Name(), 0) + require.NoError(t, err) + + conf.OnChanged(log.Logger) + + // THEN + require.Equal(t, cert2, conf.certChain[0]) + require.Equal(t, &privKey2.PublicKey, conf.pubKeys[0].Key) +} diff --git a/internal/rules/endpoint/authstrategy/mapstructure_decoder.go b/internal/rules/endpoint/authstrategy/mapstructure_decoder.go index bed8d21b3..6766f290c 100644 --- a/internal/rules/endpoint/authstrategy/mapstructure_decoder.go +++ b/internal/rules/endpoint/authstrategy/mapstructure_decoder.go @@ -22,12 +22,23 @@ import ( "github.com/go-viper/mapstructure/v2" "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/keyholder" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/rules/endpoint" "github.com/dadrus/heimdall/internal/validation" + "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" ) -func DecodeAuthenticationStrategyHookFunc() mapstructure.DecodeHookFunc { +//go:generate mockery --name CreationContext --structname CreationContextMock --inpackage --testonly + +type CreationContext interface { + Watcher() watcher.Watcher + KeyHolderRegistry() keyholder.Registry + CertificateObserver() certificate.Observer +} + +func DecodeAuthenticationStrategyHookFunc(ctx CreationContext) mapstructure.DecodeHookFunc { return func(from reflect.Type, to reflect.Type, data any) (any, error) { var as endpoint.AuthenticationStrategy @@ -61,6 +72,8 @@ func DecodeAuthenticationStrategyHookFunc() mapstructure.DecodeHookFunc { return decodeStrategy("api_key", &APIKey{}, typed["config"]) case "oauth2_client_credentials": return decodeStrategy("oauth2_client_credentials", &OAuth2ClientCredentials{}, typed["config"]) + case "http_message_signatures": + return decodeHTTPMessageSignaturesStrategy(ctx, typed["config"]) default: return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, "unsupported authentication type: '%s'", typed["type"]) @@ -68,6 +81,27 @@ func DecodeAuthenticationStrategyHookFunc() mapstructure.DecodeHookFunc { } } +func decodeHTTPMessageSignaturesStrategy(ctx CreationContext, config any) (any, error) { + httpSig := &HTTPMessageSignatures{} + + if _, err := decodeStrategy("http_message_signatures", httpSig, config); err != nil { + return nil, 
err + } + + if err := httpSig.init(); err != nil { + return nil, err + } + + if err := ctx.Watcher().Add(httpSig.Signer.KeyStore.Path, httpSig); err != nil { + return nil, errorchain.NewWithMessage(heimdall.ErrInternal, + "failed registering http_message_signatures for updates").CausedBy(err) + } + + ctx.CertificateObserver().Add(httpSig) + + return httpSig, nil +} + func decodeStrategy[S endpoint.AuthenticationStrategy]( name string, strategy S, @@ -78,7 +112,19 @@ func decodeStrategy[S endpoint.AuthenticationStrategy]( "'%s' strategy requires 'config' property to be set", name) } - if err := mapstructure.Decode(config, strategy); err != nil { + dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + ), + Result: strategy, + ErrorUnused: true, + }) + if err != nil { + return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, + "failed to unmarshal '%s' strategy config", name).CausedBy(err) + } + + if err := dec.Decode(config); err != nil { return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, "failed to unmarshal '%s' strategy config", name).CausedBy(err) } diff --git a/internal/rules/endpoint/authstrategy/mapstructure_decoder_test.go b/internal/rules/endpoint/authstrategy/mapstructure_decoder_test.go index 7a6d7bc6e..fcfebf92d 100644 --- a/internal/rules/endpoint/authstrategy/mapstructure_decoder_test.go +++ b/internal/rules/endpoint/authstrategy/mapstructure_decoder_test.go @@ -17,13 +17,29 @@ package authstrategy import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "errors" + "math/big" + "os" + "path/filepath" "testing" + "time" "github.com/go-viper/mapstructure/v2" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/dadrus/heimdall/internal/heimdall" + mocks3 "github.com/dadrus/heimdall/internal/otel/metrics/certificate/mocks" "github.com/dadrus/heimdall/internal/rules/endpoint" + "github.com/dadrus/heimdall/internal/watcher/mocks" + "github.com/dadrus/heimdall/internal/x" + "github.com/dadrus/heimdall/internal/x/pkix/pemx" "github.com/dadrus/heimdall/internal/x/testsupport" ) @@ -41,7 +57,7 @@ func TestDecodeAuthenticationStrategyHookFuncForBasicAuthStrategy(t *testing.T) assert func(t *testing.T, err error, as endpoint.AuthenticationStrategy) }{ { - uc: "basic auth with all required properties", + uc: "all required properties configured", config: []byte(` auth: type: basic_auth @@ -59,7 +75,25 @@ auth: }, }, { - uc: "basic auth without user property", + uc: "with unsupported properties", + config: []byte(` +auth: + type: basic_auth + config: + user: foo + password: bar + foo: bar +`), + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "invalid keys: foo") + }, + }, + { + uc: "without user property", config: []byte(` auth: type: basic_auth @@ -74,7 +108,7 @@ auth: }, }, { - uc: "basic auth without password property", + uc: "without password property", config: []byte(` auth: type: basic_auth @@ -89,7 +123,7 @@ auth: }, }, { - uc: "basic auth without config property", + uc: "without config property", config: []byte(` auth: type: basic_auth @@ -102,13 +136,13 @@ auth: }, }, } { - t.Run("case="+tc.uc, func(t 
*testing.T) { + t.Run(tc.uc, func(t *testing.T) { // GIVEN var typ Type dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( - DecodeAuthenticationStrategyHookFunc(), + DecodeAuthenticationStrategyHookFunc(nil), ), Result: &typ, }) @@ -140,7 +174,7 @@ func TestDecodeAuthenticationStrategyHookFuncForAPIKeyStrategy(t *testing.T) { assert func(t *testing.T, err error, as endpoint.AuthenticationStrategy) }{ { - uc: "api key with all required properties, with in=header", + uc: "all required properties, with in=header", config: []byte(` auth: type: api_key @@ -161,7 +195,26 @@ auth: }, }, { - uc: "api key with all required properties, with in=cookie", + uc: "with unsupported properties", + config: []byte(` +auth: + type: api_key + config: + name: foo + value: bar + in: header + foo: bar +`), + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "invalid keys: foo") + }, + }, + { + uc: "all required properties, with in=cookie", config: []byte(` auth: type: api_key @@ -182,7 +235,7 @@ auth: }, }, { - uc: "api key with all required properties, with in=query", + uc: "all required properties, with in=query", config: []byte(` auth: type: api_key @@ -203,7 +256,7 @@ auth: }, }, { - uc: "api key with all required properties, with in=foobar", + uc: "all required properties, with in=foobar", config: []byte(` auth: type: api_key @@ -220,7 +273,7 @@ auth: }, }, { - uc: "api key without in property", + uc: "without in property", config: []byte(` auth: type: api_key @@ -236,7 +289,7 @@ auth: }, }, { - uc: "api key without name property", + uc: "without name property", config: []byte(` auth: type: api_key @@ -252,7 +305,7 @@ auth: }, }, { - uc: "api key without value property", + uc: "without value property", config: []byte(` auth: type: api_key @@ -268,7 +321,7 @@ auth: }, }, { - uc: "api key without config property", + uc: "without config property", config: []byte(` auth: type: api_key @@ -281,13 +334,13 @@ auth: }, }, } { - t.Run("case="+tc.uc, func(t *testing.T) { + t.Run(tc.uc, func(t *testing.T) { // GIVEN var typ Type dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( - DecodeAuthenticationStrategyHookFunc(), + DecodeAuthenticationStrategyHookFunc(nil), ), Result: &typ, }) @@ -312,14 +365,13 @@ func TestDecodeAuthenticationStrategyHookFuncForClientCredentialsStrategy(t *tes AuthStrategy endpoint.AuthenticationStrategy `mapstructure:"auth"` } - // du to a bug in the linter for _, tc := range []struct { uc string config []byte assert func(t *testing.T, err error, as endpoint.AuthenticationStrategy) }{ { - uc: "client credentials with all required properties", + uc: "all required properties", config: []byte(` auth: type: oauth2_client_credentials @@ -340,7 +392,26 @@ auth: }, }, { - uc: "client credentials with all possible properties", + uc: "with unsupported properties", + config: []byte(` +auth: + type: oauth2_client_credentials + config: + client_id: foo + client_secret: bar + token_url: http://foobar.foo + foo: bar +`), + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "invalid keys: foo") + }, + }, + { + uc: "all possible properties", config: []byte(` auth: type: oauth2_client_credentials 
@@ -365,7 +436,7 @@ auth: }, }, { - uc: "client credentials without client_id property", + uc: "without client_id property", config: []byte(` auth: type: oauth2_client_credentials @@ -381,7 +452,7 @@ auth: }, }, { - uc: "client credentials without client_secret property", + uc: "without client_secret property", config: []byte(` auth: type: oauth2_client_credentials @@ -397,7 +468,7 @@ auth: }, }, { - uc: "client credentials without token_url property", + uc: "without token_url property", config: []byte(` auth: type: oauth2_client_credentials @@ -413,7 +484,7 @@ auth: }, }, { - uc: "client credentials without config property", + uc: "without config property", config: []byte(` auth: type: oauth2_client_credentials @@ -426,13 +497,301 @@ auth: }, }, } { - t.Run("case="+tc.uc, func(t *testing.T) { + t.Run(tc.uc, func(t *testing.T) { // GIVEN var typ Type dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( - DecodeAuthenticationStrategyHookFunc(), + DecodeAuthenticationStrategyHookFunc(nil), + ), + Result: &typ, + }) + require.NoError(t, err) + + conf, err := testsupport.DecodeTestConfig(tc.config) + require.NoError(t, err) + + // WHEN + err = dec.Decode(conf) + + // THEN + tc.assert(t, err, typ.AuthStrategy) + }) + } +} + +func TestDecodeAuthenticationStrategyHookFuncForHTTPMessageSignatures(t *testing.T) { + t.Parallel() + + testDir := t.TempDir() + + privKey1, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + cert1, err := testsupport.NewCertificateBuilder(testsupport.WithValidity(time.Now(), 10*time.Hour), + testsupport.WithSerialNumber(big.NewInt(1)), + testsupport.WithSubject(pkix.Name{ + CommonName: "test cert 1", + Organization: []string{"Test"}, + Country: []string{"EU"}, + }), + testsupport.WithSubjectPubKey(&privKey1.PublicKey, x509.ECDSAWithSHA384), + testsupport.WithSelfSigned(), + testsupport.WithKeyUsage(x509.KeyUsageDigitalSignature), + testsupport.WithSignaturePrivKey(privKey1)). 
+ Build() + require.NoError(t, err) + + pemBytes1, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(privKey1, pemx.WithHeader("X-Key-ID", "key1")), + pemx.WithX509Certificate(cert1), + ) + require.NoError(t, err) + + pemFile, err := os.Create(filepath.Join(testDir, "keystore.pem")) + require.NoError(t, err) + + _, err = pemFile.Write(pemBytes1) + require.NoError(t, err) + + type Type struct { + AuthStrategy endpoint.AuthenticationStrategy `mapstructure:"auth"` + } + + for _, tc := range []struct { + uc string + config []byte + configureContext func(t *testing.T, ccm *CreationContextMock) + assert func(t *testing.T, err error, as endpoint.AuthenticationStrategy) + }{ + { + uc: "without signer", + config: []byte(` +auth: + type: http_message_signatures + config: + components: ["@method"] +`), + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorContains(t, err, "'signer' is a required field") + }, + }, + { + uc: "without key store", + config: []byte(` +auth: + type: http_message_signatures + config: + signer: + name: foo + components: ["@method"] +`), + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorContains(t, err, "'signer'.'key_store' is a required field") + }, + }, + { + uc: "without key store path", + config: []byte(` +auth: + type: http_message_signatures + config: + signer: + key_store: + password: foo + components: ["@method"] +`), + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorContains(t, err, "'signer'.'key_store'.'path' is a required field") + }, + }, + { + uc: "without component identifiers", + config: []byte(` +auth: + type: http_message_signatures + config: + signer: + key_store: + path: /some/file.pem +`), + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorContains(t, err, "'components' must contain more than 0 items") + }, + }, + { + uc: "error while initializing strategy", + config: []byte(` +auth: + type: http_message_signatures + config: + components: ["@method"] + signer: + key_store: + path: /some/path.pem +`), + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "/some/path.pem") + }, + }, + { + uc: "with unsupported properties", + config: []byte(` +auth: + type: http_message_signatures + config: + components: ["@method"] + foo: bar + signer: + key_store: + path: /some/path.pem +`), + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "invalid keys: foo") + }, + }, + { + uc: "error while registering signer for updates watching", + config: []byte(` +auth: + type: http_message_signatures + config: + components: ["@method"] + signer: + key_store: + path: ` + pemFile.Name() + ` +`), + configureContext: func(t *testing.T, ccm *CreationContextMock) { + t.Helper() + + watcher := mocks.NewWatcherMock(t) + watcher.EXPECT().Add(pemFile.Name(), mock.Anything).Return(errors.New("test error")) + + ccm.EXPECT().Watcher().Return(watcher) + }, + assert: func(t *testing.T, err error, _ endpoint.AuthenticationStrategy) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, 
err, heimdall.ErrInternal) + require.ErrorContains(t, err, "failed registering") + }, + }, + { + uc: "minimal possible configuration", + config: []byte(` +auth: + type: http_message_signatures + config: + components: ["@method"] + signer: + key_store: + path: ` + pemFile.Name() + ` +`), + configureContext: func(t *testing.T, ccm *CreationContextMock) { + t.Helper() + + watcher := mocks.NewWatcherMock(t) + watcher.EXPECT().Add(pemFile.Name(), mock.Anything).Return(nil) + + observer := mocks3.NewObserverMock(t) + observer.EXPECT().Add(mock.Anything) + + ccm.EXPECT().Watcher().Return(watcher) + ccm.EXPECT().CertificateObserver().Return(observer) + }, + assert: func(t *testing.T, err error, as endpoint.AuthenticationStrategy) { + t.Helper() + + require.NoError(t, err) + + httpSig, ok := as.(*HTTPMessageSignatures) + require.True(t, ok) + + assert.NotNil(t, httpSig.signer) + assert.NotEmpty(t, httpSig.Certificates()) + assert.NotEmpty(t, httpSig.Keys()) + assert.Equal(t, "http message signer", httpSig.Name()) + }, + }, + { + uc: "full possible configuration", + config: []byte(` +auth: + type: http_message_signatures + config: + ttl: 1m + label: bar + components: ["@method"] + signer: + name: foobar + key_id: key1 + key_store: + password: secret + path: ` + pemFile.Name() + ` +`), + configureContext: func(t *testing.T, ccm *CreationContextMock) { + t.Helper() + + watcher := mocks.NewWatcherMock(t) + watcher.EXPECT().Add(pemFile.Name(), mock.Anything).Return(nil) + + observer := mocks3.NewObserverMock(t) + observer.EXPECT().Add(mock.Anything) + + ccm.EXPECT().Watcher().Return(watcher) + ccm.EXPECT().CertificateObserver().Return(observer) + }, + assert: func(t *testing.T, err error, as endpoint.AuthenticationStrategy) { + t.Helper() + + require.NoError(t, err) + + httpSig, ok := as.(*HTTPMessageSignatures) + require.True(t, ok) + + assert.NotNil(t, httpSig.signer) + assert.NotEmpty(t, httpSig.Certificates()) + assert.NotEmpty(t, httpSig.Keys()) + assert.Equal(t, "http message signer", httpSig.Name()) + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + // GIVEN + ccm := NewCreationContextMock(t) + configureContext := x.IfThenElse(tc.configureContext != nil, + tc.configureContext, + func(t *testing.T, _ *CreationContextMock) { t.Helper() }, + ) + configureContext(t, ccm) + + var typ Type + + dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + DecodeAuthenticationStrategyHookFunc(ccm), ), Result: &typ, }) @@ -462,7 +821,7 @@ func TestDecodeAuthenticationStrategyHookFuncForUnknownStrategy(t *testing.T) { dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( - DecodeAuthenticationStrategyHookFunc(), + DecodeAuthenticationStrategyHookFunc(nil), ), Result: &typ, }) diff --git a/internal/rules/endpoint/authstrategy/mock_creation_context_test.go b/internal/rules/endpoint/authstrategy/mock_creation_context_test.go new file mode 100644 index 000000000..c31b2fd8b --- /dev/null +++ b/internal/rules/endpoint/authstrategy/mock_creation_context_test.go @@ -0,0 +1,180 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. 
+ +package authstrategy + +import ( + keyholder "github.com/dadrus/heimdall/internal/keyholder" + certificate "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + + mock "github.com/stretchr/testify/mock" + + watcher "github.com/dadrus/heimdall/internal/watcher" +) + +// CreationContextMock is an autogenerated mock type for the CreationContext type +type CreationContextMock struct { + mock.Mock +} + +type CreationContextMock_Expecter struct { + mock *mock.Mock +} + +func (_m *CreationContextMock) EXPECT() *CreationContextMock_Expecter { + return &CreationContextMock_Expecter{mock: &_m.Mock} +} + +// CertificateObserver provides a mock function with given fields: +func (_m *CreationContextMock) CertificateObserver() certificate.Observer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CertificateObserver") + } + + var r0 certificate.Observer + if rf, ok := ret.Get(0).(func() certificate.Observer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(certificate.Observer) + } + } + + return r0 +} + +// CreationContextMock_CertificateObserver_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CertificateObserver' +type CreationContextMock_CertificateObserver_Call struct { + *mock.Call +} + +// CertificateObserver is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) CertificateObserver() *CreationContextMock_CertificateObserver_Call { + return &CreationContextMock_CertificateObserver_Call{Call: _e.mock.On("CertificateObserver")} +} + +func (_c *CreationContextMock_CertificateObserver_Call) Run(run func()) *CreationContextMock_CertificateObserver_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) Return(_a0 certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) RunAndReturn(run func() certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(run) + return _c +} + +// KeyHolderRegistry provides a mock function with given fields: +func (_m *CreationContextMock) KeyHolderRegistry() keyholder.Registry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for KeyHolderRegistry") + } + + var r0 keyholder.Registry + if rf, ok := ret.Get(0).(func() keyholder.Registry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keyholder.Registry) + } + } + + return r0 +} + +// CreationContextMock_KeyHolderRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'KeyHolderRegistry' +type CreationContextMock_KeyHolderRegistry_Call struct { + *mock.Call +} + +// KeyHolderRegistry is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) KeyHolderRegistry() *CreationContextMock_KeyHolderRegistry_Call { + return &CreationContextMock_KeyHolderRegistry_Call{Call: _e.mock.On("KeyHolderRegistry")} +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Run(run func()) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Return(_a0 keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) 
RunAndReturn(run func() keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(run) + return _c +} + +// Watcher provides a mock function with given fields: +func (_m *CreationContextMock) Watcher() watcher.Watcher { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Watcher") + } + + var r0 watcher.Watcher + if rf, ok := ret.Get(0).(func() watcher.Watcher); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(watcher.Watcher) + } + } + + return r0 +} + +// CreationContextMock_Watcher_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Watcher' +type CreationContextMock_Watcher_Call struct { + *mock.Call +} + +// Watcher is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) Watcher() *CreationContextMock_Watcher_Call { + return &CreationContextMock_Watcher_Call{Call: _e.mock.On("Watcher")} +} + +func (_c *CreationContextMock_Watcher_Call) Run(run func()) *CreationContextMock_Watcher_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) Return(_a0 watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) RunAndReturn(run func() watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(run) + return _c +} + +// NewCreationContextMock creates a new instance of CreationContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCreationContextMock(t interface { + mock.TestingT + Cleanup(func()) +}) *CreationContextMock { + mock := &CreationContextMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/rules/error_handler.go b/internal/rules/error_handler.go index 5423b68bf..75ee66aba 100644 --- a/internal/rules/error_handler.go +++ b/internal/rules/error_handler.go @@ -23,6 +23,6 @@ import ( //go:generate mockery --name errorHandler --structname ErrorHandlerMock type errorHandler interface { - CanExecute(ctx heimdall.Context, causeErr error) bool + ID() string Execute(ctx heimdall.Context, causeErr error) error } diff --git a/internal/rules/event/event.go b/internal/rules/event/event.go deleted file mode 100644 index 8b84813f3..000000000 --- a/internal/rules/event/event.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package event - -import ( - "github.com/dadrus/heimdall/internal/rules/rule" -) - -type ChangeType uint32 - -// These are the generalized file operations that can trigger a notification. 
-const ( - Create ChangeType = 1 << iota - Remove - Update -) - -func (t ChangeType) String() string { - switch t { - case Create: - return "Create" - case Remove: - return "Remove" - case Update: - return "Update" - default: - return "Unknown" - } -} - -type RuleSetChanged struct { - Source string - Name string - Rules []rule.Rule - ChangeType ChangeType -} diff --git a/internal/rules/event/queue.go b/internal/rules/event/queue.go deleted file mode 100644 index c03d20352..000000000 --- a/internal/rules/event/queue.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package event - -type RuleSetChangedEventQueue chan RuleSetChanged diff --git a/internal/rules/execution_condition.go b/internal/rules/execution_condition.go index 811664c03..fa4bc24af 100644 --- a/internal/rules/execution_condition.go +++ b/internal/rules/execution_condition.go @@ -24,5 +24,6 @@ import ( //go:generate mockery --name executionCondition --structname ExecutionConditionMock type executionCondition interface { - CanExecute(ctx heimdall.Context, sub *subject.Subject) (bool, error) + CanExecuteOnSubject(ctx heimdall.Context, sub *subject.Subject) (bool, error) + CanExecuteOnError(ctx heimdall.Context, err error) (bool, error) } diff --git a/internal/rules/mechanisms/authenticators/anonymous_authenticator.go b/internal/rules/mechanisms/authenticators/anonymous_authenticator.go index 221cd834a..f110d06f5 100644 --- a/internal/rules/mechanisms/authenticators/anonymous_authenticator.go +++ b/internal/rules/mechanisms/authenticators/anonymous_authenticator.go @@ -26,21 +26,25 @@ import ( // by intention. Used only during application bootstrap. 
func init() { // nolint: gochecknoinits registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Authenticator, error) { + func(ctx CreationContext, id string, typ string, conf map[string]any) (bool, Authenticator, error) { if typ != AuthenticatorAnonymous { return false, nil, nil } - auth, err := newAnonymousAuthenticator(id, conf) + auth, err := newAnonymousAuthenticator(ctx, id, conf) return true, auth, err }) } -func newAnonymousAuthenticator(id string, rawConfig map[string]any) (*anonymousAuthenticator, error) { +func newAnonymousAuthenticator( + ctx CreationContext, + id string, + rawConfig map[string]any, +) (*anonymousAuthenticator, error) { var auth anonymousAuthenticator - if err := decodeConfig(AuthenticatorAnonymous, rawConfig, &auth); err != nil { + if err := decodeConfig(ctx, AuthenticatorAnonymous, rawConfig, &auth); err != nil { return nil, err } @@ -71,7 +75,7 @@ func (a *anonymousAuthenticator) WithConfig(config map[string]any) (Authenticato return a, nil } - return newAnonymousAuthenticator(a.id, config) + return newAnonymousAuthenticator(nil, a.id, config) } func (a *anonymousAuthenticator) IsFallbackOnErrorAllowed() bool { diff --git a/internal/rules/mechanisms/authenticators/anonymous_authenticator_test.go b/internal/rules/mechanisms/authenticators/anonymous_authenticator_test.go index 5ecdda5c4..77fc2f22b 100644 --- a/internal/rules/mechanisms/authenticators/anonymous_authenticator_test.go +++ b/internal/rules/mechanisms/authenticators/anonymous_authenticator_test.go @@ -82,7 +82,7 @@ func TestCreateAnonymousAuthenticator(t *testing.T) { require.NoError(t, err) // WHEN - auth, err := newAnonymousAuthenticator(tc.id, conf) + auth, err := newAnonymousAuthenticator(nil, tc.id, conf) // THEN tc.assert(t, err, auth) @@ -151,7 +151,7 @@ func TestCreateAnonymousAuthenticatorFromPrototype(t *testing.T) { conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) - prototype, err := newAnonymousAuthenticator(tc.id, pc) + prototype, err := newAnonymousAuthenticator(nil, tc.id, pc) require.NoError(t, err) // WHEN diff --git a/internal/rules/mechanisms/authenticators/authenticator_type_registry.go b/internal/rules/mechanisms/authenticators/authenticator_type_registry.go index df466818a..0a14a5f66 100644 --- a/internal/rules/mechanisms/authenticators/authenticator_type_registry.go +++ b/internal/rules/mechanisms/authenticators/authenticator_type_registry.go @@ -20,6 +20,9 @@ import ( "errors" "sync" + "github.com/dadrus/heimdall/internal/keyholder" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" ) @@ -27,13 +30,21 @@ var ( ErrUnsupportedAuthenticatorType = errors.New("authenticator type unsupported") // by intention. Used only during application bootstrap. 
- authenticatorTypeFactories []AuthenticatorTypeFactory //nolint:gochecknoglobals - authenticatorTypeFactoriesMu sync.RWMutex //nolint:gochecknoglobals + authenticatorTypeFactories []TypeFactory //nolint:gochecknoglobals + authenticatorTypeFactoriesMu sync.RWMutex //nolint:gochecknoglobals ) -type AuthenticatorTypeFactory func(id string, typ string, config map[string]any) (bool, Authenticator, error) +//go:generate mockery --name CreationContext --structname CreationContextMock --inpackage --testonly -func registerTypeFactory(factory AuthenticatorTypeFactory) { +type CreationContext interface { + Watcher() watcher.Watcher + KeyHolderRegistry() keyholder.Registry + CertificateObserver() certificate.Observer +} + +type TypeFactory func(ctx CreationContext, id string, typ string, config map[string]any) (bool, Authenticator, error) + +func registerTypeFactory(factory TypeFactory) { authenticatorTypeFactoriesMu.Lock() defer authenticatorTypeFactoriesMu.Unlock() @@ -44,12 +55,12 @@ func registerTypeFactory(factory AuthenticatorTypeFactory) { authenticatorTypeFactories = append(authenticatorTypeFactories, factory) } -func CreatePrototype(id string, typ string, config map[string]any) (Authenticator, error) { +func CreatePrototype(ctx CreationContext, id string, typ string, config map[string]any) (Authenticator, error) { authenticatorTypeFactoriesMu.RLock() defer authenticatorTypeFactoriesMu.RUnlock() for _, create := range authenticatorTypeFactories { - if ok, at, err := create(id, typ, config); ok { + if ok, at, err := create(ctx, id, typ, config); ok { return at, err } } diff --git a/internal/rules/mechanisms/authenticators/authenticator_type_registry_test.go b/internal/rules/mechanisms/authenticators/authenticator_type_registry_test.go index ac40e45a9..5b6e4e46d 100644 --- a/internal/rules/mechanisms/authenticators/authenticator_type_registry_test.go +++ b/internal/rules/mechanisms/authenticators/authenticator_type_registry_test.go @@ -57,7 +57,7 @@ func TestCreateAuthenticatorPrototype(t *testing.T) { } { t.Run("case="+tc.uc, func(t *testing.T) { // WHEN - auth, err := CreatePrototype("foo", tc.typ, nil) + auth, err := CreatePrototype(NewCreationContextMock(t), "foo", tc.typ, nil) // THEN tc.assert(t, err, auth) diff --git a/internal/rules/mechanisms/authenticators/basic_auth_authenticator.go b/internal/rules/mechanisms/authenticators/basic_auth_authenticator.go index 5da3bf9ae..d6e0afb64 100644 --- a/internal/rules/mechanisms/authenticators/basic_auth_authenticator.go +++ b/internal/rules/mechanisms/authenticators/basic_auth_authenticator.go @@ -41,12 +41,12 @@ const ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Authenticator, error) { + func(ctx CreationContext, id string, typ string, conf map[string]any) (bool, Authenticator, error) { if typ != AuthenticatorBasicAuth { return false, nil, nil } - auth, err := newBasicAuthAuthenticator(id, conf) + auth, err := newBasicAuthAuthenticator(ctx, id, conf) return true, auth, err }) @@ -59,7 +59,11 @@ type basicAuthAuthenticator struct { allowFallbackOnError bool } -func newBasicAuthAuthenticator(id string, rawConfig map[string]any) (*basicAuthAuthenticator, error) { +func newBasicAuthAuthenticator( + ctx CreationContext, + id string, + rawConfig map[string]any, +) (*basicAuthAuthenticator, error) { type Config struct { UserID string `mapstructure:"user_id" validate:"required"` Password string `mapstructure:"password" validate:"required"` @@ -67,7 +71,7 @@ func 
newBasicAuthAuthenticator(id string, rawConfig map[string]any) (*basicAuthA } var conf Config - if err := decodeConfig(AuthenticatorBasicAuth, rawConfig, &conf); err != nil { + if err := decodeConfig(ctx, AuthenticatorBasicAuth, rawConfig, &conf); err != nil { return nil, err } @@ -150,7 +154,7 @@ func (a *basicAuthAuthenticator) WithConfig(rawConfig map[string]any) (Authentic } var conf Config - if err := decodeConfig(AuthenticatorBasicAuth, rawConfig, &conf); err != nil { + if err := decodeConfig(nil, AuthenticatorBasicAuth, rawConfig, &conf); err != nil { return nil, err } diff --git a/internal/rules/mechanisms/authenticators/basic_auth_authenticator_test.go b/internal/rules/mechanisms/authenticators/basic_auth_authenticator_test.go index 97afbbb31..d0abffd5a 100644 --- a/internal/rules/mechanisms/authenticators/basic_auth_authenticator_test.go +++ b/internal/rules/mechanisms/authenticators/basic_auth_authenticator_test.go @@ -140,7 +140,7 @@ foo: bar`), require.NoError(t, err) // WHEN - auth, err := newBasicAuthAuthenticator(tc.id, conf) + auth, err := newBasicAuthAuthenticator(nil, tc.id, conf) // THEN tc.assert(t, err, auth) @@ -310,7 +310,7 @@ password: baz`), conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) - prototype, err := newBasicAuthAuthenticator(tc.id, pc) + prototype, err := newBasicAuthAuthenticator(nil, tc.id, pc) require.NoError(t, err) // WHEN @@ -501,7 +501,7 @@ password: bar`)) } { t.Run("case="+tc.uc, func(t *testing.T) { // GIVEN - auth, err := newBasicAuthAuthenticator(tc.id, conf) + auth, err := newBasicAuthAuthenticator(nil, tc.id, conf) require.NoError(t, err) ctx := mocks.NewContextMock(t) diff --git a/internal/rules/mechanisms/authenticators/config_decoder.go b/internal/rules/mechanisms/authenticators/config_decoder.go index 96bc27d7f..39622b492 100644 --- a/internal/rules/mechanisms/authenticators/config_decoder.go +++ b/internal/rules/mechanisms/authenticators/config_decoder.go @@ -30,11 +30,11 @@ import ( "github.com/dadrus/heimdall/internal/x/errorchain" ) -func decodeConfig(authenticatorType string, input, output any) error { +func decodeConfig(ctx CreationContext, authenticatorType string, input, output any) error { dec, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( - authstrategy.DecodeAuthenticationStrategyHookFunc(), + authstrategy.DecodeAuthenticationStrategyHookFunc(ctx), endpoint.DecodeEndpointHookFunc(), mapstructure.StringToTimeDurationHookFunc(), extractors.DecodeCompositeExtractStrategyHookFunc(), diff --git a/internal/rules/mechanisms/authenticators/extractors/composite_extract_strategy_test.go b/internal/rules/mechanisms/authenticators/extractors/composite_extract_strategy_test.go index c7fca593c..bf29ebdcf 100644 --- a/internal/rules/mechanisms/authenticators/extractors/composite_extract_strategy_test.go +++ b/internal/rules/mechanisms/authenticators/extractors/composite_extract_strategy_test.go @@ -70,7 +70,7 @@ func TestCompositeExtractHeaderValueWithScheme(t *testing.T) { ctx := mocks.NewContextMock(t) ctx.EXPECT().Request().Return(&heimdall.Request{ RequestFunctions: fnt, - URL: &url.URL{}, + URL: &heimdall.URL{URL: url.URL{}}, }) strategy := CompositeExtractStrategy{ diff --git a/internal/rules/mechanisms/authenticators/extractors/query_parameter_extract_strategy_test.go b/internal/rules/mechanisms/authenticators/extractors/query_parameter_extract_strategy_test.go index 3c98c86b7..8ff44b20c 100644 --- 
a/internal/rules/mechanisms/authenticators/extractors/query_parameter_extract_strategy_test.go +++ b/internal/rules/mechanisms/authenticators/extractors/query_parameter_extract_strategy_test.go @@ -40,7 +40,7 @@ func TestExtractQueryParameter(t *testing.T) { ctx := mocks.NewContextMock(t) ctx.EXPECT().Request().Return(&heimdall.Request{ RequestFunctions: fnt, - URL: &url.URL{RawQuery: fmt.Sprintf("%s=%s", queryParam, queryParamValue)}, + URL: &heimdall.URL{URL: url.URL{RawQuery: fmt.Sprintf("%s=%s", queryParam, queryParamValue)}}, }) strategy := QueryParameterExtractStrategy{Name: queryParam} @@ -62,7 +62,7 @@ func TestExtractNotExistingQueryParameterValue(t *testing.T) { ctx := mocks.NewContextMock(t) ctx.EXPECT().Request().Return(&heimdall.Request{ RequestFunctions: fnt, - URL: &url.URL{}, + URL: &heimdall.URL{}, }) strategy := QueryParameterExtractStrategy{Name: "Test-Cookie"} diff --git a/internal/rules/mechanisms/authenticators/generic_authenticator.go b/internal/rules/mechanisms/authenticators/generic_authenticator.go index 7ab905e20..8d27650ea 100644 --- a/internal/rules/mechanisms/authenticators/generic_authenticator.go +++ b/internal/rules/mechanisms/authenticators/generic_authenticator.go @@ -44,12 +44,12 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Authenticator, error) { + func(ctx CreationContext, id string, typ string, conf map[string]any) (bool, Authenticator, error) { if typ != AuthenticatorGeneric { return false, nil, nil } - auth, err := newGenericAuthenticator(id, conf) + auth, err := newGenericAuthenticator(ctx, id, conf) return true, auth, err }) @@ -68,7 +68,7 @@ type genericAuthenticator struct { allowFallbackOnError bool } -func newGenericAuthenticator(id string, rawConfig map[string]any) (*genericAuthenticator, error) { +func newGenericAuthenticator(ctx CreationContext, id string, rawConfig map[string]any) (*genericAuthenticator, error) { type Config struct { Endpoint endpoint.Endpoint `mapstructure:"identity_info_endpoint" validate:"required"` //nolint:lll SubjectInfo SubjectInfo `mapstructure:"subject" validate:"required"` //nolint:lll @@ -82,7 +82,7 @@ func newGenericAuthenticator(id string, rawConfig map[string]any) (*genericAuthe } var conf Config - if err := decodeConfig(AuthenticatorGeneric, rawConfig, &conf); err != nil { + if err := decodeConfig(ctx, AuthenticatorGeneric, rawConfig, &conf); err != nil { return nil, err } @@ -142,7 +142,7 @@ func (a *genericAuthenticator) WithConfig(config map[string]any) (Authenticator, } var conf Config - if err := decodeConfig(AuthenticatorGeneric, config, &conf); err != nil { + if err := decodeConfig(nil, AuthenticatorGeneric, config, &conf); err != nil { return nil, err } diff --git a/internal/rules/mechanisms/authenticators/generic_authenticator_test.go b/internal/rules/mechanisms/authenticators/generic_authenticator_test.go index c8d485170..2795453e8 100644 --- a/internal/rules/mechanisms/authenticators/generic_authenticator_test.go +++ b/internal/rules/mechanisms/authenticators/generic_authenticator_test.go @@ -308,7 +308,7 @@ session_lifespan: require.NoError(t, err) // WHEN - auth, err := newGenericAuthenticator(tc.id, conf) + auth, err := newGenericAuthenticator(nil, tc.id, conf) // THEN tc.assertError(t, err, auth) @@ -712,7 +712,7 @@ forward_cookies: conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) - prototype, err := newGenericAuthenticator(tc.id, pc) + prototype, err := newGenericAuthenticator(nil, 
tc.id, pc) require.NoError(t, err) // WHEN diff --git a/internal/rules/mechanisms/authenticators/jwt_authenticator.go b/internal/rules/mechanisms/authenticators/jwt_authenticator.go index e0d48d964..438099378 100644 --- a/internal/rules/mechanisms/authenticators/jwt_authenticator.go +++ b/internal/rules/mechanisms/authenticators/jwt_authenticator.go @@ -53,12 +53,12 @@ const defaultJWTAuthenticatorTTL = 10 * time.Minute //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Authenticator, error) { + func(ctx CreationContext, id string, typ string, conf map[string]any) (bool, Authenticator, error) { if typ != AuthenticatorJwt { return false, nil, nil } - auth, err := newJwtAuthenticator(id, conf) + auth, err := newJwtAuthenticator(ctx, id, conf) return true, auth, err }) @@ -76,7 +76,12 @@ type jwtAuthenticator struct { validateJWKCert bool } -func newJwtAuthenticator(id string, rawConfig map[string]any) (*jwtAuthenticator, error) { // nolint: funlen +// nolint: funlen +func newJwtAuthenticator( + ctx CreationContext, + id string, + rawConfig map[string]any, +) (*jwtAuthenticator, error) { // nolint: funlen type Config struct { JWKSEndpoint *endpoint.Endpoint `mapstructure:"jwks_endpoint" validate:"required_without=MetadataEndpoint,excluded_with=MetadataEndpoint"` //nolint:lll,tagalign MetadataEndpoint *oauth2.MetadataEndpoint `mapstructure:"metadata_endpoint" validate:"required_without=JWKSEndpoint,excluded_with=JWKSEndpoint"` //nolint:lll,tagalign @@ -90,7 +95,7 @@ func newJwtAuthenticator(id string, rawConfig map[string]any) (*jwtAuthenticator } var conf Config - if err := decodeConfig(AuthenticatorJwt, rawConfig, &conf); err != nil { + if err := decodeConfig(ctx, AuthenticatorJwt, rawConfig, &conf); err != nil { return nil, err } @@ -214,7 +219,7 @@ func (a *jwtAuthenticator) WithConfig(config map[string]any) (Authenticator, err } var conf Config - if err := decodeConfig(AuthenticatorJwt, config, &conf); err != nil { + if err := decodeConfig(nil, AuthenticatorJwt, config, &conf); err != nil { return nil, err } diff --git a/internal/rules/mechanisms/authenticators/jwt_authenticator_test.go b/internal/rules/mechanisms/authenticators/jwt_authenticator_test.go index babfa0052..536ba2fe2 100644 --- a/internal/rules/mechanisms/authenticators/jwt_authenticator_test.go +++ b/internal/rules/mechanisms/authenticators/jwt_authenticator_test.go @@ -448,7 +448,7 @@ cache_ttl: 5s`), require.NoError(t, err) // WHEN - a, err := newJwtAuthenticator(tc.id, conf) + a, err := newJwtAuthenticator(nil, tc.id, conf) // THEN tc.assert(t, err, a) @@ -790,7 +790,7 @@ metadata_endpoint: conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) - prototype, err := newJwtAuthenticator(tc.id, pc) + prototype, err := newJwtAuthenticator(nil, tc.id, pc) require.NoError(t, err) // WHEN diff --git a/internal/rules/mechanisms/authenticators/mock_creation_context_test.go b/internal/rules/mechanisms/authenticators/mock_creation_context_test.go new file mode 100644 index 000000000..d24493d32 --- /dev/null +++ b/internal/rules/mechanisms/authenticators/mock_creation_context_test.go @@ -0,0 +1,180 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. 
+ +package authenticators + +import ( + keyholder "github.com/dadrus/heimdall/internal/keyholder" + certificate "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + + mock "github.com/stretchr/testify/mock" + + watcher "github.com/dadrus/heimdall/internal/watcher" +) + +// CreationContextMock is an autogenerated mock type for the CreationContext type +type CreationContextMock struct { + mock.Mock +} + +type CreationContextMock_Expecter struct { + mock *mock.Mock +} + +func (_m *CreationContextMock) EXPECT() *CreationContextMock_Expecter { + return &CreationContextMock_Expecter{mock: &_m.Mock} +} + +// CertificateObserver provides a mock function with given fields: +func (_m *CreationContextMock) CertificateObserver() certificate.Observer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CertificateObserver") + } + + var r0 certificate.Observer + if rf, ok := ret.Get(0).(func() certificate.Observer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(certificate.Observer) + } + } + + return r0 +} + +// CreationContextMock_CertificateObserver_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CertificateObserver' +type CreationContextMock_CertificateObserver_Call struct { + *mock.Call +} + +// CertificateObserver is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) CertificateObserver() *CreationContextMock_CertificateObserver_Call { + return &CreationContextMock_CertificateObserver_Call{Call: _e.mock.On("CertificateObserver")} +} + +func (_c *CreationContextMock_CertificateObserver_Call) Run(run func()) *CreationContextMock_CertificateObserver_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) Return(_a0 certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) RunAndReturn(run func() certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(run) + return _c +} + +// KeyHolderRegistry provides a mock function with given fields: +func (_m *CreationContextMock) KeyHolderRegistry() keyholder.Registry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for KeyHolderRegistry") + } + + var r0 keyholder.Registry + if rf, ok := ret.Get(0).(func() keyholder.Registry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keyholder.Registry) + } + } + + return r0 +} + +// CreationContextMock_KeyHolderRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'KeyHolderRegistry' +type CreationContextMock_KeyHolderRegistry_Call struct { + *mock.Call +} + +// KeyHolderRegistry is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) KeyHolderRegistry() *CreationContextMock_KeyHolderRegistry_Call { + return &CreationContextMock_KeyHolderRegistry_Call{Call: _e.mock.On("KeyHolderRegistry")} +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Run(run func()) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Return(_a0 keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) 
RunAndReturn(run func() keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(run) + return _c +} + +// Watcher provides a mock function with given fields: +func (_m *CreationContextMock) Watcher() watcher.Watcher { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Watcher") + } + + var r0 watcher.Watcher + if rf, ok := ret.Get(0).(func() watcher.Watcher); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(watcher.Watcher) + } + } + + return r0 +} + +// CreationContextMock_Watcher_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Watcher' +type CreationContextMock_Watcher_Call struct { + *mock.Call +} + +// Watcher is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) Watcher() *CreationContextMock_Watcher_Call { + return &CreationContextMock_Watcher_Call{Call: _e.mock.On("Watcher")} +} + +func (_c *CreationContextMock_Watcher_Call) Run(run func()) *CreationContextMock_Watcher_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) Return(_a0 watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) RunAndReturn(run func() watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(run) + return _c +} + +// NewCreationContextMock creates a new instance of CreationContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCreationContextMock(t interface { + mock.TestingT + Cleanup(func()) +}) *CreationContextMock { + mock := &CreationContextMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/rules/mechanisms/authenticators/oauth2_introspection_authenticator.go b/internal/rules/mechanisms/authenticators/oauth2_introspection_authenticator.go index 67c825324..d207b374f 100644 --- a/internal/rules/mechanisms/authenticators/oauth2_introspection_authenticator.go +++ b/internal/rules/mechanisms/authenticators/oauth2_introspection_authenticator.go @@ -49,12 +49,12 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Authenticator, error) { + func(ctx CreationContext, id string, typ string, conf map[string]any) (bool, Authenticator, error) { if typ != AuthenticatorOAuth2Introspection { return false, nil, nil } - auth, err := newOAuth2IntrospectionAuthenticator(id, conf) + auth, err := newOAuth2IntrospectionAuthenticator(ctx, id, conf) return true, auth, err }) @@ -70,8 +70,11 @@ type oauth2IntrospectionAuthenticator struct { allowFallbackOnError bool } -func newOAuth2IntrospectionAuthenticator( // nolint: funlen - id string, rawConfig map[string]any, +// nolint: funlen +func newOAuth2IntrospectionAuthenticator( + ctx CreationContext, + id string, + rawConfig map[string]any, ) (*oauth2IntrospectionAuthenticator, error) { type Config struct { IntrospectionEndpoint *endpoint.Endpoint `mapstructure:"introspection_endpoint" validate:"required_without=MetadataEndpoint,excluded_with=MetadataEndpoint"` //nolint:lll,tagalign @@ -84,7 +87,7 @@ func newOAuth2IntrospectionAuthenticator( // nolint: funlen } var conf Config - if err := decodeConfig(AuthenticatorOAuth2Introspection, rawConfig, &conf); err != nil { + if err := decodeConfig(ctx, 
AuthenticatorOAuth2Introspection, rawConfig, &conf); err != nil { return nil, err } @@ -198,7 +201,7 @@ func (a *oauth2IntrospectionAuthenticator) WithConfig(rawConfig map[string]any) } var conf Config - if err := decodeConfig(AuthenticatorOAuth2Introspection, rawConfig, &conf); err != nil { + if err := decodeConfig(nil, AuthenticatorOAuth2Introspection, rawConfig, &conf); err != nil { return nil, err } diff --git a/internal/rules/mechanisms/authenticators/oauth2_introspection_authenticator_test.go b/internal/rules/mechanisms/authenticators/oauth2_introspection_authenticator_test.go index eb24652ad..d9d1fffe7 100644 --- a/internal/rules/mechanisms/authenticators/oauth2_introspection_authenticator_test.go +++ b/internal/rules/mechanisms/authenticators/oauth2_introspection_authenticator_test.go @@ -335,7 +335,7 @@ metadata_endpoint: require.NoError(t, err) // WHEN - a, err := newOAuth2IntrospectionAuthenticator(tc.id, conf) + a, err := newOAuth2IntrospectionAuthenticator(nil, tc.id, conf) // THEN tc.assert(t, err, a) @@ -588,7 +588,7 @@ subject: conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) - prototype, err := newOAuth2IntrospectionAuthenticator(tc.id, pc) + prototype, err := newOAuth2IntrospectionAuthenticator(nil, tc.id, pc) require.NoError(t, err) // WHEN diff --git a/internal/rules/mechanisms/authenticators/unauthorized_authenticator.go b/internal/rules/mechanisms/authenticators/unauthorized_authenticator.go index 126cb19bf..e65747b6d 100644 --- a/internal/rules/mechanisms/authenticators/unauthorized_authenticator.go +++ b/internal/rules/mechanisms/authenticators/unauthorized_authenticator.go @@ -29,7 +29,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, _ map[string]any) (bool, Authenticator, error) { + func(_ CreationContext, id string, typ string, _ map[string]any) (bool, Authenticator, error) { if typ != AuthenticatorUnauthorized { return false, nil, nil } diff --git a/internal/rules/mechanisms/authorizers/allow_authorizer.go b/internal/rules/mechanisms/authorizers/allow_authorizer.go index ac05e7590..bc8486f13 100644 --- a/internal/rules/mechanisms/authorizers/allow_authorizer.go +++ b/internal/rules/mechanisms/authorizers/allow_authorizer.go @@ -28,7 +28,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, _ map[string]any) (bool, Authorizer, error) { + func(_ CreationContext, id string, typ string, _ map[string]any) (bool, Authorizer, error) { if typ != AuthorizerAllow { return false, nil, nil } diff --git a/internal/rules/mechanisms/authorizers/authorizer_type_registry.go b/internal/rules/mechanisms/authorizers/authorizer_type_registry.go index 796016d04..454b1ae57 100644 --- a/internal/rules/mechanisms/authorizers/authorizer_type_registry.go +++ b/internal/rules/mechanisms/authorizers/authorizer_type_registry.go @@ -20,6 +20,9 @@ import ( "errors" "sync" + "github.com/dadrus/heimdall/internal/keyholder" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" ) @@ -27,13 +30,21 @@ var ( ErrUnsupportedAuthorizerType = errors.New("authorizer type unsupported") // by intention. Used only during application bootstrap. 
- authorizerTypeFactories []AuthorizerTypeFactory //nolint:gochecknoglobals - authorizerTypeFactoriesMu sync.RWMutex //nolint:gochecknoglobals + authorizerTypeFactories []TypeFactory //nolint:gochecknoglobals + authorizerTypeFactoriesMu sync.RWMutex //nolint:gochecknoglobals ) -type AuthorizerTypeFactory func(id string, typ string, config map[string]any) (bool, Authorizer, error) +//go:generate mockery --name CreationContext --structname CreationContextMock --inpackage --testonly -func registerTypeFactory(factory AuthorizerTypeFactory) { +type CreationContext interface { + Watcher() watcher.Watcher + KeyHolderRegistry() keyholder.Registry + CertificateObserver() certificate.Observer +} + +type TypeFactory func(ctx CreationContext, id string, typ string, config map[string]any) (bool, Authorizer, error) + +func registerTypeFactory(factory TypeFactory) { authorizerTypeFactoriesMu.Lock() defer authorizerTypeFactoriesMu.Unlock() @@ -44,12 +55,12 @@ func registerTypeFactory(factory AuthorizerTypeFactory) { authorizerTypeFactories = append(authorizerTypeFactories, factory) } -func CreatePrototype(id string, typ string, config map[string]any) (Authorizer, error) { +func CreatePrototype(ctx CreationContext, id string, typ string, config map[string]any) (Authorizer, error) { authorizerTypeFactoriesMu.RLock() defer authorizerTypeFactoriesMu.RUnlock() for _, create := range authorizerTypeFactories { - if ok, at, err := create(id, typ, config); ok { + if ok, at, err := create(ctx, id, typ, config); ok { return at, err } } diff --git a/internal/rules/mechanisms/authorizers/authorizer_type_registry_test.go b/internal/rules/mechanisms/authorizers/authorizer_type_registry_test.go index efcb36462..e852d94ee 100644 --- a/internal/rules/mechanisms/authorizers/authorizer_type_registry_test.go +++ b/internal/rules/mechanisms/authorizers/authorizer_type_registry_test.go @@ -57,7 +57,7 @@ func TestCreateAuthorizerPrototypeUsingKnowType(t *testing.T) { } { t.Run("case="+tc.uc, func(t *testing.T) { // WHEN - auth, err := CreatePrototype("foo", tc.typ, nil) + auth, err := CreatePrototype(NewCreationContextMock(t), "foo", tc.typ, nil) // THEN tc.assert(t, err, auth) diff --git a/internal/rules/mechanisms/authorizers/cel_authorizer.go b/internal/rules/mechanisms/authorizers/cel_authorizer.go index f5369f09d..8325a017f 100644 --- a/internal/rules/mechanisms/authorizers/cel_authorizer.go +++ b/internal/rules/mechanisms/authorizers/cel_authorizer.go @@ -31,12 +31,12 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Authorizer, error) { + func(ctx CreationContext, id string, typ string, conf map[string]any) (bool, Authorizer, error) { if typ != AuthorizerCEL { return false, nil, nil } - auth, err := newCELAuthorizer(id, conf) + auth, err := newCELAuthorizer(ctx, id, conf) return true, auth, err }) @@ -47,13 +47,13 @@ type celAuthorizer struct { expressions compiledExpressions } -func newCELAuthorizer(id string, rawConfig map[string]any) (*celAuthorizer, error) { +func newCELAuthorizer(ctx CreationContext, id string, rawConfig map[string]any) (*celAuthorizer, error) { type Config struct { Expressions []Expression `mapstructure:"expressions" validate:"required,gt=0,dive"` } var conf Config - if err := decodeConfig(AuthorizerCEL, rawConfig, &conf); err != nil { + if err := decodeConfig(ctx, AuthorizerCEL, rawConfig, &conf); err != nil { return nil, err } @@ -75,7 +75,7 @@ func (a *celAuthorizer) Execute(ctx heimdall.Context, sub *subject.Subject) 
erro logger := zerolog.Ctx(ctx.AppContext()) logger.Debug().Str("_id", a.id).Msg("Authorizing using CEL authorizer") - return a.expressions.eval(map[string]any{"Subject": sub, "Request": ctx.Request()}, a) + return a.expressions.eval(map[string]any{"Subject": sub, "Request": ctx.Request(), "Outputs": ctx.Outputs()}, a) } func (a *celAuthorizer) WithConfig(rawConfig map[string]any) (Authorizer, error) { @@ -83,7 +83,7 @@ func (a *celAuthorizer) WithConfig(rawConfig map[string]any) (Authorizer, error) return a, nil } - return newCELAuthorizer(a.id, rawConfig) + return newCELAuthorizer(nil, a.id, rawConfig) } func (a *celAuthorizer) ID() string { return a.id } diff --git a/internal/rules/mechanisms/authorizers/cel_authorizer_test.go b/internal/rules/mechanisms/authorizers/cel_authorizer_test.go index 904f7f868..d376b05cd 100644 --- a/internal/rules/mechanisms/authorizers/cel_authorizer_test.go +++ b/internal/rules/mechanisms/authorizers/cel_authorizer_test.go @@ -142,7 +142,7 @@ expressions: require.NoError(t, err) // WHEN - a, err := newCELAuthorizer(tc.id, conf) + a, err := newCELAuthorizer(nil, tc.id, conf) // THEN tc.assert(t, err, a) @@ -218,7 +218,7 @@ expressions: conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) - prototype, err := newCELAuthorizer(tc.id, pc) + prototype, err := newCELAuthorizer(nil, tc.id, pc) require.NoError(t, err) // WHEN @@ -255,6 +255,7 @@ expressions: t.Helper() ctx.EXPECT().Request().Return(nil) + ctx.EXPECT().Outputs().Return(nil) }, assert: func(t *testing.T, err error) { t.Helper() @@ -269,7 +270,7 @@ expressions: }, }, { - uc: "expressions can use subject and request properties", + uc: "expressions can use subject, request and outputs properties", id: "authz2", config: []byte(` expressions: @@ -290,6 +291,8 @@ expressions: - expression: Request.Cookie("FooCookie") == "barfoo" - expression: Request.URL.String() == "http://localhost/test?foo=bar&baz=zab" - expression: Request.URL.Path.split("/").last() == "test" + - expression: Request.URL.Captures.foo == "bar" + - expression: Outputs.foo == "bar" `), configureContextAndSubject: func(t *testing.T, ctx *mocks.ContextMock, sub *subject.Subject) { t.Helper() @@ -308,14 +311,19 @@ expressions: ctx.EXPECT().Request().Return(&heimdall.Request{ RequestFunctions: reqf, Method: http.MethodGet, - URL: &url.URL{ - Scheme: "http", - Host: "localhost", - Path: "/test", - RawQuery: "foo=bar&baz=zab", + URL: &heimdall.URL{ + URL: url.URL{ + Scheme: "http", + Host: "localhost", + Path: "/test", + RawQuery: "foo=bar&baz=zab", + }, + Captures: map[string]string{"foo": "bar"}, }, ClientIPAddresses: []string{"127.0.0.1", "10.10.10.10"}, }) + + ctx.EXPECT().Outputs().Return(map[string]any{"foo": "bar"}) }, assert: func(t *testing.T, err error) { t.Helper() @@ -336,7 +344,7 @@ expressions: tc.configureContextAndSubject(t, ctx, sub) - auth, err := newCELAuthorizer(tc.id, conf) + auth, err := newCELAuthorizer(nil, tc.id, conf) require.NoError(t, err) // WHEN diff --git a/internal/rules/mechanisms/authorizers/config_decoder.go b/internal/rules/mechanisms/authorizers/config_decoder.go index 8101f6626..809daa4d2 100644 --- a/internal/rules/mechanisms/authorizers/config_decoder.go +++ b/internal/rules/mechanisms/authorizers/config_decoder.go @@ -27,11 +27,11 @@ import ( "github.com/dadrus/heimdall/internal/x/errorchain" ) -func decodeConfig(authorizerType string, input, output any) error { +func decodeConfig(ctx CreationContext, authorizerType string, input, output any) error { dec, err := 
mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( - authstrategy.DecodeAuthenticationStrategyHookFunc(), + authstrategy.DecodeAuthenticationStrategyHookFunc(ctx), endpoint.DecodeEndpointHookFunc(), mapstructure.StringToTimeDurationHookFunc(), template.DecodeTemplateHookFunc(), diff --git a/internal/rules/mechanisms/authorizers/deny_authorizer.go b/internal/rules/mechanisms/authorizers/deny_authorizer.go index e7c1a8f02..2a093ac94 100644 --- a/internal/rules/mechanisms/authorizers/deny_authorizer.go +++ b/internal/rules/mechanisms/authorizers/deny_authorizer.go @@ -29,7 +29,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, _ map[string]any) (bool, Authorizer, error) { + func(_ CreationContext, id string, typ string, _ map[string]any) (bool, Authorizer, error) { if typ != AuthorizerDeny { return false, nil, nil } diff --git a/internal/rules/mechanisms/authorizers/mock_creation_context_test.go b/internal/rules/mechanisms/authorizers/mock_creation_context_test.go new file mode 100644 index 000000000..bafb6ae3a --- /dev/null +++ b/internal/rules/mechanisms/authorizers/mock_creation_context_test.go @@ -0,0 +1,180 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. + +package authorizers + +import ( + keyholder "github.com/dadrus/heimdall/internal/keyholder" + certificate "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + + mock "github.com/stretchr/testify/mock" + + watcher "github.com/dadrus/heimdall/internal/watcher" +) + +// CreationContextMock is an autogenerated mock type for the CreationContext type +type CreationContextMock struct { + mock.Mock +} + +type CreationContextMock_Expecter struct { + mock *mock.Mock +} + +func (_m *CreationContextMock) EXPECT() *CreationContextMock_Expecter { + return &CreationContextMock_Expecter{mock: &_m.Mock} +} + +// CertificateObserver provides a mock function with given fields: +func (_m *CreationContextMock) CertificateObserver() certificate.Observer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CertificateObserver") + } + + var r0 certificate.Observer + if rf, ok := ret.Get(0).(func() certificate.Observer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(certificate.Observer) + } + } + + return r0 +} + +// CreationContextMock_CertificateObserver_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CertificateObserver' +type CreationContextMock_CertificateObserver_Call struct { + *mock.Call +} + +// CertificateObserver is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) CertificateObserver() *CreationContextMock_CertificateObserver_Call { + return &CreationContextMock_CertificateObserver_Call{Call: _e.mock.On("CertificateObserver")} +} + +func (_c *CreationContextMock_CertificateObserver_Call) Run(run func()) *CreationContextMock_CertificateObserver_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) Return(_a0 certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) RunAndReturn(run func() certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(run) + return _c +} + +// KeyHolderRegistry provides a mock function with given fields: +func (_m 
*CreationContextMock) KeyHolderRegistry() keyholder.Registry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for KeyHolderRegistry") + } + + var r0 keyholder.Registry + if rf, ok := ret.Get(0).(func() keyholder.Registry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keyholder.Registry) + } + } + + return r0 +} + +// CreationContextMock_KeyHolderRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'KeyHolderRegistry' +type CreationContextMock_KeyHolderRegistry_Call struct { + *mock.Call +} + +// KeyHolderRegistry is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) KeyHolderRegistry() *CreationContextMock_KeyHolderRegistry_Call { + return &CreationContextMock_KeyHolderRegistry_Call{Call: _e.mock.On("KeyHolderRegistry")} +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Run(run func()) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Return(_a0 keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) RunAndReturn(run func() keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(run) + return _c +} + +// Watcher provides a mock function with given fields: +func (_m *CreationContextMock) Watcher() watcher.Watcher { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Watcher") + } + + var r0 watcher.Watcher + if rf, ok := ret.Get(0).(func() watcher.Watcher); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(watcher.Watcher) + } + } + + return r0 +} + +// CreationContextMock_Watcher_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Watcher' +type CreationContextMock_Watcher_Call struct { + *mock.Call +} + +// Watcher is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) Watcher() *CreationContextMock_Watcher_Call { + return &CreationContextMock_Watcher_Call{Call: _e.mock.On("Watcher")} +} + +func (_c *CreationContextMock_Watcher_Call) Run(run func()) *CreationContextMock_Watcher_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) Return(_a0 watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) RunAndReturn(run func() watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(run) + return _c +} + +// NewCreationContextMock creates a new instance of CreationContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewCreationContextMock(t interface { + mock.TestingT + Cleanup(func()) +}) *CreationContextMock { + mock := &CreationContextMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/rules/mechanisms/authorizers/remote_authorizer.go b/internal/rules/mechanisms/authorizers/remote_authorizer.go index 49f7cabd6..6d7314404 100644 --- a/internal/rules/mechanisms/authorizers/remote_authorizer.go +++ b/internal/rules/mechanisms/authorizers/remote_authorizer.go @@ -51,12 +51,12 @@ var errNoContent = errors.New("no payload received") //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Authorizer, error) { + func(ctx CreationContext, id string, typ string, conf map[string]any) (bool, Authorizer, error) { if typ != AuthorizerRemote { return false, nil, nil } - auth, err := newRemoteAuthorizer(id, conf) + auth, err := newRemoteAuthorizer(ctx, id, conf) return true, auth, err }) @@ -87,13 +87,13 @@ func (ai *authorizationInformation) addHeadersTo(headerNames []string, ctx heimd } } -func (ai *authorizationInformation) addAttributesTo(key string, sub *subject.Subject) { +func (ai *authorizationInformation) addResultsTo(key string, ctx heimdall.Context) { if ai.Payload != nil { - sub.Attributes[key] = ai.Payload + ctx.Outputs()[key] = ai.Payload } } -func newRemoteAuthorizer(id string, rawConfig map[string]any) (*remoteAuthorizer, error) { +func newRemoteAuthorizer(ctx CreationContext, id string, rawConfig map[string]any) (*remoteAuthorizer, error) { type Config struct { Endpoint endpoint.Endpoint `mapstructure:"endpoint" validate:"required"` //nolint:lll Expressions []Expression `mapstructure:"expressions" validate:"dive"` @@ -104,7 +104,7 @@ func newRemoteAuthorizer(id string, rawConfig map[string]any) (*remoteAuthorizer } var conf Config - if err := decodeConfig(AuthorizerRemote, rawConfig, &conf); err != nil { + if err := decodeConfig(ctx, AuthorizerRemote, rawConfig, &conf); err != nil { return nil, err } @@ -182,7 +182,7 @@ func (a *remoteAuthorizer) Execute(ctx heimdall.Context, sub *subject.Subject) e } authInfo.addHeadersTo(a.headersForUpstream, ctx) - authInfo.addAttributesTo(a.id, sub) + authInfo.addResultsTo(a.id, ctx) return nil } @@ -201,7 +201,7 @@ func (a *remoteAuthorizer) WithConfig(rawConfig map[string]any) (Authorizer, err } var conf Config - if err := decodeConfig(AuthorizerRemote, rawConfig, &conf); err != nil { + if err := decodeConfig(nil, AuthorizerRemote, rawConfig, &conf); err != nil { return nil, err } @@ -247,6 +247,7 @@ func (a *remoteAuthorizer) doAuthorize( return tpl.Render(map[string]any{ "Subject": sub, "Values": values, + "Outputs": ctx.Outputs(), }) }) @@ -372,6 +373,7 @@ func (a *remoteAuthorizer) renderTemplates( if values, err = a.v.Render(map[string]any{ "Request": ctx.Request(), "Subject": sub, + "Outputs": ctx.Outputs(), }); err != nil { return nil, "", errorchain.NewWithMessage(heimdall.ErrInternal, "failed to render values for the authorization endpoint"). @@ -384,6 +386,7 @@ func (a *remoteAuthorizer) renderTemplates( "Request": ctx.Request(), "Subject": sub, "Values": values, + "Outputs": ctx.Outputs(), }); err != nil { return nil, "", errorchain.NewWithMessage(heimdall.ErrInternal, "failed to render payload for the authorization endpoint"). 
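
A note on the authorizer hunks above: `addAttributesTo` becomes `addResultsTo`, so the remote authorizer no longer merges its payload into `Subject.Attributes` but stores it in the per-request outputs map under its own id, and that map is now also handed to the values and payload templates as `Outputs`. The sketch below (written as if it sat inside the authorizers package) illustrates the resulting data flow; the id `authz`, the value `allow`, and the assumption that `Render` returns the rendered string plus an error are mine — only the map keys and the `Outputs()` accessor come from the diff:

```go
// Illustration only - "authz" and "allow" are made-up values, not part of the change set.
func renderWithOutputs(ctx heimdall.Context, sub *subject.Subject, tpl template.Template) (string, error) {
	// an earlier mechanism published its result under its own id
	ctx.Outputs()["authz"] = "allow"

	// later values/payload renders now receive that map as well, so a template
	// like `{{ .Subject.ID }}-{{ .Outputs.authz }}` can pick the value up again
	return tpl.Render(map[string]any{
		"Subject": sub,
		"Outputs": ctx.Outputs(),
	})
}
```

The same map is exposed to CEL expressions via the new `Outputs` variable added to `cellib/library.go` further down in this change set.
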
diff --git a/internal/rules/mechanisms/authorizers/remote_authorizer_test.go b/internal/rules/mechanisms/authorizers/remote_authorizer_test.go index 6ff45c0cc..aed075708 100644 --- a/internal/rules/mechanisms/authorizers/remote_authorizer_test.go +++ b/internal/rules/mechanisms/authorizers/remote_authorizer_test.go @@ -201,7 +201,7 @@ values: "Subject": &subject.Subject{ID: "bar"}, "Request": &heimdall.Request{ RequestFunctions: rfunc, - URL: &url.URL{Scheme: "http", Host: "foo.bar", Path: "/foo/bar"}, + URL: &heimdall.URL{URL: url.URL{Scheme: "http", Host: "foo.bar", Path: "/foo/bar"}}, }, }) require.NoError(t, err) @@ -233,7 +233,7 @@ values: require.NoError(t, err) // WHEN - auth, err := newRemoteAuthorizer(tc.id, conf) + auth, err := newRemoteAuthorizer(nil, tc.id, conf) // THEN tc.assert(t, err, auth) @@ -475,7 +475,7 @@ cache_ttl: 15s conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) - prototype, err := newRemoteAuthorizer(tc.id, pc) + prototype, err := newRemoteAuthorizer(nil, tc.id, pc) require.NoError(t, err) // WHEN @@ -540,7 +540,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { instructServer func(t *testing.T) configureContext func(t *testing.T, ctx *heimdallmocks.ContextMock) configureCache func(t *testing.T, cch *mocks.CacheMock, authorizer *remoteAuthorizer, sub *subject.Subject) - assert func(t *testing.T, err error, sub *subject.Subject) + assert func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) }{ { uc: "successful with payload and with header, without payload from server and without header " + @@ -556,7 +556,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { return values.Values{"foo": tpl} }(), payload: func() template.Template { - tpl, _ := template.New("{{ .Subject.ID }}-{{ .Values.foo }}") + tpl, _ := template.New("{{ .Subject.ID }}-{{ .Values.foo }}-{{ .Outputs.foo }}") return tpl }(), @@ -581,7 +581,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { data, err := io.ReadAll(req.Body) require.NoError(t, err) - assert.Equal(t, "my-id-bar", string(data)) + assert.Equal(t, "my-id-bar-bar", string(data)) } }, configureContext: func(t *testing.T, ctx *heimdallmocks.ContextMock) { @@ -589,7 +589,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() require.NoError(t, err) @@ -597,6 +597,8 @@ func TestRemoteAuthorizerExecute(t *testing.T) { assert.True(t, authorizationEndpointCalled) assert.Len(t, sub.Attributes, 1) assert.Equal(t, "baz", sub.Attributes["bar"]) + assert.Len(t, outputs, 1) + assert.Equal(t, "bar", outputs["foo"]) }, }, { @@ -663,16 +665,18 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx.EXPECT().AddHeaderForUpstream("X-Foo-Bar", "HeyFoo") ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() require.NoError(t, err) assert.True(t, authorizationEndpointCalled) - assert.Len(t, sub.Attributes, 2) + assert.Len(t, sub.Attributes, 1) assert.Equal(t, "baz", sub.Attributes["bar"]) - attrs := sub.Attributes["authorizer"] + assert.Len(t, outputs, 2) + assert.Equal(t, "bar", outputs["foo"]) + attrs := outputs["authorizer"] assert.NotEmpty(t, attrs) authorizerAttrs, ok := attrs.(map[string]any) require.True(t, ok) @@ -702,7 +706,7 @@ func 
TestRemoteAuthorizerExecute(t *testing.T) { return values.Values{"foo": tpl} }(), payload: func() template.Template { - tpl, _ := template.New(`user_id={{ urlenc .Subject.ID }}&{{ .Subject.Attributes.bar }}={{ .Values.foo }}`) + tpl, _ := template.New(`user_id={{ urlenc .Subject.ID }}&{{ .Subject.Attributes.bar }}={{ .Values.foo }}&{{ .Values.foo }}={{ .Outputs.foo }}`) return tpl }(), @@ -728,9 +732,10 @@ func TestRemoteAuthorizerExecute(t *testing.T) { formValues, err := url.ParseQuery(string(data)) require.NoError(t, err) - assert.Len(t, formValues, 2) + assert.Len(t, formValues, 3) assert.Equal(t, []string{"my id"}, formValues["user_id"]) assert.Equal(t, []string{"foo"}, formValues["baz"]) + assert.Equal(t, []string{"bar"}, formValues["foo"]) } responseCode = http.StatusOK @@ -754,7 +759,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { return err == nil && ai.Payload == nil && len(ai.Headers.Get("X-Foo-Bar")) != 0 }), auth.ttl).Return(nil) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() require.NoError(t, err) @@ -763,7 +768,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { assert.Len(t, sub.Attributes, 1) assert.Equal(t, "baz", sub.Attributes["bar"]) - assert.Empty(t, sub.Attributes["authorizer"]) + assert.Empty(t, outputs["authorizer"]) }, }, { @@ -806,7 +811,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { cch.EXPECT().Get(mock.Anything, cacheKey).Return(nil, errors.New("no cache entry")) cch.EXPECT().Set(mock.Anything, cacheKey, mock.Anything, auth.ttl).Return(nil) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() require.NoError(t, err) @@ -815,7 +820,8 @@ func TestRemoteAuthorizerExecute(t *testing.T) { assert.Len(t, sub.Attributes, 1) assert.Equal(t, "baz", sub.Attributes["bar"]) - assert.Empty(t, sub.Attributes["authorizer"]) + assert.Len(t, outputs, 1) + assert.Equal(t, "bar", outputs["foo"]) }, }, { @@ -862,16 +868,18 @@ func TestRemoteAuthorizerExecute(t *testing.T) { cch.EXPECT().Get(mock.Anything, mock.Anything).Return(rawInfo, nil) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() require.NoError(t, err) assert.False(t, authorizationEndpointCalled) - assert.Len(t, sub.Attributes, 2) + assert.Len(t, sub.Attributes, 1) assert.Equal(t, "baz", sub.Attributes["bar"]) - attrs := sub.Attributes["authorizer"] + assert.Len(t, outputs, 2) + assert.Equal(t, "bar", outputs["foo"]) + attrs := outputs["authorizer"] assert.NotEmpty(t, attrs) authorizerAttrs, ok := attrs.(map[string]any) require.True(t, ok) @@ -903,7 +911,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() require.Error(t, err) @@ -938,13 +946,15 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() require.NoError(t, err) assert.True(t, authorizationEndpointCalled) - assert.Equal(t, "Hi Foo", sub.Attributes["foo"]) + assert.Empty(t, sub.Attributes) + assert.Len(t, outputs, 1) + 
assert.Equal(t, "Hi Foo", outputs["foo"]) }, }, { @@ -964,7 +974,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() require.Error(t, err) @@ -981,7 +991,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { { uc: "with error due to nil subject", authorizer: &remoteAuthorizer{id: "authz"}, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() assert.False(t, authorizationEndpointCalled) @@ -1059,7 +1069,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() assert.True(t, authorizationEndpointCalled) @@ -1137,15 +1147,17 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() assert.True(t, authorizationEndpointCalled) require.NoError(t, err) - require.Len(t, sub.Attributes, 1) - attrs := sub.Attributes["authorizer"] + require.Empty(t, sub.Attributes) + assert.Len(t, outputs, 2) + assert.Equal(t, "bar", outputs["foo"]) + attrs := outputs["authorizer"] assert.NotEmpty(t, attrs) authorizerAttrs, ok := attrs.(map[string]any) require.True(t, ok) @@ -1176,7 +1188,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() assert.False(t, authorizationEndpointCalled) @@ -1208,7 +1220,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() assert.False(t, authorizationEndpointCalled) @@ -1250,6 +1262,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { ctx := heimdallmocks.NewContextMock(t) ctx.EXPECT().AppContext().Return(cache.WithContext(context.Background(), cch)) + ctx.EXPECT().Outputs().Return(map[string]any{"foo": "bar"}) configureContext(t, ctx) configureCache(t, cch, tc.authorizer, tc.subject) @@ -1259,7 +1272,7 @@ func TestRemoteAuthorizerExecute(t *testing.T) { err := tc.authorizer.Execute(ctx, tc.subject) // THEN - tc.assert(t, err, tc.subject) + tc.assert(t, err, tc.subject, ctx.Outputs()) }) } } diff --git a/internal/rules/mechanisms/cellib/library.go b/internal/rules/mechanisms/cellib/library.go index 183bd215e..94278ba38 100644 --- a/internal/rules/mechanisms/cellib/library.go +++ b/internal/rules/mechanisms/cellib/library.go @@ -49,6 +49,7 @@ func (heimdallLibrary) CompileOptions() []cel.EnvOption { ext.NativeTypes(reflect.TypeOf(&subject.Subject{})), cel.Variable("Payload", cel.DynType), cel.Variable("Subject", cel.DynType), + cel.Variable("Outputs", cel.MapType(cel.StringType, cel.DynType)), } } diff --git a/internal/rules/mechanisms/cellib/requests_test.go b/internal/rules/mechanisms/cellib/requests_test.go index e9bbaaa06..a1913e400 100644 --- a/internal/rules/mechanisms/cellib/requests_test.go +++ 
b/internal/rules/mechanisms/cellib/requests_test.go @@ -38,8 +38,8 @@ func TestRequests(t *testing.T) { ) require.NoError(t, err) - rawURI := "http://localhost/foo/bar?foo=bar&foo=baz&bar=foo" - uri, err := url.Parse("http://localhost/foo/bar?foo=bar&foo=baz&bar=foo") + rawURI := "http://localhost:8080/foo/bar?foo=bar&foo=baz&bar=foo" + uri, err := url.Parse(rawURI) require.NoError(t, err) reqf := mocks.NewRequestFunctionsMock(t) @@ -50,9 +50,12 @@ func TestRequests(t *testing.T) { reqf.EXPECT().Body().Return(map[string]any{"foo": []any{"bar"}}) req := &heimdall.Request{ - RequestFunctions: reqf, - Method: http.MethodHead, - URL: uri, + RequestFunctions: reqf, + Method: http.MethodHead, + URL: &heimdall.URL{ + URL: *uri, + Captures: map[string]string{"foo": "bar"}, + }, ClientIPAddresses: []string{"127.0.0.1"}, } @@ -61,6 +64,11 @@ func TestRequests(t *testing.T) { }{ {expr: `Request.Method == "HEAD"`}, {expr: `Request.URL.String() == "` + rawURI + `"`}, + {expr: `Request.URL.Captures.foo == "bar"`}, + {expr: `Request.URL.Query().bar == ["foo"]`}, + {expr: `Request.URL.Host == "localhost:8080"`}, + {expr: `Request.URL.Hostname() == "localhost"`}, + {expr: `Request.URL.Port() == "8080"`}, {expr: `Request.Cookie("foo") == "bar"`}, {expr: `Request.Header("bar") == "baz"`}, {expr: `Request.Header("zab").contains("bar")`}, diff --git a/internal/rules/mechanisms/cellib/urls.go b/internal/rules/mechanisms/cellib/urls.go index 33f81bb5b..b9f6bf989 100644 --- a/internal/rules/mechanisms/cellib/urls.go +++ b/internal/rules/mechanisms/cellib/urls.go @@ -17,7 +17,6 @@ package cellib import ( - "net/url" "reflect" "github.com/google/cel-go/cel" @@ -25,6 +24,8 @@ import ( "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/ext" + + "github.com/dadrus/heimdall/internal/heimdall" ) func Urls() cel.EnvOption { @@ -42,16 +43,16 @@ func (urlsLib) ProgramOptions() []cel.ProgramOption { } func (urlsLib) CompileOptions() []cel.EnvOption { - urlType := cel.ObjectType(reflect.TypeOf(url.URL{}).String(), traits.ReceiverType) + urlType := cel.ObjectType(reflect.TypeOf(heimdall.URL{}).String(), traits.ReceiverType) return []cel.EnvOption{ - ext.NativeTypes(reflect.TypeOf(&url.URL{})), + ext.NativeTypes(reflect.TypeOf(&heimdall.URL{})), cel.Function("String", cel.MemberOverload("url_String", []*cel.Type{urlType}, cel.StringType, cel.UnaryBinding(func(value ref.Val) ref.Val { // nolint: forcetypeassert - return types.String(value.Value().(*url.URL).String()) + return types.String(value.Value().(*heimdall.URL).String()) }), ), ), @@ -60,7 +61,25 @@ func (urlsLib) CompileOptions() []cel.EnvOption { []*cel.Type{urlType}, cel.MapType(types.StringType, cel.ListType(cel.StringType)), cel.UnaryBinding(func(value ref.Val) ref.Val { // nolint: forcetypeassert - return types.NewDynamicMap(types.DefaultTypeAdapter, value.Value().(*url.URL).Query()) + return types.NewDynamicMap(types.DefaultTypeAdapter, value.Value().(*heimdall.URL).Query()) + }), + ), + ), + cel.Function("Hostname", + cel.MemberOverload("url_Hostname", + []*cel.Type{urlType}, types.StringType, + cel.UnaryBinding(func(value ref.Val) ref.Val { + // nolint: forcetypeassert + return types.String(value.Value().(*heimdall.URL).Hostname()) + }), + ), + ), + cel.Function("Port", + cel.MemberOverload("url_Port", + []*cel.Type{urlType}, types.StringType, + cel.UnaryBinding(func(value ref.Val) ref.Val { + // nolint: forcetypeassert + return 
types.String(value.Value().(*heimdall.URL).Port()) }), ), ), diff --git a/internal/rules/mechanisms/cellib/urls_test.go b/internal/rules/mechanisms/cellib/urls_test.go index 447298c11..30e05663e 100644 --- a/internal/rules/mechanisms/cellib/urls_test.go +++ b/internal/rules/mechanisms/cellib/urls_test.go @@ -22,6 +22,8 @@ import ( "github.com/google/cel-go/cel" "github.com/stretchr/testify/require" + + "github.com/dadrus/heimdall/internal/heimdall" ) func TestUrls(t *testing.T) { @@ -33,8 +35,8 @@ func TestUrls(t *testing.T) { ) require.NoError(t, err) - rawURI := "http://localhost/foo/bar?foo=bar&foo=baz&bar=foo" - uri, err := url.Parse("http://localhost/foo/bar?foo=bar&foo=baz&bar=foo") + rawURI := "http://localhost:8080/foo/bar?foo=bar&foo=baz&bar=foo" + uri, err := url.Parse(rawURI) require.NoError(t, err) for _, tc := range []struct { @@ -43,6 +45,11 @@ func TestUrls(t *testing.T) { {expr: `uri.String() == "` + rawURI + `"`}, {expr: `uri.Query() == {"foo":["bar", "baz"], "bar": ["foo"]}`}, {expr: `uri.Query().bar == ["foo"]`}, + {expr: `uri.Host == "localhost:8080"`}, + {expr: `uri.Hostname() == "localhost"`}, + {expr: `uri.Port() == "8080"`}, + {expr: `uri.Captures.zab == "baz"`}, + {expr: `uri.Path == "/foo/bar"`}, } { t.Run(tc.expr, func(t *testing.T) { ast, iss := env.Compile(tc.expr) @@ -58,7 +65,7 @@ func TestUrls(t *testing.T) { prg, err := env.Program(ast, cel.EvalOptions(cel.OptOptimize)) require.NoError(t, err) - out, _, err := prg.Eval(map[string]any{"uri": uri}) + out, _, err := prg.Eval(map[string]any{"uri": &heimdall.URL{URL: *uri, Captures: map[string]string{"zab": "baz"}}}) require.NoError(t, err) require.Equal(t, true, out.Value()) //nolint:testifylint }) diff --git a/internal/rules/mechanisms/contextualizers/config_decoder.go b/internal/rules/mechanisms/contextualizers/config_decoder.go index 6079557c6..5141fed61 100644 --- a/internal/rules/mechanisms/contextualizers/config_decoder.go +++ b/internal/rules/mechanisms/contextualizers/config_decoder.go @@ -27,11 +27,11 @@ import ( "github.com/dadrus/heimdall/internal/x/errorchain" ) -func decodeConfig(contextualizerType string, input, output any) error { +func decodeConfig(ctx CreationContext, contextualizerType string, input, output any) error { dec, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( - authstrategy.DecodeAuthenticationStrategyHookFunc(), + authstrategy.DecodeAuthenticationStrategyHookFunc(ctx), endpoint.DecodeEndpointHookFunc(), mapstructure.StringToTimeDurationHookFunc(), template.DecodeTemplateHookFunc(), diff --git a/internal/rules/mechanisms/contextualizers/contextualizer_type_registry.go b/internal/rules/mechanisms/contextualizers/contextualizer_type_registry.go index 2d86b4172..ea28f1182 100644 --- a/internal/rules/mechanisms/contextualizers/contextualizer_type_registry.go +++ b/internal/rules/mechanisms/contextualizers/contextualizer_type_registry.go @@ -20,6 +20,9 @@ import ( "errors" "sync" + "github.com/dadrus/heimdall/internal/keyholder" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" ) @@ -27,13 +30,21 @@ var ( ErrUnsupportedContextualizerType = errors.New("contextualizer type unsupported") // by intention. Used only during application bootstrap. 
- typeFactories []ContextualizerTypeFactory //nolint:gochecknoglobals - typeFactoriesMu sync.RWMutex //nolint:gochecknoglobals + typeFactories []TypeFactory //nolint:gochecknoglobals + typeFactoriesMu sync.RWMutex //nolint:gochecknoglobals ) -type ContextualizerTypeFactory func(id string, typ string, c map[string]any) (bool, Contextualizer, error) +//go:generate mockery --name CreationContext --structname CreationContextMock --inpackage --testonly -func registerTypeFactory(factory ContextualizerTypeFactory) { +type CreationContext interface { + Watcher() watcher.Watcher + KeyHolderRegistry() keyholder.Registry + CertificateObserver() certificate.Observer +} + +type TypeFactory func(ctx CreationContext, id string, typ string, c map[string]any) (bool, Contextualizer, error) + +func registerTypeFactory(factory TypeFactory) { typeFactoriesMu.Lock() defer typeFactoriesMu.Unlock() @@ -44,12 +55,12 @@ func registerTypeFactory(factory ContextualizerTypeFactory) { typeFactories = append(typeFactories, factory) } -func CreatePrototype(id string, typ string, config map[string]any) (Contextualizer, error) { +func CreatePrototype(ctx CreationContext, id string, typ string, config map[string]any) (Contextualizer, error) { typeFactoriesMu.RLock() defer typeFactoriesMu.RUnlock() for _, create := range typeFactories { - if ok, at, err := create(id, typ, config); ok { + if ok, at, err := create(ctx, id, typ, config); ok { return at, err } } diff --git a/internal/rules/mechanisms/contextualizers/contextualizer_type_registry_test.go b/internal/rules/mechanisms/contextualizers/contextualizer_type_registry_test.go index 40aa7855a..ef5882f7a 100644 --- a/internal/rules/mechanisms/contextualizers/contextualizer_type_registry_test.go +++ b/internal/rules/mechanisms/contextualizers/contextualizer_type_registry_test.go @@ -58,7 +58,7 @@ func TestCreateContextualzerPrototype(t *testing.T) { } { t.Run("case="+tc.uc, func(t *testing.T) { // WHEN - errorHandler, err := CreatePrototype("foo", tc.typ, nil) + errorHandler, err := CreatePrototype(NewCreationContextMock(t), "foo", tc.typ, nil) // THEN tc.assert(t, err, errorHandler) diff --git a/internal/rules/mechanisms/contextualizers/generic_contextualizer.go b/internal/rules/mechanisms/contextualizers/generic_contextualizer.go index 1045c249c..16dca6434 100644 --- a/internal/rules/mechanisms/contextualizers/generic_contextualizer.go +++ b/internal/rules/mechanisms/contextualizers/generic_contextualizer.go @@ -53,12 +53,12 @@ var errNoContent = errors.New("no payload received") //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Contextualizer, error) { + func(ctx CreationContext, id string, typ string, conf map[string]any) (bool, Contextualizer, error) { if typ != ContextualizerGeneric { return false, nil, nil } - eh, err := newGenericContextualizer(id, conf) + eh, err := newGenericContextualizer(ctx, id, conf) return true, eh, err }) @@ -79,7 +79,11 @@ type genericContextualizer struct { v values.Values } -func newGenericContextualizer(id string, rawConfig map[string]any) (*genericContextualizer, error) { +func newGenericContextualizer( + ctx CreationContext, + id string, + rawConfig map[string]any, +) (*genericContextualizer, error) { type Config struct { Endpoint endpoint.Endpoint `mapstructure:"endpoint" validate:"required"` ForwardHeaders []string `mapstructure:"forward_headers"` @@ -91,7 +95,7 @@ func newGenericContextualizer(id string, rawConfig map[string]any) (*genericCont } var conf Config - if 
err := decodeConfig(ContextualizerGeneric, rawConfig, &conf); err != nil { + if err := decodeConfig(ctx, ContextualizerGeneric, rawConfig, &conf); err != nil { return nil, err } @@ -164,7 +168,7 @@ func (h *genericContextualizer) Execute(ctx heimdall.Context, sub *subject.Subje } if response.Payload != nil { - sub.Attributes[h.id] = response.Payload + ctx.Outputs()[h.id] = response.Payload } return nil @@ -185,7 +189,7 @@ func (h *genericContextualizer) WithConfig(rawConfig map[string]any) (Contextual } var conf Config - if err := decodeConfig(ContextualizerGeneric, rawConfig, &conf); err != nil { + if err := decodeConfig(nil, ContextualizerGeneric, rawConfig, &conf); err != nil { return nil, err } @@ -268,6 +272,7 @@ func (h *genericContextualizer) createRequest( return tpl.Render(map[string]any{ "Subject": sub, "Values": values, + "Outputs": ctx.Outputs(), }) }) @@ -385,6 +390,7 @@ func (h *genericContextualizer) renderTemplates( if values, err = h.v.Render(map[string]any{ "Request": ctx.Request(), "Subject": sub, + "Outputs": ctx.Outputs(), }); err != nil { return nil, "", errorchain.NewWithMessage(heimdall.ErrInternal, "failed to render values for the contextualization endpoint"). @@ -397,6 +403,7 @@ func (h *genericContextualizer) renderTemplates( "Request": ctx.Request(), "Subject": sub, "Values": values, + "Outputs": ctx.Outputs(), }); err != nil { return nil, "", errorchain.NewWithMessage(heimdall.ErrInternal, "failed to render payload for the contextualization endpoint"). diff --git a/internal/rules/mechanisms/contextualizers/generic_contextualizer_test.go b/internal/rules/mechanisms/contextualizers/generic_contextualizer_test.go index 94d7704bc..1724bc7b5 100644 --- a/internal/rules/mechanisms/contextualizers/generic_contextualizer_test.go +++ b/internal/rules/mechanisms/contextualizers/generic_contextualizer_test.go @@ -166,7 +166,7 @@ continue_pipeline_on_error: true require.NoError(t, err) // WHEN - contextualizer, err := newGenericContextualizer(tc.id, conf) + contextualizer, err := newGenericContextualizer(nil, tc.id, conf) // THEN tc.assert(t, err, contextualizer) @@ -473,7 +473,7 @@ continue_pipeline_on_error: false conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) - prototype, err := newGenericContextualizer(tc.id, pc) + prototype, err := newGenericContextualizer(nil, tc.id, pc) require.NoError(t, err) // WHEN @@ -531,12 +531,12 @@ func TestGenericContextualizerExecute(t *testing.T) { configureContext func(t *testing.T, ctx *heimdallmocks.ContextMock) configureCache func(t *testing.T, cch *mocks.CacheMock, contextualizer *genericContextualizer, sub *subject.Subject) - assert func(t *testing.T, err error, sub *subject.Subject) + assert func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) }{ { uc: "fails due to nil subject", contextualizer: &genericContextualizer{id: "contextualizer", e: endpoint.Endpoint{URL: srv.URL}}, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() assert.False(t, remoteEndpointCalled) @@ -578,14 +578,18 @@ func TestGenericContextualizerExecute(t *testing.T) { cch.EXPECT().Get(mock.Anything, mock.Anything).Return(rawData, nil) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() assert.False(t, remoteEndpointCalled) require.NoError(t, err) - assert.Len(t, sub.Attributes, 2) - assert.Equal(t, 
"Hi Foo", sub.Attributes["contextualizer"]) + assert.Len(t, sub.Attributes, 1) + assert.Equal(t, "baz", sub.Attributes["bar"]) + + assert.Len(t, outputs, 2) + assert.Equal(t, "Hi Foo", outputs["contextualizer"]) + assert.Equal(t, "bar", outputs["foo"]) }, }, { @@ -606,7 +610,7 @@ func TestGenericContextualizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() assert.False(t, remoteEndpointCalled) @@ -638,7 +642,7 @@ func TestGenericContextualizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() assert.False(t, remoteEndpointCalled) @@ -664,7 +668,7 @@ func TestGenericContextualizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() assert.False(t, remoteEndpointCalled) @@ -695,7 +699,7 @@ func TestGenericContextualizerExecute(t *testing.T) { ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, err error, _ *subject.Subject) { + assert: func(t *testing.T, err error, _ *subject.Subject, _ map[string]any) { t.Helper() assert.True(t, remoteEndpointCalled) @@ -747,7 +751,7 @@ func TestGenericContextualizerExecute(t *testing.T) { cch.EXPECT().Get(mock.Anything, mock.Anything).Return(nil, errors.New("no cache entry")) cch.EXPECT().Set(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() assert.True(t, remoteEndpointCalled) @@ -755,6 +759,7 @@ func TestGenericContextualizerExecute(t *testing.T) { require.NoError(t, err) assert.Len(t, sub.Attributes, 1) + assert.Len(t, outputs, 1) }, }, { @@ -796,14 +801,16 @@ func TestGenericContextualizerExecute(t *testing.T) { responseContent = []byte(`Hi from endpoint`) responseCode = http.StatusOK }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() assert.True(t, remoteEndpointCalled) require.NoError(t, err) - assert.Len(t, sub.Attributes, 2) + assert.Len(t, sub.Attributes, 1) + assert.Len(t, outputs, 2) + assert.Equal(t, "Hi from endpoint", outputs["test-contextualizer"]) }, }, { @@ -811,11 +818,12 @@ func TestGenericContextualizerExecute(t *testing.T) { contextualizer: &genericContextualizer{ id: "test-contextualizer", e: endpoint.Endpoint{ - URL: srv.URL + "/{{ .Subject.ID }}", + URL: srv.URL + "/{{ .Subject.ID }}/{{ .Outputs.foo }}", Headers: map[string]string{ "Content-Type": "application/json", "Accept": "application/json", "X-Bar": "{{ .Subject.Attributes.bar }}", + "X-Foo": "{{ .Outputs.foo }}", }, }, v: func() values.Values { @@ -827,7 +835,8 @@ func TestGenericContextualizerExecute(t *testing.T) { tpl, _ := template.New(` { "user_id": {{ quote .Subject.ID }}, - "value": {{ quote .Values.foo }} + "value": {{ quote .Values.foo }}, + "foo": {{ quote .Outputs.foo }} } `) @@ -843,10 +852,11 @@ func TestGenericContextualizerExecute(t *testing.T) { checkRequest = func(req *http.Request) { t.Helper() - assert.Equal(t, "/Foo", req.URL.Path) + assert.Equal(t, "/Foo/bar", 
req.URL.Path) assert.Equal(t, "application/json", req.Header.Get("Content-Type")) assert.Equal(t, "application/json", req.Header.Get("Accept")) assert.Equal(t, "baz", req.Header.Get("X-Bar")) + assert.Equal(t, "bar", req.Header.Get("X-Foo")) assert.Equal(t, "Hi Foo", req.Header.Get("X-Bar-Foo")) cookie, err := req.Cookie("X-Foo-Session") require.NoError(t, err) @@ -855,7 +865,7 @@ func TestGenericContextualizerExecute(t *testing.T) { content, err := io.ReadAll(req.Body) require.NoError(t, err) - assert.JSONEq(t, `{"user_id": "Foo", "value": "bar"}`, string(content)) + assert.JSONEq(t, `{"user_id": "Foo", "value": "bar", "foo":"bar"}`, string(content)) } responseContentType = "application/json" @@ -873,18 +883,20 @@ func TestGenericContextualizerExecute(t *testing.T) { &heimdall.Request{ RequestFunctions: reqf, Method: http.MethodPost, - URL: &url.URL{Scheme: "http", Host: "foobar.baz", Path: "zab"}, + URL: &heimdall.URL{URL: url.URL{Scheme: "http", Host: "foobar.baz", Path: "zab"}}, }) }, - assert: func(t *testing.T, err error, sub *subject.Subject) { + assert: func(t *testing.T, err error, sub *subject.Subject, outputs map[string]any) { t.Helper() assert.True(t, remoteEndpointCalled) require.NoError(t, err) - assert.Len(t, sub.Attributes, 2) - entry := sub.Attributes["test-contextualizer"] + assert.Len(t, sub.Attributes, 1) + + assert.Len(t, outputs, 2) + entry := outputs["test-contextualizer"] assert.Len(t, entry, 1) assert.Contains(t, entry, "baz") }, @@ -916,6 +928,7 @@ func TestGenericContextualizerExecute(t *testing.T) { ctx := heimdallmocks.NewContextMock(t) ctx.EXPECT().AppContext().Return(cache.WithContext(context.Background(), cch)) + ctx.EXPECT().Outputs().Return(map[string]any{"foo": "bar"}) configureContext(t, ctx) configureCache(t, cch, tc.contextualizer, tc.subject) @@ -925,7 +938,7 @@ func TestGenericContextualizerExecute(t *testing.T) { err := tc.contextualizer.Execute(ctx, tc.subject) // THEN - tc.assert(t, err, tc.subject) + tc.assert(t, err, tc.subject, ctx.Outputs()) }) } } diff --git a/internal/rules/mechanisms/contextualizers/mock_creation_context_test.go b/internal/rules/mechanisms/contextualizers/mock_creation_context_test.go new file mode 100644 index 000000000..8c3a0bbce --- /dev/null +++ b/internal/rules/mechanisms/contextualizers/mock_creation_context_test.go @@ -0,0 +1,180 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. 
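
An aside before the generated `CreationContext` mock that follows: the contextualizer registry changes above thread a `CreationContext` (file watcher, key holder registry, certificate observer) through `CreatePrototype` into every type factory. A rough usage sketch; the mechanism id, endpoint URL, and config keys are invented for illustration — only the call signature and the `ContextualizerGeneric` constant appear in the diff:

```go
// cctx is anything implementing CreationContext; the tests in this change set
// use the generated CreationContextMock shown below for that purpose.
prototype, err := contextualizers.CreatePrototype(
	cctx,
	"my_contextualizer",                   // hypothetical mechanism id
	contextualizers.ContextualizerGeneric, // type constant from the change set
	map[string]any{                        // hypothetical config, keys assumed
		"endpoint": map[string]any{"url": "https://context.service.local"},
	},
)
```

Factories that have no use for the context simply ignore it, as the deny authorizer and the default error handler do in this change set with a `_ CreationContext` parameter.
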
+ +package contextualizers + +import ( + keyholder "github.com/dadrus/heimdall/internal/keyholder" + certificate "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + + mock "github.com/stretchr/testify/mock" + + watcher "github.com/dadrus/heimdall/internal/watcher" +) + +// CreationContextMock is an autogenerated mock type for the CreationContext type +type CreationContextMock struct { + mock.Mock +} + +type CreationContextMock_Expecter struct { + mock *mock.Mock +} + +func (_m *CreationContextMock) EXPECT() *CreationContextMock_Expecter { + return &CreationContextMock_Expecter{mock: &_m.Mock} +} + +// CertificateObserver provides a mock function with given fields: +func (_m *CreationContextMock) CertificateObserver() certificate.Observer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CertificateObserver") + } + + var r0 certificate.Observer + if rf, ok := ret.Get(0).(func() certificate.Observer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(certificate.Observer) + } + } + + return r0 +} + +// CreationContextMock_CertificateObserver_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CertificateObserver' +type CreationContextMock_CertificateObserver_Call struct { + *mock.Call +} + +// CertificateObserver is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) CertificateObserver() *CreationContextMock_CertificateObserver_Call { + return &CreationContextMock_CertificateObserver_Call{Call: _e.mock.On("CertificateObserver")} +} + +func (_c *CreationContextMock_CertificateObserver_Call) Run(run func()) *CreationContextMock_CertificateObserver_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) Return(_a0 certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) RunAndReturn(run func() certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(run) + return _c +} + +// KeyHolderRegistry provides a mock function with given fields: +func (_m *CreationContextMock) KeyHolderRegistry() keyholder.Registry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for KeyHolderRegistry") + } + + var r0 keyholder.Registry + if rf, ok := ret.Get(0).(func() keyholder.Registry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keyholder.Registry) + } + } + + return r0 +} + +// CreationContextMock_KeyHolderRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'KeyHolderRegistry' +type CreationContextMock_KeyHolderRegistry_Call struct { + *mock.Call +} + +// KeyHolderRegistry is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) KeyHolderRegistry() *CreationContextMock_KeyHolderRegistry_Call { + return &CreationContextMock_KeyHolderRegistry_Call{Call: _e.mock.On("KeyHolderRegistry")} +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Run(run func()) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Return(_a0 keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) 
RunAndReturn(run func() keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(run) + return _c +} + +// Watcher provides a mock function with given fields: +func (_m *CreationContextMock) Watcher() watcher.Watcher { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Watcher") + } + + var r0 watcher.Watcher + if rf, ok := ret.Get(0).(func() watcher.Watcher); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(watcher.Watcher) + } + } + + return r0 +} + +// CreationContextMock_Watcher_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Watcher' +type CreationContextMock_Watcher_Call struct { + *mock.Call +} + +// Watcher is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) Watcher() *CreationContextMock_Watcher_Call { + return &CreationContextMock_Watcher_Call{Call: _e.mock.On("Watcher")} +} + +func (_c *CreationContextMock_Watcher_Call) Run(run func()) *CreationContextMock_Watcher_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) Return(_a0 watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) RunAndReturn(run func() watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(run) + return _c +} + +// NewCreationContextMock creates a new instance of CreationContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCreationContextMock(t interface { + mock.TestingT + Cleanup(func()) +}) *CreationContextMock { + mock := &CreationContextMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/rules/mechanisms/errorhandlers/base_error_handler.go b/internal/rules/mechanisms/errorhandlers/base_error_handler.go deleted file mode 100644 index 548375ef9..000000000 --- a/internal/rules/mechanisms/errorhandlers/base_error_handler.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2023 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package errorhandlers - -import ( - "errors" - - "github.com/google/cel-go/cel" - "github.com/rs/zerolog" - - "github.com/dadrus/heimdall/internal/heimdall" - "github.com/dadrus/heimdall/internal/rules/mechanisms/cellib" - "github.com/dadrus/heimdall/internal/x/errorchain" -) - -func newBaseErrorHandler(id, conditionExpression string) (*baseErrorHandler, error) { - env, err := cel.NewEnv(cellib.Library()) - if err != nil { - return nil, errorchain.NewWithMessage(heimdall.ErrInternal, "failed creating CEL environment").CausedBy(err) - } - - condition, err := cellib.CompileExpression(env, conditionExpression, "condition failed") - if err != nil { - return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "failed to compile %s condition", conditionExpression).CausedBy(err) - } - - return &baseErrorHandler{id: id, c: condition}, nil -} - -type baseErrorHandler struct { - id string - c *cellib.CompiledExpression -} - -func (eh *baseErrorHandler) ID() string { return eh.id } - -func (eh *baseErrorHandler) CanExecute(ctx heimdall.Context, cause error) bool { - logger := zerolog.Ctx(ctx.AppContext()) - logger.Debug().Str("_id", eh.id).Msg("Checking error handler applicability") - - err := eh.c.Eval(map[string]any{"Request": ctx.Request(), "Error": cellib.WrapError(cause)}) - if err != nil { - if errors.Is(err, &cellib.EvalError{}) { - logger.Debug().Err(err).Str("_id", eh.id).Msg("Error handler not applicable") - } else { - logger.Error().Err(err).Str("_id", eh.id).Msg("Failed checking error handler applicability") - } - - return false - } - - return true -} diff --git a/internal/rules/mechanisms/errorhandlers/base_error_handler_test.go b/internal/rules/mechanisms/errorhandlers/base_error_handler_test.go deleted file mode 100644 index 0e93ac8da..000000000 --- a/internal/rules/mechanisms/errorhandlers/base_error_handler_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2023 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package errorhandlers - -import ( - "context" - "net/http" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/dadrus/heimdall/internal/heimdall" - "github.com/dadrus/heimdall/internal/heimdall/mocks" -) - -func TestNewBaseErrorHandler(t *testing.T) { - t.Parallel() - - for _, tc := range []struct { - expression string - error bool - }{ - {"true == true", false}, - {"foo == true", true}, - } { - t.Run(tc.expression, func(t *testing.T) { - base, err := newBaseErrorHandler("test", tc.expression) - - if tc.error { - require.Error(t, err) - require.Nil(t, base) - } else { - require.NoError(t, err) - require.NotNil(t, base) - assert.Equal(t, "test", base.ID()) - } - }) - } -} - -func TestBaseErrorHandlerCanExecute(t *testing.T) { - t.Parallel() - - for _, tc := range []struct { - expression string - req *heimdall.Request - cause error - expect bool - }{ - {"type(Error) == precondition_error", nil, heimdall.ErrAuthorization, false}, - {"Request.Method == 'GET'", &heimdall.Request{Method: http.MethodGet}, heimdall.ErrArgument, true}, - {"Request.URL == 'http://foo.bar'", nil, heimdall.ErrArgument, false}, - } { - t.Run(tc.expression, func(t *testing.T) { - // GIVEN - mctx := mocks.NewContextMock(t) - mctx.EXPECT().AppContext().Return(context.TODO()) - mctx.EXPECT().Request().Return(tc.req) - - base, err := newBaseErrorHandler("test", tc.expression) - require.NoError(t, err) - - // WHEN - result := base.CanExecute(mctx, tc.cause) - - // THEN - assert.Equal(t, tc.expect, result) - }) - } -} diff --git a/internal/rules/mechanisms/errorhandlers/default_error_handler.go b/internal/rules/mechanisms/errorhandlers/default_error_handler.go index 7e08ec120..9455ab256 100644 --- a/internal/rules/mechanisms/errorhandlers/default_error_handler.go +++ b/internal/rules/mechanisms/errorhandlers/default_error_handler.go @@ -20,6 +20,7 @@ import ( "github.com/rs/zerolog" "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/x/errorchain" ) // by intention. 
Used only during application bootstrap @@ -27,7 +28,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, _ map[string]any) (bool, ErrorHandler, error) { + func(_ CreationContext, id string, typ string, _ map[string]any) (bool, ErrorHandler, error) { if typ != ErrorHandlerDefault { return false, nil, nil } @@ -44,8 +45,6 @@ func newDefaultErrorHandler(id string) *defaultErrorHandler { return &defaultErrorHandler{id: id} } -func (eh *defaultErrorHandler) CanExecute(_ heimdall.Context, _ error) bool { return true } - func (eh *defaultErrorHandler) Execute(ctx heimdall.Context, causeErr error) error { logger := zerolog.Ctx(ctx.AppContext()) logger.Debug().Str("_id", eh.id).Msg("Handling error using default error handler") @@ -55,6 +54,13 @@ func (eh *defaultErrorHandler) Execute(ctx heimdall.Context, causeErr error) err return nil } -func (eh *defaultErrorHandler) WithConfig(_ map[string]any) (ErrorHandler, error) { return eh, nil } +func (eh *defaultErrorHandler) WithConfig(conf map[string]any) (ErrorHandler, error) { + if len(conf) != 0 { + return nil, errorchain.NewWithMessage(heimdall.ErrConfiguration, + "reconfiguration of the default error handler is not supported") + } + + return eh, nil +} func (eh *defaultErrorHandler) ID() string { return eh.id } diff --git a/internal/rules/mechanisms/errorhandlers/default_error_handler_test.go b/internal/rules/mechanisms/errorhandlers/default_error_handler_test.go index c7cc2f9f3..acdc2a60c 100644 --- a/internal/rules/mechanisms/errorhandlers/default_error_handler_test.go +++ b/internal/rules/mechanisms/errorhandlers/default_error_handler_test.go @@ -38,7 +38,6 @@ func TestDefaultErrorHandlerExecution(t *testing.T) { errorHandler := newDefaultErrorHandler("foo") // WHEN & THEN - require.True(t, errorHandler.CanExecute(nil, nil)) require.NoError(t, errorHandler.Execute(ctx, heimdall.ErrConfiguration)) } @@ -47,16 +46,22 @@ func TestDefaultErrorHandlerPrototype(t *testing.T) { // GIVEN prototype := newDefaultErrorHandler("foo") + assert.Equal(t, "foo", prototype.ID()) // WHEN eh1, err1 := prototype.WithConfig(nil) eh2, err2 := prototype.WithConfig(map[string]any{"foo": "bar"}) + eh3, err3 := prototype.WithConfig(map[string]any{}) // THEN require.NoError(t, err1) assert.Equal(t, prototype, eh1) - require.NoError(t, err2) - assert.Equal(t, prototype, eh2) - assert.Equal(t, "foo", prototype.ID()) + require.Error(t, err2) + require.ErrorIs(t, err2, heimdall.ErrConfiguration) + require.ErrorContains(t, err2, "reconfiguration of the default error handler is not supported") + assert.Nil(t, eh2) + + require.NoError(t, err3) + assert.Equal(t, prototype, eh3) } diff --git a/internal/rules/mechanisms/errorhandlers/error_handler.go b/internal/rules/mechanisms/errorhandlers/error_handler.go index b80705b55..47f9c0011 100644 --- a/internal/rules/mechanisms/errorhandlers/error_handler.go +++ b/internal/rules/mechanisms/errorhandlers/error_handler.go @@ -24,7 +24,6 @@ import ( type ErrorHandler interface { ID() string - CanExecute(ctx heimdall.Context, causeErr error) bool Execute(ctx heimdall.Context, causeErr error) error WithConfig(config map[string]any) (ErrorHandler, error) } diff --git a/internal/rules/mechanisms/errorhandlers/error_handler_type_registry.go b/internal/rules/mechanisms/errorhandlers/error_handler_type_registry.go index e2895e329..46f7a2ea1 100644 --- a/internal/rules/mechanisms/errorhandlers/error_handler_type_registry.go +++ 
b/internal/rules/mechanisms/errorhandlers/error_handler_type_registry.go @@ -20,19 +20,30 @@ import ( "errors" "sync" + "github.com/dadrus/heimdall/internal/keyholder" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" ) var ( ErrUnsupportedErrorHandlerType = errors.New("error handler type unsupported") - errorHandlerTypeFactories []ErrorHandlerTypeFactory // nolint: gochecknoglobals - errorHandlerTypeFactoriesMu sync.RWMutex // nolint: gochecknoglobals + errorHandlerTypeFactories []TypeFactory // nolint: gochecknoglobals + errorHandlerTypeFactoriesMu sync.RWMutex // nolint: gochecknoglobals ) -type ErrorHandlerTypeFactory func(id string, typ string, c map[string]any) (bool, ErrorHandler, error) +//go:generate mockery --name CreationContext --structname CreationContextMock --inpackage --testonly -func registerTypeFactory(factory ErrorHandlerTypeFactory) { +type CreationContext interface { + Watcher() watcher.Watcher + KeyHolderRegistry() keyholder.Registry + CertificateObserver() certificate.Observer +} + +type TypeFactory func(ctx CreationContext, id string, typ string, c map[string]any) (bool, ErrorHandler, error) + +func registerTypeFactory(factory TypeFactory) { errorHandlerTypeFactoriesMu.Lock() defer errorHandlerTypeFactoriesMu.Unlock() @@ -43,12 +54,12 @@ func registerTypeFactory(factory ErrorHandlerTypeFactory) { errorHandlerTypeFactories = append(errorHandlerTypeFactories, factory) } -func CreatePrototype(id string, typ string, config map[string]any) (ErrorHandler, error) { +func CreatePrototype(ctx CreationContext, id string, typ string, config map[string]any) (ErrorHandler, error) { errorHandlerTypeFactoriesMu.RLock() defer errorHandlerTypeFactoriesMu.RUnlock() for _, create := range errorHandlerTypeFactories { - if ok, at, err := create(id, typ, config); ok { + if ok, at, err := create(ctx, id, typ, config); ok { return at, err } } diff --git a/internal/rules/mechanisms/errorhandlers/error_handler_type_registry_test.go b/internal/rules/mechanisms/errorhandlers/error_handler_type_registry_test.go index 3427b5292..1bd8eea4b 100644 --- a/internal/rules/mechanisms/errorhandlers/error_handler_type_registry_test.go +++ b/internal/rules/mechanisms/errorhandlers/error_handler_type_registry_test.go @@ -57,7 +57,7 @@ func TestCreateErrorHandlerPrototypePrototype(t *testing.T) { } { t.Run("case="+tc.uc, func(t *testing.T) { // WHEN - errorHandler, err := CreatePrototype("foo", tc.typ, nil) + errorHandler, err := CreatePrototype(NewCreationContextMock(t), "foo", tc.typ, nil) // THEN tc.assert(t, err, errorHandler) diff --git a/internal/rules/mechanisms/errorhandlers/mock_creation_context_test.go b/internal/rules/mechanisms/errorhandlers/mock_creation_context_test.go new file mode 100644 index 000000000..b7662c2ac --- /dev/null +++ b/internal/rules/mechanisms/errorhandlers/mock_creation_context_test.go @@ -0,0 +1,180 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. 
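
Before the next generated mock, a short note on the error handler changes above: `CanExecute` is gone from the `ErrorHandler` interface, and `WithConfig` on the default error handler (and, further below, on the redirect error handler) now rejects any non-empty override instead of silently ignoring it. A sketch of the new contract, derived from the updated test; the id `default_eh` is an arbitrary example:

```go
prototype := newDefaultErrorHandler("default_eh") // hypothetical id

eh, err := prototype.WithConfig(nil)             // nil config: prototype reused, err == nil
eh, err = prototype.WithConfig(map[string]any{}) // empty map: same behaviour

// any actual override is now rejected with heimdall.ErrConfiguration
// ("reconfiguration of the default error handler is not supported")
eh, err = prototype.WithConfig(map[string]any{"foo": "bar"}) // eh == nil, err != nil
_, _ = eh, err
```

With the `baseErrorHandler` (deleted above) gone, condition evaluation is no longer a concern of the individual error handlers.
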
+ +package errorhandlers + +import ( + keyholder "github.com/dadrus/heimdall/internal/keyholder" + certificate "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + + mock "github.com/stretchr/testify/mock" + + watcher "github.com/dadrus/heimdall/internal/watcher" +) + +// CreationContextMock is an autogenerated mock type for the CreationContext type +type CreationContextMock struct { + mock.Mock +} + +type CreationContextMock_Expecter struct { + mock *mock.Mock +} + +func (_m *CreationContextMock) EXPECT() *CreationContextMock_Expecter { + return &CreationContextMock_Expecter{mock: &_m.Mock} +} + +// CertificateObserver provides a mock function with given fields: +func (_m *CreationContextMock) CertificateObserver() certificate.Observer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CertificateObserver") + } + + var r0 certificate.Observer + if rf, ok := ret.Get(0).(func() certificate.Observer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(certificate.Observer) + } + } + + return r0 +} + +// CreationContextMock_CertificateObserver_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CertificateObserver' +type CreationContextMock_CertificateObserver_Call struct { + *mock.Call +} + +// CertificateObserver is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) CertificateObserver() *CreationContextMock_CertificateObserver_Call { + return &CreationContextMock_CertificateObserver_Call{Call: _e.mock.On("CertificateObserver")} +} + +func (_c *CreationContextMock_CertificateObserver_Call) Run(run func()) *CreationContextMock_CertificateObserver_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) Return(_a0 certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) RunAndReturn(run func() certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(run) + return _c +} + +// KeyHolderRegistry provides a mock function with given fields: +func (_m *CreationContextMock) KeyHolderRegistry() keyholder.Registry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for KeyHolderRegistry") + } + + var r0 keyholder.Registry + if rf, ok := ret.Get(0).(func() keyholder.Registry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keyholder.Registry) + } + } + + return r0 +} + +// CreationContextMock_KeyHolderRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'KeyHolderRegistry' +type CreationContextMock_KeyHolderRegistry_Call struct { + *mock.Call +} + +// KeyHolderRegistry is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) KeyHolderRegistry() *CreationContextMock_KeyHolderRegistry_Call { + return &CreationContextMock_KeyHolderRegistry_Call{Call: _e.mock.On("KeyHolderRegistry")} +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Run(run func()) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Return(_a0 keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) 
RunAndReturn(run func() keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(run) + return _c +} + +// Watcher provides a mock function with given fields: +func (_m *CreationContextMock) Watcher() watcher.Watcher { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Watcher") + } + + var r0 watcher.Watcher + if rf, ok := ret.Get(0).(func() watcher.Watcher); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(watcher.Watcher) + } + } + + return r0 +} + +// CreationContextMock_Watcher_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Watcher' +type CreationContextMock_Watcher_Call struct { + *mock.Call +} + +// Watcher is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) Watcher() *CreationContextMock_Watcher_Call { + return &CreationContextMock_Watcher_Call{Call: _e.mock.On("Watcher")} +} + +func (_c *CreationContextMock_Watcher_Call) Run(run func()) *CreationContextMock_Watcher_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) Return(_a0 watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) RunAndReturn(run func() watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(run) + return _c +} + +// NewCreationContextMock creates a new instance of CreationContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCreationContextMock(t interface { + mock.TestingT + Cleanup(func()) +}) *CreationContextMock { + mock := &CreationContextMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/rules/mechanisms/errorhandlers/mocks/error_handler.go b/internal/rules/mechanisms/errorhandlers/mocks/error_handler.go index f9d83a31f..8517265d9 100644 --- a/internal/rules/mechanisms/errorhandlers/mocks/error_handler.go +++ b/internal/rules/mechanisms/errorhandlers/mocks/error_handler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. 
package mocks @@ -22,53 +22,14 @@ func (_m *ErrorHandlerMock) EXPECT() *ErrorHandlerMock_Expecter { return &ErrorHandlerMock_Expecter{mock: &_m.Mock} } -// CanExecute provides a mock function with given fields: ctx, causeErr -func (_m *ErrorHandlerMock) CanExecute(ctx heimdall.Context, causeErr error) bool { - ret := _m.Called(ctx, causeErr) - - var r0 bool - if rf, ok := ret.Get(0).(func(heimdall.Context, error) bool); ok { - r0 = rf(ctx, causeErr) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// ErrorHandlerMock_CanExecute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CanExecute' -type ErrorHandlerMock_CanExecute_Call struct { - *mock.Call -} - -// CanExecute is a helper method to define mock.On call -// - ctx heimdall.Context -// - causeErr error -func (_e *ErrorHandlerMock_Expecter) CanExecute(ctx interface{}, causeErr interface{}) *ErrorHandlerMock_CanExecute_Call { - return &ErrorHandlerMock_CanExecute_Call{Call: _e.mock.On("CanExecute", ctx, causeErr)} -} - -func (_c *ErrorHandlerMock_CanExecute_Call) Run(run func(ctx heimdall.Context, causeErr error)) *ErrorHandlerMock_CanExecute_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(heimdall.Context), args[1].(error)) - }) - return _c -} - -func (_c *ErrorHandlerMock_CanExecute_Call) Return(_a0 bool) *ErrorHandlerMock_CanExecute_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ErrorHandlerMock_CanExecute_Call) RunAndReturn(run func(heimdall.Context, error) bool) *ErrorHandlerMock_CanExecute_Call { - _c.Call.Return(run) - return _c -} - // Execute provides a mock function with given fields: ctx, causeErr func (_m *ErrorHandlerMock) Execute(ctx heimdall.Context, causeErr error) error { ret := _m.Called(ctx, causeErr) + if len(ret) == 0 { + panic("no return value specified for Execute") + } + var r0 error if rf, ok := ret.Get(0).(func(heimdall.Context, error) error); ok { r0 = rf(ctx, causeErr) @@ -112,6 +73,10 @@ func (_c *ErrorHandlerMock_Execute_Call) RunAndReturn(run func(heimdall.Context, func (_m *ErrorHandlerMock) ID() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ID") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -153,6 +118,10 @@ func (_c *ErrorHandlerMock_ID_Call) RunAndReturn(run func() string) *ErrorHandle func (_m *ErrorHandlerMock) WithConfig(config map[string]interface{}) (errorhandlers.ErrorHandler, error) { ret := _m.Called(config) + if len(ret) == 0 { + panic("no return value specified for WithConfig") + } + var r0 errorhandlers.ErrorHandler var r1 error if rf, ok := ret.Get(0).(func(map[string]interface{}) (errorhandlers.ErrorHandler, error)); ok { @@ -203,13 +172,12 @@ func (_c *ErrorHandlerMock_WithConfig_Call) RunAndReturn(run func(map[string]int return _c } -type mockConstructorTestingTNewErrorHandlerMock interface { +// NewErrorHandlerMock creates a new instance of ErrorHandlerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewErrorHandlerMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewErrorHandlerMock creates a new instance of ErrorHandlerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewErrorHandlerMock(t mockConstructorTestingTNewErrorHandlerMock) *ErrorHandlerMock { +}) *ErrorHandlerMock { mock := &ErrorHandlerMock{} mock.Mock.Test(t) diff --git a/internal/rules/mechanisms/errorhandlers/redirect_error_handler.go b/internal/rules/mechanisms/errorhandlers/redirect_error_handler.go index 4c38db361..9fc0cb9a2 100644 --- a/internal/rules/mechanisms/errorhandlers/redirect_error_handler.go +++ b/internal/rules/mechanisms/errorhandlers/redirect_error_handler.go @@ -32,7 +32,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, ErrorHandler, error) { + func(_ CreationContext, id string, typ string, conf map[string]any) (bool, ErrorHandler, error) { if typ != ErrorHandlerRedirect { return false, nil, nil } @@ -44,17 +44,15 @@ func init() { } type redirectErrorHandler struct { - *baseErrorHandler - + id string to template.Template code int } func newRedirectErrorHandler(id string, rawConfig map[string]any) (*redirectErrorHandler, error) { type Config struct { - Condition string `mapstructure:"if" validate:"required"` - To template.Template `mapstructure:"to" validate:"required"` - Code int `mapstructure:"code"` + To template.Template `mapstructure:"to" validate:"required"` + Code int `mapstructure:"code"` } var conf Config @@ -62,18 +60,15 @@ func newRedirectErrorHandler(id string, rawConfig map[string]any) (*redirectErro return nil, err } - base, err := newBaseErrorHandler(id, conf.Condition) - if err != nil { - return nil, err - } - return &redirectErrorHandler{ - baseErrorHandler: base, - to: conf.To, - code: x.IfThenElse(conf.Code != 0, conf.Code, http.StatusFound), + id: id, + to: conf.To, + code: x.IfThenElse(conf.Code != 0, conf.Code, http.StatusFound), }, nil } +func (eh *redirectErrorHandler) ID() string { return eh.id } + func (eh *redirectErrorHandler) Execute(ctx heimdall.Context, _ error) error { logger := zerolog.Ctx(ctx.AppContext()) logger.Debug().Str("_id", eh.id).Msg("Handling error using redirect error handler") @@ -95,28 +90,11 @@ func (eh *redirectErrorHandler) Execute(ctx heimdall.Context, _ error) error { return nil } -func (eh *redirectErrorHandler) WithConfig(rawConfig map[string]any) (ErrorHandler, error) { - if len(rawConfig) == 0 { - return eh, nil - } - - type Config struct { - Condition string `mapstructure:"if" validate:"required"` - } - - var conf Config - if err := decodeConfig(ErrorHandlerRedirect, rawConfig, &conf); err != nil { - return nil, err - } - - base, err := newBaseErrorHandler(eh.id, conf.Condition) - if err != nil { - return nil, err +func (eh *redirectErrorHandler) WithConfig(conf map[string]any) (ErrorHandler, error) { + if len(conf) != 0 { + return nil, errorchain.NewWithMessage(heimdall.ErrConfiguration, + "reconfiguration of a redirect error handler is not supported") } - return &redirectErrorHandler{ - baseErrorHandler: base, - to: eh.to, - code: eh.code, - }, nil + return eh, nil } diff --git a/internal/rules/mechanisms/errorhandlers/redirect_error_handler_test.go b/internal/rules/mechanisms/errorhandlers/redirect_error_handler_test.go index 983c47b27..596591679 100644 --- a/internal/rules/mechanisms/errorhandlers/redirect_error_handler_test.go +++ b/internal/rules/mechanisms/errorhandlers/redirect_error_handler_test.go @@ -40,7 +40,7 @@ func TestCreateRedirectErrorHandler(t *testing.T) { assert func(t *testing.T, err error, redEH *redirectErrorHandler) }{ { - uc: "configuration without required 'To' parameter", + uc: "configuration without 
required 'to' parameter", config: []byte(`code: 302`), assert: func(t *testing.T, err error, _ *redirectErrorHandler) { t.Helper() @@ -50,50 +50,10 @@ func TestCreateRedirectErrorHandler(t *testing.T) { assert.Contains(t, err.Error(), "'to' is a required field") }, }, - { - uc: "configuration without required 'if' parameter", - config: []byte(`to: http://foo.bar`), - assert: func(t *testing.T, err error, _ *redirectErrorHandler) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "'if' is a required field") - }, - }, - { - uc: "with empty 'if' configuration", - config: []byte(` -to: http://foo.bar -if: "" -`), - assert: func(t *testing.T, err error, _ *redirectErrorHandler) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "'if' is a required field") - }, - }, - { - uc: "with invalid 'if' conditions configuration", - config: []byte(` -to: http://foo.bar -if: foo -`), - assert: func(t *testing.T, err error, _ *redirectErrorHandler) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "failed to compile") - }, - }, { uc: "with unexpected fields in configuration", config: []byte(` to: http://foo.bar -bar: foo if: true == false `), assert: func(t *testing.T, err error, _ *redirectErrorHandler) { @@ -105,11 +65,8 @@ if: true == false }, }, { - uc: "with minimal valid configuration", - config: []byte(` -to: http://foo.bar -if: Error.Source == "foo" -`), + uc: "with minimal valid configuration", + config: []byte(`to: http://foo.bar`), assert: func(t *testing.T, err error, redEH *redirectErrorHandler) { t.Helper() @@ -122,7 +79,6 @@ if: Error.Source == "foo" assert.Equal(t, "http://foo.bar", toURL) assert.Equal(t, http.StatusFound, redEH.code) - assert.NotNil(t, redEH.c) }, }, { @@ -130,7 +86,6 @@ if: Error.Source == "foo" config: []byte(` to: http://foo.bar?origin={{ .Request.URL | urlenc }} code: 301 -if: type(Error) == authentication_error `), assert: func(t *testing.T, err error, redEH *redirectErrorHandler) { t.Helper() @@ -141,7 +96,9 @@ if: type(Error) == authentication_error ctx := mocks.NewContextMock(t) ctx.EXPECT().Request(). 
- Return(&heimdall.Request{URL: &url.URL{Scheme: "http", Host: "foobar.baz", Path: "zab"}}) + Return(&heimdall.Request{ + URL: &heimdall.URL{URL: url.URL{Scheme: "http", Host: "foobar.baz", Path: "zab"}}, + }) toURL, err := redEH.to.Render(map[string]any{ "Request": ctx.Request(), @@ -150,7 +107,6 @@ if: type(Error) == authentication_error assert.Equal(t, "http://foo.bar?origin=http%3A%2F%2Ffoobar.baz%2Fzab", toURL) assert.Equal(t, http.StatusMovedPermanently, redEH.code) - assert.NotNil(t, redEH.c) }, }, } { @@ -177,87 +133,36 @@ func TestCreateRedirectErrorHandlerFromPrototype(t *testing.T) { assert func(t *testing.T, err error, prototype *redirectErrorHandler, configured *redirectErrorHandler) }{ { - uc: "no new configuration provided", - prototypeConfig: []byte(` -to: http://foo.bar -if: type(Error) == authentication_error -`), + uc: "no new configuration provided", + prototypeConfig: []byte(`to: http://foo.bar`), assert: func(t *testing.T, err error, prototype *redirectErrorHandler, configured *redirectErrorHandler) { t.Helper() require.NoError(t, err) assert.Equal(t, prototype, configured) - assert.Equal(t, "no new configuration provided", configured.ID()) }, }, { - uc: "empty configuration provided", - prototypeConfig: []byte(` -to: http://foo.bar -if: type(Error) == authentication_error -`), - config: []byte(``), + uc: "empty configuration provided", + prototypeConfig: []byte(`to: http://foo.bar`), + config: []byte(``), assert: func(t *testing.T, err error, prototype *redirectErrorHandler, configured *redirectErrorHandler) { t.Helper() require.NoError(t, err) assert.Equal(t, prototype, configured) - assert.Equal(t, "empty configuration provided", configured.ID()) - }, - }, - { - uc: "unsupported fields provided", - prototypeConfig: []byte(` -to: http://foo.bar -if: type(Error) == authentication_error -`), - config: []byte(`to: http://foo.bar`), - assert: func(t *testing.T, err error, _ *redirectErrorHandler, _ *redirectErrorHandler) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "failed decoding") }, }, { - uc: "invalid 'if' condition", - prototypeConfig: []byte(` -to: http://foo.bar -if: type(Error) == authentication_error -`), - config: []byte(`if: foo`), + uc: "unsupported configuration provided", + prototypeConfig: []byte(`to: http://foo.bar`), + config: []byte(`to: http://foo.bar`), assert: func(t *testing.T, err error, _ *redirectErrorHandler, _ *redirectErrorHandler) { t.Helper() require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "failed to compile") - }, - }, - { - uc: "required 'if' field provided", - prototypeConfig: []byte(` -to: http://foo.bar -code: 301 -if: type(Error) in [authentication_error, authorization_error] -`), - config: []byte(`if: type(Error) == precondition_error`), - assert: func(t *testing.T, err error, prototype *redirectErrorHandler, configured *redirectErrorHandler) { - t.Helper() - - ctx := mocks.NewContextMock(t) - ctx.EXPECT().AppContext().Return(context.TODO()) - ctx.EXPECT().Request().Return(nil) - - require.NoError(t, err) - assert.NotEqual(t, prototype, configured) - assert.NotNil(t, configured) - assert.Equal(t, "required 'if' field provided", configured.ID()) - assert.Equal(t, prototype.to, configured.to) - assert.Equal(t, prototype.code, configured.code) - assert.NotEqual(t, prototype.c, configured.c) - assert.True(t, configured.CanExecute(ctx, heimdall.ErrArgument)) + require.ErrorContains(t, err, 
"reconfiguration of a redirect error handler is not supported") }, }, } { @@ -298,55 +203,29 @@ func TestRedirectErrorHandlerExecute(t *testing.T) { config []byte error error configureContext func(t *testing.T, ctx *mocks.ContextMock) - assert func(t *testing.T, wasResponsible bool, err error) + assert func(t *testing.T, err error) }{ { - uc: "not responsible for error", - config: []byte(` -to: http://foo.bar -if: type(Error) == authentication_error -`), - error: heimdall.ErrInternal, - configureContext: func(t *testing.T, ctx *mocks.ContextMock) { - t.Helper() - - ctx.EXPECT().Request().Return(nil) - }, - assert: func(t *testing.T, wasResponsible bool, err error) { - t.Helper() - - require.NoError(t, err) - assert.False(t, wasResponsible) - }, - }, - { - uc: "responsible for error but with template rendering error", - config: []byte(` -to: http://foo.bar={{ len .foobar }} -if: type(Error) == authentication_error -`), - error: heimdall.ErrAuthentication, + uc: "with template rendering error", + config: []byte(`to: http://foo.bar={{ len .foobar }}`), + error: heimdall.ErrAuthentication, configureContext: func(t *testing.T, ctx *mocks.ContextMock) { t.Helper() ctx.EXPECT().Request().Return(nil) }, - assert: func(t *testing.T, wasResponsible bool, err error) { + assert: func(t *testing.T, err error) { t.Helper() require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrInternal) assert.Contains(t, err.Error(), "failed to render") - assert.True(t, wasResponsible) }, }, { - uc: "responsible without return to url templating", - config: []byte(` -to: http://foo.bar -if: type(Error) == authentication_error -`), - error: heimdall.ErrAuthentication, + uc: "without return to url templating", + config: []byte(`to: http://foo.bar`), + error: heimdall.ErrAuthentication, configureContext: func(t *testing.T, ctx *mocks.ContextMock) { t.Helper() @@ -361,19 +240,17 @@ if: type(Error) == authentication_error return true })) }, - assert: func(t *testing.T, wasResponsible bool, err error) { + assert: func(t *testing.T, err error) { t.Helper() require.NoError(t, err) - assert.True(t, wasResponsible) }, }, { - uc: "responsible with template and code set", + uc: "with template and code set", config: []byte(` to: http://foo.bar?origin={{ .Request.URL | urlenc }} code: 300 -if: type(Error) == authentication_error `), error: heimdall.ErrAuthentication, configureContext: func(t *testing.T, ctx *mocks.ContextMock) { @@ -382,7 +259,7 @@ if: type(Error) == authentication_error requestURL, err := url.Parse("http://test.org") require.NoError(t, err) - ctx.EXPECT().Request().Return(&heimdall.Request{URL: requestURL}) + ctx.EXPECT().Request().Return(&heimdall.Request{URL: &heimdall.URL{URL: *requestURL}}) ctx.EXPECT().SetPipelineError(mock.MatchedBy(func(redirErr *heimdall.RedirectError) bool { t.Helper() @@ -399,11 +276,10 @@ if: type(Error) == authentication_error return true })) }, - assert: func(t *testing.T, wasResponsible bool, err error) { + assert: func(t *testing.T, err error) { t.Helper() require.NoError(t, err) - assert.True(t, wasResponsible) }, }, } { @@ -420,19 +296,11 @@ if: type(Error) == authentication_error errorHandler, err := newRedirectErrorHandler("foo", conf) require.NoError(t, err) - var ( - isResponsible bool - execErr error - ) - // WHEN - isResponsible = errorHandler.CanExecute(mctx, tc.error) - if isResponsible { - execErr = errorHandler.Execute(mctx, tc.error) - } + execErr := errorHandler.Execute(mctx, tc.error) // THEN - tc.assert(t, isResponsible, execErr) + tc.assert(t, execErr) }) } } diff 
--git a/internal/rules/mechanisms/errorhandlers/www_authenticate_error_handler.go b/internal/rules/mechanisms/errorhandlers/www_authenticate_error_handler.go index 9bc17442f..1ccf53db1 100644 --- a/internal/rules/mechanisms/errorhandlers/www_authenticate_error_handler.go +++ b/internal/rules/mechanisms/errorhandlers/www_authenticate_error_handler.go @@ -28,7 +28,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, ErrorHandler, error) { + func(_ CreationContext, id string, typ string, conf map[string]any) (bool, ErrorHandler, error) { if typ != ErrorHandlerWWWAuthenticate { return false, nil, nil } @@ -40,15 +40,13 @@ func init() { } type wwwAuthenticateErrorHandler struct { - *baseErrorHandler - + id string realm string } func newWWWAuthenticateErrorHandler(id string, rawConfig map[string]any) (*wwwAuthenticateErrorHandler, error) { type Config struct { - Condition string `mapstructure:"if" validate:"required"` - Realm string `mapstructure:"realm"` + Realm string `mapstructure:"realm"` } var conf Config @@ -56,17 +54,14 @@ func newWWWAuthenticateErrorHandler(id string, rawConfig map[string]any) (*wwwAu return nil, err } - base, err := newBaseErrorHandler(id, conf.Condition) - if err != nil { - return nil, err - } - return &wwwAuthenticateErrorHandler{ - baseErrorHandler: base, - realm: x.IfThenElse(len(conf.Realm) != 0, conf.Realm, "Please authenticate"), + id: id, + realm: x.IfThenElse(len(conf.Realm) != 0, conf.Realm, "Please authenticate"), }, nil } +func (eh *wwwAuthenticateErrorHandler) ID() string { return eh.id } + func (eh *wwwAuthenticateErrorHandler) Execute(ctx heimdall.Context, _ error) error { logger := zerolog.Ctx(ctx.AppContext()) logger.Debug().Str("_id", eh.id).Msg("Handling error using www-authenticate error handler") @@ -83,13 +78,11 @@ func (eh *wwwAuthenticateErrorHandler) WithConfig(rawConfig map[string]any) (Err } type Config struct { - Condition string `mapstructure:"if"` - Realm *string `mapstructure:"realm"` + Realm string `mapstructure:"realm"` } var ( conf Config - base *baseErrorHandler err error ) @@ -97,19 +90,8 @@ func (eh *wwwAuthenticateErrorHandler) WithConfig(rawConfig map[string]any) (Err return nil, err } - if len(conf.Condition) != 0 { - base, err = newBaseErrorHandler(eh.id, conf.Condition) - if err != nil { - return nil, err - } - } else { - base = eh.baseErrorHandler - } - return &wwwAuthenticateErrorHandler{ - baseErrorHandler: base, - realm: x.IfThenElseExec(conf.Realm != nil, - func() string { return *conf.Realm }, - func() string { return eh.realm }), + id: eh.id, + realm: conf.Realm, }, nil } diff --git a/internal/rules/mechanisms/errorhandlers/www_authenticate_error_handler_test.go b/internal/rules/mechanisms/errorhandlers/www_authenticate_error_handler_test.go index 5347a32cb..7366cec2a 100644 --- a/internal/rules/mechanisms/errorhandlers/www_authenticate_error_handler_test.go +++ b/internal/rules/mechanisms/errorhandlers/www_authenticate_error_handler_test.go @@ -38,83 +38,34 @@ func TestCreateWWWAuthenticateErrorHandler(t *testing.T) { config []byte assert func(t *testing.T, err error, errorHandler *wwwAuthenticateErrorHandler) }{ - { - uc: "configuration without required 'if' parameter", - config: []byte(`realm: FooBar`), - assert: func(t *testing.T, err error, _ *wwwAuthenticateErrorHandler) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "'if' is a required field") - }, - }, - { - 
uc: "without provided configuration", - assert: func(t *testing.T, err error, _ *wwwAuthenticateErrorHandler) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "'if' is a required field") - }, - }, - { - uc: "with empty 'if' configuration", - config: []byte(`if: ""`), - assert: func(t *testing.T, err error, _ *wwwAuthenticateErrorHandler) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "'if' is a required field") - }, - }, - { - uc: "with invalid 'if' configuration", - config: []byte(`if: foo`), - assert: func(t *testing.T, err error, _ *wwwAuthenticateErrorHandler) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "failed to compile") - }, - }, { uc: "with configuration containing unsupported fields", config: []byte(` realm: FooBar if: type(Error) == authentication_error -foo: bar `), assert: func(t *testing.T, err error, _ *wwwAuthenticateErrorHandler) { t.Helper() require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "failed decoding") + require.ErrorContains(t, err, "failed decoding") }, }, { - uc: "with minimum required configuration", - config: []byte(`if: type(Error) == authentication_error`), + uc: "without configuration (minimal configuration)", assert: func(t *testing.T, err error, errorHandler *wwwAuthenticateErrorHandler) { t.Helper() require.NoError(t, err) require.NotNil(t, errorHandler) - assert.Equal(t, "with minimum required configuration", errorHandler.ID()) + assert.Equal(t, "without configuration (minimal configuration)", errorHandler.ID()) assert.Equal(t, "Please authenticate", errorHandler.realm) - require.NotNil(t, errorHandler.c) }, }, { - uc: "with all possible attributes", - config: []byte(` -realm: "What is your password" -if: type(Error) == precondition_error -`), + uc: "with all possible attributes", + config: []byte(`realm: "What is your password"`), assert: func(t *testing.T, err error, errorHandler *wwwAuthenticateErrorHandler) { t.Helper() @@ -122,7 +73,6 @@ if: type(Error) == precondition_error require.NotNil(t, errorHandler) assert.Equal(t, "with all possible attributes", errorHandler.ID()) assert.Equal(t, "What is your password", errorHandler.realm) - require.NotNil(t, errorHandler.c) }, }, } { @@ -151,7 +101,7 @@ func TestCreateWWWAuthenticateErrorHandlerFromPrototype(t *testing.T) { }{ { uc: "no new configuration provided", - prototypeConfig: []byte(`if: type(Error) == authentication_error`), + prototypeConfig: []byte(`realm: "foo"`), assert: func(t *testing.T, err error, prototype *wwwAuthenticateErrorHandler, configured *wwwAuthenticateErrorHandler, ) { @@ -159,12 +109,11 @@ func TestCreateWWWAuthenticateErrorHandlerFromPrototype(t *testing.T) { require.NoError(t, err) assert.Equal(t, prototype, configured) - assert.Equal(t, "no new configuration provided", configured.ID()) }, }, { uc: "empty configuration provided", - prototypeConfig: []byte(`if: type(Error) == authentication_error`), + prototypeConfig: []byte(`realm: "foo"`), config: []byte(``), assert: func(t *testing.T, err error, prototype *wwwAuthenticateErrorHandler, configured *wwwAuthenticateErrorHandler, @@ -173,45 +122,11 @@ func TestCreateWWWAuthenticateErrorHandlerFromPrototype(t *testing.T) { require.NoError(t, err) assert.Equal(t, prototype, configured) - assert.Equal(t, "empty configuration provided", 
configured.ID()) - }, - }, - { - uc: "unsupported fields provided", - prototypeConfig: []byte(`if: type(Error) == authentication_error`), - config: []byte(`to: http://foo.bar`), - assert: func(t *testing.T, err error, _ *wwwAuthenticateErrorHandler, - _ *wwwAuthenticateErrorHandler, - ) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "failed decoding") }, }, { - uc: "with 'if' reconfigured", - prototypeConfig: []byte(`if: type(Error) in [authentication_error, authorization_error]`), - config: []byte(`if: type(Error) == precondition_error`), - assert: func(t *testing.T, err error, prototype *wwwAuthenticateErrorHandler, - configured *wwwAuthenticateErrorHandler, - ) { - t.Helper() - - require.NoError(t, err) - assert.NotEqual(t, prototype, configured) - assert.NotNil(t, configured) - assert.Equal(t, "with 'if' reconfigured", configured.ID()) - assert.Equal(t, "Please authenticate", prototype.realm) - assert.Equal(t, prototype.realm, configured.realm) - assert.NotEqual(t, prototype.c, configured.c) - }, - }, - { - uc: "with invalid 'if' reconfigured", - prototypeConfig: []byte(`if: type(Error) in [authentication_error, authorization_error]`), - config: []byte(`if: foo`), + uc: "unsupported fields provided", + config: []byte(`to: http://foo.bar`), assert: func(t *testing.T, err error, _ *wwwAuthenticateErrorHandler, _ *wwwAuthenticateErrorHandler, ) { @@ -219,12 +134,12 @@ func TestCreateWWWAuthenticateErrorHandlerFromPrototype(t *testing.T) { require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "failed to compile") + require.ErrorContains(t, err, "failed decoding") }, }, { uc: "with 'realm' reconfigured", - prototypeConfig: []byte(`if: type(Error) == authentication_error`), + prototypeConfig: []byte(`realm: "Foobar"`), config: []byte(`realm: "You password please"`), assert: func(t *testing.T, err error, prototype *wwwAuthenticateErrorHandler, configured *wwwAuthenticateErrorHandler, @@ -237,7 +152,6 @@ func TestCreateWWWAuthenticateErrorHandlerFromPrototype(t *testing.T) { assert.Equal(t, "with 'realm' reconfigured", configured.ID()) assert.NotEqual(t, prototype.realm, configured.realm) assert.Equal(t, "You password please", configured.realm) - assert.Equal(t, prototype.c, configured.c) }, }, } { @@ -278,32 +192,14 @@ func TestWWWAuthenticateErrorHandlerExecute(t *testing.T) { config []byte error error configureContext func(t *testing.T, ctx *mocks.ContextMock) - assert func(t *testing.T, wasResponsible bool, err error) + assert func(t *testing.T, err error) }{ { - uc: "not responsible for error", - config: []byte(`if: type(Error) == authentication_error`), - error: heimdall.ErrInternal, - configureContext: func(t *testing.T, ctx *mocks.ContextMock) { - t.Helper() - - ctx.EXPECT().Request().Return(nil) - }, - assert: func(t *testing.T, wasResponsible bool, err error) { - t.Helper() - - require.NoError(t, err) - assert.False(t, wasResponsible) - }, - }, - { - uc: "responsible for error with default realm", - config: []byte(`if: type(Error) == authentication_error`), - error: heimdall.ErrAuthentication, + uc: "with default realm", + error: heimdall.ErrAuthentication, configureContext: func(t *testing.T, ctx *mocks.ContextMock) { t.Helper() - ctx.EXPECT().Request().Return(nil) ctx.EXPECT().SetPipelineError(heimdall.ErrAuthentication) ctx.EXPECT().AddHeaderForUpstream("WWW-Authenticate", mock.MatchedBy(func(val string) bool { @@ -314,24 +210,19 @@ func 
TestWWWAuthenticateErrorHandlerExecute(t *testing.T) { return true })) }, - assert: func(t *testing.T, wasResponsible bool, err error) { + assert: func(t *testing.T, err error) { t.Helper() require.NoError(t, err) - assert.True(t, wasResponsible) }, }, { - uc: "responsible for error with custom realm", - config: []byte(` -realm: "Your password please" -if: type(Error) == authentication_error -`), - error: heimdall.ErrAuthentication, + uc: "with custom realm", + config: []byte(`realm: "Your password please"`), + error: heimdall.ErrAuthentication, configureContext: func(t *testing.T, ctx *mocks.ContextMock) { t.Helper() - ctx.EXPECT().Request().Return(nil) ctx.EXPECT().SetPipelineError(heimdall.ErrAuthentication) ctx.EXPECT().AddHeaderForUpstream("WWW-Authenticate", mock.MatchedBy(func(val string) bool { @@ -342,11 +233,10 @@ if: type(Error) == authentication_error return true })) }, - assert: func(t *testing.T, wasResponsible bool, err error) { + assert: func(t *testing.T, err error) { t.Helper() require.NoError(t, err) - assert.True(t, wasResponsible) }, }, } { @@ -363,19 +253,11 @@ if: type(Error) == authentication_error errorHandler, err := newWWWAuthenticateErrorHandler("foo", conf) require.NoError(t, err) - var ( - isResponsible bool - execErr error - ) - // WHEN - isResponsible = errorHandler.CanExecute(mctx, tc.error) - if isResponsible { - execErr = errorHandler.Execute(mctx, tc.error) - } + execErr := errorHandler.Execute(mctx, tc.error) // THEN - tc.assert(t, isResponsible, execErr) + tc.assert(t, execErr) }) } } diff --git a/internal/rules/mechanisms/factory.go b/internal/rules/mechanisms/factory.go deleted file mode 100644 index 9acbd2c3b..000000000 --- a/internal/rules/mechanisms/factory.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package mechanisms - -import ( - "errors" - - "github.com/dadrus/heimdall/internal/config" - "github.com/dadrus/heimdall/internal/rules/mechanisms/authenticators" - "github.com/dadrus/heimdall/internal/rules/mechanisms/authorizers" - "github.com/dadrus/heimdall/internal/rules/mechanisms/contextualizers" - "github.com/dadrus/heimdall/internal/rules/mechanisms/errorhandlers" - "github.com/dadrus/heimdall/internal/rules/mechanisms/finalizers" -) - -var ( - ErrAuthenticatorCreation = errors.New("failed to create authenticator") - ErrAuthorizerCreation = errors.New("failed to create authorizer") - ErrFinalizerCreation = errors.New("failed to create finalizer") - ErrContextualizerCreation = errors.New("failed to create contextualizer") - ErrErrorHandlerCreation = errors.New("failed to create error handler") -) - -//go:generate mockery --name Factory --structname FactoryMock - -type Factory interface { - CreateAuthenticator(version, id string, conf config.MechanismConfig) (authenticators.Authenticator, error) - CreateAuthorizer(version, id string, conf config.MechanismConfig) (authorizers.Authorizer, error) - CreateContextualizer(version, id string, conf config.MechanismConfig) (contextualizers.Contextualizer, error) - CreateFinalizer(version, id string, conf config.MechanismConfig) (finalizers.Finalizer, error) - CreateErrorHandler(version, id string, conf config.MechanismConfig) (errorhandlers.ErrorHandler, error) -} diff --git a/internal/rules/mechanisms/finalizers/cookie_finalizer.go b/internal/rules/mechanisms/finalizers/cookie_finalizer.go index 4828b2310..65ed09e10 100644 --- a/internal/rules/mechanisms/finalizers/cookie_finalizer.go +++ b/internal/rules/mechanisms/finalizers/cookie_finalizer.go @@ -30,7 +30,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Finalizer, error) { + func(_ CreationContext, id string, typ string, conf map[string]any) (bool, Finalizer, error) { if typ != FinalizerCookie { return false, nil, nil } @@ -62,25 +62,26 @@ func newCookieFinalizer(id string, rawConfig map[string]any) (*cookieFinalizer, }, nil } -func (u *cookieFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) error { +func (f *cookieFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) error { logger := zerolog.Ctx(ctx.AppContext()) - logger.Debug().Str("_id", u.id).Msg("Finalizing using cookie finalizer") + logger.Debug().Str("_id", f.id).Msg("Finalizing using cookie finalizer") if sub == nil { return errorchain. NewWithMessage(heimdall.ErrInternal, "failed to execute cookie finalizer due to 'nil' subject"). - WithErrorContext(u) + WithErrorContext(f) } - for name, tmpl := range u.cookies { + for name, tmpl := range f.cookies { value, err := tmpl.Render(map[string]any{ "Request": ctx.Request(), "Subject": sub, + "Outputs": ctx.Outputs(), }) if err != nil { return errorchain. NewWithMessagef(heimdall.ErrInternal, "failed to render value for '%s' cookie", name). - WithErrorContext(u). + WithErrorContext(f). 
CausedBy(err) } @@ -92,14 +93,14 @@ func (u *cookieFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) er return nil } -func (u *cookieFinalizer) WithConfig(config map[string]any) (Finalizer, error) { +func (f *cookieFinalizer) WithConfig(config map[string]any) (Finalizer, error) { if len(config) == 0 { - return u, nil + return f, nil } - return newCookieFinalizer(u.id, config) + return newCookieFinalizer(f.id, config) } -func (u *cookieFinalizer) ID() string { return u.id } +func (f *cookieFinalizer) ID() string { return f.id } -func (u *cookieFinalizer) ContinueOnError() bool { return false } +func (f *cookieFinalizer) ContinueOnError() bool { return false } diff --git a/internal/rules/mechanisms/finalizers/cookie_finalizer_test.go b/internal/rules/mechanisms/finalizers/cookie_finalizer_test.go index 9abe279b0..38174280c 100644 --- a/internal/rules/mechanisms/finalizers/cookie_finalizer_test.go +++ b/internal/rules/mechanisms/finalizers/cookie_finalizer_test.go @@ -263,6 +263,7 @@ cookies: bar: "{{ .Subject.ID }}" baz: bar x_foo: '{{ .Request.Header "X-Foo" }}' + x_bar: '{{ .Outputs.foo }}' `), configureContext: func(t *testing.T, ctx *mocks.ContextMock) { t.Helper() @@ -274,7 +275,9 @@ cookies: ctx.EXPECT().AddCookieForUpstream("bar", "FooBar") ctx.EXPECT().AddCookieForUpstream("baz", "bar") ctx.EXPECT().AddCookieForUpstream("x_foo", "Bar") + ctx.EXPECT().AddCookieForUpstream("x_bar", "bar") ctx.EXPECT().Request().Return(&heimdall.Request{RequestFunctions: reqf}) + ctx.EXPECT().Outputs().Return(map[string]any{"foo": "bar"}) }, createSubject: func(t *testing.T) *subject.Subject { t.Helper() diff --git a/internal/rules/mechanisms/finalizers/finalizer_type_registry.go b/internal/rules/mechanisms/finalizers/finalizer_type_registry.go index 4bba4eac6..89c1a24fa 100644 --- a/internal/rules/mechanisms/finalizers/finalizer_type_registry.go +++ b/internal/rules/mechanisms/finalizers/finalizer_type_registry.go @@ -20,6 +20,9 @@ import ( "errors" "sync" + "github.com/dadrus/heimdall/internal/keyholder" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" ) @@ -31,7 +34,15 @@ var ( typeFactoriesMu sync.RWMutex //nolint:gochecknoglobals ) -type TypeFactory func(id string, typ string, c map[string]any) (bool, Finalizer, error) +//go:generate mockery --name CreationContext --structname CreationContextMock --inpackage --testonly + +type CreationContext interface { + Watcher() watcher.Watcher + KeyHolderRegistry() keyholder.Registry + CertificateObserver() certificate.Observer +} + +type TypeFactory func(ctx CreationContext, id string, typ string, c map[string]any) (bool, Finalizer, error) func registerTypeFactory(factory TypeFactory) { typeFactoriesMu.Lock() @@ -44,12 +55,12 @@ func registerTypeFactory(factory TypeFactory) { typeFactories = append(typeFactories, factory) } -func CreatePrototype(id string, typ string, mConfig map[string]any) (Finalizer, error) { +func CreatePrototype(ctx CreationContext, id string, typ string, mConfig map[string]any) (Finalizer, error) { typeFactoriesMu.RLock() defer typeFactoriesMu.RUnlock() for _, create := range typeFactories { - if ok, at, err := create(id, typ, mConfig); ok { + if ok, at, err := create(ctx, id, typ, mConfig); ok { return at, err } } diff --git a/internal/rules/mechanisms/finalizers/finalizer_type_registry_test.go b/internal/rules/mechanisms/finalizers/finalizer_type_registry_test.go index 
8d606383b..5c86c4d87 100644 --- a/internal/rules/mechanisms/finalizers/finalizer_type_registry_test.go +++ b/internal/rules/mechanisms/finalizers/finalizer_type_registry_test.go @@ -57,7 +57,7 @@ func TestCreateFinalizerPrototype(t *testing.T) { } { t.Run("case="+tc.uc, func(t *testing.T) { // WHEN - finalizer, err := CreatePrototype("foo", tc.typ, nil) + finalizer, err := CreatePrototype(NewCreationContextMock(t), "foo", tc.typ, nil) // THEN tc.assert(t, err, finalizer) diff --git a/internal/rules/mechanisms/finalizers/header_finalizer.go b/internal/rules/mechanisms/finalizers/header_finalizer.go index fb5579f52..88d088669 100644 --- a/internal/rules/mechanisms/finalizers/header_finalizer.go +++ b/internal/rules/mechanisms/finalizers/header_finalizer.go @@ -30,7 +30,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Finalizer, error) { + func(_ CreationContext, id string, typ string, conf map[string]any) (bool, Finalizer, error) { if typ != FinalizerHeader { return false, nil, nil } @@ -62,25 +62,26 @@ func newHeaderFinalizer(id string, rawConfig map[string]any) (*headerFinalizer, }, nil } -func (u *headerFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) error { +func (f *headerFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) error { logger := zerolog.Ctx(ctx.AppContext()) - logger.Debug().Str("_id", u.id).Msg("Finalizing using header finalizer") + logger.Debug().Str("_id", f.id).Msg("Finalizing using header finalizer") if sub == nil { return errorchain. NewWithMessage(heimdall.ErrInternal, "failed to execute header finalizer due to 'nil' subject"). - WithErrorContext(u) + WithErrorContext(f) } - for name, tmpl := range u.headers { + for name, tmpl := range f.headers { value, err := tmpl.Render(map[string]any{ "Request": ctx.Request(), "Subject": sub, + "Outputs": ctx.Outputs(), }) if err != nil { return errorchain. NewWithMessagef(heimdall.ErrInternal, "failed to render value for '%s' header", name). - WithErrorContext(u). + WithErrorContext(f). 
CausedBy(err) } @@ -92,14 +93,14 @@ func (u *headerFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) er return nil } -func (u *headerFinalizer) WithConfig(config map[string]any) (Finalizer, error) { +func (f *headerFinalizer) WithConfig(config map[string]any) (Finalizer, error) { if len(config) == 0 { - return u, nil + return f, nil } - return newHeaderFinalizer(u.id, config) + return newHeaderFinalizer(f.id, config) } -func (u *headerFinalizer) ID() string { return u.id } +func (f *headerFinalizer) ID() string { return f.id } -func (u *headerFinalizer) ContinueOnError() bool { return false } +func (f *headerFinalizer) ContinueOnError() bool { return false } diff --git a/internal/rules/mechanisms/finalizers/header_finalizer_test.go b/internal/rules/mechanisms/finalizers/header_finalizer_test.go index 055f10f92..b4b742f82 100644 --- a/internal/rules/mechanisms/finalizers/header_finalizer_test.go +++ b/internal/rules/mechanisms/finalizers/header_finalizer_test.go @@ -262,6 +262,7 @@ headers: bar: "{{ .Subject.ID }}" baz: bar X-Baz: '{{ .Request.Header "X-Foo" }}' + X-Foo: '{{ .Outputs.foo }}' `), configureContext: func(t *testing.T, ctx *mocks.ContextMock) { t.Helper() @@ -273,7 +274,9 @@ headers: ctx.EXPECT().AddHeaderForUpstream("bar", "FooBar") ctx.EXPECT().AddHeaderForUpstream("baz", "bar") ctx.EXPECT().AddHeaderForUpstream("X-Baz", "Bar") + ctx.EXPECT().AddHeaderForUpstream("X-Foo", "bar") ctx.EXPECT().Request().Return(&heimdall.Request{RequestFunctions: reqf}) + ctx.EXPECT().Outputs().Return(map[string]any{"foo": "bar"}) }, createSubject: func(t *testing.T) *subject.Subject { t.Helper() diff --git a/internal/rules/mechanisms/finalizers/jwt_finalizer.go b/internal/rules/mechanisms/finalizers/jwt_finalizer.go index 0030fbc3d..f63a0d5db 100644 --- a/internal/rules/mechanisms/finalizers/jwt_finalizer.go +++ b/internal/rules/mechanisms/finalizers/jwt_finalizer.go @@ -18,6 +18,7 @@ package finalizers import ( "crypto/sha256" + "crypto/x509" "encoding/binary" "encoding/hex" "fmt" @@ -45,12 +46,12 @@ const ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Finalizer, error) { + func(ctx CreationContext, id string, typ string, conf map[string]any) (bool, Finalizer, error) { if typ != FinalizerJwt { return false, nil, nil } - finalizer, err := newJWTFinalizer(id, conf) + finalizer, err := newJWTFinalizer(ctx, id, conf) return true, finalizer, err }) @@ -62,15 +63,17 @@ type jwtFinalizer struct { ttl time.Duration headerName string headerScheme string + signer *jwtSigner } -func newJWTFinalizer(id string, rawConfig map[string]any) (*jwtFinalizer, error) { +func newJWTFinalizer(ctx CreationContext, id string, rawConfig map[string]any) (*jwtFinalizer, error) { type HeaderConfig struct { Name string `mapstructure:"name" validate:"required"` Scheme string `mapstructure:"scheme"` } type Config struct { + Signer SignerConfig `mapstructure:"signer" validate:"required"` TTL *time.Duration `mapstructure:"ttl" validate:"omitempty,gt=1s"` Claims template.Template `mapstructure:"claims"` Header *HeaderConfig `mapstructure:"header"` @@ -81,7 +84,14 @@ func newJWTFinalizer(id string, rawConfig map[string]any) (*jwtFinalizer, error) return nil, err } - return &jwtFinalizer{ + signer, err := newJWTSigner(&conf.Signer, ctx.Watcher()) + if err != nil { + return nil, err + } + + ctx.KeyHolderRegistry().AddKeyHolder(signer) + + fin := &jwtFinalizer{ id: id, claims: conf.Claims, ttl: x.IfThenElseExec(conf.TTL != nil, @@ -93,17 +103,22 @@ 
func newJWTFinalizer(id string, rawConfig map[string]any) (*jwtFinalizer, error) headerScheme: x.IfThenElseExec(conf.Header != nil, func() string { return conf.Header.Scheme }, func() string { return "Bearer" }), - }, nil + signer: signer, + } + + ctx.CertificateObserver().Add(fin) + + return fin, nil } -func (u *jwtFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) error { +func (f *jwtFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) error { logger := zerolog.Ctx(ctx.AppContext()) - logger.Debug().Str("_id", u.id).Msg("Finalizing using JWT finalizer") + logger.Debug().Str("_id", f.id).Msg("Finalizing using JWT finalizer") if sub == nil { return errorchain. NewWithMessage(heimdall.ErrInternal, "failed to execute jwt finalizer due to 'nil' subject"). - WithErrorContext(u) + WithErrorContext(f) } cch := cache.Ctx(ctx.AppContext()) @@ -113,7 +128,7 @@ func (u *jwtFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) error err error ) - cacheKey := u.calculateCacheKey(sub, ctx.Signer()) + cacheKey := f.calculateCacheKey(ctx, sub) if entry, err := cch.Get(ctx.AppContext(), cacheKey); err == nil { logger.Debug().Msg("Reusing JWT from cache") @@ -121,26 +136,26 @@ func (u *jwtFinalizer) Execute(ctx heimdall.Context, sub *subject.Subject) error } if len(jwtToken) == 0 { - jwtToken, err = u.generateToken(ctx, sub) + jwtToken, err = f.generateToken(ctx, sub) if err != nil { return err } - if len(cacheKey) != 0 && u.ttl > defaultCacheLeeway { - if err = cch.Set(ctx.AppContext(), cacheKey, stringx.ToBytes(jwtToken), u.ttl-defaultCacheLeeway); err != nil { + if len(cacheKey) != 0 && f.ttl > defaultCacheLeeway { + if err = cch.Set(ctx.AppContext(), cacheKey, stringx.ToBytes(jwtToken), f.ttl-defaultCacheLeeway); err != nil { logger.Warn().Err(err).Msg("Failed to cache JWT token") } } } - ctx.AddHeaderForUpstream(u.headerName, fmt.Sprintf("%s %s", u.headerScheme, jwtToken)) + ctx.AddHeaderForUpstream(f.headerName, fmt.Sprintf("%s %s", f.headerScheme, jwtToken)) return nil } -func (u *jwtFinalizer) WithConfig(rawConfig map[string]any) (Finalizer, error) { +func (f *jwtFinalizer) WithConfig(rawConfig map[string]any) (Finalizer, error) { if len(rawConfig) == 0 { - return u, nil + return f, nil } type Config struct { @@ -154,35 +169,36 @@ func (u *jwtFinalizer) WithConfig(rawConfig map[string]any) (Finalizer, error) { } return &jwtFinalizer{ - id: u.id, - claims: x.IfThenElse(conf.Claims != nil, conf.Claims, u.claims), + id: f.id, + claims: x.IfThenElse(conf.Claims != nil, conf.Claims, f.claims), ttl: x.IfThenElseExec(conf.TTL != nil, func() time.Duration { return *conf.TTL }, - func() time.Duration { return u.ttl }), - headerName: u.headerName, - headerScheme: u.headerScheme, + func() time.Duration { return f.ttl }), + headerName: f.headerName, + headerScheme: f.headerScheme, + signer: f.signer, }, nil } -func (u *jwtFinalizer) ID() string { return u.id } +func (f *jwtFinalizer) ID() string { return f.id } -func (u *jwtFinalizer) ContinueOnError() bool { return false } +func (f *jwtFinalizer) ContinueOnError() bool { return false } -func (u *jwtFinalizer) generateToken(ctx heimdall.Context, sub *subject.Subject) (string, error) { +func (f *jwtFinalizer) generateToken(ctx heimdall.Context, sub *subject.Subject) (string, error) { logger := zerolog.Ctx(ctx.AppContext()) logger.Debug().Msg("Generating new JWT") - iss := ctx.Signer() claims := map[string]any{} - if u.claims != nil { - vals, err := u.claims.Render(map[string]any{ + if f.claims != nil { + vals, err := 
f.claims.Render(map[string]any{ "Subject": sub, + "Outputs": ctx.Outputs(), }) if err != nil { return "", errorchain. NewWithMessage(heimdall.ErrInternal, "failed to render claims"). - WithErrorContext(u). + WithErrorContext(f). CausedBy(err) } @@ -191,35 +207,41 @@ func (u *jwtFinalizer) generateToken(ctx heimdall.Context, sub *subject.Subject) if err = json.Unmarshal(stringx.ToBytes(vals), &claims); err != nil { return "", errorchain. NewWithMessage(heimdall.ErrInternal, "failed to unmarshal claims rendered by template"). - WithErrorContext(u). + WithErrorContext(f). CausedBy(err) } } - token, err := iss.Sign(sub.ID, u.ttl, claims) + token, err := f.signer.Sign(sub.ID, f.ttl, claims) if err != nil { return "", errorchain. NewWithMessage(heimdall.ErrInternal, "failed to sign token"). - WithErrorContext(u). + WithErrorContext(f). CausedBy(err) } return token, nil } -func (u *jwtFinalizer) calculateCacheKey(sub *subject.Subject, iss heimdall.JWTSigner) string { +func (f *jwtFinalizer) calculateCacheKey(ctx heimdall.Context, sub *subject.Subject) string { const int64BytesCount = 8 ttlBytes := make([]byte, int64BytesCount) - binary.LittleEndian.PutUint64(ttlBytes, uint64(u.ttl)) + binary.LittleEndian.PutUint64(ttlBytes, uint64(f.ttl)) hash := sha256.New() - hash.Write(iss.Hash()) - hash.Write(x.IfThenElseExec(u.claims != nil, - func() []byte { return u.claims.Hash() }, + hash.Write(f.signer.Hash()) + hash.Write(x.IfThenElseExec(f.claims != nil, + func() []byte { return f.claims.Hash() }, func() []byte { return []byte{} })) hash.Write(ttlBytes) hash.Write(sub.Hash()) + rawSub, _ := json.Marshal(ctx.Outputs()) + hash.Write(rawSub) + return hex.EncodeToString(hash.Sum(nil)) } + +func (f *jwtFinalizer) Name() string { return f.id } +func (f *jwtFinalizer) Certificates() []*x509.Certificate { return f.signer.activeCertificateChain() } diff --git a/internal/rules/mechanisms/finalizers/jwt_finalizer_test.go b/internal/rules/mechanisms/finalizers/jwt_finalizer_test.go index d225a98d7..881c40c84 100644 --- a/internal/rules/mechanisms/finalizers/jwt_finalizer_test.go +++ b/internal/rules/mechanisms/finalizers/jwt_finalizer_test.go @@ -18,7 +18,13 @@ package finalizers import ( "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" "errors" + "os" + "path/filepath" + "strings" "testing" "time" @@ -30,42 +36,110 @@ import ( "github.com/dadrus/heimdall/internal/cache/mocks" "github.com/dadrus/heimdall/internal/heimdall" heimdallmocks "github.com/dadrus/heimdall/internal/heimdall/mocks" + mocks3 "github.com/dadrus/heimdall/internal/keyholder/mocks" + mocks4 "github.com/dadrus/heimdall/internal/otel/metrics/certificate/mocks" "github.com/dadrus/heimdall/internal/rules/mechanisms/subject" + mocks2 "github.com/dadrus/heimdall/internal/watcher/mocks" "github.com/dadrus/heimdall/internal/x" + "github.com/dadrus/heimdall/internal/x/pkix/pemx" "github.com/dadrus/heimdall/internal/x/testsupport" ) func TestCreateJWTFinalizer(t *testing.T) { t.Parallel() + privKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + pemBytes, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(privKey, pemx.WithHeader("X-Key-ID", "key")), + ) + require.NoError(t, err) + + testDir := t.TempDir() + pemFile := filepath.Join(testDir, "keystore.pem") + + err = os.WriteFile(pemFile, pemBytes, 0o600) + require.NoError(t, err) + const expectedTTL = 5 * time.Second for _, tc := range []struct { - uc string - id string - 
config []byte - assert func(t *testing.T, err error, finalizer *jwtFinalizer) + uc string + id string + config []byte + configureContext func(t *testing.T, ctx *CreationContextMock) + assert func(t *testing.T, err error, finalizer *jwtFinalizer) }{ { - uc: "without config", - id: "jun", - assert: func(t *testing.T, err error, finalizer *jwtFinalizer) { + uc: "without config", + id: "fin", + configureContext: func(t *testing.T, _ *CreationContextMock) { t.Helper() }, + assert: func(t *testing.T, err error, _ *jwtFinalizer) { t.Helper() - require.NoError(t, err) + require.Error(t, err) + require.ErrorContains(t, err, "'signer' is a required field") + }, + }, + { + uc: "with empty config", + id: "fin", + config: []byte(``), + configureContext: func(t *testing.T, _ *CreationContextMock) { t.Helper() }, + assert: func(t *testing.T, err error, _ *jwtFinalizer) { + t.Helper() - require.NotNil(t, finalizer) - assert.Equal(t, defaultJWTTTL, finalizer.ttl) - assert.Nil(t, finalizer.claims) - assert.Equal(t, "jun", finalizer.ID()) - assert.Equal(t, "Authorization", finalizer.headerName) - assert.Equal(t, "Bearer", finalizer.headerScheme) + require.Error(t, err) + require.ErrorContains(t, err, "'signer' is a required field") }, }, { - uc: "with empty config", - id: "jun", - config: []byte(``), + uc: "with not existing key store for signer", + id: "fin", + config: []byte(` +signer: + key_store: + path: /does/not/exist.pem + key_id: key +`), + configureContext: func(t *testing.T, ctx *CreationContextMock) { + t.Helper() + + ctx.EXPECT().Watcher().Return(mocks2.NewWatcherMock(t)) + }, + assert: func(t *testing.T, err error, _ *jwtFinalizer) { + t.Helper() + + require.Error(t, err) + require.ErrorContains(t, err, "failed loading keystore") + }, + }, + { + uc: "with signer only", + id: "fin", + config: []byte(` +signer: + key_store: + path: ` + pemFile + ` + key_id: key +`), + configureContext: func(t *testing.T, ctx *CreationContextMock) { + t.Helper() + + wm := mocks2.NewWatcherMock(t) + wm.EXPECT().Add(pemFile, mock.Anything).Return(nil) + + khr := mocks3.NewRegistryMock(t) + khr.EXPECT().AddKeyHolder(mock.Anything) + + co := mocks4.NewObserverMock(t) + co.EXPECT().Add(mock.Anything) + + ctx.EXPECT().Watcher().Return(wm) + ctx.EXPECT().KeyHolderRegistry().Return(khr) + ctx.EXPECT().CertificateObserver().Return(co) + }, assert: func(t *testing.T, err error, finalizer *jwtFinalizer) { t.Helper() @@ -74,15 +148,42 @@ func TestCreateJWTFinalizer(t *testing.T) { require.NotNil(t, finalizer) assert.Equal(t, defaultJWTTTL, finalizer.ttl) assert.Nil(t, finalizer.claims) - assert.Equal(t, "jun", finalizer.ID()) + assert.Equal(t, "fin", finalizer.ID()) assert.Equal(t, "Authorization", finalizer.headerName) assert.Equal(t, "Bearer", finalizer.headerScheme) + require.NotNil(t, finalizer.signer) + assert.Equal(t, "heimdall", finalizer.signer.iss) + assert.Equal(t, pemFile, finalizer.signer.path) + assert.Equal(t, "key", finalizer.signer.keyID) + assert.Equal(t, privKey, finalizer.signer.key) }, }, { - uc: "with ttl only", - id: "jun", - config: []byte(`ttl: 5s`), + uc: "with ttl and signer", + id: "fin", + config: []byte(` +ttl: 5s +signer: + name: foo + key_store: + path: ` + pemFile + ` +`), + configureContext: func(t *testing.T, ctx *CreationContextMock) { + t.Helper() + + wm := mocks2.NewWatcherMock(t) + wm.EXPECT().Add(pemFile, mock.Anything).Return(nil) + + khr := mocks3.NewRegistryMock(t) + khr.EXPECT().AddKeyHolder(mock.Anything) + + co := mocks4.NewObserverMock(t) + co.EXPECT().Add(mock.Anything) + + 
ctx.EXPECT().Watcher().Return(wm) + ctx.EXPECT().KeyHolderRegistry().Return(khr) + ctx.EXPECT().CertificateObserver().Return(co) + }, assert: func(t *testing.T, err error, finalizer *jwtFinalizer) { t.Helper() @@ -91,14 +192,24 @@ func TestCreateJWTFinalizer(t *testing.T) { require.NotNil(t, finalizer) assert.Equal(t, expectedTTL, finalizer.ttl) assert.Nil(t, finalizer.claims) - assert.Equal(t, "jun", finalizer.ID()) + assert.Equal(t, "fin", finalizer.ID()) assert.Equal(t, "Authorization", finalizer.headerName) assert.Equal(t, "Bearer", finalizer.headerScheme) + require.NotNil(t, finalizer.signer) + assert.Equal(t, "foo", finalizer.signer.iss) + assert.Equal(t, pemFile, finalizer.signer.path) + assert.Equal(t, privKey, finalizer.signer.key) }, }, { - uc: "with too short ttl", - config: []byte(`ttl: 5ms`), + uc: "with too short ttl", + config: []byte(` +ttl: 5ms +signer: + key_store: + path: ` + pemFile + ` +`), + configureContext: func(t *testing.T, _ *CreationContextMock) { t.Helper() }, assert: func(t *testing.T, err error, _ *jwtFinalizer) { t.Helper() @@ -108,12 +219,32 @@ func TestCreateJWTFinalizer(t *testing.T) { }, }, { - uc: "with claims only", - id: "jun", + uc: "with claims and key store", + id: "fin", config: []byte(` +signer: + name: foo + key_store: + path: ` + pemFile + ` claims: '{ "sub": {{ quote .Subject.ID }} }' `), + configureContext: func(t *testing.T, ctx *CreationContextMock) { + t.Helper() + + wm := mocks2.NewWatcherMock(t) + wm.EXPECT().Add(pemFile, mock.Anything).Return(nil) + + khr := mocks3.NewRegistryMock(t) + khr.EXPECT().AddKeyHolder(mock.Anything) + + co := mocks4.NewObserverMock(t) + co.EXPECT().Add(mock.Anything) + + ctx.EXPECT().Watcher().Return(wm) + ctx.EXPECT().KeyHolderRegistry().Return(khr) + ctx.EXPECT().CertificateObserver().Return(co) + }, assert: func(t *testing.T, err error, finalizer *jwtFinalizer) { t.Helper() @@ -127,20 +258,43 @@ claims: }) require.NoError(t, err) assert.Equal(t, `{ "sub": "bar" }`, val) - assert.Equal(t, "jun", finalizer.ID()) + assert.Equal(t, "fin", finalizer.ID()) assert.Equal(t, "Authorization", finalizer.headerName) assert.Equal(t, "Bearer", finalizer.headerScheme) assert.False(t, finalizer.ContinueOnError()) + require.NotNil(t, finalizer.signer) + assert.Equal(t, "foo", finalizer.signer.iss) + assert.Equal(t, pemFile, finalizer.signer.path) + assert.Equal(t, privKey, finalizer.signer.key) }, }, { - uc: "with claims and ttl", - id: "jun", + uc: "with claims, signer and ttl", + id: "fin", config: []byte(` ttl: 5s +signer: + key_store: + path: ` + pemFile + ` claims: '{ "sub": {{ quote .Subject.ID }} }' `), + configureContext: func(t *testing.T, ctx *CreationContextMock) { + t.Helper() + + wm := mocks2.NewWatcherMock(t) + wm.EXPECT().Add(pemFile, mock.Anything).Return(nil) + + khr := mocks3.NewRegistryMock(t) + khr.EXPECT().AddKeyHolder(mock.Anything) + + co := mocks4.NewObserverMock(t) + co.EXPECT().Add(mock.Anything) + + ctx.EXPECT().Watcher().Return(wm) + ctx.EXPECT().KeyHolderRegistry().Return(khr) + ctx.EXPECT().CertificateObserver().Return(co) + }, assert: func(t *testing.T, err error, finalizer *jwtFinalizer) { t.Helper() @@ -154,10 +308,14 @@ claims: }) require.NoError(t, err) assert.Equal(t, `{ "sub": "bar" }`, val) - assert.Equal(t, "jun", finalizer.ID()) + assert.Equal(t, "fin", finalizer.ID()) assert.Equal(t, "Authorization", finalizer.headerName) assert.Equal(t, "Bearer", finalizer.headerScheme) assert.False(t, finalizer.ContinueOnError()) + require.NotNil(t, finalizer.signer) + assert.Equal(t, "heimdall", 
finalizer.signer.iss) + assert.Equal(t, pemFile, finalizer.signer.path) + assert.Equal(t, privKey, finalizer.signer.key) }, }, { @@ -166,6 +324,7 @@ claims: ttl: 5s foo: bar" `), + configureContext: func(t *testing.T, _ *CreationContextMock) { t.Helper() }, assert: func(t *testing.T, err error, _ *jwtFinalizer) { t.Helper() @@ -176,11 +335,14 @@ foo: bar" }, { uc: "with bad header config", - id: "jun", config: []byte(` +signer: + key_store: + path: ` + pemFile + ` header: scheme: Foo `), + configureContext: func(t *testing.T, _ *CreationContextMock) { t.Helper() }, assert: func(t *testing.T, err error, _ *jwtFinalizer) { t.Helper() @@ -191,11 +353,30 @@ header: }, { uc: "with valid header config without scheme", - id: "jun", + id: "fin", config: []byte(` +signer: + key_store: + path: ` + pemFile + ` header: name: Foo `), + configureContext: func(t *testing.T, ctx *CreationContextMock) { + t.Helper() + + wm := mocks2.NewWatcherMock(t) + wm.EXPECT().Add(pemFile, mock.Anything).Return(nil) + + khr := mocks3.NewRegistryMock(t) + khr.EXPECT().AddKeyHolder(mock.Anything) + + co := mocks4.NewObserverMock(t) + co.EXPECT().Add(mock.Anything) + + ctx.EXPECT().Watcher().Return(wm) + ctx.EXPECT().KeyHolderRegistry().Return(khr) + ctx.EXPECT().CertificateObserver().Return(co) + }, assert: func(t *testing.T, err error, finalizer *jwtFinalizer) { t.Helper() @@ -203,19 +384,42 @@ header: require.NotNil(t, finalizer) assert.Equal(t, defaultJWTTTL, finalizer.ttl) assert.Nil(t, finalizer.claims) - assert.Equal(t, "jun", finalizer.ID()) + assert.Equal(t, "fin", finalizer.ID()) assert.Equal(t, "Foo", finalizer.headerName) assert.Empty(t, finalizer.headerScheme) + require.NotNil(t, finalizer.signer) + assert.Equal(t, "heimdall", finalizer.signer.iss) + assert.Equal(t, pemFile, finalizer.signer.path) + assert.Equal(t, privKey, finalizer.signer.key) }, }, { uc: "with valid header config with scheme", - id: "jun", + id: "fin", config: []byte(` +signer: + key_store: + path: ` + pemFile + ` header: name: Foo scheme: Bar `), + configureContext: func(t *testing.T, ctx *CreationContextMock) { + t.Helper() + + wm := mocks2.NewWatcherMock(t) + wm.EXPECT().Add(pemFile, mock.Anything).Return(nil) + + khr := mocks3.NewRegistryMock(t) + khr.EXPECT().AddKeyHolder(mock.Anything) + + co := mocks4.NewObserverMock(t) + co.EXPECT().Add(mock.Anything) + + ctx.EXPECT().Watcher().Return(wm) + ctx.EXPECT().KeyHolderRegistry().Return(khr) + ctx.EXPECT().CertificateObserver().Return(co) + }, assert: func(t *testing.T, err error, finalizer *jwtFinalizer) { t.Helper() @@ -223,9 +427,13 @@ header: require.NotNil(t, finalizer) assert.Equal(t, defaultJWTTTL, finalizer.ttl) assert.Nil(t, finalizer.claims) - assert.Equal(t, "jun", finalizer.ID()) + assert.Equal(t, "fin", finalizer.ID()) assert.Equal(t, "Foo", finalizer.headerName) assert.Equal(t, "Bar", finalizer.headerScheme) + require.NotNil(t, finalizer.signer) + assert.Equal(t, "heimdall", finalizer.signer.iss) + assert.Equal(t, pemFile, finalizer.signer.path) + assert.Equal(t, privKey, finalizer.signer.key) }, }, } { @@ -233,8 +441,11 @@ header: conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) + ctx := NewCreationContextMock(t) + tc.configureContext(t, ctx) + // WHEN - finalizer, err := newJWTFinalizer(tc.id, conf) + finalizer, err := newJWTFinalizer(ctx, tc.id, conf) // THEN tc.assert(t, err, finalizer) @@ -245,44 +456,72 @@ header: func TestCreateJWTFinalizerFromPrototype(t *testing.T) { t.Parallel() - const ( - expectedTTL = 5 * time.Second + privKey, err := 
ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + pemBytes, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(privKey, pemx.WithHeader("X-Key-ID", "key")), ) + require.NoError(t, err) + + testDir := t.TempDir() + pemFile := filepath.Join(testDir, "keystore.pem") + + err = os.WriteFile(pemFile, pemBytes, 0o600) + require.NoError(t, err) + + const expectedTTL = 5 * time.Second for _, tc := range []struct { - uc string - id string - config []byte - assert func(t *testing.T, err error, prototype *jwtFinalizer, configured *jwtFinalizer) + uc string + id string + prototypeConfig []byte + config []byte + assert func(t *testing.T, err error, prototype *jwtFinalizer, configured *jwtFinalizer) }{ { uc: "no new configuration provided", - id: "jun1", + id: "fin1", + prototypeConfig: []byte(` +signer: + key_store: + path: ` + pemFile + ` +`), assert: func(t *testing.T, err error, prototype *jwtFinalizer, configured *jwtFinalizer) { t.Helper() require.NoError(t, err) assert.Equal(t, prototype, configured) - assert.Equal(t, "jun1", configured.ID()) + assert.Equal(t, "fin1", configured.ID()) assert.False(t, configured.ContinueOnError()) }, }, { - uc: "empty configuration provided", - id: "jun2", + uc: "empty configuration provided", + id: "fin2", + prototypeConfig: []byte(` +signer: + key_store: + path: ` + pemFile + ` +`), config: []byte(``), assert: func(t *testing.T, err error, prototype *jwtFinalizer, configured *jwtFinalizer) { t.Helper() require.NoError(t, err) assert.Equal(t, prototype, configured) - assert.Equal(t, "jun2", configured.ID()) + assert.Equal(t, "fin2", configured.ID()) assert.False(t, configured.ContinueOnError()) }, }, { - uc: "configuration with ttl only provided", - id: "jun3", + uc: "configuration with ttl only provided", + id: "fin3", + prototypeConfig: []byte(` +signer: + key_store: + path: ` + pemFile + ` +`), config: []byte(`ttl: 5s`), assert: func(t *testing.T, err error, prototype *jwtFinalizer, configured *jwtFinalizer) { t.Helper() @@ -294,13 +533,19 @@ func TestCreateJWTFinalizerFromPrototype(t *testing.T) { assert.Equal(t, "Bearer", configured.headerScheme) assert.NotEqual(t, prototype.ttl, configured.ttl) assert.Equal(t, expectedTTL, configured.ttl) - assert.Equal(t, "jun3", configured.ID()) + assert.Equal(t, "fin3", configured.ID()) assert.False(t, prototype.ContinueOnError()) assert.False(t, configured.ContinueOnError()) + assert.Equal(t, prototype.signer, configured.signer) }, }, { - uc: "configuration with too short ttl", + uc: "configuration with too short ttl", + prototypeConfig: []byte(` +signer: + key_store: + path: ` + pemFile + ` +`), config: []byte(`ttl: 5ms`), assert: func(t *testing.T, err error, _ *jwtFinalizer, _ *jwtFinalizer) { t.Helper() @@ -312,7 +557,12 @@ func TestCreateJWTFinalizerFromPrototype(t *testing.T) { }, { uc: "configuration with claims only provided", - id: "jun4", + id: "fin4", + prototypeConfig: []byte(` +signer: + key_store: + path: ` + pemFile + ` +`), config: []byte(` claims: '{ "sub": {{ quote .Subject.ID }} }' @@ -332,14 +582,20 @@ claims: }) require.NoError(t, err) assert.Equal(t, `{ "sub": "bar" }`, val) - assert.Equal(t, "jun4", configured.ID()) + assert.Equal(t, "fin4", configured.ID()) assert.False(t, prototype.ContinueOnError()) assert.False(t, configured.ContinueOnError()) + assert.Equal(t, prototype.signer, configured.signer) }, }, { uc: "configuration with both ttl and claims provided", - id: "jun5", + id: "fin5", + prototypeConfig: []byte(` +signer: + key_store: + path: ` + pemFile + ` +`), 
config: []byte(` ttl: 5s claims: @@ -361,13 +617,19 @@ claims: }) require.NoError(t, err) assert.Equal(t, `{ "sub": "bar" }`, val) - assert.Equal(t, "jun5", configured.ID()) + assert.Equal(t, "fin5", configured.ID()) assert.False(t, prototype.ContinueOnError()) assert.False(t, configured.ContinueOnError()) + assert.Equal(t, prototype.signer, configured.signer) }, }, { uc: "with unknown entries in configuration", + prototypeConfig: []byte(` +signer: + key_store: + path: ` + pemFile + ` +`), config: []byte(` ttl: 5s foo: bar @@ -382,10 +644,27 @@ foo: bar }, } { t.Run("case="+tc.uc, func(t *testing.T) { + protoConf, err := testsupport.DecodeTestConfig(tc.prototypeConfig) + require.NoError(t, err) + conf, err := testsupport.DecodeTestConfig(tc.config) require.NoError(t, err) - prototype, err := newJWTFinalizer(tc.id, nil) + wm := mocks2.NewWatcherMock(t) + wm.EXPECT().Add(pemFile, mock.Anything).Return(nil) + + khr := mocks3.NewRegistryMock(t) + khr.EXPECT().AddKeyHolder(mock.Anything) + + co := mocks4.NewObserverMock(t) + co.EXPECT().Add(mock.Anything) + + ctx := NewCreationContextMock(t) + ctx.EXPECT().Watcher().Return(wm) + ctx.EXPECT().KeyHolderRegistry().Return(khr) + ctx.EXPECT().CertificateObserver().Return(co) + + prototype, err := newJWTFinalizer(ctx, tc.id, protoConf) require.NoError(t, err) // WHEN @@ -412,21 +691,40 @@ func TestJWTFinalizerExecute(t *testing.T) { const configuredTTL = 1 * time.Minute + privKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err) + + pemBytes, err := pemx.BuildPEM( + pemx.WithECDSAPrivateKey(privKey, pemx.WithHeader("X-Key-ID", "key")), + ) + require.NoError(t, err) + + testDir := t.TempDir() + pemFile := filepath.Join(testDir, "keystore.pem") + + err = os.WriteFile(pemFile, pemBytes, 0o600) + require.NoError(t, err) + for _, tc := range []struct { uc string id string config []byte subject *subject.Subject configureMocks func(t *testing.T, + fin *jwtFinalizer, ctx *heimdallmocks.ContextMock, - signer *heimdallmocks.JWTSignerMock, cch *mocks.CacheMock, sub *subject.Subject) assert func(t *testing.T, err error) }{ { uc: "with 'nil' subject", - id: "jun1", + id: "fin1", + config: []byte(` +signer: + key_store: + path: ` + pemFile + ` +`), assert: func(t *testing.T, err error) { t.Helper() @@ -436,25 +734,26 @@ func TestJWTFinalizerExecute(t *testing.T) { var identifier interface{ ID() string } require.ErrorAs(t, err, &identifier) - assert.Equal(t, "jun1", identifier.ID()) + assert.Equal(t, "fin1", identifier.ID()) }, }, { - uc: "with used prefilled cache", + uc: "with used prefilled cache", + config: []byte(` +signer: + key_store: + path: ` + pemFile + ` +`), subject: &subject.Subject{ID: "foo", Attributes: map[string]any{"baz": "bar"}}, - configureMocks: func(t *testing.T, ctx *heimdallmocks.ContextMock, signer *heimdallmocks.JWTSignerMock, + configureMocks: func(t *testing.T, fin *jwtFinalizer, ctx *heimdallmocks.ContextMock, cch *mocks.CacheMock, sub *subject.Subject, ) { t.Helper() - signer.EXPECT().Hash().Return([]byte("foobar")) - - ctx.EXPECT().Signer().Return(signer) ctx.EXPECT().AddHeaderForUpstream("Authorization", "Bearer TestToken") + ctx.EXPECT().Outputs().Return(map[string]any{"foo": "bar"}) - finalizer := jwtFinalizer{ttl: defaultJWTTTL} - - cacheKey := finalizer.calculateCacheKey(sub, signer) + cacheKey := fin.calculateCacheKey(ctx, sub) cch.EXPECT().Get(mock.Anything, cacheKey).Return([]byte("TestToken"), nil) }, assert: func(t *testing.T, err error) { @@ -464,23 +763,25 @@ func TestJWTFinalizerExecute(t 
*testing.T) { }, }, { - uc: "with no cache hit and without custom claims", - config: []byte(`ttl: 1m`), + uc: "with no cache hit and without custom claims", + config: []byte(` +signer: + key_store: + path: ` + pemFile + ` +ttl: 1m +`), subject: &subject.Subject{ID: "foo", Attributes: map[string]any{"baz": "bar"}}, - configureMocks: func(t *testing.T, ctx *heimdallmocks.ContextMock, signer *heimdallmocks.JWTSignerMock, - cch *mocks.CacheMock, sub *subject.Subject, + configureMocks: func(t *testing.T, _ *jwtFinalizer, ctx *heimdallmocks.ContextMock, + cch *mocks.CacheMock, _ *subject.Subject, ) { t.Helper() - signer.EXPECT().Hash().Return([]byte("foobar")) - signer.EXPECT().Sign(sub.ID, configuredTTL, map[string]any{}). - Return("barfoo", nil) - - ctx.EXPECT().Signer().Return(signer) - ctx.EXPECT().AddHeaderForUpstream("Authorization", "Bearer barfoo") + ctx.EXPECT().AddHeaderForUpstream("Authorization", + mock.MatchedBy(func(val string) bool { return strings.HasPrefix(val, "Bearer ") })) + ctx.EXPECT().Outputs().Return(map[string]any{}) cch.EXPECT().Get(mock.Anything, mock.Anything).Return(nil, errors.New("no cache entry")) - cch.EXPECT().Set(mock.Anything, mock.Anything, []byte("barfoo"), configuredTTL-defaultCacheLeeway).Return(nil) + cch.EXPECT().Set(mock.Anything, mock.Anything, mock.Anything, configuredTTL-defaultCacheLeeway).Return(nil) }, assert: func(t *testing.T, err error) { t.Helper() @@ -491,31 +792,30 @@ func TestJWTFinalizerExecute(t *testing.T) { { uc: "with no cache hit, with custom claims and custom header", config: []byte(` +signer: + key_store: + path: ` + pemFile + ` header: name: X-Token scheme: Bar claims: '{ {{ $val := .Subject.Attributes.baz }} "sub_id": {{ quote .Subject.ID }}, - {{ quote $val }}: "baz" + {{ quote $val }}: "baz", + "foo": {{ .Outputs.foo | quote }} }'`), subject: &subject.Subject{ID: "foo", Attributes: map[string]any{"baz": "bar"}}, - configureMocks: func(t *testing.T, ctx *heimdallmocks.ContextMock, signer *heimdallmocks.JWTSignerMock, - cch *mocks.CacheMock, sub *subject.Subject, + configureMocks: func(t *testing.T, _ *jwtFinalizer, ctx *heimdallmocks.ContextMock, + cch *mocks.CacheMock, _ *subject.Subject, ) { t.Helper() - signer.EXPECT().Hash().Return([]byte("foobar")) - signer.EXPECT().Sign(sub.ID, defaultJWTTTL, map[string]any{ - "sub_id": "foo", - "bar": "baz", - }).Return("barfoo", nil) - - ctx.EXPECT().Signer().Return(signer) - ctx.EXPECT().AddHeaderForUpstream("X-Token", "Bar barfoo") + ctx.EXPECT().AddHeaderForUpstream("X-Token", + mock.MatchedBy(func(val string) bool { return strings.HasPrefix(val, "Bar ") })) + ctx.EXPECT().Outputs().Return(map[string]any{"foo": "bar"}) cch.EXPECT().Get(mock.Anything, mock.Anything).Return(nil, errors.New("no cache entry")) - cch.EXPECT().Set(mock.Anything, mock.Anything, []byte("barfoo"), defaultJWTTTL-defaultCacheLeeway).Return(nil) + cch.EXPECT().Set(mock.Anything, mock.Anything, mock.Anything, defaultJWTTTL-defaultCacheLeeway).Return(nil) }, assert: func(t *testing.T, err error) { t.Helper() @@ -524,18 +824,21 @@ claims: '{ }, }, { - uc: "with custom claims template, which does not result in a JSON object", - id: "jun2", - config: []byte(`claims: "foo: bar"`), + uc: "with custom claims template, which does not result in a JSON object", + id: "jun2", + config: []byte(` +signer: + key_store: + path: ` + pemFile + ` +claims: "foo: bar" +`), subject: &subject.Subject{ID: "foo", Attributes: map[string]any{"baz": "bar"}}, - configureMocks: func(t *testing.T, ctx *heimdallmocks.ContextMock, signer 
*heimdallmocks.JWTSignerMock, + configureMocks: func(t *testing.T, _ *jwtFinalizer, ctx *heimdallmocks.ContextMock, cch *mocks.CacheMock, _ *subject.Subject, ) { t.Helper() - signer.EXPECT().Hash().Return([]byte("foobar")) - - ctx.EXPECT().Signer().Return(signer) + ctx.EXPECT().Outputs().Return(map[string]any{}) cch.EXPECT().Get(mock.Anything, mock.Anything).Return(nil, errors.New("no cache entry")) }, @@ -552,18 +855,21 @@ claims: '{ }, }, { - uc: "with custom claims template, which fails during rendering", - id: "jun3", - config: []byte(`claims: "{{ len .foobar }}"`), + uc: "with custom claims template, which fails during rendering", + id: "jun3", + config: []byte(` +signer: + key_store: + path: ` + pemFile + ` +claims: "{{ len .foobar }}" +`), subject: &subject.Subject{ID: "foo", Attributes: map[string]any{"baz": "bar"}}, - configureMocks: func(t *testing.T, ctx *heimdallmocks.ContextMock, signer *heimdallmocks.JWTSignerMock, + configureMocks: func(t *testing.T, _ *jwtFinalizer, ctx *heimdallmocks.ContextMock, cch *mocks.CacheMock, _ *subject.Subject, ) { t.Helper() - signer.EXPECT().Hash().Return([]byte("foobar")) - - ctx.EXPECT().Signer().Return(signer) + ctx.EXPECT().Outputs().Return(map[string]any{}) cch.EXPECT().Get(mock.Anything, mock.Anything).Return(nil, errors.New("no cache entry")) }, @@ -584,9 +890,7 @@ claims: '{ // GIVEN configureMocks := x.IfThenElse(tc.configureMocks != nil, tc.configureMocks, - func(t *testing.T, _ *heimdallmocks.ContextMock, _ *heimdallmocks.JWTSignerMock, - _ *mocks.CacheMock, _ *subject.Subject, - ) { + func(t *testing.T, _ *jwtFinalizer, _ *heimdallmocks.ContextMock, _ *mocks.CacheMock, _ *subject.Subject) { t.Helper() }) @@ -595,14 +899,28 @@ claims: '{ cch := mocks.NewCacheMock(t) mctx := heimdallmocks.NewContextMock(t) - signer := heimdallmocks.NewJWTSignerMock(t) + + wm := mocks2.NewWatcherMock(t) + wm.EXPECT().Add(pemFile, mock.Anything).Return(nil) + + khr := mocks3.NewRegistryMock(t) + khr.EXPECT().AddKeyHolder(mock.Anything) + + co := mocks4.NewObserverMock(t) + co.EXPECT().Add(mock.Anything) + + cctx := NewCreationContextMock(t) + cctx.EXPECT().Watcher().Return(wm) + cctx.EXPECT().KeyHolderRegistry().Return(khr) + cctx.EXPECT().CertificateObserver().Return(co) mctx.EXPECT().AppContext().Return(cache.WithContext(context.Background(), cch)) - configureMocks(t, mctx, signer, cch, tc.subject) - finalizer, err := newJWTFinalizer(tc.id, conf) + finalizer, err := newJWTFinalizer(cctx, tc.id, conf) require.NoError(t, err) + configureMocks(t, finalizer, mctx, cch, tc.subject) + // WHEN err = finalizer.Execute(mctx, tc.subject) diff --git a/internal/signer/jwt_signer.go b/internal/rules/mechanisms/finalizers/jwt_signer.go similarity index 71% rename from internal/signer/jwt_signer.go rename to internal/rules/mechanisms/finalizers/jwt_signer.go index 07e7b004b..f72caa460 100644 --- a/internal/signer/jwt_signer.go +++ b/internal/rules/mechanisms/finalizers/jwt_signer.go @@ -14,13 +14,10 @@ // // SPDX-License-Identifier: Apache-2.0 -package signer +package finalizers import ( "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" "crypto/sha256" "crypto/x509" "sync" @@ -31,42 +28,25 @@ import ( "github.com/google/uuid" "github.com/knadh/koanf/maps" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/keystore" "github.com/dadrus/heimdall/internal/watcher" 
+ "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/errorchain" "github.com/dadrus/heimdall/internal/x/pkix" "github.com/dadrus/heimdall/internal/x/stringx" ) -func NewJWTSigner(conf *config.Configuration, logger zerolog.Logger, fw watcher.Watcher) (heimdall.JWTSigner, error) { - signer := &jwtSigner{ - path: conf.Signer.KeyStore.Path, - password: conf.Signer.KeyStore.Password, - keyID: conf.Signer.KeyID, - iss: conf.Signer.Name, - } - - if err := signer.load(logger); err != nil { - logger.Error().Err(err).Msg("Failed creating signer") - - return nil, err - } - - if len(signer.path) != 0 && fw != nil { - if err := fw.Add(signer.path, signer); err != nil { - logger.Error().Err(err).Msg("Failed registering signer for updates") - - return nil, err - } - } - - logger.Info().Msg("Signer configured") +type KeyStore struct { + Path string `mapstructure:"path" validate:"required"` + Password string `mapstructure:"password"` +} - return signer, nil +type SignerConfig struct { + Name string `mapstructure:"name"` + KeyStore KeyStore `mapstructure:"key_store" validate:"required"` + KeyID string `mapstructure:"key_id"` } type jwtSigner struct { @@ -75,54 +55,54 @@ type jwtSigner struct { keyID string iss string - mut sync.Mutex + mut sync.RWMutex jwk jose.JSONWebKey key crypto.Signer pubKeys []jose.JSONWebKey } +func newJWTSigner(conf *SignerConfig, fw watcher.Watcher) (*jwtSigner, error) { + signer := &jwtSigner{ + path: conf.KeyStore.Path, + password: conf.KeyStore.Password, + keyID: conf.KeyID, + iss: x.IfThenElse(len(conf.Name) == 0, "heimdall", conf.Name), + } + + if err := signer.load(); err != nil { + return nil, err + } + + if err := fw.Add(signer.path, signer); err != nil { + return nil, errorchain.NewWithMessage(heimdall.ErrInternal, "failed registering jwt signer for updates"). + CausedBy(err) + } + + return signer, nil +} + func (s *jwtSigner) OnChanged(logger zerolog.Logger) { - err := s.load(logger) + err := s.load() if err != nil { - log.Warn().Err(err). + logger.Warn().Err(err). Str("_file", s.path). Msg("Signer key store reload failed") } else { - log.Info(). + logger.Info(). Str("_file", s.path). Msg("Signer key store reloaded") } } -func (s *jwtSigner) load(logger zerolog.Logger) error { - var ( - ks keystore.KeyStore - kse *keystore.Entry - err error - ) - - if len(s.path) == 0 { - logger.Warn(). - Msg("Key store is not configured. NEVER DO IT IN PRODUCTION!!!! Generating an ECDSA P-384 key pair.") - - var privateKey *ecdsa.PrivateKey - - privateKey, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - if err != nil { - return errorchain.NewWithMessage(heimdall.ErrInternal, - "failed to generate ECDSA P-384 key pair").CausedBy(err) - } - - ks, err = keystore.NewKeyStoreFromKey(privateKey) - } else { - ks, err = keystore.NewKeyStoreFromPEMFile(s.path, s.password) - } - +func (s *jwtSigner) load() error { + ks, err := keystore.NewKeyStoreFromPEMFile(s.path, s.password) if err != nil { return errorchain.NewWithMessage(heimdall.ErrInternal, "failed loading keystore"). 
CausedBy(err) } + var kse *keystore.Entry + if len(s.keyID) == 0 { kse, err = ks.Entries()[0], nil } else { @@ -161,9 +141,9 @@ func (s *jwtSigner) load(logger zerolog.Logger) error { } func (s *jwtSigner) Hash() []byte { - s.mut.Lock() + s.mut.RLock() jwk := s.jwk - s.mut.Unlock() + s.mut.RUnlock() hash := sha256.New() hash.Write(stringx.ToBytes(jwk.KeyID)) @@ -174,10 +154,10 @@ func (s *jwtSigner) Hash() []byte { } func (s *jwtSigner) Sign(sub string, ttl time.Duration, customClaims map[string]any) (string, error) { - s.mut.Lock() + s.mut.RLock() jwk := s.jwk key := s.key - s.mut.Unlock() + s.mut.RUnlock() signer, err := jose.NewSigner( jose.SigningKey{Algorithm: jose.SignatureAlgorithm(jwk.Algorithm), Key: key}, @@ -212,8 +192,15 @@ func (s *jwtSigner) Sign(sub string, ttl time.Duration, customClaims map[string] } func (s *jwtSigner) Keys() []jose.JSONWebKey { - s.mut.Lock() - defer s.mut.Unlock() + s.mut.RLock() + defer s.mut.RUnlock() return s.pubKeys } + +func (s *jwtSigner) activeCertificateChain() []*x509.Certificate { + s.mut.RLock() + defer s.mut.RUnlock() + + return s.jwk.Certificates +} diff --git a/internal/signer/jwt_signer_test.go b/internal/rules/mechanisms/finalizers/jwt_signer_test.go similarity index 84% rename from internal/signer/jwt_signer_test.go rename to internal/rules/mechanisms/finalizers/jwt_signer_test.go index 2430e1715..1e0f238a1 100644 --- a/internal/signer/jwt_signer_test.go +++ b/internal/rules/mechanisms/finalizers/jwt_signer_test.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package signer +package finalizers import ( "crypto/ecdsa" @@ -38,7 +38,6 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/keystore" "github.com/dadrus/heimdall/internal/watcher/mocks" @@ -123,34 +122,31 @@ func TestNewJWTSigner(t *testing.T) { for _, tc := range []struct { uc string - config func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig + config func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig assert func(t *testing.T, err error, signer *jwtSigner) }{ { uc: "without configuration", - config: func(t *testing.T, _ *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, _ *mocks.WatcherMock) *SignerConfig { t.Helper() - return config.SignerConfig{} + return &SignerConfig{} }, - assert: func(t *testing.T, err error, signer *jwtSigner) { + assert: func(t *testing.T, err error, _ *jwtSigner) { t.Helper() - require.NoError(t, err) - - assert.IsType(t, &ecdsa.PrivateKey{}, signer.key) - assert.NotEmpty(t, signer.jwk.KeyID) - assert.Equal(t, string(jose.ES384), signer.jwk.Algorithm) + require.Error(t, err) + require.ErrorContains(t, err, "failed loading keystore") }, }, { uc: "no key id configured", - config: func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: keyFile.Name()}} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: keyFile.Name()}} }, assert: func(t *testing.T, err error, signer *jwtSigner) { t.Helper() @@ -161,16 +157,17 @@ func TestNewJWTSigner(t *testing.T) { assert.Equal(t, rsaPrivKey1, signer.key) assert.Equal(t, "key1", signer.jwk.KeyID) assert.Equal(t, string(jose.PS256), 
signer.jwk.Algorithm) + assert.Empty(t, signer.activeCertificateChain()) }, }, { uc: "with key id configured", - config: func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: keyFile.Name()}, KeyID: "key2"} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "key2"} }, assert: func(t *testing.T, err error, signer *jwtSigner) { t.Helper() @@ -181,14 +178,15 @@ func TestNewJWTSigner(t *testing.T) { assert.Equal(t, rsaPrivKey2, signer.key) assert.Equal(t, "key2", signer.jwk.KeyID) assert.Equal(t, string(jose.PS384), signer.jwk.Algorithm) + assert.Empty(t, signer.activeCertificateChain()) }, }, { uc: "with error while retrieving key from key store", - config: func(t *testing.T, _ *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, _ *mocks.WatcherMock) *SignerConfig { t.Helper() - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: keyFile.Name()}, KeyID: "baz"} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "baz"} }, assert: func(t *testing.T, err error, _ *jwtSigner) { t.Helper() @@ -199,12 +197,12 @@ func TestNewJWTSigner(t *testing.T) { }, { uc: "with rsa 2048 key", - config: func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: keyFile.Name()}, KeyID: "key1"} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "key1"} }, assert: func(t *testing.T, err error, signer *jwtSigner) { t.Helper() @@ -215,16 +213,17 @@ func TestNewJWTSigner(t *testing.T) { assert.Equal(t, rsaPrivKey1, signer.key) assert.Equal(t, "key1", signer.jwk.KeyID) assert.Equal(t, string(jose.PS256), signer.jwk.Algorithm) + assert.Empty(t, signer.activeCertificateChain()) }, }, { uc: "with rsa 3072 key", - config: func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: keyFile.Name()}, KeyID: "key2"} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "key2"} }, assert: func(t *testing.T, err error, signer *jwtSigner) { t.Helper() @@ -235,16 +234,17 @@ func TestNewJWTSigner(t *testing.T) { assert.Equal(t, rsaPrivKey2, signer.key) assert.Equal(t, "key2", signer.jwk.KeyID) assert.Equal(t, string(jose.PS384), signer.jwk.Algorithm) + assert.Empty(t, signer.activeCertificateChain()) }, }, { uc: "with rsa 4096 key", - config: func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: keyFile.Name()}, KeyID: "key3"} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "key3"} }, assert: func(t *testing.T, err error, signer *jwtSigner) { t.Helper() @@ -255,16 +255,17 @@ func TestNewJWTSigner(t *testing.T) { assert.Equal(t, rsaPrivKey3, signer.key) assert.Equal(t, 
"key3", signer.jwk.KeyID) assert.Equal(t, string(jose.PS512), signer.jwk.Algorithm) + assert.Empty(t, signer.activeCertificateChain()) }, }, { uc: "with P256 ecdsa key", - config: func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: keyFile.Name()}, KeyID: "key4"} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "key4"} }, assert: func(t *testing.T, err error, signer *jwtSigner) { t.Helper() @@ -275,16 +276,17 @@ func TestNewJWTSigner(t *testing.T) { assert.Equal(t, ecdsaPrivKey1, signer.key) assert.Equal(t, "key4", signer.jwk.KeyID) assert.Equal(t, string(jose.ES256), signer.jwk.Algorithm) + assert.Empty(t, signer.activeCertificateChain()) }, }, { uc: "with P384 ecdsa key", - config: func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: keyFile.Name()}, KeyID: "key5"} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "key5"} }, assert: func(t *testing.T, err error, signer *jwtSigner) { t.Helper() @@ -295,16 +297,17 @@ func TestNewJWTSigner(t *testing.T) { assert.Equal(t, ecdsaPrivKey2, signer.key) assert.Equal(t, "key5", signer.jwk.KeyID) assert.Equal(t, string(jose.ES384), signer.jwk.Algorithm) + assert.Empty(t, signer.activeCertificateChain()) }, }, { uc: "with P512 ecdsa key", - config: func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: keyFile.Name()}, KeyID: "key6"} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "key6"} }, assert: func(t *testing.T, err error, signer *jwtSigner) { t.Helper() @@ -315,14 +318,15 @@ func TestNewJWTSigner(t *testing.T) { assert.Equal(t, ecdsaPrivKey3, signer.key) assert.Equal(t, "key6", signer.jwk.KeyID) assert.Equal(t, string(jose.ES512), signer.jwk.Algorithm) + assert.Empty(t, signer.activeCertificateChain()) }, }, { uc: "with not existing key store", - config: func(t *testing.T, _ *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, _ *mocks.WatcherMock) *SignerConfig { t.Helper() - return config.SignerConfig{Name: "foo", KeyStore: config.KeyStore{Path: "/does/not/exist"}} + return &SignerConfig{Name: "foo", KeyStore: KeyStore{Path: "/does/not/exist"}} }, assert: func(t *testing.T, err error, _ *jwtSigner) { t.Helper() @@ -334,12 +338,12 @@ func TestNewJWTSigner(t *testing.T) { }, { uc: "with certificate, which cannot be used for signature due to missing key usage", - config: func(t *testing.T, _ *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, _ *mocks.WatcherMock) *SignerConfig { t.Helper() - return config.SignerConfig{ + return &SignerConfig{ Name: "foo", - KeyStore: config.KeyStore{Path: keyFile.Name()}, + KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "missing_key_usage", } }, @@ -353,14 +357,14 @@ func TestNewJWTSigner(t *testing.T) { }, { uc: "with self-signed certificate usable for JWT signing", - config: func(t *testing.T, wm 
*mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) - return config.SignerConfig{ + return &SignerConfig{ Name: "foo", - KeyStore: config.KeyStore{Path: keyFile.Name()}, + KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "self_signed", } }, @@ -373,18 +377,19 @@ func TestNewJWTSigner(t *testing.T) { assert.Equal(t, ecdsaPrivKey5, signer.key) assert.Equal(t, "self_signed", signer.jwk.KeyID) assert.Equal(t, string(jose.ES512), signer.jwk.Algorithm) + assert.Equal(t, []*x509.Certificate{cert5}, signer.activeCertificateChain()) }, }, { uc: "fails due to error while registering with file watcher", - config: func(t *testing.T, wm *mocks.WatcherMock) config.SignerConfig { + config: func(t *testing.T, wm *mocks.WatcherMock) *SignerConfig { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(errors.New("test error")) - return config.SignerConfig{ + return &SignerConfig{ Name: "foo", - KeyStore: config.KeyStore{Path: keyFile.Name()}, + KeyStore: KeyStore{Path: keyFile.Name()}, KeyID: "self_signed", } }, @@ -399,21 +404,10 @@ func TestNewJWTSigner(t *testing.T) { t.Run("case="+tc.uc, func(t *testing.T) { // WHEN wm := mocks.NewWatcherMock(t) - signer, err := NewJWTSigner(&config.Configuration{Signer: tc.config(t, wm)}, log.Logger, wm) + signer, err := newJWTSigner(tc.config(t, wm), wm) // THEN - var ( - impl *jwtSigner - ok bool - ) - - if err == nil { - impl, ok = signer.(*jwtSigner) - require.True(t, ok) - } - - tc.assert(t, err, impl) - wm.AssertExpectations(t) + tc.assert(t, err, signer) }) } } @@ -592,10 +586,12 @@ func TestJwtSignerKeys(t *testing.T) { _, err = keyFile.Write(pemBytes) require.NoError(t, err) - signer, err := NewJWTSigner( - &config.Configuration{Signer: config.SignerConfig{KeyStore: config.KeyStore{Path: keyFile.Name()}}}, - log.Logger, - nil, + fw := mocks.NewWatcherMock(t) + fw.EXPECT().Add(keyFile.Name(), mock.Anything).Return(nil) + + signer, err := newJWTSigner( + &SignerConfig{KeyStore: KeyStore{Path: keyFile.Name()}}, + fw, ) require.NoError(t, err) @@ -668,7 +664,7 @@ func TestJWTSignerOnChanged(t *testing.T) { require.NoError(t, err) signer := &jwtSigner{path: pemFile.Name(), keyID: "key1"} - err = signer.load(log.Logger) + err = signer.load() require.NoError(t, err) require.Equal(t, cert1, signer.jwk.Certificates[0]) diff --git a/internal/rules/mechanisms/finalizers/mock_creation_context_test.go b/internal/rules/mechanisms/finalizers/mock_creation_context_test.go new file mode 100644 index 000000000..767698b42 --- /dev/null +++ b/internal/rules/mechanisms/finalizers/mock_creation_context_test.go @@ -0,0 +1,180 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. 
+ +package finalizers + +import ( + keyholder "github.com/dadrus/heimdall/internal/keyholder" + certificate "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + + mock "github.com/stretchr/testify/mock" + + watcher "github.com/dadrus/heimdall/internal/watcher" +) + +// CreationContextMock is an autogenerated mock type for the CreationContext type +type CreationContextMock struct { + mock.Mock +} + +type CreationContextMock_Expecter struct { + mock *mock.Mock +} + +func (_m *CreationContextMock) EXPECT() *CreationContextMock_Expecter { + return &CreationContextMock_Expecter{mock: &_m.Mock} +} + +// CertificateObserver provides a mock function with given fields: +func (_m *CreationContextMock) CertificateObserver() certificate.Observer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CertificateObserver") + } + + var r0 certificate.Observer + if rf, ok := ret.Get(0).(func() certificate.Observer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(certificate.Observer) + } + } + + return r0 +} + +// CreationContextMock_CertificateObserver_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CertificateObserver' +type CreationContextMock_CertificateObserver_Call struct { + *mock.Call +} + +// CertificateObserver is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) CertificateObserver() *CreationContextMock_CertificateObserver_Call { + return &CreationContextMock_CertificateObserver_Call{Call: _e.mock.On("CertificateObserver")} +} + +func (_c *CreationContextMock_CertificateObserver_Call) Run(run func()) *CreationContextMock_CertificateObserver_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) Return(_a0 certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_CertificateObserver_Call) RunAndReturn(run func() certificate.Observer) *CreationContextMock_CertificateObserver_Call { + _c.Call.Return(run) + return _c +} + +// KeyHolderRegistry provides a mock function with given fields: +func (_m *CreationContextMock) KeyHolderRegistry() keyholder.Registry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for KeyHolderRegistry") + } + + var r0 keyholder.Registry + if rf, ok := ret.Get(0).(func() keyholder.Registry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keyholder.Registry) + } + } + + return r0 +} + +// CreationContextMock_KeyHolderRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'KeyHolderRegistry' +type CreationContextMock_KeyHolderRegistry_Call struct { + *mock.Call +} + +// KeyHolderRegistry is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) KeyHolderRegistry() *CreationContextMock_KeyHolderRegistry_Call { + return &CreationContextMock_KeyHolderRegistry_Call{Call: _e.mock.On("KeyHolderRegistry")} +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Run(run func()) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) Return(_a0 keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_KeyHolderRegistry_Call) 
RunAndReturn(run func() keyholder.Registry) *CreationContextMock_KeyHolderRegistry_Call { + _c.Call.Return(run) + return _c +} + +// Watcher provides a mock function with given fields: +func (_m *CreationContextMock) Watcher() watcher.Watcher { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Watcher") + } + + var r0 watcher.Watcher + if rf, ok := ret.Get(0).(func() watcher.Watcher); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(watcher.Watcher) + } + } + + return r0 +} + +// CreationContextMock_Watcher_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Watcher' +type CreationContextMock_Watcher_Call struct { + *mock.Call +} + +// Watcher is a helper method to define mock.On call +func (_e *CreationContextMock_Expecter) Watcher() *CreationContextMock_Watcher_Call { + return &CreationContextMock_Watcher_Call{Call: _e.mock.On("Watcher")} +} + +func (_c *CreationContextMock_Watcher_Call) Run(run func()) *CreationContextMock_Watcher_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) Return(_a0 watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CreationContextMock_Watcher_Call) RunAndReturn(run func() watcher.Watcher) *CreationContextMock_Watcher_Call { + _c.Call.Return(run) + return _c +} + +// NewCreationContextMock creates a new instance of CreationContextMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCreationContextMock(t interface { + mock.TestingT + Cleanup(func()) +}) *CreationContextMock { + mock := &CreationContextMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/rules/mechanisms/finalizers/noop_finalizer.go b/internal/rules/mechanisms/finalizers/noop_finalizer.go index cdeb621e1..2ee9c7597 100644 --- a/internal/rules/mechanisms/finalizers/noop_finalizer.go +++ b/internal/rules/mechanisms/finalizers/noop_finalizer.go @@ -28,7 +28,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, _ map[string]any) (bool, Finalizer, error) { + func(_ CreationContext, id string, typ string, _ map[string]any) (bool, Finalizer, error) { if typ != FinalizerNoop { return false, nil, nil } @@ -43,15 +43,15 @@ type noopFinalizer struct { id string } -func (u *noopFinalizer) Execute(ctx heimdall.Context, _ *subject.Subject) error { +func (f *noopFinalizer) Execute(ctx heimdall.Context, _ *subject.Subject) error { logger := zerolog.Ctx(ctx.AppContext()) - logger.Debug().Str("_id", u.id).Msg("Finalizing using noop finalizer") + logger.Debug().Str("_id", f.id).Msg("Finalizing using noop finalizer") return nil } -func (u *noopFinalizer) WithConfig(map[string]any) (Finalizer, error) { return u, nil } +func (f *noopFinalizer) WithConfig(map[string]any) (Finalizer, error) { return f, nil } -func (u *noopFinalizer) ID() string { return u.id } +func (f *noopFinalizer) ID() string { return f.id } -func (u *noopFinalizer) ContinueOnError() bool { return false } +func (f *noopFinalizer) ContinueOnError() bool { return false } diff --git a/internal/rules/mechanisms/finalizers/oauth2_client_credentials_finalizer.go b/internal/rules/mechanisms/finalizers/oauth2_client_credentials_finalizer.go index d10fbc0ec..f9a31d800 100644 --- 
a/internal/rules/mechanisms/finalizers/oauth2_client_credentials_finalizer.go +++ b/internal/rules/mechanisms/finalizers/oauth2_client_credentials_finalizer.go @@ -33,7 +33,7 @@ import ( //nolint:gochecknoinits func init() { registerTypeFactory( - func(id string, typ string, conf map[string]any) (bool, Finalizer, error) { + func(_ CreationContext, id string, typ string, conf map[string]any) (bool, Finalizer, error) { if typ != FinalizerOAuth2ClientCredentials { return false, nil, nil } diff --git a/internal/rules/mechanisms/factory_impl.go b/internal/rules/mechanisms/mechanism_factory.go similarity index 69% rename from internal/rules/mechanisms/factory_impl.go rename to internal/rules/mechanisms/mechanism_factory.go index 1329d1fa7..5348ae0c5 100644 --- a/internal/rules/mechanisms/factory_impl.go +++ b/internal/rules/mechanisms/mechanism_factory.go @@ -17,23 +17,52 @@ package mechanisms import ( + "errors" + "github.com/rs/zerolog" "github.com/dadrus/heimdall/internal/config" + "github.com/dadrus/heimdall/internal/keyholder" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/rules/mechanisms/authenticators" "github.com/dadrus/heimdall/internal/rules/mechanisms/authorizers" "github.com/dadrus/heimdall/internal/rules/mechanisms/contextualizers" "github.com/dadrus/heimdall/internal/rules/mechanisms/errorhandlers" "github.com/dadrus/heimdall/internal/rules/mechanisms/finalizers" + "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" ) -func NewFactory(conf *config.Configuration, logger zerolog.Logger) (Factory, error) { - logger.Info().Msg("Loading pipeline definitions") +var ( + ErrAuthenticatorCreation = errors.New("failed to create authenticator") + ErrAuthorizerCreation = errors.New("failed to create authorizer") + ErrFinalizerCreation = errors.New("failed to create finalizer") + ErrContextualizerCreation = errors.New("failed to create contextualizer") + ErrErrorHandlerCreation = errors.New("failed to create error handler") +) + +//go:generate mockery --name MechanismFactory --structname MechanismFactoryMock + +type MechanismFactory interface { + CreateAuthenticator(version, id string, conf config.MechanismConfig) (authenticators.Authenticator, error) + CreateAuthorizer(version, id string, conf config.MechanismConfig) (authorizers.Authorizer, error) + CreateContextualizer(version, id string, conf config.MechanismConfig) (contextualizers.Contextualizer, error) + CreateFinalizer(version, id string, conf config.MechanismConfig) (finalizers.Finalizer, error) + CreateErrorHandler(version, id string, conf config.MechanismConfig) (errorhandlers.ErrorHandler, error) +} + +func NewMechanismFactory( + conf *config.Configuration, + logger zerolog.Logger, + fw watcher.Watcher, + khr keyholder.Registry, + co certificate.Observer, +) (MechanismFactory, error) { + logger.Info().Msg("Loading mechanism catalogue") - repository, err := newPrototypeRepository(conf, logger) + repository, err := newMechanismRepository(conf, logger, fw, khr, co) if err != nil { - logger.Error().Err(err).Msg("Failed loading pipeline definitions") + logger.Error().Err(err).Msg("Failed loading mechanism catalogue") return nil, err } @@ -42,7 +71,7 @@ func NewFactory(conf *config.Configuration, logger zerolog.Logger) (Factory, err } type mechanismsFactory struct { - r *prototypeRepository + r *mechanismRepository } func (hf *mechanismsFactory) 
CreateAuthenticator(_, id string, conf config.MechanismConfig) ( diff --git a/internal/rules/mechanisms/factory_test.go b/internal/rules/mechanisms/mechanism_factory_test.go similarity index 98% rename from internal/rules/mechanisms/factory_test.go rename to internal/rules/mechanisms/mechanism_factory_test.go index 9c73daf00..43f3dc659 100644 --- a/internal/rules/mechanisms/factory_test.go +++ b/internal/rules/mechanisms/mechanism_factory_test.go @@ -113,7 +113,7 @@ func TestHandlerFactoryCreateAuthenticator(t *testing.T) { configureMock(t, mAuth) factory := &mechanismsFactory{ - r: &prototypeRepository{ + r: &mechanismRepository{ authenticators: map[string]authenticators.Authenticator{ ID: mAuth, }, @@ -205,7 +205,7 @@ func TestHandlerFactoryCreateAuthorizer(t *testing.T) { configureMock(t, mAuth) factory := &mechanismsFactory{ - r: &prototypeRepository{ + r: &mechanismRepository{ authorizers: map[string]authorizers.Authorizer{ ID: mAuth, }, @@ -298,7 +298,7 @@ func TestHandlerFactoryCreateContextualizer(t *testing.T) { configureMock(t, mContextualizer) factory := &mechanismsFactory{ - r: &prototypeRepository{ + r: &mechanismRepository{ contextualizers: map[string]contextualizers.Contextualizer{ ID: mContextualizer, }, @@ -390,7 +390,7 @@ func TestHandlerFactoryCreateFinalizer(t *testing.T) { configureMock(t, mFin) factory := &mechanismsFactory{ - r: &prototypeRepository{ + r: &mechanismRepository{ finalizers: map[string]finalizers.Finalizer{ ID: mFin, }, @@ -482,7 +482,7 @@ func TestHandlerFactoryCreateErrorHandler(t *testing.T) { configureMock(t, mEH) factory := &mechanismsFactory{ - r: &prototypeRepository{ + r: &mechanismRepository{ errorHandlers: map[string]errorhandlers.ErrorHandler{ ID: mEH, }, @@ -551,7 +551,7 @@ func TestCreateHandlerFactory(t *testing.T) { ) // WHEN - factory, err := NewFactory(tc.conf, log.Logger) + factory, err := NewMechanismFactory(tc.conf, log.Logger, nil, nil, nil) // THEN if err == nil { diff --git a/internal/rules/mechanisms/prototype_repository.go b/internal/rules/mechanisms/mechanism_repository.go similarity index 64% rename from internal/rules/mechanisms/prototype_repository.go rename to internal/rules/mechanisms/mechanism_repository.go index 0d9642a41..b86df368b 100644 --- a/internal/rules/mechanisms/prototype_repository.go +++ b/internal/rules/mechanisms/mechanism_repository.go @@ -22,24 +22,46 @@ import ( "github.com/rs/zerolog" "github.com/dadrus/heimdall/internal/config" + "github.com/dadrus/heimdall/internal/keyholder" + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" "github.com/dadrus/heimdall/internal/rules/mechanisms/authenticators" "github.com/dadrus/heimdall/internal/rules/mechanisms/authorizers" "github.com/dadrus/heimdall/internal/rules/mechanisms/contextualizers" "github.com/dadrus/heimdall/internal/rules/mechanisms/errorhandlers" "github.com/dadrus/heimdall/internal/rules/mechanisms/finalizers" + "github.com/dadrus/heimdall/internal/watcher" "github.com/dadrus/heimdall/internal/x/errorchain" ) var ErrNoSuchPipelineObject = errors.New("pipeline object not found") -func newPrototypeRepository( +type creationContext struct { + fw watcher.Watcher + khr keyholder.Registry + co certificate.Observer +} + +func (cc *creationContext) Watcher() watcher.Watcher { return cc.fw } +func (cc *creationContext) KeyHolderRegistry() keyholder.Registry { return cc.khr } +func (cc *creationContext) CertificateObserver() certificate.Observer { return cc.co 
} + +func newMechanismRepository( conf *config.Configuration, logger zerolog.Logger, -) (*prototypeRepository, error) { + fw watcher.Watcher, + khr keyholder.Registry, + co certificate.Observer, +) (*mechanismRepository, error) { logger.Debug().Msg("Loading definitions for authenticators") - authenticatorMap, err := createPipelineObjects(conf.Prototypes.Authenticators, logger, - authenticators.CreatePrototype) + cc := &creationContext{ + fw: fw, + khr: khr, + co: co, + } + + authenticatorMap, err := createPipelineObjects[authenticators.Authenticator, authenticators.CreationContext]( + cc, conf.Prototypes.Authenticators, logger, authenticators.CreatePrototype) if err != nil { logger.Error().Err(err).Msg("Failed loading authenticators definitions") @@ -48,8 +70,8 @@ func newPrototypeRepository( logger.Debug().Msg("Loading definitions for authorizers") - authorizerMap, err := createPipelineObjects(conf.Prototypes.Authorizers, logger, - authorizers.CreatePrototype) + authorizerMap, err := createPipelineObjects[authorizers.Authorizer, authorizers.CreationContext]( + cc, conf.Prototypes.Authorizers, logger, authorizers.CreatePrototype) if err != nil { logger.Error().Err(err).Msg("Failed loading authorizers definitions") @@ -58,8 +80,8 @@ func newPrototypeRepository( logger.Debug().Msg("Loading definitions for contextualizer") - contextualizerMap, err := createPipelineObjects(conf.Prototypes.Contextualizers, logger, - contextualizers.CreatePrototype) + contextualizerMap, err := createPipelineObjects[contextualizers.Contextualizer, contextualizers.CreationContext]( + cc, conf.Prototypes.Contextualizers, logger, contextualizers.CreatePrototype) if err != nil { logger.Error().Err(err).Msg("Failed loading contextualizer definitions") @@ -68,8 +90,8 @@ func newPrototypeRepository( logger.Debug().Msg("Loading definitions for finalizers") - finalizerMap, err := createPipelineObjects(conf.Prototypes.Finalizers, logger, - finalizers.CreatePrototype) + finalizerMap, err := createPipelineObjects[finalizers.Finalizer, finalizers.CreationContext]( + cc, conf.Prototypes.Finalizers, logger, finalizers.CreatePrototype) if err != nil { logger.Error().Err(err).Msg("Failed loading finalizer definitions") @@ -78,15 +100,15 @@ func newPrototypeRepository( logger.Debug().Msg("Loading definitions for error handler") - ehMap, err := createPipelineObjects(conf.Prototypes.ErrorHandlers, logger, - errorhandlers.CreatePrototype) + ehMap, err := createPipelineObjects[errorhandlers.ErrorHandler, errorhandlers.CreationContext]( + cc, conf.Prototypes.ErrorHandlers, logger, errorhandlers.CreatePrototype) if err != nil { logger.Error().Err(err).Msg("Failed loading error handler definitions") return nil, err } - return &prototypeRepository{ + return &mechanismRepository{ authenticators: authenticatorMap, authorizers: authorizerMap, contextualizers: contextualizerMap, @@ -95,21 +117,22 @@ func newPrototypeRepository( }, nil } -func createPipelineObjects[T any]( +func createPipelineObjects[T any, CC any]( + ctx CC, pObjects []config.Mechanism, logger zerolog.Logger, - create func(id string, typ string, c map[string]any) (T, error), + create func(ctx CC, id string, typ string, c map[string]any) (T, error), ) (map[string]T, error) { objects := make(map[string]T) for _, pe := range pObjects { - logger.Debug().Str("_id", pe.ID).Str("_type", pe.Type).Msg("Loading pipeline definition") + logger.Debug().Str("_id", pe.ID).Str("_type", pe.Type).Msg("Loading mechanism definition") if len(pe.Condition) != 0 { pe.Config["if"] = pe.Condition } - 
if r, err := create(pe.ID, pe.Type, pe.Config); err == nil { + if r, err := create(ctx, pe.ID, pe.Type, pe.Config); err == nil { objects[pe.ID] = r } else { return nil, err @@ -119,7 +142,7 @@ func createPipelineObjects[T any]( return objects, nil } -type prototypeRepository struct { +type mechanismRepository struct { authenticators map[string]authenticators.Authenticator authorizers map[string]authorizers.Authorizer contextualizers map[string]contextualizers.Contextualizer @@ -127,7 +150,7 @@ type prototypeRepository struct { errorHandlers map[string]errorhandlers.ErrorHandler } -func (r *prototypeRepository) Authenticator(id string) (authenticators.Authenticator, error) { +func (r *mechanismRepository) Authenticator(id string) (authenticators.Authenticator, error) { authenticator, ok := r.authenticators[id] if !ok { return nil, errorchain.NewWithMessagef(ErrNoSuchPipelineObject, @@ -137,7 +160,7 @@ func (r *prototypeRepository) Authenticator(id string) (authenticators.Authentic return authenticator, nil } -func (r *prototypeRepository) Authorizer(id string) (authorizers.Authorizer, error) { +func (r *mechanismRepository) Authorizer(id string) (authorizers.Authorizer, error) { authorizer, ok := r.authorizers[id] if !ok { return nil, errorchain.NewWithMessagef(ErrNoSuchPipelineObject, @@ -147,7 +170,7 @@ func (r *prototypeRepository) Authorizer(id string) (authorizers.Authorizer, err return authorizer, nil } -func (r *prototypeRepository) Contextualizer(id string) (contextualizers.Contextualizer, error) { +func (r *mechanismRepository) Contextualizer(id string) (contextualizers.Contextualizer, error) { contextualizer, ok := r.contextualizers[id] if !ok { return nil, errorchain.NewWithMessagef(ErrNoSuchPipelineObject, @@ -157,7 +180,7 @@ func (r *prototypeRepository) Contextualizer(id string) (contextualizers.Context return contextualizer, nil } -func (r *prototypeRepository) Finalizer(id string) (finalizers.Finalizer, error) { +func (r *mechanismRepository) Finalizer(id string) (finalizers.Finalizer, error) { finalizer, ok := r.finalizers[id] if !ok { return nil, errorchain.NewWithMessagef(ErrNoSuchPipelineObject, @@ -167,7 +190,7 @@ func (r *prototypeRepository) Finalizer(id string) (finalizers.Finalizer, error) return finalizer, nil } -func (r *prototypeRepository) ErrorHandler(id string) (errorhandlers.ErrorHandler, error) { +func (r *mechanismRepository) ErrorHandler(id string) (errorhandlers.ErrorHandler, error) { errorHandler, ok := r.errorHandlers[id] if !ok { return nil, errorchain.NewWithMessagef(ErrNoSuchPipelineObject, diff --git a/internal/rules/mechanisms/mocks/factory.go b/internal/rules/mechanisms/mocks/factory.go deleted file mode 100644 index 101a5c455..000000000 --- a/internal/rules/mechanisms/mocks/factory.go +++ /dev/null @@ -1,326 +0,0 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. 
- -package mocks - -import ( - authenticators "github.com/dadrus/heimdall/internal/rules/mechanisms/authenticators" - authorizers "github.com/dadrus/heimdall/internal/rules/mechanisms/authorizers" - - config "github.com/dadrus/heimdall/internal/config" - - contextualizers "github.com/dadrus/heimdall/internal/rules/mechanisms/contextualizers" - - errorhandlers "github.com/dadrus/heimdall/internal/rules/mechanisms/errorhandlers" - - finalizers "github.com/dadrus/heimdall/internal/rules/mechanisms/finalizers" - - mock "github.com/stretchr/testify/mock" -) - -// FactoryMock is an autogenerated mock type for the Factory type -type FactoryMock struct { - mock.Mock -} - -type FactoryMock_Expecter struct { - mock *mock.Mock -} - -func (_m *FactoryMock) EXPECT() *FactoryMock_Expecter { - return &FactoryMock_Expecter{mock: &_m.Mock} -} - -// CreateAuthenticator provides a mock function with given fields: version, id, conf -func (_m *FactoryMock) CreateAuthenticator(version string, id string, conf config.MechanismConfig) (authenticators.Authenticator, error) { - ret := _m.Called(version, id, conf) - - var r0 authenticators.Authenticator - var r1 error - if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (authenticators.Authenticator, error)); ok { - return rf(version, id, conf) - } - if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) authenticators.Authenticator); ok { - r0 = rf(version, id, conf) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(authenticators.Authenticator) - } - } - - if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { - r1 = rf(version, id, conf) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FactoryMock_CreateAuthenticator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateAuthenticator' -type FactoryMock_CreateAuthenticator_Call struct { - *mock.Call -} - -// CreateAuthenticator is a helper method to define mock.On call -// - version string -// - id string -// - conf config.MechanismConfig -func (_e *FactoryMock_Expecter) CreateAuthenticator(version interface{}, id interface{}, conf interface{}) *FactoryMock_CreateAuthenticator_Call { - return &FactoryMock_CreateAuthenticator_Call{Call: _e.mock.On("CreateAuthenticator", version, id, conf)} -} - -func (_c *FactoryMock_CreateAuthenticator_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *FactoryMock_CreateAuthenticator_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) - }) - return _c -} - -func (_c *FactoryMock_CreateAuthenticator_Call) Return(_a0 authenticators.Authenticator, _a1 error) *FactoryMock_CreateAuthenticator_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *FactoryMock_CreateAuthenticator_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (authenticators.Authenticator, error)) *FactoryMock_CreateAuthenticator_Call { - _c.Call.Return(run) - return _c -} - -// CreateAuthorizer provides a mock function with given fields: version, id, conf -func (_m *FactoryMock) CreateAuthorizer(version string, id string, conf config.MechanismConfig) (authorizers.Authorizer, error) { - ret := _m.Called(version, id, conf) - - var r0 authorizers.Authorizer - var r1 error - if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (authorizers.Authorizer, error)); ok { - return rf(version, id, conf) - } - if 
rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) authorizers.Authorizer); ok { - r0 = rf(version, id, conf) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(authorizers.Authorizer) - } - } - - if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { - r1 = rf(version, id, conf) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FactoryMock_CreateAuthorizer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateAuthorizer' -type FactoryMock_CreateAuthorizer_Call struct { - *mock.Call -} - -// CreateAuthorizer is a helper method to define mock.On call -// - version string -// - id string -// - conf config.MechanismConfig -func (_e *FactoryMock_Expecter) CreateAuthorizer(version interface{}, id interface{}, conf interface{}) *FactoryMock_CreateAuthorizer_Call { - return &FactoryMock_CreateAuthorizer_Call{Call: _e.mock.On("CreateAuthorizer", version, id, conf)} -} - -func (_c *FactoryMock_CreateAuthorizer_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *FactoryMock_CreateAuthorizer_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) - }) - return _c -} - -func (_c *FactoryMock_CreateAuthorizer_Call) Return(_a0 authorizers.Authorizer, _a1 error) *FactoryMock_CreateAuthorizer_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *FactoryMock_CreateAuthorizer_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (authorizers.Authorizer, error)) *FactoryMock_CreateAuthorizer_Call { - _c.Call.Return(run) - return _c -} - -// CreateContextualizer provides a mock function with given fields: version, id, conf -func (_m *FactoryMock) CreateContextualizer(version string, id string, conf config.MechanismConfig) (contextualizers.Contextualizer, error) { - ret := _m.Called(version, id, conf) - - var r0 contextualizers.Contextualizer - var r1 error - if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (contextualizers.Contextualizer, error)); ok { - return rf(version, id, conf) - } - if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) contextualizers.Contextualizer); ok { - r0 = rf(version, id, conf) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(contextualizers.Contextualizer) - } - } - - if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { - r1 = rf(version, id, conf) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FactoryMock_CreateContextualizer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateContextualizer' -type FactoryMock_CreateContextualizer_Call struct { - *mock.Call -} - -// CreateContextualizer is a helper method to define mock.On call -// - version string -// - id string -// - conf config.MechanismConfig -func (_e *FactoryMock_Expecter) CreateContextualizer(version interface{}, id interface{}, conf interface{}) *FactoryMock_CreateContextualizer_Call { - return &FactoryMock_CreateContextualizer_Call{Call: _e.mock.On("CreateContextualizer", version, id, conf)} -} - -func (_c *FactoryMock_CreateContextualizer_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *FactoryMock_CreateContextualizer_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) - }) - return _c -} - -func (_c *FactoryMock_CreateContextualizer_Call) Return(_a0 
contextualizers.Contextualizer, _a1 error) *FactoryMock_CreateContextualizer_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *FactoryMock_CreateContextualizer_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (contextualizers.Contextualizer, error)) *FactoryMock_CreateContextualizer_Call { - _c.Call.Return(run) - return _c -} - -// CreateErrorHandler provides a mock function with given fields: version, id, conf -func (_m *FactoryMock) CreateErrorHandler(version string, id string, conf config.MechanismConfig) (errorhandlers.ErrorHandler, error) { - ret := _m.Called(version, id, conf) - - var r0 errorhandlers.ErrorHandler - var r1 error - if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (errorhandlers.ErrorHandler, error)); ok { - return rf(version, id, conf) - } - if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) errorhandlers.ErrorHandler); ok { - r0 = rf(version, id, conf) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(errorhandlers.ErrorHandler) - } - } - - if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { - r1 = rf(version, id, conf) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FactoryMock_CreateErrorHandler_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateErrorHandler' -type FactoryMock_CreateErrorHandler_Call struct { - *mock.Call -} - -// CreateErrorHandler is a helper method to define mock.On call -// - version string -// - id string -// - conf config.MechanismConfig -func (_e *FactoryMock_Expecter) CreateErrorHandler(version interface{}, id interface{}, conf interface{}) *FactoryMock_CreateErrorHandler_Call { - return &FactoryMock_CreateErrorHandler_Call{Call: _e.mock.On("CreateErrorHandler", version, id, conf)} -} - -func (_c *FactoryMock_CreateErrorHandler_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *FactoryMock_CreateErrorHandler_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) - }) - return _c -} - -func (_c *FactoryMock_CreateErrorHandler_Call) Return(_a0 errorhandlers.ErrorHandler, _a1 error) *FactoryMock_CreateErrorHandler_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *FactoryMock_CreateErrorHandler_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (errorhandlers.ErrorHandler, error)) *FactoryMock_CreateErrorHandler_Call { - _c.Call.Return(run) - return _c -} - -// CreateFinalizer provides a mock function with given fields: version, id, conf -func (_m *FactoryMock) CreateFinalizer(version string, id string, conf config.MechanismConfig) (finalizers.Finalizer, error) { - ret := _m.Called(version, id, conf) - - var r0 finalizers.Finalizer - var r1 error - if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (finalizers.Finalizer, error)); ok { - return rf(version, id, conf) - } - if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) finalizers.Finalizer); ok { - r0 = rf(version, id, conf) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(finalizers.Finalizer) - } - } - - if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { - r1 = rf(version, id, conf) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FactoryMock_CreateFinalizer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateFinalizer' -type FactoryMock_CreateFinalizer_Call struct { - 
*mock.Call -} - -// CreateFinalizer is a helper method to define mock.On call -// - version string -// - id string -// - conf config.MechanismConfig -func (_e *FactoryMock_Expecter) CreateFinalizer(version interface{}, id interface{}, conf interface{}) *FactoryMock_CreateFinalizer_Call { - return &FactoryMock_CreateFinalizer_Call{Call: _e.mock.On("CreateFinalizer", version, id, conf)} -} - -func (_c *FactoryMock_CreateFinalizer_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *FactoryMock_CreateFinalizer_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) - }) - return _c -} - -func (_c *FactoryMock_CreateFinalizer_Call) Return(_a0 finalizers.Finalizer, _a1 error) *FactoryMock_CreateFinalizer_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *FactoryMock_CreateFinalizer_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (finalizers.Finalizer, error)) *FactoryMock_CreateFinalizer_Call { - _c.Call.Return(run) - return _c -} - -type mockConstructorTestingTNewFactoryMock interface { - mock.TestingT - Cleanup(func()) -} - -// NewFactoryMock creates a new instance of FactoryMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewFactoryMock(t mockConstructorTestingTNewFactoryMock) *FactoryMock { - mock := &FactoryMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/internal/rules/mechanisms/mocks/mechanism_factory.go b/internal/rules/mechanisms/mocks/mechanism_factory.go new file mode 100644 index 000000000..821236f43 --- /dev/null +++ b/internal/rules/mechanisms/mocks/mechanism_factory.go @@ -0,0 +1,345 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. 
+ +package mocks + +import ( + authenticators "github.com/dadrus/heimdall/internal/rules/mechanisms/authenticators" + authorizers "github.com/dadrus/heimdall/internal/rules/mechanisms/authorizers" + + config "github.com/dadrus/heimdall/internal/config" + + contextualizers "github.com/dadrus/heimdall/internal/rules/mechanisms/contextualizers" + + errorhandlers "github.com/dadrus/heimdall/internal/rules/mechanisms/errorhandlers" + + finalizers "github.com/dadrus/heimdall/internal/rules/mechanisms/finalizers" + + mock "github.com/stretchr/testify/mock" +) + +// MechanismFactoryMock is an autogenerated mock type for the MechanismFactory type +type MechanismFactoryMock struct { + mock.Mock +} + +type MechanismFactoryMock_Expecter struct { + mock *mock.Mock +} + +func (_m *MechanismFactoryMock) EXPECT() *MechanismFactoryMock_Expecter { + return &MechanismFactoryMock_Expecter{mock: &_m.Mock} +} + +// CreateAuthenticator provides a mock function with given fields: version, id, conf +func (_m *MechanismFactoryMock) CreateAuthenticator(version string, id string, conf config.MechanismConfig) (authenticators.Authenticator, error) { + ret := _m.Called(version, id, conf) + + if len(ret) == 0 { + panic("no return value specified for CreateAuthenticator") + } + + var r0 authenticators.Authenticator + var r1 error + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (authenticators.Authenticator, error)); ok { + return rf(version, id, conf) + } + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) authenticators.Authenticator); ok { + r0 = rf(version, id, conf) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(authenticators.Authenticator) + } + } + + if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { + r1 = rf(version, id, conf) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MechanismFactoryMock_CreateAuthenticator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateAuthenticator' +type MechanismFactoryMock_CreateAuthenticator_Call struct { + *mock.Call +} + +// CreateAuthenticator is a helper method to define mock.On call +// - version string +// - id string +// - conf config.MechanismConfig +func (_e *MechanismFactoryMock_Expecter) CreateAuthenticator(version interface{}, id interface{}, conf interface{}) *MechanismFactoryMock_CreateAuthenticator_Call { + return &MechanismFactoryMock_CreateAuthenticator_Call{Call: _e.mock.On("CreateAuthenticator", version, id, conf)} +} + +func (_c *MechanismFactoryMock_CreateAuthenticator_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *MechanismFactoryMock_CreateAuthenticator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) + }) + return _c +} + +func (_c *MechanismFactoryMock_CreateAuthenticator_Call) Return(_a0 authenticators.Authenticator, _a1 error) *MechanismFactoryMock_CreateAuthenticator_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MechanismFactoryMock_CreateAuthenticator_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (authenticators.Authenticator, error)) *MechanismFactoryMock_CreateAuthenticator_Call { + _c.Call.Return(run) + return _c +} + +// CreateAuthorizer provides a mock function with given fields: version, id, conf +func (_m *MechanismFactoryMock) CreateAuthorizer(version string, id string, conf config.MechanismConfig) 
(authorizers.Authorizer, error) { + ret := _m.Called(version, id, conf) + + if len(ret) == 0 { + panic("no return value specified for CreateAuthorizer") + } + + var r0 authorizers.Authorizer + var r1 error + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (authorizers.Authorizer, error)); ok { + return rf(version, id, conf) + } + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) authorizers.Authorizer); ok { + r0 = rf(version, id, conf) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(authorizers.Authorizer) + } + } + + if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { + r1 = rf(version, id, conf) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MechanismFactoryMock_CreateAuthorizer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateAuthorizer' +type MechanismFactoryMock_CreateAuthorizer_Call struct { + *mock.Call +} + +// CreateAuthorizer is a helper method to define mock.On call +// - version string +// - id string +// - conf config.MechanismConfig +func (_e *MechanismFactoryMock_Expecter) CreateAuthorizer(version interface{}, id interface{}, conf interface{}) *MechanismFactoryMock_CreateAuthorizer_Call { + return &MechanismFactoryMock_CreateAuthorizer_Call{Call: _e.mock.On("CreateAuthorizer", version, id, conf)} +} + +func (_c *MechanismFactoryMock_CreateAuthorizer_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *MechanismFactoryMock_CreateAuthorizer_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) + }) + return _c +} + +func (_c *MechanismFactoryMock_CreateAuthorizer_Call) Return(_a0 authorizers.Authorizer, _a1 error) *MechanismFactoryMock_CreateAuthorizer_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MechanismFactoryMock_CreateAuthorizer_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (authorizers.Authorizer, error)) *MechanismFactoryMock_CreateAuthorizer_Call { + _c.Call.Return(run) + return _c +} + +// CreateContextualizer provides a mock function with given fields: version, id, conf +func (_m *MechanismFactoryMock) CreateContextualizer(version string, id string, conf config.MechanismConfig) (contextualizers.Contextualizer, error) { + ret := _m.Called(version, id, conf) + + if len(ret) == 0 { + panic("no return value specified for CreateContextualizer") + } + + var r0 contextualizers.Contextualizer + var r1 error + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (contextualizers.Contextualizer, error)); ok { + return rf(version, id, conf) + } + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) contextualizers.Contextualizer); ok { + r0 = rf(version, id, conf) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(contextualizers.Contextualizer) + } + } + + if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { + r1 = rf(version, id, conf) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MechanismFactoryMock_CreateContextualizer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateContextualizer' +type MechanismFactoryMock_CreateContextualizer_Call struct { + *mock.Call +} + +// CreateContextualizer is a helper method to define mock.On call +// - version string +// - id string +// - conf config.MechanismConfig +func (_e *MechanismFactoryMock_Expecter) CreateContextualizer(version 
interface{}, id interface{}, conf interface{}) *MechanismFactoryMock_CreateContextualizer_Call { + return &MechanismFactoryMock_CreateContextualizer_Call{Call: _e.mock.On("CreateContextualizer", version, id, conf)} +} + +func (_c *MechanismFactoryMock_CreateContextualizer_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *MechanismFactoryMock_CreateContextualizer_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) + }) + return _c +} + +func (_c *MechanismFactoryMock_CreateContextualizer_Call) Return(_a0 contextualizers.Contextualizer, _a1 error) *MechanismFactoryMock_CreateContextualizer_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MechanismFactoryMock_CreateContextualizer_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (contextualizers.Contextualizer, error)) *MechanismFactoryMock_CreateContextualizer_Call { + _c.Call.Return(run) + return _c +} + +// CreateErrorHandler provides a mock function with given fields: version, id, conf +func (_m *MechanismFactoryMock) CreateErrorHandler(version string, id string, conf config.MechanismConfig) (errorhandlers.ErrorHandler, error) { + ret := _m.Called(version, id, conf) + + if len(ret) == 0 { + panic("no return value specified for CreateErrorHandler") + } + + var r0 errorhandlers.ErrorHandler + var r1 error + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (errorhandlers.ErrorHandler, error)); ok { + return rf(version, id, conf) + } + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) errorhandlers.ErrorHandler); ok { + r0 = rf(version, id, conf) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(errorhandlers.ErrorHandler) + } + } + + if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { + r1 = rf(version, id, conf) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MechanismFactoryMock_CreateErrorHandler_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateErrorHandler' +type MechanismFactoryMock_CreateErrorHandler_Call struct { + *mock.Call +} + +// CreateErrorHandler is a helper method to define mock.On call +// - version string +// - id string +// - conf config.MechanismConfig +func (_e *MechanismFactoryMock_Expecter) CreateErrorHandler(version interface{}, id interface{}, conf interface{}) *MechanismFactoryMock_CreateErrorHandler_Call { + return &MechanismFactoryMock_CreateErrorHandler_Call{Call: _e.mock.On("CreateErrorHandler", version, id, conf)} +} + +func (_c *MechanismFactoryMock_CreateErrorHandler_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *MechanismFactoryMock_CreateErrorHandler_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) + }) + return _c +} + +func (_c *MechanismFactoryMock_CreateErrorHandler_Call) Return(_a0 errorhandlers.ErrorHandler, _a1 error) *MechanismFactoryMock_CreateErrorHandler_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MechanismFactoryMock_CreateErrorHandler_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (errorhandlers.ErrorHandler, error)) *MechanismFactoryMock_CreateErrorHandler_Call { + _c.Call.Return(run) + return _c +} + +// CreateFinalizer provides a mock function with given fields: version, id, conf +func (_m *MechanismFactoryMock) CreateFinalizer(version string, id string, conf config.MechanismConfig) 
(finalizers.Finalizer, error) { + ret := _m.Called(version, id, conf) + + if len(ret) == 0 { + panic("no return value specified for CreateFinalizer") + } + + var r0 finalizers.Finalizer + var r1 error + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) (finalizers.Finalizer, error)); ok { + return rf(version, id, conf) + } + if rf, ok := ret.Get(0).(func(string, string, config.MechanismConfig) finalizers.Finalizer); ok { + r0 = rf(version, id, conf) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(finalizers.Finalizer) + } + } + + if rf, ok := ret.Get(1).(func(string, string, config.MechanismConfig) error); ok { + r1 = rf(version, id, conf) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MechanismFactoryMock_CreateFinalizer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateFinalizer' +type MechanismFactoryMock_CreateFinalizer_Call struct { + *mock.Call +} + +// CreateFinalizer is a helper method to define mock.On call +// - version string +// - id string +// - conf config.MechanismConfig +func (_e *MechanismFactoryMock_Expecter) CreateFinalizer(version interface{}, id interface{}, conf interface{}) *MechanismFactoryMock_CreateFinalizer_Call { + return &MechanismFactoryMock_CreateFinalizer_Call{Call: _e.mock.On("CreateFinalizer", version, id, conf)} +} + +func (_c *MechanismFactoryMock_CreateFinalizer_Call) Run(run func(version string, id string, conf config.MechanismConfig)) *MechanismFactoryMock_CreateFinalizer_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(config.MechanismConfig)) + }) + return _c +} + +func (_c *MechanismFactoryMock_CreateFinalizer_Call) Return(_a0 finalizers.Finalizer, _a1 error) *MechanismFactoryMock_CreateFinalizer_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MechanismFactoryMock_CreateFinalizer_Call) RunAndReturn(run func(string, string, config.MechanismConfig) (finalizers.Finalizer, error)) *MechanismFactoryMock_CreateFinalizer_Call { + _c.Call.Return(run) + return _c +} + +// NewMechanismFactoryMock creates a new instance of MechanismFactoryMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMechanismFactoryMock(t interface { + mock.TestingT + Cleanup(func()) +}) *MechanismFactoryMock { + mock := &MechanismFactoryMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/rules/mechanisms/module.go b/internal/rules/mechanisms/module.go index b5704d851..16fc28f1b 100644 --- a/internal/rules/mechanisms/module.go +++ b/internal/rules/mechanisms/module.go @@ -21,5 +21,5 @@ import ( ) var Module = fx.Options( //nolint:gochecknoglobals - fx.Provide(NewFactory), + fx.Provide(NewMechanismFactory), ) diff --git a/internal/rules/mechanisms/template/template_test.go b/internal/rules/mechanisms/template/template_test.go index 137629c8b..9a505f307 100644 --- a/internal/rules/mechanisms/template/template_test.go +++ b/internal/rules/mechanisms/template/template_test.go @@ -40,9 +40,11 @@ func TestTemplateRender(t *testing.T) { ctx := mocks.NewContextMock(t) ctx.EXPECT().Request().Return(&heimdall.Request{ - RequestFunctions: reqf, - Method: http.MethodPatch, - URL: &url.URL{Scheme: "http", Host: "foobar.baz", Path: "zab", RawQuery: "my_query_param=query_value"}, + RequestFunctions: reqf, + Method: http.MethodPatch, + URL: &heimdall.URL{ + URL: url.URL{Scheme: "http", Host: "foobar.baz", Path: "zab", RawQuery: "my_query_param=query_value"}, + }, ClientIPAddresses: []string{"192.168.1.1"}, }) diff --git a/internal/rules/mocks/error_handler.go b/internal/rules/mocks/error_handler.go index 760c9a28d..99cba6552 100644 --- a/internal/rules/mocks/error_handler.go +++ b/internal/rules/mocks/error_handler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks @@ -20,99 +20,104 @@ func (_m *ErrorHandlerMock) EXPECT() *ErrorHandlerMock_Expecter { return &ErrorHandlerMock_Expecter{mock: &_m.Mock} } -// CanExecute provides a mock function with given fields: ctx, causeErr -func (_m *ErrorHandlerMock) CanExecute(ctx heimdall.Context, causeErr error) bool { +// Execute provides a mock function with given fields: ctx, causeErr +func (_m *ErrorHandlerMock) Execute(ctx heimdall.Context, causeErr error) error { ret := _m.Called(ctx, causeErr) - var r0 bool - if rf, ok := ret.Get(0).(func(heimdall.Context, error) bool); ok { + if len(ret) == 0 { + panic("no return value specified for Execute") + } + + var r0 error + if rf, ok := ret.Get(0).(func(heimdall.Context, error) error); ok { r0 = rf(ctx, causeErr) } else { - r0 = ret.Get(0).(bool) + r0 = ret.Error(0) } return r0 } -// ErrorHandlerMock_CanExecute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CanExecute' -type ErrorHandlerMock_CanExecute_Call struct { +// ErrorHandlerMock_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute' +type ErrorHandlerMock_Execute_Call struct { *mock.Call } -// CanExecute is a helper method to define mock.On call +// Execute is a helper method to define mock.On call // - ctx heimdall.Context // - causeErr error -func (_e *ErrorHandlerMock_Expecter) CanExecute(ctx interface{}, causeErr interface{}) *ErrorHandlerMock_CanExecute_Call { - return &ErrorHandlerMock_CanExecute_Call{Call: _e.mock.On("CanExecute", ctx, causeErr)} +func (_e *ErrorHandlerMock_Expecter) Execute(ctx interface{}, causeErr interface{}) *ErrorHandlerMock_Execute_Call { + return &ErrorHandlerMock_Execute_Call{Call: _e.mock.On("Execute", ctx, causeErr)} } -func (_c *ErrorHandlerMock_CanExecute_Call) Run(run func(ctx 
heimdall.Context, causeErr error)) *ErrorHandlerMock_CanExecute_Call { +func (_c *ErrorHandlerMock_Execute_Call) Run(run func(ctx heimdall.Context, causeErr error)) *ErrorHandlerMock_Execute_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(heimdall.Context), args[1].(error)) }) return _c } -func (_c *ErrorHandlerMock_CanExecute_Call) Return(_a0 bool) *ErrorHandlerMock_CanExecute_Call { +func (_c *ErrorHandlerMock_Execute_Call) Return(_a0 error) *ErrorHandlerMock_Execute_Call { _c.Call.Return(_a0) return _c } -func (_c *ErrorHandlerMock_CanExecute_Call) RunAndReturn(run func(heimdall.Context, error) bool) *ErrorHandlerMock_CanExecute_Call { +func (_c *ErrorHandlerMock_Execute_Call) RunAndReturn(run func(heimdall.Context, error) error) *ErrorHandlerMock_Execute_Call { _c.Call.Return(run) return _c } -// Execute provides a mock function with given fields: ctx, causeErr -func (_m *ErrorHandlerMock) Execute(ctx heimdall.Context, causeErr error) error { - ret := _m.Called(ctx, causeErr) +// ID provides a mock function with given fields: +func (_m *ErrorHandlerMock) ID() string { + ret := _m.Called() - var r0 error - if rf, ok := ret.Get(0).(func(heimdall.Context, error) error); ok { - r0 = rf(ctx, causeErr) + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(string) } return r0 } -// ErrorHandlerMock_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute' -type ErrorHandlerMock_Execute_Call struct { +// ErrorHandlerMock_ID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ID' +type ErrorHandlerMock_ID_Call struct { *mock.Call } -// Execute is a helper method to define mock.On call -// - ctx heimdall.Context -// - causeErr error -func (_e *ErrorHandlerMock_Expecter) Execute(ctx interface{}, causeErr interface{}) *ErrorHandlerMock_Execute_Call { - return &ErrorHandlerMock_Execute_Call{Call: _e.mock.On("Execute", ctx, causeErr)} +// ID is a helper method to define mock.On call +func (_e *ErrorHandlerMock_Expecter) ID() *ErrorHandlerMock_ID_Call { + return &ErrorHandlerMock_ID_Call{Call: _e.mock.On("ID")} } -func (_c *ErrorHandlerMock_Execute_Call) Run(run func(ctx heimdall.Context, causeErr error)) *ErrorHandlerMock_Execute_Call { +func (_c *ErrorHandlerMock_ID_Call) Run(run func()) *ErrorHandlerMock_ID_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(heimdall.Context), args[1].(error)) + run() }) return _c } -func (_c *ErrorHandlerMock_Execute_Call) Return(_a0 error) *ErrorHandlerMock_Execute_Call { +func (_c *ErrorHandlerMock_ID_Call) Return(_a0 string) *ErrorHandlerMock_ID_Call { _c.Call.Return(_a0) return _c } -func (_c *ErrorHandlerMock_Execute_Call) RunAndReturn(run func(heimdall.Context, error) error) *ErrorHandlerMock_Execute_Call { +func (_c *ErrorHandlerMock_ID_Call) RunAndReturn(run func() string) *ErrorHandlerMock_ID_Call { _c.Call.Return(run) return _c } -type mockConstructorTestingTNewErrorHandlerMock interface { +// NewErrorHandlerMock creates a new instance of ErrorHandlerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewErrorHandlerMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewErrorHandlerMock creates a new instance of ErrorHandlerMock. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewErrorHandlerMock(t mockConstructorTestingTNewErrorHandlerMock) *ErrorHandlerMock { +}) *ErrorHandlerMock { mock := &ErrorHandlerMock{} mock.Mock.Test(t) diff --git a/internal/rules/mocks/execution_condition.go b/internal/rules/mocks/execution_condition.go index b63e27be9..67c2db025 100644 --- a/internal/rules/mocks/execution_condition.go +++ b/internal/rules/mocks/execution_condition.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks @@ -22,10 +22,71 @@ func (_m *ExecutionConditionMock) EXPECT() *ExecutionConditionMock_Expecter { return &ExecutionConditionMock_Expecter{mock: &_m.Mock} } -// CanExecute provides a mock function with given fields: ctx, sub -func (_m *ExecutionConditionMock) CanExecute(ctx heimdall.Context, sub *subject.Subject) (bool, error) { +// CanExecuteOnError provides a mock function with given fields: ctx, err +func (_m *ExecutionConditionMock) CanExecuteOnError(ctx heimdall.Context, err error) (bool, error) { + ret := _m.Called(ctx, err) + + if len(ret) == 0 { + panic("no return value specified for CanExecuteOnError") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(heimdall.Context, error) (bool, error)); ok { + return rf(ctx, err) + } + if rf, ok := ret.Get(0).(func(heimdall.Context, error) bool); ok { + r0 = rf(ctx, err) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(heimdall.Context, error) error); ok { + r1 = rf(ctx, err) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecutionConditionMock_CanExecuteOnError_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CanExecuteOnError' +type ExecutionConditionMock_CanExecuteOnError_Call struct { + *mock.Call +} + +// CanExecuteOnError is a helper method to define mock.On call +// - ctx heimdall.Context +// - err error +func (_e *ExecutionConditionMock_Expecter) CanExecuteOnError(ctx interface{}, err interface{}) *ExecutionConditionMock_CanExecuteOnError_Call { + return &ExecutionConditionMock_CanExecuteOnError_Call{Call: _e.mock.On("CanExecuteOnError", ctx, err)} +} + +func (_c *ExecutionConditionMock_CanExecuteOnError_Call) Run(run func(ctx heimdall.Context, err error)) *ExecutionConditionMock_CanExecuteOnError_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(heimdall.Context), args[1].(error)) + }) + return _c +} + +func (_c *ExecutionConditionMock_CanExecuteOnError_Call) Return(_a0 bool, _a1 error) *ExecutionConditionMock_CanExecuteOnError_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ExecutionConditionMock_CanExecuteOnError_Call) RunAndReturn(run func(heimdall.Context, error) (bool, error)) *ExecutionConditionMock_CanExecuteOnError_Call { + _c.Call.Return(run) + return _c +} + +// CanExecuteOnSubject provides a mock function with given fields: ctx, sub +func (_m *ExecutionConditionMock) CanExecuteOnSubject(ctx heimdall.Context, sub *subject.Subject) (bool, error) { ret := _m.Called(ctx, sub) + if len(ret) == 0 { + panic("no return value specified for CanExecuteOnSubject") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(heimdall.Context, *subject.Subject) (bool, error)); ok { @@ -46,42 +107,41 @@ func (_m *ExecutionConditionMock) CanExecute(ctx heimdall.Context, sub *subject. 
return r0, r1 } -// ExecutionConditionMock_CanExecute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CanExecute' -type ExecutionConditionMock_CanExecute_Call struct { +// ExecutionConditionMock_CanExecuteOnSubject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CanExecuteOnSubject' +type ExecutionConditionMock_CanExecuteOnSubject_Call struct { *mock.Call } -// CanExecute is a helper method to define mock.On call +// CanExecuteOnSubject is a helper method to define mock.On call // - ctx heimdall.Context // - sub *subject.Subject -func (_e *ExecutionConditionMock_Expecter) CanExecute(ctx interface{}, sub interface{}) *ExecutionConditionMock_CanExecute_Call { - return &ExecutionConditionMock_CanExecute_Call{Call: _e.mock.On("CanExecute", ctx, sub)} +func (_e *ExecutionConditionMock_Expecter) CanExecuteOnSubject(ctx interface{}, sub interface{}) *ExecutionConditionMock_CanExecuteOnSubject_Call { + return &ExecutionConditionMock_CanExecuteOnSubject_Call{Call: _e.mock.On("CanExecuteOnSubject", ctx, sub)} } -func (_c *ExecutionConditionMock_CanExecute_Call) Run(run func(ctx heimdall.Context, sub *subject.Subject)) *ExecutionConditionMock_CanExecute_Call { +func (_c *ExecutionConditionMock_CanExecuteOnSubject_Call) Run(run func(ctx heimdall.Context, sub *subject.Subject)) *ExecutionConditionMock_CanExecuteOnSubject_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(heimdall.Context), args[1].(*subject.Subject)) }) return _c } -func (_c *ExecutionConditionMock_CanExecute_Call) Return(_a0 bool, _a1 error) *ExecutionConditionMock_CanExecute_Call { +func (_c *ExecutionConditionMock_CanExecuteOnSubject_Call) Return(_a0 bool, _a1 error) *ExecutionConditionMock_CanExecuteOnSubject_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *ExecutionConditionMock_CanExecute_Call) RunAndReturn(run func(heimdall.Context, *subject.Subject) (bool, error)) *ExecutionConditionMock_CanExecute_Call { +func (_c *ExecutionConditionMock_CanExecuteOnSubject_Call) RunAndReturn(run func(heimdall.Context, *subject.Subject) (bool, error)) *ExecutionConditionMock_CanExecuteOnSubject_Call { _c.Call.Return(run) return _c } -type mockConstructorTestingTNewExecutionConditionMock interface { +// NewExecutionConditionMock creates a new instance of ExecutionConditionMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionConditionMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecutionConditionMock creates a new instance of ExecutionConditionMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExecutionConditionMock(t mockConstructorTestingTNewExecutionConditionMock) *ExecutionConditionMock { +}) *ExecutionConditionMock { mock := &ExecutionConditionMock{} mock.Mock.Test(t) diff --git a/internal/rules/mocks/pattern_matcher.go b/internal/rules/mocks/pattern_matcher.go new file mode 100644 index 000000000..82b834990 --- /dev/null +++ b/internal/rules/mocks/pattern_matcher.go @@ -0,0 +1,78 @@ +// Code generated by mockery v2.42.1. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// PatternMatcherMock is an autogenerated mock type for the patternMatcher type +type PatternMatcherMock struct { + mock.Mock +} + +type PatternMatcherMock_Expecter struct { + mock *mock.Mock +} + +func (_m *PatternMatcherMock) EXPECT() *PatternMatcherMock_Expecter { + return &PatternMatcherMock_Expecter{mock: &_m.Mock} +} + +// Match provides a mock function with given fields: pattern +func (_m *PatternMatcherMock) Match(pattern string) bool { + ret := _m.Called(pattern) + + if len(ret) == 0 { + panic("no return value specified for Match") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(string) bool); ok { + r0 = rf(pattern) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// PatternMatcherMock_Match_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Match' +type PatternMatcherMock_Match_Call struct { + *mock.Call +} + +// Match is a helper method to define mock.On call +// - pattern string +func (_e *PatternMatcherMock_Expecter) Match(pattern interface{}) *PatternMatcherMock_Match_Call { + return &PatternMatcherMock_Match_Call{Call: _e.mock.On("Match", pattern)} +} + +func (_c *PatternMatcherMock_Match_Call) Run(run func(pattern string)) *PatternMatcherMock_Match_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *PatternMatcherMock_Match_Call) Return(_a0 bool) *PatternMatcherMock_Match_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *PatternMatcherMock_Match_Call) RunAndReturn(run func(string) bool) *PatternMatcherMock_Match_Call { + _c.Call.Return(run) + return _c +} + +// NewPatternMatcherMock creates a new instance of PatternMatcherMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPatternMatcherMock(t interface { + mock.TestingT + Cleanup(func()) +}) *PatternMatcherMock { + mock := &PatternMatcherMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/rules/module.go b/internal/rules/module.go index 9c8c6a090..602c6b1e6 100644 --- a/internal/rules/module.go +++ b/internal/rules/module.go @@ -17,45 +17,19 @@ package rules import ( - "context" - - "github.com/rs/zerolog" "go.uber.org/fx" - "github.com/dadrus/heimdall/internal/rules/event" "github.com/dadrus/heimdall/internal/rules/provider" - "github.com/dadrus/heimdall/internal/rules/rule" ) -const defaultQueueSize = 20 - // Module is invoked on app bootstrapping. 
// nolint: gochecknoglobals var Module = fx.Options( fx.Provide( - fx.Annotate( - func(logger zerolog.Logger) event.RuleSetChangedEventQueue { - logger.Debug().Msg("Creating rule set event queue.") - - return make(event.RuleSetChangedEventQueue, defaultQueueSize) - }, - fx.OnStop( - func(queue event.RuleSetChangedEventQueue, logger zerolog.Logger) { - logger.Debug().Msg("Closing rule set event queue") - - close(queue) - }, - ), - ), NewRuleFactory, - fx.Annotate( - newRepository, - fx.OnStart(func(ctx context.Context, o *repository) error { return o.Start(ctx) }), - fx.OnStop(func(ctx context.Context, o *repository) error { return o.Stop(ctx) }), - ), - func(r *repository) rule.Repository { return r }, - newRuleExecutor, + newRepository, NewRuleSetProcessor, + newRuleExecutor, ), provider.Module, ) diff --git a/internal/rules/patternmatcher/glob_matcher.go b/internal/rules/patternmatcher/glob_matcher.go deleted file mode 100644 index fa94009fe..000000000 --- a/internal/rules/patternmatcher/glob_matcher.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package patternmatcher - -import ( - "bytes" - "errors" - - "github.com/gobwas/glob" -) - -var ( - ErrUnbalancedPattern = errors.New("unbalanced pattern") - ErrNoGlobPatternDefined = errors.New("no glob pattern defined") -) - -type globMatcher struct { - compiled glob.Glob -} - -func (m *globMatcher) Match(value string) bool { - return m.compiled.Match(value) -} - -func newGlobMatcher(pattern string) (*globMatcher, error) { - if len(pattern) == 0 { - return nil, ErrNoGlobPatternDefined - } - - compiled, err := compileGlob(pattern, '<', '>') - if err != nil { - return nil, err - } - - return &globMatcher{compiled: compiled}, nil -} - -func compileGlob(pattern string, delimiterStart, delimiterEnd rune) (glob.Glob, error) { - // Check if it is well-formed. - idxs, errBraces := delimiterIndices(pattern, delimiterStart, delimiterEnd) - if errBraces != nil { - return nil, errBraces - } - - buffer := bytes.NewBufferString("") - - var end int - for ind := 0; ind < len(idxs); ind += 2 { - // Set all values we are interested in. - raw := pattern[end:idxs[ind]] - end = idxs[ind+1] - patt := pattern[idxs[ind]+1 : end-1] - - buffer.WriteString(glob.QuoteMeta(raw)) - buffer.WriteString(patt) - } - - // Add the remaining. - raw := pattern[end:] - buffer.WriteString(glob.QuoteMeta(raw)) - - // Compile full regexp. - return glob.Compile(buffer.String(), '.', '/') -} - -// delimiterIndices returns the first level delimiter indices from a string. -// It returns an error in case of unbalanced delimiters. 
-func delimiterIndices(value string, delimiterStart, delimiterEnd rune) ([]int, error) { - var level, idx int - - idxs := make([]int, 0) - - for ind := range len(value) { - switch value[ind] { - case byte(delimiterStart): - if level++; level == 1 { - idx = ind - } - case byte(delimiterEnd): - if level--; level == 0 { - idxs = append(idxs, idx, ind+1) - } else if level < 0 { - return nil, ErrUnbalancedPattern - } - } - } - - if level != 0 { - return nil, ErrUnbalancedPattern - } - - return idxs, nil -} diff --git a/internal/rules/patternmatcher/glob_matcher_test.go b/internal/rules/patternmatcher/glob_matcher_test.go deleted file mode 100644 index e4f2da732..000000000 --- a/internal/rules/patternmatcher/glob_matcher_test.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package patternmatcher - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDelimiterIndices(t *testing.T) { - t.Parallel() - - for tn, tc := range []struct { - input string - out []int - err error - }{ - {input: "<", err: ErrUnbalancedPattern}, - {input: ">", err: ErrUnbalancedPattern}, - {input: ">>", err: ErrUnbalancedPattern}, - {input: "><>", err: ErrUnbalancedPattern}, - {input: "foo.barvar", err: ErrUnbalancedPattern}, - {input: "foo.bar>var", err: ErrUnbalancedPattern}, - {input: "foo.bar<<>>", out: []int{7, 11}}, - {input: "foo.bar<<>><>", out: []int{7, 11, 11, 13}}, - {input: "foo.bar<<>><>tt<>", out: []int{7, 11, 11, 13, 15, 17}}, - } { - t.Run(strconv.Itoa(tn), func(t *testing.T) { - out, err := delimiterIndices(tc.input, '<', '>') - assert.Equal(t, tc.out, out) - assert.Equal(t, tc.err, err) - }) - } -} - -func TestIsMatch(t *testing.T) { - t.Parallel() - - for _, tc := range []struct { - uc string - pattern string - matchAgainst string - shouldMatch bool - }{ - { - uc: "question mark1", - pattern: `urn:foo:`, - matchAgainst: "urn:foo:user", - shouldMatch: false, - }, - { - uc: "question mark2", - pattern: `urn:foo:`, - matchAgainst: "urn:foo:u", - shouldMatch: true, - }, - { - uc: "question mark3", - pattern: `urn:foo:`, - matchAgainst: "urn:foo:", - shouldMatch: false, - }, - { - uc: "question mark4", - pattern: `urn:foo:&&`, - matchAgainst: "urn:foo:w&&r", - shouldMatch: true, - }, - { - uc: "question mark5 - both as a special char and a literal", - pattern: `urn:foo:?`, - matchAgainst: "urn:foo:w&r", - shouldMatch: false, - }, - { - uc: "question mark5 - both as a special char and a literal1", - pattern: `urn:foo:?`, - matchAgainst: "urn:foo:w?r", - shouldMatch: true, - }, - { - uc: "asterisk", - pattern: `urn:foo:<*>`, - matchAgainst: "urn:foo:user", - shouldMatch: true, - }, - { - uc: "asterisk1", - pattern: `urn:foo:<*>`, - matchAgainst: "urn:foo:", - shouldMatch: true, - }, - { - uc: "asterisk2", - pattern: `urn:foo:<*>:<*>`, - matchAgainst: "urn:foo:usr:swen", - shouldMatch: true, - }, - { - 
uc: "asterisk: both as a special char and a literal", - pattern: `*:foo:<*>:<*>`, - matchAgainst: "urn:foo:usr:swen", - shouldMatch: false, - }, - { - uc: "asterisk: both as a special char and a literal1", - pattern: `*:foo:<*>:<*>`, - matchAgainst: "*:foo:usr:swen", - shouldMatch: true, - }, - { - uc: "asterisk + question mark", - pattern: `urn:foo:<*>:role:`, - matchAgainst: "urn:foo:usr:role:a", - shouldMatch: true, - }, - { - uc: "asterisk + question mark1", - pattern: `urn:foo:<*>:role:`, - matchAgainst: "urn:foo:usr:role:admin", - shouldMatch: false, - }, - { - uc: "square brackets", - pattern: `urn:foo:`, - matchAgainst: "urn:foo:moon", - shouldMatch: false, - }, - { - uc: "square brackets1", - pattern: `urn:foo:`, - matchAgainst: "urn:foo:man", - shouldMatch: true, - }, - { - uc: "square brackets2", - pattern: `urn:foo:`, - matchAgainst: "urn:foo:man", - shouldMatch: false, - }, - { - uc: "square brackets3", - pattern: `urn:foo:`, - matchAgainst: "urn:foo:min", - shouldMatch: true, - }, - { - uc: "asterisk matches only one path segment", - pattern: `http://example.com/<*>`, - matchAgainst: "http://example.com/foo/bar", - shouldMatch: false, - }, - } { - t.Run(tc.uc, func(t *testing.T) { - // GIVEN - matcher, err := newGlobMatcher(tc.pattern) - require.NoError(t, err) - - // WHEN - matched := matcher.Match(tc.matchAgainst) - - // THEN - assert.Equal(t, tc.shouldMatch, matched) - }) - } -} diff --git a/internal/rules/patternmatcher/pattern_matcher.go b/internal/rules/patternmatcher/pattern_matcher.go deleted file mode 100644 index 4a0ae68d4..000000000 --- a/internal/rules/patternmatcher/pattern_matcher.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package patternmatcher - -import ( - "errors" -) - -var ErrUnsupportedPatternMatcher = errors.New("unsupported pattern matcher") - -type PatternMatcher interface { - Match(value string) bool -} - -func NewPatternMatcher(typ, pattern string) (PatternMatcher, error) { - switch typ { - case "glob": - return newGlobMatcher(pattern) - case "regex": - return newRegexMatcher(pattern) - default: - return nil, ErrUnsupportedPatternMatcher - } -} diff --git a/internal/rules/patternmatcher/regex_matcher.go b/internal/rules/patternmatcher/regex_matcher.go deleted file mode 100644 index 7b223b3d6..000000000 --- a/internal/rules/patternmatcher/regex_matcher.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package patternmatcher - -import ( - "errors" - - "github.com/dlclark/regexp2" - "github.com/ory/ladon/compiler" -) - -var ErrNoRegexPatternDefined = errors.New("no glob pattern defined") - -type regexpMatcher struct { - compiled *regexp2.Regexp -} - -func newRegexMatcher(pattern string) (*regexpMatcher, error) { - if len(pattern) == 0 { - return nil, ErrNoRegexPatternDefined - } - - compiled, err := compiler.CompileRegex(pattern, '<', '>') - if err != nil { - return nil, err - } - - return ®expMatcher{compiled: compiled}, nil -} - -func (m *regexpMatcher) Match(matchAgainst string) bool { - // ignoring error as it will be set on timeouts, which basically is the same as match miss - ok, _ := m.compiled.MatchString(matchAgainst) - - return ok -} diff --git a/internal/rules/provider/cloudblob/provider_test.go b/internal/rules/provider/cloudblob/provider_test.go index 630ccf202..930fee305 100644 --- a/internal/rules/provider/cloudblob/provider_test.go +++ b/internal/rules/provider/cloudblob/provider_test.go @@ -110,7 +110,6 @@ buckets: - url: s3://foobar - url: s3://barfoo/foo&foo=bar prefix: bar - rule_path_match_prefix: baz `), assert: func(t *testing.T, err error, prov *provider) { t.Helper() @@ -243,6 +242,11 @@ version: "1" name: test rules: - id: foo + match: + routes: + - path: /foo + execute: + - authenticator: test ` _, err := backend.PutObject(bucketName, "test-rule", @@ -287,6 +291,11 @@ version: "1" name: test rules: - id: foo + match: + routes: + - path: /foo + execute: + - authenticator: test ` _, err := backend.PutObject(bucketName, "test-rule", @@ -336,6 +345,11 @@ version: "1" name: test rules: - id: foo + match: + routes: + - path: /foo + execute: + - authenticator: test ` _, err := backend.PutObject(bucketName, "test-rule1", @@ -350,6 +364,11 @@ version: "1" name: test rules: - id: bar + match: + routes: + - path: /bar + execute: + - authenticator: test ` _, err := backend.PutObject(bucketName, "test-rule2", @@ -422,8 +441,12 @@ version: "1" name: test rules: - id: foo + match: + routes: + - path: /foo + execute: + - authenticator: test ` - _, err := backend.PutObject(bucketName, "test-rule", map[string]string{"Content-Type": "application/yaml"}, strings.NewReader(data), int64(len(data))) @@ -434,8 +457,12 @@ version: "1" name: test rules: - id: bar + match: + routes: + - path: /bar + execute: + - authenticator: test ` - _, err := backend.PutObject(bucketName, "test-rule", map[string]string{"Content-Type": "application/yaml"}, strings.NewReader(data), int64(len(data))) @@ -446,8 +473,12 @@ version: "1" name: test rules: - id: baz + match: + routes: + - path: /baz + execute: + - authenticator: test ` - _, err := backend.PutObject(bucketName, "test-rule", map[string]string{"Content-Type": "application/yaml"}, strings.NewReader(data), int64(len(data))) diff --git a/internal/rules/provider/cloudblob/ruleset_endpoint.go b/internal/rules/provider/cloudblob/ruleset_endpoint.go index 34e16cea2..43017ccba 100644 --- a/internal/rules/provider/cloudblob/ruleset_endpoint.go +++ b/internal/rules/provider/cloudblob/ruleset_endpoint.go @@ -35,9 +35,8 @@ import ( ) type ruleSetEndpoint struct { - URL *url.URL `mapstructure:"url"` - Prefix string `mapstructure:"prefix"` - RulesPathPrefix string `mapstructure:"rule_path_match_prefix"` + URL *url.URL `mapstructure:"url"` + Prefix string `mapstructure:"prefix"` } func (e 
*ruleSetEndpoint) ID() string { @@ -125,10 +124,6 @@ func (e *ruleSetEndpoint) readRuleSet(ctx context.Context, bucket *blob.Bucket, CausedBy(err) } - if err = contents.VerifyPathPrefix(e.RulesPathPrefix); err != nil { - return nil, err - } - contents.Hash = attrs.MD5 contents.Source = fmt.Sprintf("%s@%s", key, e.ID()) contents.ModTime = attrs.ModTime diff --git a/internal/rules/provider/cloudblob/ruleset_endpoint_test.go b/internal/rules/provider/cloudblob/ruleset_endpoint_test.go index 52553a7bc..244ecc345 100644 --- a/internal/rules/provider/cloudblob/ruleset_endpoint_test.go +++ b/internal/rules/provider/cloudblob/ruleset_endpoint_test.go @@ -169,46 +169,6 @@ func TestFetchRuleSets(t *testing.T) { require.Empty(t, ruleSets) }, }, - { - uc: "rule set with path prefix validation error", - endpoint: ruleSetEndpoint{ - URL: &url.URL{ - Scheme: "s3", - Host: bucketName, - RawQuery: fmt.Sprintf("endpoint=%s®ion=eu-central-1", srv.URL), - }, - RulesPathPrefix: "foo/bar", - }, - setup: func(t *testing.T) { - t.Helper() - - data := ` -{ - "version": "1", - "name": "test", - "rules": [{ - "id": "foobar", - "match": "http://<**>/bar/foo/api", - "methods": ["GET", "POST"], - "execute": [ - { "authenticator": "foobar" } - ] - }] -}` - - _, err := backend.PutObject(bucketName, "test-rule", - map[string]string{"Content-Type": "application/json"}, - strings.NewReader(data), int64(len(data))) - require.NoError(t, err) - }, - assert: func(t *testing.T, err error, _ []*config.RuleSet) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "path prefix validation") - }, - }, { uc: "multiple valid rule sets in yaml and json formats", endpoint: ruleSetEndpoint{ @@ -217,7 +177,6 @@ func TestFetchRuleSets(t *testing.T) { Host: bucketName, RawQuery: fmt.Sprintf("endpoint=%s®ion=eu-central-1", srv.URL), }, - RulesPathPrefix: "foo/bar", }, setup: func(t *testing.T) { t.Helper() @@ -228,8 +187,14 @@ func TestFetchRuleSets(t *testing.T) { "name": "test", "rules": [{ "id": "foobar", - "match": "http://<**>/foo/bar/api1", - "methods": ["GET", "POST"], + "match": { + "routes": [ + { "path": "/foo/bar/api1" } + ], + "scheme": "http", + "hosts": [{ "type": "glob", "value": "**"}], + "methods": ["GET", "POST"] + }, "execute": [ { "authenticator": "foobar" } ] @@ -241,13 +206,19 @@ version: "1" name: test2 rules: - id: barfoo - match: http://<**>/foo/bar/api2 - methods: - - GET - - POST + match: + routes: + - path: /foo/bar/api2 + scheme: http + hosts: + - type: glob + value: "**" + methods: + - GET + - POST execute: - - authenticator: barfoo` - + - authenticator: barfoo +` _, err := backend.PutObject(bucketName, "test-rule1", map[string]string{"Content-Type": "application/json"}, strings.NewReader(ruleSet1), int64(len(ruleSet1))) @@ -294,8 +265,14 @@ rules: "name": "test1", "rules": [{ "id": "foobar", - "match": "http://<**>/foo/bar/api1", - "methods": ["GET", "POST"], + "match": { + "routes": [ + { "path": "/foo/bar/api1" } + ], + "scheme": "http", + "hosts": [{ "type": "glob", "value": "**" }], + "methods": ["GET", "POST"] + }, "execute": [ { "authenticator": "foobar" } ] @@ -306,8 +283,14 @@ rules: "name": "test2", "rules": [{ "id": "barfoo", - "url": "http://<**>/foo/bar/api2", - "methods": ["GET", "POST"], + "match": { + "routes": [ + { "path": "/foo/bar/api2" } + ], + "scheme": "http", + "hosts": [{ "type": "glob", "value": "**"}], + "methods": ["GET", "POST"] + }, "execute": [ { "authenticator": "barfoo" } ] @@ -400,8 +383,14 @@ rules: 
"name": "test", "rules": [{ "id": "foobar", - "match": "http://<**>/foo/bar/api1", - "methods": ["GET", "POST"], + "match": { + "routes": [ + { "path": "/foo/bar/api1" } + ], + "scheme": "http", + "hosts": [{ "type": "glob", "value": "**" }], + "methods": ["GET", "POST"] + }, "execute": [ { "authenticator": "foobar" } ] diff --git a/internal/rules/provider/filesystem/provider.go b/internal/rules/provider/filesystem/provider.go index e01c3762c..66d341cf5 100644 --- a/internal/rules/provider/filesystem/provider.go +++ b/internal/rules/provider/filesystem/provider.go @@ -267,7 +267,7 @@ func (p *Provider) loadRuleSet(fileName string) (*config2.RuleSet, error) { ruleSet, err := config2.ParseRules("application/yaml", io.TeeReader(file, md), p.envVarsEnabled) if err != nil { - return nil, errorchain.NewWithMessage(heimdall.ErrInternal, "failed to parse received rule set"). + return nil, errorchain.NewWithMessagef(heimdall.ErrInternal, "failed to parse rule set %s", fileName). CausedBy(err) } diff --git a/internal/rules/provider/filesystem/provider_test.go b/internal/rules/provider/filesystem/provider_test.go index 3d7bfb406..7450c0d77 100644 --- a/internal/rules/provider/filesystem/provider_test.go +++ b/internal/rules/provider/filesystem/provider_test.go @@ -200,8 +200,19 @@ func TestProviderLifecycle(t *testing.T) { _, err := file.WriteString(` version: "1" +name: test rules: - id: foo + match: + routes: + - path: /foo/:bar + path_params: + - name: bar + type: glob + value: "*baz" + methods: [ GET ] + execute: + - authenticator: test `) require.NoError(t, err) @@ -221,9 +232,19 @@ rules: ruleSet := mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor1").Value() assert.Contains(t, ruleSet.Source, "file_system:") + require.NotNil(t, ruleSet) + assert.Equal(t, "test", ruleSet.Name) assert.Equal(t, "1", ruleSet.Version) assert.Len(t, ruleSet.Rules, 1) assert.Equal(t, "foo", ruleSet.Rules[0].ID) + require.Len(t, ruleSet.Rules[0].Matcher.Routes, 1) + assert.Equal(t, "/foo/:bar", ruleSet.Rules[0].Matcher.Routes[0].Path) + require.Len(t, ruleSet.Rules[0].Matcher.Routes[0].PathParams, 1) + assert.Equal(t, "bar", ruleSet.Rules[0].Matcher.Routes[0].PathParams[0].Name) + assert.Equal(t, "glob", ruleSet.Rules[0].Matcher.Routes[0].PathParams[0].Type) + assert.Equal(t, "*baz", ruleSet.Rules[0].Matcher.Routes[0].PathParams[0].Value) + assert.Equal(t, []string{"GET"}, ruleSet.Rules[0].Matcher.Methods) + assert.NotEmpty(t, ruleSet.Hash) }, }, { @@ -251,6 +272,11 @@ rules: version: "2" rules: - id: foo + match: + routes: + - path: /foo/bar + execute: + - authenticator: test `) require.NoError(t, err) @@ -290,6 +316,11 @@ rules: version: "1" rules: - id: foo + match: + routes: + - path: /foo/bar + execute: + - authenticator: test `) require.NoError(t, err) @@ -322,6 +353,11 @@ rules: version: "1" rules: - id: foo + match: + routes: + - path: /foo/bar + execute: + - authenticator: test `) require.NoError(t, err) @@ -369,6 +405,11 @@ rules: version: "1" rules: - id: foo + match: + routes: + - path: /foo + execute: + - authenticator: test `) require.NoError(t, err) @@ -381,6 +422,11 @@ rules: version: "1" rules: - id: foo + match: + routes: + - path: /foo + execute: + - authenticator: test `) require.NoError(t, err) @@ -393,6 +439,11 @@ rules: version: "2" rules: - id: bar + match: + routes: + - path: /bar + execute: + - authenticator: test `) require.NoError(t, err) diff --git a/internal/rules/provider/httpendpoint/config_decoder.go b/internal/rules/provider/httpendpoint/config_decoder.go index 
86fd8ee5d..7aca5f5df 100644 --- a/internal/rules/provider/httpendpoint/config_decoder.go +++ b/internal/rules/provider/httpendpoint/config_decoder.go @@ -27,7 +27,7 @@ func decodeConfig(input any, output any) error { dec, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( - authstrategy.DecodeAuthenticationStrategyHookFunc(), + authstrategy.DecodeAuthenticationStrategyHookFunc(nil), endpoint.DecodeEndpointHookFunc(), mapstructure.StringToTimeDurationHookFunc(), ), diff --git a/internal/rules/provider/httpendpoint/provider_test.go b/internal/rules/provider/httpendpoint/provider_test.go index ecacf4439..39f2d368a 100644 --- a/internal/rules/provider/httpendpoint/provider_test.go +++ b/internal/rules/provider/httpendpoint/provider_test.go @@ -166,7 +166,7 @@ endpoints: Providers: config.RuleProviders{HTTPEndpoint: providerConf}, } - cch, err := memory.NewCache(nil, nil) + cch, err := memory.NewCache(nil, nil, nil) require.NoError(t, err) // WHEN @@ -262,6 +262,12 @@ version: "1" name: test rules: - id: foo + match: + routes: + - path: /foo + methods: [ "GET" ] + execute: + - authenticator: test `)) require.NoError(t, err) }, @@ -304,6 +310,12 @@ version: "1" name: test rules: - id: bar + match: + routes: + - path: /bar + methods: [ "GET" ] + execute: + - authenticator: test `)) require.NoError(t, err) }, @@ -351,6 +363,12 @@ version: "1" name: test rules: - id: foo + match: + routes: + - path: /foo + methods: [ GET ] + execute: + - authenticator: test `)) require.NoError(t, err) case 2: @@ -362,6 +380,12 @@ version: "2" name: test rules: - id: bar + match: + routes: + - path: /bar + methods: [ GET ] + execute: + - authenticator: test `)) require.NoError(t, err) } @@ -427,6 +451,11 @@ version: "1" name: test rules: - id: bar + match: + routes: + - path: /bar + execute: + - authenticator: test `)) require.NoError(t, err) case 2: @@ -436,6 +465,11 @@ version: "1" name: test rules: - id: baz + match: + routes: + - path: /baz + execute: + - authenticator: test `)) require.NoError(t, err) case 3: @@ -445,6 +479,11 @@ version: "1" name: test rules: - id: foo + match: + routes: + - path: /foo + execute: + - authenticator: test `)) require.NoError(t, err) default: @@ -454,6 +493,11 @@ version: "1" name: test rules: - id: foz + match: + routes: + - path: /foz + execute: + - authenticator: test `)) require.NoError(t, err) } @@ -524,6 +568,11 @@ version: "1" name: test rules: - id: bar + match: + routes: + - path: /bar + execute: + - authenticator: test `)) require.NoError(t, err) }, @@ -569,6 +618,11 @@ version: "1" name: test rules: - id: bar + match: + routes: + - path: /bar + execute: + - authenticator: test `)) require.NoError(t, err) }, @@ -612,6 +666,11 @@ version: "1" name: test rules: - id: foo + match: + routes: + - path: /foo + execute: + - authenticator: test `)) require.NoError(t, err) }, @@ -649,6 +708,11 @@ version: "1" name: test rules: - id: bar + match: + routes: + - path: /bar + execute: + - authenticator: test `)) require.NoError(t, err) } else { @@ -658,6 +722,11 @@ version: "1" name: test rules: - id: baz + match: + routes: + - path: /baz + execute: + - authenticator: test `)) require.NoError(t, err) } @@ -700,6 +769,11 @@ version: "1" name: test rules: - id: bar + match: + routes: + - path: /bar + execute: + - authenticator: test `)) require.NoError(t, err) } else { @@ -745,7 +819,7 @@ rules: logs := &strings.Builder{} - cch, err := memory.NewCache(nil, nil) + cch, err := memory.NewCache(nil, nil, nil) require.NoError(t, 
err) prov, err := newProvider(conf, cch, processor, zerolog.New(logs)) diff --git a/internal/rules/provider/httpendpoint/ruleset_endpoint.go b/internal/rules/provider/httpendpoint/ruleset_endpoint.go index ecfacebb9..3f7c78cac 100644 --- a/internal/rules/provider/httpendpoint/ruleset_endpoint.go +++ b/internal/rules/provider/httpendpoint/ruleset_endpoint.go @@ -33,8 +33,6 @@ import ( type ruleSetEndpoint struct { endpoint.Endpoint `mapstructure:",squash"` - - RulesPathPrefix string `mapstructure:"rule_path_match_prefix"` } func (e *ruleSetEndpoint) ID() string { return e.URL } @@ -78,10 +76,6 @@ func (e *ruleSetEndpoint) FetchRuleSet(ctx context.Context) (*config.RuleSet, er CausedBy(err) } - if err = ruleSet.VerifyPathPrefix(e.RulesPathPrefix); err != nil { - return nil, err - } - ruleSet.Hash = md.Sum(nil) ruleSet.Source = "http_endpoint:" + e.ID() ruleSet.ModTime = time.Now() diff --git a/internal/rules/provider/httpendpoint/ruleset_endpoint_test.go b/internal/rules/provider/httpendpoint/ruleset_endpoint_test.go index e26b39ce0..e13908533 100644 --- a/internal/rules/provider/httpendpoint/ruleset_endpoint_test.go +++ b/internal/rules/provider/httpendpoint/ruleset_endpoint_test.go @@ -134,6 +134,9 @@ version: "1" name: test rules: - id: bar + match: + routes: + - path: /bar `)) require.NoError(t, err) }, @@ -182,6 +185,16 @@ version: "1" name: test rules: - id: foo + match: + routes: + - path: /foo/:bar + path_params: + - name: bar + type: glob + value: "*baz" + methods: [ GET ] + execute: + - authenticator: test `)) require.NoError(t, err) }, @@ -191,9 +204,18 @@ rules: require.NoError(t, err) require.NotNil(t, ruleSet) + assert.Equal(t, "test", ruleSet.Name) + assert.Equal(t, "1", ruleSet.Version) assert.Len(t, ruleSet.Rules, 1) assert.Equal(t, "foo", ruleSet.Rules[0].ID) - require.NotEmpty(t, ruleSet.Hash) + require.Len(t, ruleSet.Rules[0].Matcher.Routes, 1) + assert.Equal(t, "/foo/:bar", ruleSet.Rules[0].Matcher.Routes[0].Path) + require.Len(t, ruleSet.Rules[0].Matcher.Routes[0].PathParams, 1) + assert.Equal(t, "bar", ruleSet.Rules[0].Matcher.Routes[0].PathParams[0].Name) + assert.Equal(t, "glob", ruleSet.Rules[0].Matcher.Routes[0].PathParams[0].Type) + assert.Equal(t, "*baz", ruleSet.Rules[0].Matcher.Routes[0].PathParams[0].Value) + assert.Equal(t, []string{"GET"}, ruleSet.Rules[0].Matcher.Methods) + assert.NotEmpty(t, ruleSet.Hash) }, }, { @@ -212,7 +234,13 @@ rules: "version": "1", "name": "test", "rules": [ - { "id": "foo" } + { + "id": "foo", + "match": { + "routes": [{"path": "/foo"}], + "methods" : ["GET"] + }, + "execute": [{ "authenticator": "test"}] } ] }`)) require.NoError(t, err) @@ -229,73 +257,12 @@ rules: }, }, { - uc: "valid rule set with path only url glob with path prefix violation", - ep: &ruleSetEndpoint{ - Endpoint: endpoint.Endpoint{ - URL: srv.URL, - Method: http.MethodGet, - }, - RulesPathPrefix: "/foo/bar", - }, - writeResponse: func(t *testing.T, w http.ResponseWriter) { - t.Helper() - - w.Header().Set("Content-Type", "application/json") - _, err := w.Write([]byte(`{ - "version": "1", - "name": "test", - "rules": [ - { "id": "foo", "match":"/bar/foo/<**>" } - ] -}`)) - require.NoError(t, err) - }, - assert: func(t *testing.T, err error, _ *config.RuleSet) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "path prefix validation") - }, - }, - { - uc: "valid rule set with full url glob with path prefix violation", - ep: &ruleSetEndpoint{ - Endpoint: endpoint.Endpoint{ - URL: srv.URL, 
- Method: http.MethodGet, - }, - RulesPathPrefix: "/foo/bar", - }, - writeResponse: func(t *testing.T, w http.ResponseWriter) { - t.Helper() - - w.Header().Set("Content-Type", "application/json") - _, err := w.Write([]byte(`{ - "version": "1", - "name": "test", - "rules": [ - { "id": "foo", "match":"<**>://moobar.local:9090/bar/foo/<**>" } - ] -}`)) - require.NoError(t, err) - }, - assert: func(t *testing.T, err error, _ *config.RuleSet) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "path prefix validation") - }, - }, - { - uc: "valid rule set with full url glob without path prefix violation", + uc: "valid rule set with full url glob", ep: &ruleSetEndpoint{ Endpoint: endpoint.Endpoint{ URL: srv.URL, Method: http.MethodGet, }, - RulesPathPrefix: "/foo/bar", }, writeResponse: func(t *testing.T, w http.ResponseWriter) { t.Helper() @@ -305,7 +272,17 @@ rules: "version": "1", "name": "test", "rules": [ - { "id": "foo", "match":"<**>://moobar.local:9090/foo/bar/<**>" } + { + "id": "foo", + "match": { + "routes": [ + { "path": "/foo/bar/:baz", "path_params": [{ "name": "baz", "type":"glob", "value":"{*.ico,*.js}" }] } + ], + "methods": [ "GET" ], + "hosts": [{ "value":"moobar.local:9090", "type": "exact"}], + }, + "execute": [{ "authenticator": "test"}] + } ] }`)) require.NoError(t, err) diff --git a/internal/rules/provider/kubernetes/admissioncontroller/controller_test.go b/internal/rules/provider/kubernetes/admissioncontroller/controller_test.go index cb2c4baf6..7506d2318 100644 --- a/internal/rules/provider/kubernetes/admissioncontroller/controller_test.go +++ b/internal/rules/provider/kubernetes/admissioncontroller/controller_test.go @@ -45,7 +45,7 @@ import ( "github.com/dadrus/heimdall/internal/config" config2 "github.com/dadrus/heimdall/internal/rules/config" - "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha3" + "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha4" "github.com/dadrus/heimdall/internal/rules/rule/mocks" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/pkix/pemx" @@ -114,10 +114,10 @@ func TestControllerLifecycle(t *testing.T) { Namespace: "test", Name: "test-rules", Operation: admissionv1.Create, - Kind: metav1.GroupVersionKind{Group: v1alpha3.GroupName, Version: v1alpha3.GroupVersion, Kind: "RuleSet"}, - Resource: metav1.GroupVersionResource{Group: v1alpha3.GroupName, Version: v1alpha3.GroupVersion, Resource: "rulesets"}, - RequestKind: &metav1.GroupVersionKind{Group: v1alpha3.GroupName, Version: v1alpha3.GroupVersion, Kind: "RuleSet"}, - RequestResource: &metav1.GroupVersionResource{Group: v1alpha3.GroupName, Version: v1alpha3.GroupVersion, Resource: "rulesets"}, + Kind: metav1.GroupVersionKind{Group: v1alpha4.GroupName, Version: v1alpha4.GroupVersion, Kind: "RuleSet"}, + Resource: metav1.GroupVersionResource{Group: v1alpha4.GroupName, Version: v1alpha4.GroupVersion, Resource: "rulesets"}, + RequestKind: &metav1.GroupVersionKind{Group: v1alpha4.GroupName, Version: v1alpha4.GroupVersion, Kind: "RuleSet"}, + RequestResource: &metav1.GroupVersionResource{Group: v1alpha4.GroupName, Version: v1alpha4.GroupVersion, Resource: "rulesets"}, }, } @@ -195,9 +195,9 @@ func TestControllerLifecycle(t *testing.T) { request: func(t *testing.T, URL string) *http.Request { t.Helper() - ruleSet := v1alpha3.RuleSet{ + ruleSet := v1alpha4.RuleSet{ TypeMeta: metav1.TypeMeta{ - 
APIVersion: fmt.Sprintf("%s/%s", v1alpha3.GroupName, v1alpha3.GroupVersion), + APIVersion: fmt.Sprintf("%s/%s", v1alpha4.GroupName, v1alpha4.GroupVersion), Kind: "RuleSet", }, ObjectMeta: metav1.ObjectMeta{ @@ -208,7 +208,7 @@ func TestControllerLifecycle(t *testing.T) { Generation: 1, CreationTimestamp: metav1.NewTime(time.Now()), }, - Spec: v1alpha3.RuleSetSpec{AuthClassName: "foo"}, + Spec: v1alpha4.RuleSetSpec{AuthClassName: "foo"}, } data, err := json.Marshal(&ruleSet) require.NoError(t, err) @@ -253,9 +253,9 @@ func TestControllerLifecycle(t *testing.T) { request: func(t *testing.T, URL string) *http.Request { t.Helper() - ruleSet := v1alpha3.RuleSet{ + ruleSet := v1alpha4.RuleSet{ TypeMeta: metav1.TypeMeta{ - APIVersion: fmt.Sprintf("%s/%s", v1alpha3.GroupName, v1alpha3.GroupVersion), + APIVersion: fmt.Sprintf("%s/%s", v1alpha4.GroupName, v1alpha4.GroupVersion), Kind: "RuleSet", }, ObjectMeta: metav1.ObjectMeta{ @@ -266,14 +266,15 @@ func TestControllerLifecycle(t *testing.T) { Generation: 1, CreationTimestamp: metav1.NewTime(time.Now()), }, - Spec: v1alpha3.RuleSetSpec{ + Spec: v1alpha4.RuleSetSpec{ AuthClassName: authClass, Rules: []config2.Rule{ { ID: "test", - RuleMatcher: config2.Matcher{ - URL: "http://foo.bar", - Strategy: "glob", + Matcher: config2.Matcher{ + Routes: []config2.Route{{Path: "/foo.bar"}}, + Scheme: "http", + Methods: []string{http.MethodGet}, }, Backend: &config2.Backend{ Host: "baz", @@ -284,7 +285,6 @@ func TestControllerLifecycle(t *testing.T) { QueryParamsToRemove: []string{"baz"}, }, }, - Methods: []string{http.MethodGet}, Execute: []config.MechanismConfig{ {"authenticator": "authn"}, {"authorizer": "authz"}, @@ -310,7 +310,7 @@ func TestControllerLifecycle(t *testing.T) { setupRuleFactory: func(t *testing.T, factory *mocks.FactoryMock) { t.Helper() - factory.EXPECT().CreateRule("1alpha3", mock.Anything, mock.Anything). + factory.EXPECT().CreateRule("1alpha4", mock.Anything, mock.Anything). Once().Return(nil, errors.New("Test error")) }, assert: func(t *testing.T, err error, resp *http.Response) { @@ -346,9 +346,9 @@ func TestControllerLifecycle(t *testing.T) { request: func(t *testing.T, URL string) *http.Request { t.Helper() - ruleSet := v1alpha3.RuleSet{ + ruleSet := v1alpha4.RuleSet{ TypeMeta: metav1.TypeMeta{ - APIVersion: fmt.Sprintf("%s/%s", v1alpha3.GroupName, v1alpha3.GroupVersion), + APIVersion: fmt.Sprintf("%s/%s", v1alpha4.GroupName, v1alpha4.GroupVersion), Kind: "RuleSet", }, ObjectMeta: metav1.ObjectMeta{ @@ -359,14 +359,15 @@ func TestControllerLifecycle(t *testing.T) { Generation: 1, CreationTimestamp: metav1.NewTime(time.Now()), }, - Spec: v1alpha3.RuleSetSpec{ + Spec: v1alpha4.RuleSetSpec{ AuthClassName: authClass, Rules: []config2.Rule{ { ID: "test", - RuleMatcher: config2.Matcher{ - URL: "http://foo.bar", - Strategy: "glob", + Matcher: config2.Matcher{ + Routes: []config2.Route{{Path: "/foo.bar"}}, + Scheme: "http", + Methods: []string{http.MethodGet}, }, Backend: &config2.Backend{ Host: "baz", @@ -377,7 +378,6 @@ func TestControllerLifecycle(t *testing.T) { QueryParamsToRemove: []string{"baz"}, }, }, - Methods: []string{http.MethodGet}, Execute: []config.MechanismConfig{ {"authenticator": "authn"}, {"authorizer": "authz"}, @@ -403,7 +403,7 @@ func TestControllerLifecycle(t *testing.T) { setupRuleFactory: func(t *testing.T, factory *mocks.FactoryMock) { t.Helper() - factory.EXPECT().CreateRule("1alpha3", mock.Anything, mock.Anything). + factory.EXPECT().CreateRule("1alpha4", mock.Anything, mock.Anything). 
Once().Return(nil, nil) }, assert: func(t *testing.T, err error, resp *http.Response) { diff --git a/internal/rules/provider/kubernetes/admissioncontroller/validator.go b/internal/rules/provider/kubernetes/admissioncontroller/validator.go index 3f56956d9..9d294b2b9 100644 --- a/internal/rules/provider/kubernetes/admissioncontroller/validator.go +++ b/internal/rules/provider/kubernetes/admissioncontroller/validator.go @@ -28,7 +28,7 @@ import ( "github.com/dadrus/heimdall/internal/rules/config" "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/admissioncontroller/admission" - "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha3" + "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha4" "github.com/dadrus/heimdall/internal/rules/rule" ) @@ -88,18 +88,18 @@ func (rv *rulesetValidator) Handle(ctx context.Context, req *admission.Request) return admission.NewResponse(http.StatusOK, "RuleSet valid") } -func (rv *rulesetValidator) ruleSetFrom(req *admission.Request) (*v1alpha3.RuleSet, error) { +func (rv *rulesetValidator) ruleSetFrom(req *admission.Request) (*v1alpha4.RuleSet, error) { if req.Kind.Kind != "RuleSet" { return nil, ErrInvalidObject } - p := &v1alpha3.RuleSet{} + p := &v1alpha4.RuleSet{} err := json.Unmarshal(req.Object.Raw, p) return p, err } func (rv *rulesetValidator) mapVersion(_ string) string { - // currently the only possible version is v1alpha3, which is mapped to the version "1alpha3" used internally - return "1alpha3" + // currently the only possible version is v1alpha4, which is mapped to the version "1alpha4" used internally + return "1alpha4" } diff --git a/internal/rules/provider/kubernetes/api/v1alpha3/client.go b/internal/rules/provider/kubernetes/api/v1alpha4/client.go similarity index 97% rename from internal/rules/provider/kubernetes/api/v1alpha3/client.go rename to internal/rules/provider/kubernetes/api/v1alpha4/client.go index 7b2e79b71..5520aca62 100644 --- a/internal/rules/provider/kubernetes/api/v1alpha3/client.go +++ b/internal/rules/provider/kubernetes/api/v1alpha4/client.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package v1alpha3 +package v1alpha4 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const ( GroupName = "heimdall.dadrus.github.com" - GroupVersion = "v1alpha3" + GroupVersion = "v1alpha4" ) func addKnownTypes(gv schema.GroupVersion) func(scheme *runtime.Scheme) error { diff --git a/internal/rules/provider/kubernetes/api/v1alpha3/client_test.go b/internal/rules/provider/kubernetes/api/v1alpha4/client_test.go similarity index 69% rename from internal/rules/provider/kubernetes/api/v1alpha3/client_test.go rename to internal/rules/provider/kubernetes/api/v1alpha4/client_test.go index b13bdc82f..be1b9059b 100644 --- a/internal/rules/provider/kubernetes/api/v1alpha3/client_test.go +++ b/internal/rules/provider/kubernetes/api/v1alpha4/client_test.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package v1alpha3 +package v1alpha4 import ( "context" @@ -38,9 +38,9 @@ const watchResponse = `{ ` const response = `{ - "apiVersion": "heimdall.dadrus.github.com/v1alpha3", + "apiVersion": "heimdall.dadrus.github.com/v1alpha4", "items": [{ - "apiVersion": "heimdall.dadrus.github.com/v1alpha3", + "apiVersion": "heimdall.dadrus.github.com/v1alpha4", "kind": "RuleSet", "metadata": { "name": "test-rule-set", @@ -56,16 +56,31 @@ const response = `{ { "authorizer": "test_authz" } ], "id": "test:rule", - 
"matching_strategy": "glob", - "match": "http://127.0.0.1:9090/foobar/<{foos*}>", + "match": { + "routes": [ + { + "path": "/foobar/*foo", + "path_params": [{ "name": "foo", "type": "glob", "value": "foos*" }] + }, + { + "path": "/foobar/baz" + } + ], + "scheme": "http", + "hosts": [ + {"type": "exact","value": "127.0.0.1"}, + {"type": "glob","value": "172.*.*.1"} + ], + "methods": ["GET", "POST"] + }, "forward_to": { - "host": "foo.bar", - "rewrite": { - "scheme": "https", - "strip_path_prefix": "/foo", - "add_path_prefix": "/baz", - "strip_query_parameters": ["boo"] - } + "host": "foo.bar", + "rewrite": { + "scheme": "https", + "strip_path_prefix": "/foo", + "add_path_prefix": "/baz", + "strip_query_parameters": ["boo"] + } } } ] @@ -126,7 +141,7 @@ func verifyRuleSetList(t *testing.T, rls *RuleSetList) { ruleSet := rls.Items[0] assert.Equal(t, "RuleSet", ruleSet.Kind) - assert.Equal(t, "heimdall.dadrus.github.com/v1alpha3", ruleSet.APIVersion) + assert.Equal(t, "heimdall.dadrus.github.com/v1alpha4", ruleSet.APIVersion) assert.Equal(t, "test-rule-set", ruleSet.Name) assert.Equal(t, "foo", ruleSet.Namespace) assert.Equal(t, "foobar", ruleSet.Spec.AuthClassName) @@ -134,9 +149,20 @@ func verifyRuleSetList(t *testing.T, rls *RuleSetList) { rule := ruleSet.Spec.Rules[0] assert.Equal(t, "test:rule", rule.ID) - assert.Equal(t, "glob", rule.RuleMatcher.Strategy) - assert.Equal(t, "http://127.0.0.1:9090/foobar/<{foos*}>", rule.RuleMatcher.URL) - assert.Empty(t, rule.Methods) + assert.Len(t, rule.Matcher.Routes, 2) + assert.Equal(t, "/foobar/*foo", rule.Matcher.Routes[0].Path) + assert.Len(t, rule.Matcher.Routes[0].PathParams, 1) + assert.Equal(t, "foo", rule.Matcher.Routes[0].PathParams[0].Name) + assert.Equal(t, "glob", rule.Matcher.Routes[0].PathParams[0].Type) + assert.Equal(t, "foos*", rule.Matcher.Routes[0].PathParams[0].Value) + assert.Equal(t, "/foobar/baz", rule.Matcher.Routes[1].Path) + assert.Equal(t, "http", rule.Matcher.Scheme) + assert.Len(t, rule.Matcher.Hosts, 2) + assert.Equal(t, "127.0.0.1", rule.Matcher.Hosts[0].Value) + assert.Equal(t, "exact", rule.Matcher.Hosts[0].Type) + assert.Equal(t, "172.*.*.1", rule.Matcher.Hosts[1].Value) + assert.Equal(t, "glob", rule.Matcher.Hosts[1].Type) + assert.ElementsMatch(t, rule.Matcher.Methods, []string{"GET", "POST"}) assert.Empty(t, rule.ErrorHandler) assert.Equal(t, "https://foo.bar/baz/bar?foo=bar", rule.Backend.CreateURL(&url.URL{ Scheme: "http", diff --git a/internal/rules/provider/kubernetes/api/v1alpha3/json_patch.go b/internal/rules/provider/kubernetes/api/v1alpha4/json_patch.go similarity index 99% rename from internal/rules/provider/kubernetes/api/v1alpha3/json_patch.go rename to internal/rules/provider/kubernetes/api/v1alpha4/json_patch.go index ed9560aaa..2aba8b37d 100644 --- a/internal/rules/provider/kubernetes/api/v1alpha3/json_patch.go +++ b/internal/rules/provider/kubernetes/api/v1alpha4/json_patch.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package v1alpha3 +package v1alpha4 import ( "github.com/goccy/go-json" diff --git a/internal/rules/provider/kubernetes/api/v1alpha3/mocks/client.go b/internal/rules/provider/kubernetes/api/v1alpha4/mocks/client.go similarity index 74% rename from internal/rules/provider/kubernetes/api/v1alpha3/mocks/client.go rename to internal/rules/provider/kubernetes/api/v1alpha4/mocks/client.go index 2405f7232..398d164f9 100644 --- a/internal/rules/provider/kubernetes/api/v1alpha3/mocks/client.go +++ b/internal/rules/provider/kubernetes/api/v1alpha4/mocks/client.go @@ 
-1,9 +1,9 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks import ( - "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha3" + v1alpha4 "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha4" mock "github.com/stretchr/testify/mock" ) @@ -21,15 +21,19 @@ func (_m *ClientMock) EXPECT() *ClientMock_Expecter { } // RuleSetRepository provides a mock function with given fields: namespace -func (_m *ClientMock) RuleSetRepository(namespace string) v1alpha3.RuleSetRepository { +func (_m *ClientMock) RuleSetRepository(namespace string) v1alpha4.RuleSetRepository { ret := _m.Called(namespace) - var r0 v1alpha3.RuleSetRepository - if rf, ok := ret.Get(0).(func(string) v1alpha3.RuleSetRepository); ok { + if len(ret) == 0 { + panic("no return value specified for RuleSetRepository") + } + + var r0 v1alpha4.RuleSetRepository + if rf, ok := ret.Get(0).(func(string) v1alpha4.RuleSetRepository); ok { r0 = rf(namespace) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(v1alpha3.RuleSetRepository) + r0 = ret.Get(0).(v1alpha4.RuleSetRepository) } } @@ -54,23 +58,22 @@ func (_c *ClientMock_RuleSetRepository_Call) Run(run func(namespace string)) *Cl return _c } -func (_c *ClientMock_RuleSetRepository_Call) Return(_a0 v1alpha3.RuleSetRepository) *ClientMock_RuleSetRepository_Call { +func (_c *ClientMock_RuleSetRepository_Call) Return(_a0 v1alpha4.RuleSetRepository) *ClientMock_RuleSetRepository_Call { _c.Call.Return(_a0) return _c } -func (_c *ClientMock_RuleSetRepository_Call) RunAndReturn(run func(string) v1alpha3.RuleSetRepository) *ClientMock_RuleSetRepository_Call { +func (_c *ClientMock_RuleSetRepository_Call) RunAndReturn(run func(string) v1alpha4.RuleSetRepository) *ClientMock_RuleSetRepository_Call { _c.Call.Return(run) return _c } -type mockConstructorTestingTNewClientMock interface { +// NewClientMock creates a new instance of ClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClientMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewClientMock creates a new instance of ClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewClientMock(t mockConstructorTestingTNewClientMock) *ClientMock { +}) *ClientMock { mock := &ClientMock{} mock.Mock.Test(t) diff --git a/internal/rules/provider/kubernetes/api/v1alpha3/mocks/rule_set_repository.go b/internal/rules/provider/kubernetes/api/v1alpha4/mocks/rule_set_repository.go similarity index 78% rename from internal/rules/provider/kubernetes/api/v1alpha3/mocks/rule_set_repository.go rename to internal/rules/provider/kubernetes/api/v1alpha4/mocks/rule_set_repository.go index d0051ce10..43e1a2c83 100644 --- a/internal/rules/provider/kubernetes/api/v1alpha3/mocks/rule_set_repository.go +++ b/internal/rules/provider/kubernetes/api/v1alpha4/mocks/rule_set_repository.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. 
package mocks @@ -10,7 +10,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1alpha3 "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha3" + v1alpha4 "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha4" watch "k8s.io/apimachinery/pkg/watch" ) @@ -29,19 +29,23 @@ func (_m *RuleSetRepositoryMock) EXPECT() *RuleSetRepositoryMock_Expecter { } // Get provides a mock function with given fields: ctx, key, opts -func (_m *RuleSetRepositoryMock) Get(ctx context.Context, key types.NamespacedName, opts v1.GetOptions) (*v1alpha3.RuleSet, error) { +func (_m *RuleSetRepositoryMock) Get(ctx context.Context, key types.NamespacedName, opts v1.GetOptions) (*v1alpha4.RuleSet, error) { ret := _m.Called(ctx, key, opts) - var r0 *v1alpha3.RuleSet + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *v1alpha4.RuleSet var r1 error - if rf, ok := ret.Get(0).(func(context.Context, types.NamespacedName, v1.GetOptions) (*v1alpha3.RuleSet, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, types.NamespacedName, v1.GetOptions) (*v1alpha4.RuleSet, error)); ok { return rf(ctx, key, opts) } - if rf, ok := ret.Get(0).(func(context.Context, types.NamespacedName, v1.GetOptions) *v1alpha3.RuleSet); ok { + if rf, ok := ret.Get(0).(func(context.Context, types.NamespacedName, v1.GetOptions) *v1alpha4.RuleSet); ok { r0 = rf(ctx, key, opts) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1alpha3.RuleSet) + r0 = ret.Get(0).(*v1alpha4.RuleSet) } } @@ -74,30 +78,34 @@ func (_c *RuleSetRepositoryMock_Get_Call) Run(run func(ctx context.Context, key return _c } -func (_c *RuleSetRepositoryMock_Get_Call) Return(_a0 *v1alpha3.RuleSet, _a1 error) *RuleSetRepositoryMock_Get_Call { +func (_c *RuleSetRepositoryMock_Get_Call) Return(_a0 *v1alpha4.RuleSet, _a1 error) *RuleSetRepositoryMock_Get_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *RuleSetRepositoryMock_Get_Call) RunAndReturn(run func(context.Context, types.NamespacedName, v1.GetOptions) (*v1alpha3.RuleSet, error)) *RuleSetRepositoryMock_Get_Call { +func (_c *RuleSetRepositoryMock_Get_Call) RunAndReturn(run func(context.Context, types.NamespacedName, v1.GetOptions) (*v1alpha4.RuleSet, error)) *RuleSetRepositoryMock_Get_Call { _c.Call.Return(run) return _c } // List provides a mock function with given fields: ctx, opts -func (_m *RuleSetRepositoryMock) List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.RuleSetList, error) { +func (_m *RuleSetRepositoryMock) List(ctx context.Context, opts v1.ListOptions) (*v1alpha4.RuleSetList, error) { ret := _m.Called(ctx, opts) - var r0 *v1alpha3.RuleSetList + if len(ret) == 0 { + panic("no return value specified for List") + } + + var r0 *v1alpha4.RuleSetList var r1 error - if rf, ok := ret.Get(0).(func(context.Context, v1.ListOptions) (*v1alpha3.RuleSetList, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, v1.ListOptions) (*v1alpha4.RuleSetList, error)); ok { return rf(ctx, opts) } - if rf, ok := ret.Get(0).(func(context.Context, v1.ListOptions) *v1alpha3.RuleSetList); ok { + if rf, ok := ret.Get(0).(func(context.Context, v1.ListOptions) *v1alpha4.RuleSetList); ok { r0 = rf(ctx, opts) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1alpha3.RuleSetList) + r0 = ret.Get(0).(*v1alpha4.RuleSetList) } } @@ -129,34 +137,38 @@ func (_c *RuleSetRepositoryMock_List_Call) Run(run func(ctx context.Context, opt return _c } -func (_c *RuleSetRepositoryMock_List_Call) Return(_a0 *v1alpha3.RuleSetList, _a1 error) 
*RuleSetRepositoryMock_List_Call { +func (_c *RuleSetRepositoryMock_List_Call) Return(_a0 *v1alpha4.RuleSetList, _a1 error) *RuleSetRepositoryMock_List_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *RuleSetRepositoryMock_List_Call) RunAndReturn(run func(context.Context, v1.ListOptions) (*v1alpha3.RuleSetList, error)) *RuleSetRepositoryMock_List_Call { +func (_c *RuleSetRepositoryMock_List_Call) RunAndReturn(run func(context.Context, v1.ListOptions) (*v1alpha4.RuleSetList, error)) *RuleSetRepositoryMock_List_Call { _c.Call.Return(run) return _c } // PatchStatus provides a mock function with given fields: ctx, patch, opts -func (_m *RuleSetRepositoryMock) PatchStatus(ctx context.Context, patch v1alpha3.Patch, opts v1.PatchOptions) (*v1alpha3.RuleSet, error) { +func (_m *RuleSetRepositoryMock) PatchStatus(ctx context.Context, patch v1alpha4.Patch, opts v1.PatchOptions) (*v1alpha4.RuleSet, error) { ret := _m.Called(ctx, patch, opts) - var r0 *v1alpha3.RuleSet + if len(ret) == 0 { + panic("no return value specified for PatchStatus") + } + + var r0 *v1alpha4.RuleSet var r1 error - if rf, ok := ret.Get(0).(func(context.Context, v1alpha3.Patch, v1.PatchOptions) (*v1alpha3.RuleSet, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, v1alpha4.Patch, v1.PatchOptions) (*v1alpha4.RuleSet, error)); ok { return rf(ctx, patch, opts) } - if rf, ok := ret.Get(0).(func(context.Context, v1alpha3.Patch, v1.PatchOptions) *v1alpha3.RuleSet); ok { + if rf, ok := ret.Get(0).(func(context.Context, v1alpha4.Patch, v1.PatchOptions) *v1alpha4.RuleSet); ok { r0 = rf(ctx, patch, opts) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1alpha3.RuleSet) + r0 = ret.Get(0).(*v1alpha4.RuleSet) } } - if rf, ok := ret.Get(1).(func(context.Context, v1alpha3.Patch, v1.PatchOptions) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, v1alpha4.Patch, v1.PatchOptions) error); ok { r1 = rf(ctx, patch, opts) } else { r1 = ret.Error(1) @@ -172,25 +184,25 @@ type RuleSetRepositoryMock_PatchStatus_Call struct { // PatchStatus is a helper method to define mock.On call // - ctx context.Context -// - patch v1alpha3.Patch +// - patch v1alpha4.Patch // - opts v1.PatchOptions func (_e *RuleSetRepositoryMock_Expecter) PatchStatus(ctx interface{}, patch interface{}, opts interface{}) *RuleSetRepositoryMock_PatchStatus_Call { return &RuleSetRepositoryMock_PatchStatus_Call{Call: _e.mock.On("PatchStatus", ctx, patch, opts)} } -func (_c *RuleSetRepositoryMock_PatchStatus_Call) Run(run func(ctx context.Context, patch v1alpha3.Patch, opts v1.PatchOptions)) *RuleSetRepositoryMock_PatchStatus_Call { +func (_c *RuleSetRepositoryMock_PatchStatus_Call) Run(run func(ctx context.Context, patch v1alpha4.Patch, opts v1.PatchOptions)) *RuleSetRepositoryMock_PatchStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(v1alpha3.Patch), args[2].(v1.PatchOptions)) + run(args[0].(context.Context), args[1].(v1alpha4.Patch), args[2].(v1.PatchOptions)) }) return _c } -func (_c *RuleSetRepositoryMock_PatchStatus_Call) Return(_a0 *v1alpha3.RuleSet, _a1 error) *RuleSetRepositoryMock_PatchStatus_Call { +func (_c *RuleSetRepositoryMock_PatchStatus_Call) Return(_a0 *v1alpha4.RuleSet, _a1 error) *RuleSetRepositoryMock_PatchStatus_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *RuleSetRepositoryMock_PatchStatus_Call) RunAndReturn(run func(context.Context, v1alpha3.Patch, v1.PatchOptions) (*v1alpha3.RuleSet, error)) *RuleSetRepositoryMock_PatchStatus_Call { +func (_c 
*RuleSetRepositoryMock_PatchStatus_Call) RunAndReturn(run func(context.Context, v1alpha4.Patch, v1.PatchOptions) (*v1alpha4.RuleSet, error)) *RuleSetRepositoryMock_PatchStatus_Call { _c.Call.Return(run) return _c } @@ -199,6 +211,10 @@ func (_c *RuleSetRepositoryMock_PatchStatus_Call) RunAndReturn(run func(context. func (_m *RuleSetRepositoryMock) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { ret := _m.Called(ctx, opts) + if len(ret) == 0 { + panic("no return value specified for Watch") + } + var r0 watch.Interface var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1.ListOptions) (watch.Interface, error)); ok { @@ -250,13 +266,12 @@ func (_c *RuleSetRepositoryMock_Watch_Call) RunAndReturn(run func(context.Contex return _c } -type mockConstructorTestingTNewRuleSetRepositoryMock interface { +// NewRuleSetRepositoryMock creates a new instance of RuleSetRepositoryMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRuleSetRepositoryMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewRuleSetRepositoryMock creates a new instance of RuleSetRepositoryMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRuleSetRepositoryMock(t mockConstructorTestingTNewRuleSetRepositoryMock) *RuleSetRepositoryMock { +}) *RuleSetRepositoryMock { mock := &RuleSetRepositoryMock{} mock.Mock.Test(t) diff --git a/internal/rules/provider/kubernetes/api/v1alpha3/rule_set_repository.go b/internal/rules/provider/kubernetes/api/v1alpha4/rule_set_repository.go similarity index 98% rename from internal/rules/provider/kubernetes/api/v1alpha3/rule_set_repository.go rename to internal/rules/provider/kubernetes/api/v1alpha4/rule_set_repository.go index 84fe9b74c..446bd8474 100644 --- a/internal/rules/provider/kubernetes/api/v1alpha3/rule_set_repository.go +++ b/internal/rules/provider/kubernetes/api/v1alpha4/rule_set_repository.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package v1alpha3 +package v1alpha4 import ( "context" diff --git a/internal/rules/provider/kubernetes/api/v1alpha3/rule_set_repository_impl.go b/internal/rules/provider/kubernetes/api/v1alpha4/rule_set_repository_impl.go similarity index 99% rename from internal/rules/provider/kubernetes/api/v1alpha3/rule_set_repository_impl.go rename to internal/rules/provider/kubernetes/api/v1alpha4/rule_set_repository_impl.go index 4fc0f29ee..72c4cda8c 100644 --- a/internal/rules/provider/kubernetes/api/v1alpha3/rule_set_repository_impl.go +++ b/internal/rules/provider/kubernetes/api/v1alpha4/rule_set_repository_impl.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package v1alpha3 +package v1alpha4 import ( "context" diff --git a/internal/rules/provider/kubernetes/api/v1alpha3/types.go b/internal/rules/provider/kubernetes/api/v1alpha4/types.go similarity index 99% rename from internal/rules/provider/kubernetes/api/v1alpha3/types.go rename to internal/rules/provider/kubernetes/api/v1alpha4/types.go index b7f7791b5..8fd24f56e 100644 --- a/internal/rules/provider/kubernetes/api/v1alpha3/types.go +++ b/internal/rules/provider/kubernetes/api/v1alpha4/types.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package v1alpha3 +package v1alpha4 //go:generate controller-gen object paths=$GOFILE diff --git a/internal/rules/provider/kubernetes/api/v1alpha3/zz_generated.deepcopy.go 
b/internal/rules/provider/kubernetes/api/v1alpha4/zz_generated.deepcopy.go similarity index 99% rename from internal/rules/provider/kubernetes/api/v1alpha3/zz_generated.deepcopy.go rename to internal/rules/provider/kubernetes/api/v1alpha4/zz_generated.deepcopy.go index 619a7bfce..6b0ace544 100644 --- a/internal/rules/provider/kubernetes/api/v1alpha3/zz_generated.deepcopy.go +++ b/internal/rules/provider/kubernetes/api/v1alpha4/zz_generated.deepcopy.go @@ -3,7 +3,7 @@ // Code generated by controller-gen. DO NOT EDIT. -package v1alpha3 +package v1alpha4 import ( "github.com/dadrus/heimdall/internal/rules/config" diff --git a/internal/rules/provider/kubernetes/provider.go b/internal/rules/provider/kubernetes/provider.go index 221d6d783..6785b34c0 100644 --- a/internal/rules/provider/kubernetes/provider.go +++ b/internal/rules/provider/kubernetes/provider.go @@ -44,7 +44,7 @@ import ( "github.com/dadrus/heimdall/internal/heimdall" config2 "github.com/dadrus/heimdall/internal/rules/config" "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/admissioncontroller" - "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha3" + "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha4" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/errorchain" @@ -56,7 +56,7 @@ type ConfigFactory func() (*rest.Config, error) type provider struct { p rule.SetProcessor l zerolog.Logger - cl v1alpha3.Client + cl v1alpha4.Client adc admissioncontroller.AdmissionController cancel context.CancelFunc configured bool @@ -91,7 +91,7 @@ func newProvider( TLS *config.TLS `mapstructure:"tls"` } - client, err := v1alpha3.NewClient(k8sConf) + client, err := v1alpha4.NewClient(k8sConf) if err != nil { return nil, errorchain.NewWithMessage(heimdall.ErrConfiguration, "failed creating client for connecting to kubernetes cluster").CausedBy(err) @@ -129,7 +129,7 @@ func (p *provider) newController(ctx context.Context, namespace string) (cache.S ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { return repository.List(ctx, opts) }, WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { return repository.Watch(ctx, opts) }, }, - ObjectType: &v1alpha3.RuleSet{}, + ObjectType: &v1alpha4.RuleSet{}, Handler: cache.FilteringResourceEventHandler{ FilterFunc: p.filter, Handler: cache.ResourceEventHandlerFuncs{ @@ -206,7 +206,7 @@ func (p *provider) Stop(ctx context.Context) error { func (p *provider) filter(obj any) bool { // should never be of a different type. ok if panics - rs := obj.(*v1alpha3.RuleSet) // nolint: forcetypeassert + rs := obj.(*v1alpha4.RuleSet) // nolint: forcetypeassert return rs.Spec.AuthClassName == p.ac } @@ -219,7 +219,7 @@ func (p *provider) addRuleSet(obj any) { p.l.Info().Msg("New rule set received") // should never be of a different type. 
ok if panics - rs := obj.(*v1alpha3.RuleSet) // nolint: forcetypeassert + rs := obj.(*v1alpha4.RuleSet) // nolint: forcetypeassert conf := p.toRuleSetConfiguration(rs) if err := p.p.OnCreated(conf); err != nil { @@ -229,7 +229,7 @@ func (p *provider) addRuleSet(obj any) { context.Background(), rs, metav1.ConditionFalse, - v1alpha3.ConditionRuleSetActivationFailed, + v1alpha4.ConditionRuleSetActivationFailed, 1, 0, fmt.Sprintf("%s instance failed loading RuleSet, reason: %s", p.id, err.Error()), @@ -239,7 +239,7 @@ func (p *provider) addRuleSet(obj any) { context.Background(), rs, metav1.ConditionTrue, - v1alpha3.ConditionRuleSetActive, + v1alpha4.ConditionRuleSetActive, 1, 1, p.id+" instance successfully loaded RuleSet", @@ -253,8 +253,8 @@ func (p *provider) updateRuleSet(oldObj, newObj any) { } // should never be of a different type. ok if panics - newRS := newObj.(*v1alpha3.RuleSet) // nolint: forcetypeassert - oldRS := oldObj.(*v1alpha3.RuleSet) // nolint: forcetypeassert + newRS := newObj.(*v1alpha4.RuleSet) // nolint: forcetypeassert + oldRS := oldObj.(*v1alpha4.RuleSet) // nolint: forcetypeassert if oldRS.Generation == newRS.Generation { // we're only interested in Spec updates. Changes in metadata or status are not of relevance @@ -272,7 +272,7 @@ func (p *provider) updateRuleSet(oldObj, newObj any) { context.Background(), newRS, metav1.ConditionFalse, - v1alpha3.ConditionRuleSetActivationFailed, + v1alpha4.ConditionRuleSetActivationFailed, 0, -1, fmt.Sprintf("%s instance failed updating RuleSet, reason: %s", p.id, err.Error()), @@ -282,7 +282,7 @@ func (p *provider) updateRuleSet(oldObj, newObj any) { context.Background(), newRS, metav1.ConditionTrue, - v1alpha3.ConditionRuleSetActive, + v1alpha4.ConditionRuleSetActive, 0, 0, p.id+" instance successfully reloaded RuleSet", @@ -298,7 +298,7 @@ func (p *provider) deleteRuleSet(obj any) { p.l.Info().Msg("Rule set deletion received") // should never be of a different type. 
ok if panics - rs := obj.(*v1alpha3.RuleSet) // nolint: forcetypeassert + rs := obj.(*v1alpha4.RuleSet) // nolint: forcetypeassert conf := p.toRuleSetConfiguration(rs) if err := p.p.OnDeleted(conf); err != nil { @@ -308,7 +308,7 @@ func (p *provider) deleteRuleSet(obj any) { context.Background(), rs, metav1.ConditionTrue, - v1alpha3.ConditionRuleSetUnloadingFailed, + v1alpha4.ConditionRuleSetUnloadingFailed, 0, 0, p.id+" instance failed unloading RuleSet, reason: "+err.Error(), @@ -318,7 +318,7 @@ func (p *provider) deleteRuleSet(obj any) { context.Background(), rs, metav1.ConditionFalse, - v1alpha3.ConditionRuleSetUnloaded, + v1alpha4.ConditionRuleSetUnloaded, -1, -1, p.id+" instance dropped RuleSet", @@ -326,7 +326,7 @@ func (p *provider) deleteRuleSet(obj any) { } } -func (p *provider) toRuleSetConfiguration(rs *v1alpha3.RuleSet) *config2.RuleSet { +func (p *provider) toRuleSetConfiguration(rs *v1alpha4.RuleSet) *config2.RuleSet { return &config2.RuleSet{ MetaData: config2.MetaData{ Source: fmt.Sprintf("%s:%s:%s", ProviderType, rs.Namespace, rs.UID), @@ -339,15 +339,15 @@ func (p *provider) toRuleSetConfiguration(rs *v1alpha3.RuleSet) *config2.RuleSet } func (p *provider) mapVersion(_ string) string { - // currently the only possible version is v1alpha3, which is mapped to the version "1alpha3" used internally - return "1alpha3" + // currently the only possible version is v1alpha4, which is mapped to the version "1alpha4" used internally + return "1alpha4" } func (p *provider) updateStatus( ctx context.Context, - rs *v1alpha3.RuleSet, + rs *v1alpha4.RuleSet, status metav1.ConditionStatus, - reason v1alpha3.ConditionReason, + reason v1alpha4.ConditionReason, matchIncrement int, usageIncrement int, msg string, @@ -359,7 +359,7 @@ func (p *provider) updateStatus( conditionType := p.id + "/Reconciliation" - if reason == v1alpha3.ConditionControllerStopped || reason == v1alpha3.ConditionRuleSetUnloaded { + if reason == v1alpha4.ConditionControllerStopped || reason == v1alpha4.ConditionRuleSetUnloaded { meta.RemoveStatusCondition(&modRS.Status.Conditions, conditionType) } else { meta.SetStatusCondition(&modRS.Status.Conditions, metav1.Condition{ @@ -381,7 +381,7 @@ func (p *provider) updateStatus( _, err := repository.PatchStatus( p.l.WithContext(ctx), - v1alpha3.NewJSONPatch(rs, modRS, true), + v1alpha4.NewJSONPatch(rs, modRS, true), metav1.PatchOptions{}, ) if err == nil { @@ -421,10 +421,10 @@ func (p *provider) updateStatus( func (p *provider) finalize(ctx context.Context) { for _, rs := range slicex.Filter( // nolint: forcetypeassert - slicex.Map(p.store.List(), func(s any) *v1alpha3.RuleSet { return s.(*v1alpha3.RuleSet) }), - func(set *v1alpha3.RuleSet) bool { return set.Spec.AuthClassName == p.ac }, + slicex.Map(p.store.List(), func(s any) *v1alpha4.RuleSet { return s.(*v1alpha4.RuleSet) }), + func(set *v1alpha4.RuleSet) bool { return set.Spec.AuthClassName == p.ac }, ) { - p.updateStatus(ctx, rs, metav1.ConditionFalse, v1alpha3.ConditionControllerStopped, -1, -1, + p.updateStatus(ctx, rs, metav1.ConditionFalse, v1alpha4.ConditionControllerStopped, -1, -1, p.id+" instance stopped") } } diff --git a/internal/rules/provider/kubernetes/provider_test.go b/internal/rules/provider/kubernetes/provider_test.go index e708e58e6..1765c2459 100644 --- a/internal/rules/provider/kubernetes/provider_test.go +++ b/internal/rules/provider/kubernetes/provider_test.go @@ -41,7 +41,7 @@ import ( "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/heimdall" 
config2 "github.com/dadrus/heimdall/internal/rules/config" - "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha3" + "github.com/dadrus/heimdall/internal/rules/provider/kubernetes/api/v1alpha4" "github.com/dadrus/heimdall/internal/rules/rule/mocks" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/testsupport" @@ -116,18 +116,18 @@ func TestNewProvider(t *testing.T) { } type RuleSetResourceHandler struct { - statusUpdates []*v1alpha3.RuleSetStatus + statusUpdates []*v1alpha4.RuleSetStatus listCallIdx int watchCallIdx int updateStatusCallIdx int - rsCurrent v1alpha3.RuleSet + rsCurrent v1alpha4.RuleSet - rsUpdatedEvt chan v1alpha3.RuleSet - rsCurrentEvt chan v1alpha3.RuleSet + rsUpdatedEvt chan v1alpha4.RuleSet + rsCurrentEvt chan v1alpha4.RuleSet - updateStatus func(rs v1alpha3.RuleSet, callIdx int) (*metav1.Status, error) - watchEvent func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) + updateStatus func(rs v1alpha4.RuleSet, callIdx int) (*metav1.Status, error) + watchEvent func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) } func (h *RuleSetResourceHandler) close() { @@ -145,7 +145,7 @@ func (h *RuleSetResourceHandler) handle(t *testing.T, r *http.Request, w http.Re case r.URL.Query().Get("watch") == "true": h.watchCallIdx++ h.writeWatchResponse(t, w) - case r.URL.Path == "/apis/heimdall.dadrus.github.com/v1alpha3/rulesets": + case r.URL.Path == "/apis/heimdall.dadrus.github.com/v1alpha4/rulesets": h.listCallIdx++ h.writeListResponse(t, w) default: @@ -171,7 +171,7 @@ func (h *RuleSetResourceHandler) writeWatchResponse(t *testing.T, w http.Respons return } - h.rsCurrent = *wEvt.Object.(*v1alpha3.RuleSet) // nolint: forcetypeassert + h.rsCurrent = *wEvt.Object.(*v1alpha4.RuleSet) // nolint: forcetypeassert h.rsCurrentEvt <- h.rsCurrent @@ -192,9 +192,9 @@ func (h *RuleSetResourceHandler) writeWatchResponse(t *testing.T, w http.Respons func (h *RuleSetResourceHandler) writeListResponse(t *testing.T, w http.ResponseWriter) { t.Helper() - rs := v1alpha3.RuleSet{ + rs := v1alpha4.RuleSet{ TypeMeta: metav1.TypeMeta{ - APIVersion: fmt.Sprintf("%s/%s", v1alpha3.GroupName, v1alpha3.GroupVersion), + APIVersion: fmt.Sprintf("%s/%s", v1alpha4.GroupName, v1alpha4.GroupVersion), Kind: "RuleSet", }, ObjectMeta: metav1.ObjectMeta{ @@ -205,14 +205,16 @@ func (h *RuleSetResourceHandler) writeListResponse(t *testing.T, w http.Response Generation: 1, CreationTimestamp: metav1.NewTime(time.Now()), }, - Spec: v1alpha3.RuleSetSpec{ + Spec: v1alpha4.RuleSetSpec{ AuthClassName: "bar", Rules: []config2.Rule{ { ID: "test", - RuleMatcher: config2.Matcher{ - URL: "http://foo.bar", - Strategy: "glob", + Matcher: config2.Matcher{ + Routes: []config2.Route{{Path: "/"}}, + Scheme: "http", + Methods: []string{http.MethodGet}, + Hosts: []config2.HostMatcher{{Value: "foo.bar", Type: "glob"}}, }, Backend: &config2.Backend{ Host: "baz", @@ -223,7 +225,6 @@ func (h *RuleSetResourceHandler) writeListResponse(t *testing.T, w http.Response QueryParamsToRemove: []string{"baz"}, }, }, - Methods: []string{http.MethodGet}, Execute: []config.MechanismConfig{ {"authenticator": "authn"}, {"authorizer": "authz"}, @@ -233,13 +234,13 @@ func (h *RuleSetResourceHandler) writeListResponse(t *testing.T, w http.Response }, } - rsl := v1alpha3.RuleSetList{ + rsl := v1alpha4.RuleSetList{ TypeMeta: metav1.TypeMeta{ - APIVersion: fmt.Sprintf("%s/%s", v1alpha3.GroupName, v1alpha3.GroupVersion), + APIVersion: fmt.Sprintf("%s/%s", 
v1alpha4.GroupName, v1alpha4.GroupVersion), Kind: "RuleSetList", }, ListMeta: metav1.ListMeta{ResourceVersion: "735820"}, - Items: []v1alpha3.RuleSet{rs}, + Items: []v1alpha4.RuleSet{rs}, } h.rsUpdatedEvt <- rs @@ -272,7 +273,7 @@ func (h *RuleSetResourceHandler) writeUpdateStatusResponse(t *testing.T, r *http updatedRS, err := patch.Apply(rawRS) require.NoError(t, err) - var newRS v1alpha3.RuleSet + var newRS v1alpha4.RuleSet err = json.Unmarshal(updatedRS, &newRS) require.NoError(t, err) @@ -331,15 +332,15 @@ func TestProviderLifecycle(t *testing.T) { for _, tc := range []struct { uc string conf []byte - watchEvent func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) - updateStatus func(rs v1alpha3.RuleSet, callIdx int) (*metav1.Status, error) + watchEvent func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) + updateStatus func(rs v1alpha4.RuleSet, callIdx int) (*metav1.Status, error) setupProcessor func(t *testing.T, processor *mocks.RuleSetProcessorMock) - assert func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, processor *mocks.RuleSetProcessorMock) + assert func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, processor *mocks.RuleSetProcessorMock) }{ { uc: "rule set added", conf: []byte("auth_class: bar"), - watchEvent: func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) { + watchEvent: func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) { switch callIdx { case 1: return watch.Event{Type: watch.Modified, Object: &rs}, nil @@ -354,24 +355,28 @@ func TestProviderLifecycle(t *testing.T) { Run(mock2.NewArgumentCaptor[*config2.RuleSet](&processor.Mock, "captor1").Capture). Return(nil).Once() }, - assert: func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { + assert: func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { t.Helper() time.Sleep(250 * time.Millisecond) ruleSet := mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor1").Value() assert.Contains(t, ruleSet.Source, "kubernetes:foo:dfb2a2f1-1ad2-4d8c-8456-516fc94abb86") - assert.Equal(t, "1alpha3", ruleSet.Version) + assert.Equal(t, "1alpha4", ruleSet.Version) assert.Equal(t, "test-rule", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) rule := ruleSet.Rules[0] assert.Equal(t, "test", rule.ID) - assert.Equal(t, "http://foo.bar", rule.RuleMatcher.URL) + assert.Equal(t, "http", rule.Matcher.Scheme) + assert.Len(t, rule.Matcher.Hosts, 1) + assert.Equal(t, "foo.bar", rule.Matcher.Hosts[0].Value) + assert.Equal(t, "glob", rule.Matcher.Hosts[0].Type) + assert.Len(t, rule.Matcher.Routes, 1) + assert.Equal(t, "/", rule.Matcher.Routes[0].Path) + assert.Len(t, rule.Matcher.Methods, 1) + assert.Contains(t, rule.Matcher.Methods, http.MethodGet) assert.Equal(t, "baz", rule.Backend.Host) - assert.Equal(t, "glob", rule.RuleMatcher.Strategy) - assert.Len(t, rule.Methods, 1) - assert.Contains(t, rule.Methods, http.MethodGet) assert.Empty(t, rule.ErrorHandler) assert.Len(t, rule.Execute, 2) assert.Equal(t, "authn", rule.Execute[0]["authenticator"]) @@ -383,13 +388,13 @@ func TestProviderLifecycle(t *testing.T) { assert.Len(t, (*statusList)[0].Conditions, 1) condition := (*statusList)[0].Conditions[0] assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActive, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActive, v1alpha4.ConditionReason(condition.Reason)) }, }, { uc: "adding rule set fails", conf: []byte("auth_class: 
bar"), - watchEvent: func(rs v1alpha3.RuleSet, _ int) (watch.Event, error) { + watchEvent: func(rs v1alpha4.RuleSet, _ int) (watch.Event, error) { return watch.Event{Type: watch.Bookmark, Object: &rs}, nil }, setupProcessor: func(t *testing.T, processor *mocks.RuleSetProcessorMock) { @@ -397,7 +402,7 @@ func TestProviderLifecycle(t *testing.T) { processor.EXPECT().OnCreated(mock.Anything).Return(testsupport.ErrTestPurpose).Once() }, - assert: func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, _ *mocks.RuleSetProcessorMock) { + assert: func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, _ *mocks.RuleSetProcessorMock) { t.Helper() time.Sleep(250 * time.Millisecond) @@ -408,13 +413,13 @@ func TestProviderLifecycle(t *testing.T) { assert.Len(t, (*statusList)[0].Conditions, 1) condition := (*statusList)[0].Conditions[0] assert.Equal(t, metav1.ConditionFalse, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActivationFailed, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActivationFailed, v1alpha4.ConditionReason(condition.Reason)) }, }, { uc: "a ruleset is added and then removed", conf: []byte("auth_class: bar"), - watchEvent: func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) { + watchEvent: func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) { switch callIdx { case 1: return watch.Event{Type: watch.Modified, Object: &rs}, nil @@ -424,7 +429,7 @@ func TestProviderLifecycle(t *testing.T) { return watch.Event{Type: watch.Bookmark, Object: &rs}, nil } }, - updateStatus: func(rs v1alpha3.RuleSet, callIdx int) (*metav1.Status, error) { + updateStatus: func(rs v1alpha4.RuleSet, callIdx int) (*metav1.Status, error) { switch callIdx { case 2: return &metav1.Status{ @@ -453,24 +458,28 @@ func TestProviderLifecycle(t *testing.T) { Run(mock2.NewArgumentCaptor[*config2.RuleSet](&processor.Mock, "captor2").Capture). 
Return(nil).Once() }, - assert: func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { + assert: func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { t.Helper() time.Sleep(250 * time.Millisecond) ruleSet := mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor1").Value() assert.Equal(t, "kubernetes:foo:dfb2a2f1-1ad2-4d8c-8456-516fc94abb86", ruleSet.Source) - assert.Equal(t, "1alpha3", ruleSet.Version) + assert.Equal(t, "1alpha4", ruleSet.Version) assert.Equal(t, "test-rule", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) createdRule := ruleSet.Rules[0] assert.Equal(t, "test", createdRule.ID) - assert.Equal(t, "http://foo.bar", createdRule.RuleMatcher.URL) + assert.Equal(t, "http", createdRule.Matcher.Scheme) + assert.Len(t, createdRule.Matcher.Hosts, 1) + assert.Equal(t, "glob", createdRule.Matcher.Hosts[0].Type) + assert.Equal(t, "foo.bar", createdRule.Matcher.Hosts[0].Value) + assert.Len(t, createdRule.Matcher.Routes, 1) + assert.Equal(t, "/", createdRule.Matcher.Routes[0].Path) + assert.Len(t, createdRule.Matcher.Methods, 1) + assert.Contains(t, createdRule.Matcher.Methods, http.MethodGet) assert.Equal(t, "baz", createdRule.Backend.Host) - assert.Equal(t, "glob", createdRule.RuleMatcher.Strategy) - assert.Len(t, createdRule.Methods, 1) - assert.Contains(t, createdRule.Methods, http.MethodGet) assert.Empty(t, createdRule.ErrorHandler) assert.Len(t, createdRule.Execute, 2) assert.Equal(t, "authn", createdRule.Execute[0]["authenticator"]) @@ -478,7 +487,7 @@ func TestProviderLifecycle(t *testing.T) { ruleSet = mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor2").Value() assert.Equal(t, "kubernetes:foo:dfb2a2f1-1ad2-4d8c-8456-516fc94abb86", ruleSet.Source) - assert.Equal(t, "1alpha3", ruleSet.Version) + assert.Equal(t, "1alpha4", ruleSet.Version) assert.Equal(t, "test-rule", ruleSet.Name) assert.Len(t, *statusList, 1) @@ -487,13 +496,13 @@ func TestProviderLifecycle(t *testing.T) { assert.Len(t, (*statusList)[0].Conditions, 1) condition := (*statusList)[0].Conditions[0] assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActive, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActive, v1alpha4.ConditionReason(condition.Reason)) }, }, { uc: "a ruleset is added with failing status update", conf: []byte("auth_class: bar"), - watchEvent: func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) { + watchEvent: func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) { switch callIdx { case 1: rv, err := strconv.Atoi(rs.ResourceVersion) @@ -506,7 +515,7 @@ func TestProviderLifecycle(t *testing.T) { return watch.Event{Type: watch.Bookmark, Object: &rs}, nil } }, - updateStatus: func(_ v1alpha3.RuleSet, _ int) (*metav1.Status, error) { + updateStatus: func(_ v1alpha4.RuleSet, _ int) (*metav1.Status, error) { return nil, errors.New("test error") }, setupProcessor: func(t *testing.T, processor *mocks.RuleSetProcessorMock) { @@ -516,24 +525,28 @@ func TestProviderLifecycle(t *testing.T) { Run(mock2.NewArgumentCaptor[*config2.RuleSet](&processor.Mock, "captor1").Capture). 
Return(nil).Once() }, - assert: func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { + assert: func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { t.Helper() time.Sleep(250 * time.Millisecond) ruleSet := mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor1").Value() assert.Equal(t, "kubernetes:foo:dfb2a2f1-1ad2-4d8c-8456-516fc94abb86", ruleSet.Source) - assert.Equal(t, "1alpha3", ruleSet.Version) + assert.Equal(t, "1alpha4", ruleSet.Version) assert.Equal(t, "test-rule", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) createdRule := ruleSet.Rules[0] assert.Equal(t, "test", createdRule.ID) - assert.Equal(t, "http://foo.bar", createdRule.RuleMatcher.URL) + assert.Equal(t, "http", createdRule.Matcher.Scheme) + assert.Len(t, createdRule.Matcher.Hosts, 1) + assert.Equal(t, "glob", createdRule.Matcher.Hosts[0].Type) + assert.Equal(t, "foo.bar", createdRule.Matcher.Hosts[0].Value) + assert.Len(t, createdRule.Matcher.Routes, 1) + assert.Equal(t, "/", createdRule.Matcher.Routes[0].Path) + assert.Len(t, createdRule.Matcher.Methods, 1) + assert.Contains(t, createdRule.Matcher.Methods, http.MethodGet) assert.Equal(t, "baz", createdRule.Backend.Host) - assert.Equal(t, "glob", createdRule.RuleMatcher.Strategy) - assert.Len(t, createdRule.Methods, 1) - assert.Contains(t, createdRule.Methods, http.MethodGet) assert.Empty(t, createdRule.ErrorHandler) assert.Len(t, createdRule.Execute, 2) assert.Equal(t, "authn", createdRule.Execute[0]["authenticator"]) @@ -545,7 +558,7 @@ func TestProviderLifecycle(t *testing.T) { { uc: "a ruleset is added with conflicting status update", conf: []byte("auth_class: bar"), - watchEvent: func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) { + watchEvent: func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) { switch callIdx { case 1: rv, err := strconv.Atoi(rs.ResourceVersion) @@ -558,7 +571,7 @@ func TestProviderLifecycle(t *testing.T) { return watch.Event{Type: watch.Bookmark, Object: &rs}, nil } }, - updateStatus: func(rs v1alpha3.RuleSet, callIdx int) (*metav1.Status, error) { + updateStatus: func(rs v1alpha4.RuleSet, callIdx int) (*metav1.Status, error) { switch callIdx { case 1: return &metav1.Status{ @@ -583,24 +596,28 @@ func TestProviderLifecycle(t *testing.T) { Run(mock2.NewArgumentCaptor[*config2.RuleSet](&processor.Mock, "captor1").Capture). 
Return(nil).Once() }, - assert: func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { + assert: func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { t.Helper() time.Sleep(250 * time.Millisecond) ruleSet := mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor1").Value() assert.Equal(t, "kubernetes:foo:dfb2a2f1-1ad2-4d8c-8456-516fc94abb86", ruleSet.Source) - assert.Equal(t, "1alpha3", ruleSet.Version) + assert.Equal(t, "1alpha4", ruleSet.Version) assert.Equal(t, "test-rule", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) createdRule := ruleSet.Rules[0] assert.Equal(t, "test", createdRule.ID) - assert.Equal(t, "http://foo.bar", createdRule.RuleMatcher.URL) + assert.Equal(t, "http", createdRule.Matcher.Scheme) + assert.Len(t, createdRule.Matcher.Hosts, 1) + assert.Equal(t, "glob", createdRule.Matcher.Hosts[0].Type) + assert.Equal(t, "foo.bar", createdRule.Matcher.Hosts[0].Value) + assert.Len(t, createdRule.Matcher.Routes, 1) + assert.Equal(t, "/", createdRule.Matcher.Routes[0].Path) + assert.Len(t, createdRule.Matcher.Methods, 1) + assert.Contains(t, createdRule.Matcher.Methods, http.MethodGet) assert.Equal(t, "baz", createdRule.Backend.Host) - assert.Equal(t, "glob", createdRule.RuleMatcher.Strategy) - assert.Len(t, createdRule.Methods, 1) - assert.Contains(t, createdRule.Methods, http.MethodGet) assert.Empty(t, createdRule.ErrorHandler) assert.Len(t, createdRule.Execute, 2) assert.Equal(t, "authn", createdRule.Execute[0]["authenticator"]) @@ -612,13 +629,13 @@ func TestProviderLifecycle(t *testing.T) { assert.Len(t, (*statusList)[0].Conditions, 1) condition := (*statusList)[0].Conditions[0] assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActive, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActive, v1alpha4.ConditionReason(condition.Reason)) }, }, { uc: "removing rule set fails", conf: []byte("auth_class: bar"), - watchEvent: func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) { + watchEvent: func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) { switch callIdx { case 1: return watch.Event{Type: watch.Modified, Object: &rs}, nil @@ -635,7 +652,7 @@ func TestProviderLifecycle(t *testing.T) { processor.EXPECT().OnCreated(mock.Anything).Return(nil).Once() processor.EXPECT().OnDeleted(mock.Anything).Return(testsupport.ErrTestPurpose).Once() }, - assert: func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, _ *mocks.RuleSetProcessorMock) { + assert: func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, _ *mocks.RuleSetProcessorMock) { t.Helper() time.Sleep(250 * time.Millisecond) @@ -645,19 +662,19 @@ func TestProviderLifecycle(t *testing.T) { assert.Len(t, (*statusList)[0].Conditions, 1) condition := (*statusList)[0].Conditions[0] assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActive, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActive, v1alpha4.ConditionReason(condition.Reason)) assert.Equal(t, "1/1", (*statusList)[1].ActiveIn) assert.Len(t, (*statusList)[1].Conditions, 1) condition = (*statusList)[1].Conditions[0] assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetUnloadingFailed, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetUnloadingFailed, v1alpha4.ConditionReason(condition.Reason)) }, }, { uc: 
"a ruleset is added and then updated", conf: []byte("auth_class: bar"), - watchEvent: func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) { + watchEvent: func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) { switch callIdx { case 1: return watch.Event{Type: watch.Modified, Object: &rs}, nil @@ -667,14 +684,16 @@ func TestProviderLifecycle(t *testing.T) { rs.ResourceVersion = strconv.Itoa(rv + 1) rs.Generation++ - rs.Spec = v1alpha3.RuleSetSpec{ + rs.Spec = v1alpha4.RuleSetSpec{ AuthClassName: "bar", Rules: []config2.Rule{ { ID: "test", - RuleMatcher: config2.Matcher{ - URL: "http://foo.bar", - Strategy: "glob", + Matcher: config2.Matcher{ + Routes: []config2.Route{{Path: "/"}}, + Scheme: "http", + Methods: []string{http.MethodGet}, + Hosts: []config2.HostMatcher{{Value: "foo.bar", Type: "glob"}}, }, Backend: &config2.Backend{ Host: "bar", @@ -685,7 +704,6 @@ func TestProviderLifecycle(t *testing.T) { QueryParamsToRemove: []string{"baz"}, }, }, - Methods: []string{http.MethodGet}, Execute: []config.MechanismConfig{ {"authenticator": "test_authn"}, {"authorizer": "test_authz"}, @@ -711,24 +729,28 @@ func TestProviderLifecycle(t *testing.T) { Run(mock2.NewArgumentCaptor[*config2.RuleSet](&processor.Mock, "captor2").Capture). Return(nil).Once() }, - assert: func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { + assert: func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { t.Helper() time.Sleep(250 * time.Millisecond) ruleSet := mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor1").Value() assert.Equal(t, "kubernetes:foo:dfb2a2f1-1ad2-4d8c-8456-516fc94abb86", ruleSet.Source) - assert.Equal(t, "1alpha3", ruleSet.Version) + assert.Equal(t, "1alpha4", ruleSet.Version) assert.Equal(t, "test-rule", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) createdRule := ruleSet.Rules[0] assert.Equal(t, "test", createdRule.ID) - assert.Equal(t, "http://foo.bar", createdRule.RuleMatcher.URL) + assert.Equal(t, "http", createdRule.Matcher.Scheme) + assert.Len(t, createdRule.Matcher.Hosts, 1) + assert.Equal(t, "glob", createdRule.Matcher.Hosts[0].Type) + assert.Equal(t, "foo.bar", createdRule.Matcher.Hosts[0].Value) + assert.Len(t, createdRule.Matcher.Routes, 1) + assert.Equal(t, "/", createdRule.Matcher.Routes[0].Path) + assert.Len(t, createdRule.Matcher.Methods, 1) + assert.Contains(t, createdRule.Matcher.Methods, http.MethodGet) assert.Equal(t, "baz", createdRule.Backend.Host) - assert.Equal(t, "glob", createdRule.RuleMatcher.Strategy) - assert.Len(t, createdRule.Methods, 1) - assert.Contains(t, createdRule.Methods, http.MethodGet) assert.Empty(t, createdRule.ErrorHandler) assert.Len(t, createdRule.Execute, 2) assert.Equal(t, "authn", createdRule.Execute[0]["authenticator"]) @@ -736,17 +758,21 @@ func TestProviderLifecycle(t *testing.T) { ruleSet = mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor2").Value() assert.Equal(t, "kubernetes:foo:dfb2a2f1-1ad2-4d8c-8456-516fc94abb86", ruleSet.Source) - assert.Equal(t, "1alpha3", ruleSet.Version) + assert.Equal(t, "1alpha4", ruleSet.Version) assert.Equal(t, "test-rule", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) updatedRule := ruleSet.Rules[0] assert.Equal(t, "test", updatedRule.ID) - assert.Equal(t, "http://foo.bar", updatedRule.RuleMatcher.URL) + assert.Equal(t, "http", createdRule.Matcher.Scheme) + assert.Len(t, createdRule.Matcher.Hosts, 1) + assert.Equal(t, "glob", createdRule.Matcher.Hosts[0].Type) + assert.Equal(t, 
"foo.bar", createdRule.Matcher.Hosts[0].Value) + assert.Len(t, createdRule.Matcher.Routes, 1) + assert.Equal(t, "/", createdRule.Matcher.Routes[0].Path) + assert.Len(t, createdRule.Matcher.Methods, 1) + assert.Contains(t, createdRule.Matcher.Methods, http.MethodGet) assert.Equal(t, "bar", updatedRule.Backend.Host) - assert.Equal(t, "glob", updatedRule.RuleMatcher.Strategy) - assert.Len(t, updatedRule.Methods, 1) - assert.Contains(t, updatedRule.Methods, http.MethodGet) assert.Empty(t, updatedRule.ErrorHandler) assert.Len(t, updatedRule.Execute, 2) assert.Equal(t, "test_authn", updatedRule.Execute[0]["authenticator"]) @@ -757,19 +783,19 @@ func TestProviderLifecycle(t *testing.T) { assert.Len(t, (*statusList)[0].Conditions, 1) condition := (*statusList)[0].Conditions[0] assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActive, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActive, v1alpha4.ConditionReason(condition.Reason)) assert.Equal(t, "1/1", (*statusList)[1].ActiveIn) assert.Len(t, (*statusList)[1].Conditions, 1) condition = (*statusList)[1].Conditions[0] assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActive, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActive, v1alpha4.ConditionReason(condition.Reason)) }, }, { uc: "a ruleset is added and then updated with a mismatching authClassName", conf: []byte("auth_class: bar"), - watchEvent: func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) { + watchEvent: func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) { switch callIdx { case 1: rs.Status.ActiveIn = "1/1" @@ -800,24 +826,28 @@ func TestProviderLifecycle(t *testing.T) { Run(mock2.NewArgumentCaptor[*config2.RuleSet](&processor.Mock, "captor2").Capture). 
Return(nil).Once() }, - assert: func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { + assert: func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, processor *mocks.RuleSetProcessorMock) { t.Helper() time.Sleep(250 * time.Millisecond) ruleSet := mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor1").Value() assert.Equal(t, "kubernetes:foo:dfb2a2f1-1ad2-4d8c-8456-516fc94abb86", ruleSet.Source) - assert.Equal(t, "1alpha3", ruleSet.Version) + assert.Equal(t, "1alpha4", ruleSet.Version) assert.Equal(t, "test-rule", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) createdRule := ruleSet.Rules[0] assert.Equal(t, "test", createdRule.ID) - assert.Equal(t, "http://foo.bar", createdRule.RuleMatcher.URL) + assert.Equal(t, "http", createdRule.Matcher.Scheme) + assert.Len(t, createdRule.Matcher.Hosts, 1) + assert.Equal(t, "glob", createdRule.Matcher.Hosts[0].Type) + assert.Equal(t, "foo.bar", createdRule.Matcher.Hosts[0].Value) + assert.Len(t, createdRule.Matcher.Routes, 1) + assert.Equal(t, "/", createdRule.Matcher.Routes[0].Path) + assert.Len(t, createdRule.Matcher.Methods, 1) + assert.Contains(t, createdRule.Matcher.Methods, http.MethodGet) assert.Equal(t, "baz", createdRule.Backend.Host) - assert.Equal(t, "glob", createdRule.RuleMatcher.Strategy) - assert.Len(t, createdRule.Methods, 1) - assert.Contains(t, createdRule.Methods, http.MethodGet) assert.Empty(t, createdRule.ErrorHandler) assert.Len(t, createdRule.Execute, 2) assert.Equal(t, "authn", createdRule.Execute[0]["authenticator"]) @@ -825,17 +855,21 @@ func TestProviderLifecycle(t *testing.T) { ruleSet = mock2.ArgumentCaptorFrom[*config2.RuleSet](&processor.Mock, "captor2").Value() assert.Equal(t, "kubernetes:foo:dfb2a2f1-1ad2-4d8c-8456-516fc94abb86", ruleSet.Source) - assert.Equal(t, "1alpha3", ruleSet.Version) + assert.Equal(t, "1alpha4", ruleSet.Version) assert.Equal(t, "test-rule", ruleSet.Name) assert.Len(t, ruleSet.Rules, 1) deleteRule := ruleSet.Rules[0] assert.Equal(t, "test", deleteRule.ID) - assert.Equal(t, "http://foo.bar", deleteRule.RuleMatcher.URL) + assert.Equal(t, "http", createdRule.Matcher.Scheme) + assert.Len(t, createdRule.Matcher.Hosts, 1) + assert.Equal(t, "glob", createdRule.Matcher.Hosts[0].Type) + assert.Equal(t, "foo.bar", createdRule.Matcher.Hosts[0].Value) + assert.Len(t, createdRule.Matcher.Routes, 1) + assert.Equal(t, "/", createdRule.Matcher.Routes[0].Path) + assert.Len(t, createdRule.Matcher.Methods, 1) + assert.Contains(t, createdRule.Matcher.Methods, http.MethodGet) assert.Equal(t, "baz", deleteRule.Backend.Host) - assert.Equal(t, "glob", deleteRule.RuleMatcher.Strategy) - assert.Len(t, deleteRule.Methods, 1) - assert.Contains(t, deleteRule.Methods, http.MethodGet) assert.Empty(t, deleteRule.ErrorHandler) assert.Len(t, deleteRule.Execute, 2) assert.Equal(t, "authn", deleteRule.Execute[0]["authenticator"]) @@ -846,13 +880,13 @@ func TestProviderLifecycle(t *testing.T) { assert.Len(t, (*statusList)[0].Conditions, 1) condition := (*statusList)[0].Conditions[0] assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActive, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActive, v1alpha4.ConditionReason(condition.Reason)) }, }, { uc: "failed updating rule set", conf: []byte("auth_class: bar"), - watchEvent: func(rs v1alpha3.RuleSet, callIdx int) (watch.Event, error) { + watchEvent: func(rs v1alpha4.RuleSet, callIdx int) (watch.Event, error) { switch callIdx { case 1: 
return watch.Event{Type: watch.Modified, Object: &rs}, nil @@ -862,14 +896,16 @@ func TestProviderLifecycle(t *testing.T) { rs.ResourceVersion = strconv.Itoa(rv + 1) rs.Generation++ - rs.Spec = v1alpha3.RuleSetSpec{ + rs.Spec = v1alpha4.RuleSetSpec{ AuthClassName: "bar", Rules: []config2.Rule{ { ID: "test", - RuleMatcher: config2.Matcher{ - URL: "http://foo.bar", - Strategy: "glob", + Matcher: config2.Matcher{ + Routes: []config2.Route{{Path: "/"}}, + Scheme: "http", + Methods: []string{http.MethodGet}, + Hosts: []config2.HostMatcher{{Value: "foo.bar", Type: "glob"}}, }, Backend: &config2.Backend{ Host: "bar", @@ -880,7 +916,6 @@ func TestProviderLifecycle(t *testing.T) { QueryParamsToRemove: []string{"baz"}, }, }, - Methods: []string{http.MethodGet}, Execute: []config.MechanismConfig{ {"authenticator": "test_authn"}, {"authorizer": "test_authz"}, @@ -901,7 +936,7 @@ func TestProviderLifecycle(t *testing.T) { processor.EXPECT().OnCreated(mock.Anything).Return(nil).Once() processor.EXPECT().OnUpdated(mock.Anything).Return(testsupport.ErrTestPurpose).Once() }, - assert: func(t *testing.T, statusList *[]*v1alpha3.RuleSetStatus, _ *mocks.RuleSetProcessorMock) { + assert: func(t *testing.T, statusList *[]*v1alpha4.RuleSetStatus, _ *mocks.RuleSetProcessorMock) { t.Helper() time.Sleep(250 * time.Millisecond) @@ -911,13 +946,13 @@ func TestProviderLifecycle(t *testing.T) { assert.Len(t, (*statusList)[0].Conditions, 1) condition := (*statusList)[0].Conditions[0] assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActive, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActive, v1alpha4.ConditionReason(condition.Reason)) assert.Equal(t, "0/1", (*statusList)[1].ActiveIn) assert.Len(t, (*statusList)[1].Conditions, 1) condition = (*statusList)[1].Conditions[0] assert.Equal(t, metav1.ConditionFalse, condition.Status) - assert.Equal(t, v1alpha3.ConditionRuleSetActivationFailed, v1alpha3.ConditionReason(condition.Reason)) + assert.Equal(t, v1alpha4.ConditionRuleSetActivationFailed, v1alpha4.ConditionReason(condition.Reason)) }, }, } { @@ -927,8 +962,8 @@ func TestProviderLifecycle(t *testing.T) { require.NoError(t, err) handler := &RuleSetResourceHandler{ - rsUpdatedEvt: make(chan v1alpha3.RuleSet, 2), - rsCurrentEvt: make(chan v1alpha3.RuleSet, 2), + rsUpdatedEvt: make(chan v1alpha4.RuleSet, 2), + rsCurrentEvt: make(chan v1alpha4.RuleSet, 2), watchEvent: tc.watchEvent, updateStatus: tc.updateStatus, } diff --git a/internal/rules/repository_impl.go b/internal/rules/repository_impl.go index c846b1f2a..064002dc7 100644 --- a/internal/rules/repository_impl.go +++ b/internal/rules/repository_impl.go @@ -17,265 +17,200 @@ package rules import ( - "bytes" - "context" - "net/url" + "slices" "sync" - "github.com/rs/zerolog" - "github.com/dadrus/heimdall/internal/heimdall" - "github.com/dadrus/heimdall/internal/rules/event" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/errorchain" + "github.com/dadrus/heimdall/internal/x/radixtree" "github.com/dadrus/heimdall/internal/x/slicex" ) -func newRepository( - queue event.RuleSetChangedEventQueue, - ruleFactory rule.Factory, - logger zerolog.Logger, -) *repository { +type repository struct { + dr rule.Rule + + knownRules []rule.Rule + knownRulesMutex sync.Mutex + + index *radixtree.Tree[rule.Route] + rulesTreeMutex sync.RWMutex +} + +func 
newRepository(ruleFactory rule.Factory) rule.Repository { return &repository{ dr: x.IfThenElseExec(ruleFactory.HasDefaultRule(), func() rule.Rule { return ruleFactory.DefaultRule() }, func() rule.Rule { return nil }), - logger: logger, - queue: queue, - quit: make(chan bool), + index: radixtree.New[rule.Route]( + radixtree.WithValuesConstraints(func(oldValues []rule.Route, newValue rule.Route) bool { + // only rules from the same rule set can be placed in one node + return len(oldValues) == 0 || oldValues[0].Rule().SrcID() == newValue.Rule().SrcID() + }), + ), } } -type repository struct { - dr rule.Rule - logger zerolog.Logger - - rules []rule.Rule - mutex sync.RWMutex - - queue event.RuleSetChangedEventQueue - quit chan bool -} - -func (r *repository) FindRule(requestURL *url.URL) (rule.Rule, error) { - r.mutex.RLock() - defer r.mutex.RUnlock() - - for _, rul := range r.rules { - if rul.MatchesURL(requestURL) { - return rul, nil +func (r *repository) FindRule(ctx heimdall.Context) (rule.Rule, error) { + request := ctx.Request() + + r.rulesTreeMutex.RLock() + defer r.rulesTreeMutex.RUnlock() + + entry, err := r.index.Find( + x.IfThenElse(len(request.URL.RawPath) != 0, request.URL.RawPath, request.URL.Path), + radixtree.LookupMatcherFunc[rule.Route](func(route rule.Route, keys, values []string) bool { + return route.Matches(ctx, keys, values) + }), + ) + if err != nil { + if r.dr != nil { + return r.dr, nil } - } - if r.dr != nil { - return r.dr, nil + return nil, errorchain.NewWithMessagef(heimdall.ErrNoRuleFound, + "no applicable rule found for %s", request.URL.String()) } - return nil, errorchain.NewWithMessagef(heimdall.ErrNoRuleFound, - "no applicable rule found for %s", requestURL.String()) -} + request.URL.Captures = entry.Parameters -func (r *repository) Start(_ context.Context) error { - r.logger.Info().Msg("Starting rule definition loader") - - go r.watchRuleSetChanges() - - return nil + return entry.Value.Rule(), nil } -func (r *repository) Stop(_ context.Context) error { - r.logger.Info().Msg("Tearing down rule definition loader") - - r.quit <- true +func (r *repository) AddRuleSet(_ string, rules []rule.Rule) error { + r.knownRulesMutex.Lock() + defer r.knownRulesMutex.Unlock() - close(r.quit) + tmp := r.index.Clone() - return nil -} - -func (r *repository) watchRuleSetChanges() { - for { - select { - case evt, ok := <-r.queue: - if !ok { - r.logger.Debug().Msg("Rule set definition queue closed") - } - - switch evt.ChangeType { - case event.Create: - r.addRuleSet(evt.Source, evt.Rules) - case event.Update: - r.updateRuleSet(evt.Source, evt.Rules) - case event.Remove: - r.deleteRuleSet(evt.Source) - } - case <-r.quit: - r.logger.Info().Msg("Rule definition loader stopped") - - return - } + if err := r.addRulesTo(tmp, rules); err != nil { + return err } -} -func (r *repository) addRuleSet(srcID string, rules []rule.Rule) { - // create rules - r.logger.Info().Str("_src", srcID).Msg("Adding rule set") + r.knownRules = append(r.knownRules, rules...) 
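// A minimal usage sketch of the reworked repository API shown in this hunk, assuming
// the ruleImpl/routeImpl types used by the tests further down in this change set; the
// factory, ctx, source ID and path values are illustrative placeholders only.
//
//	repo := newRepository(factory)                 // picks up the factory's default rule, if any
//	rul := &ruleImpl{id: "login", srcID: "file:rules.yaml"}
//	rul.routes = append(rul.routes, &routeImpl{rule: rul, path: "/login"})
//
//	// AddRuleSet builds the new routes on a clone of the radix tree and swaps it in
//	// under the write lock; it fails if another rule set already owns the same path,
//	// enforced by the WithValuesConstraints option passed to radixtree.New above.
//	if err := repo.AddRuleSet("file:rules.yaml", []rule.Rule{rul}); err != nil {
//		return err
//	}
//
//	// FindRule resolves the request path (RawPath, if set) against the tree and
//	// falls back to the default rule when no route matches.
//	matched, err := repo.FindRule(ctx)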
- r.mutex.Lock() - defer r.mutex.Unlock() + r.rulesTreeMutex.Lock() + r.index = tmp + r.rulesTreeMutex.Unlock() - // add them - r.addRules(rules) + return nil } -func (r *repository) updateRuleSet(srcID string, rules []rule.Rule) { +func (r *repository) UpdateRuleSet(srcID string, rules []rule.Rule) error { // create rules - r.logger.Info().Str("_src", srcID).Msg("Updating rule set") + r.knownRulesMutex.Lock() + defer r.knownRulesMutex.Unlock() // find all rules for the given src id - applicable := func() []rule.Rule { - r.mutex.Lock() - defer r.mutex.Unlock() - - return slicex.Filter(r.rules, func(r rule.Rule) bool { return r.SrcID() == srcID }) - }() - - // find new rules - newRules := slicex.Filter(rules, func(r rule.Rule) bool { - var known bool + applicable := slicex.Filter(r.knownRules, func(r rule.Rule) bool { return r.SrcID() == srcID }) - for _, existing := range applicable { - if existing.ID() == r.ID() { - known = true + // find new rules, as well as those, which have been changed. + toBeAdded := slicex.Filter(rules, func(newRule rule.Rule) bool { + ruleIsNew := !slices.ContainsFunc(applicable, func(existingRule rule.Rule) bool { + return existingRule.SameAs(newRule) + }) - break - } - } + ruleChanged := slices.ContainsFunc(applicable, func(existingRule rule.Rule) bool { + return existingRule.SameAs(newRule) && !existingRule.EqualTo(newRule) + }) - return !known + return ruleIsNew || ruleChanged }) - // find updated rules - updatedRules := slicex.Filter(rules, func(r rule.Rule) bool { - loaded := r.(*ruleImpl) // nolint: forcetypeassert - - var updated bool - - for _, existing := range applicable { - known := existing.(*ruleImpl) // nolint: forcetypeassert + // find deleted rules, as well as those, which have been changed. + toBeDeleted := slicex.Filter(applicable, func(existingRule rule.Rule) bool { + ruleGone := !slices.ContainsFunc(rules, func(newRule rule.Rule) bool { + return newRule.SameAs(existingRule) + }) - if known.id == loaded.id && !bytes.Equal(known.hash, loaded.hash) { - updated = true + ruleChanged := slices.ContainsFunc(rules, func(newRule rule.Rule) bool { + return newRule.SameAs(existingRule) && !newRule.EqualTo(existingRule) + }) - break - } - } - - return updated + return ruleGone || ruleChanged }) - // find deleted rules - deletedRules := slicex.Filter(applicable, func(r rule.Rule) bool { - var present bool + tmp := r.index.Clone() - for _, loaded := range rules { - if loaded.ID() == r.ID() { - present = true + // delete rules + if err := r.removeRulesFrom(tmp, toBeDeleted); err != nil { + return err + } - break - } - } + // add rules + if err := r.addRulesTo(tmp, toBeAdded); err != nil { + return err + } - return !present + r.knownRules = slices.DeleteFunc(r.knownRules, func(loaded rule.Rule) bool { + return slices.Contains(toBeDeleted, loaded) }) + r.knownRules = append(r.knownRules, toBeAdded...) 
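// Sketch of how an update is applied, using the same placeholder names as above: a rule
// present in both the old and the new set but with different content (SameAs is true,
// EqualTo is false) ends up in toBeDeleted as well as in toBeAdded, so its routes are
// removed from and then re-added to the cloned tree; untouched rules stay as they are,
// and the new tree only replaces the old one after both passes have succeeded.
//
//	v1 := &ruleImpl{id: "login", srcID: "file:rules.yaml", hash: []byte{1}}
//	v1.routes = append(v1.routes, &routeImpl{rule: v1, path: "/login"})
//	_ = repo.AddRuleSet("file:rules.yaml", []rule.Rule{v1})
//
//	v2 := &ruleImpl{id: "login", srcID: "file:rules.yaml", hash: []byte{2}}  // same rule, new definition
//	v2.routes = append(v2.routes, &routeImpl{rule: v2, path: "/signin"})     // /login disappears, /signin is registered
//	_ = repo.UpdateRuleSet("file:rules.yaml", []rule.Rule{v2})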
- func() { - r.mutex.Lock() - defer r.mutex.Unlock() - - // remove deleted rules - r.removeRules(deletedRules) - - // replace updated rules - r.replaceRules(updatedRules) + r.rulesTreeMutex.Lock() + r.index = tmp + r.rulesTreeMutex.Unlock() - // add new rules - r.addRules(newRules) - }() + return nil } -func (r *repository) deleteRuleSet(srcID string) { - r.logger.Info().Str("_src", srcID).Msg("Deleting rule set") - - r.mutex.Lock() - defer r.mutex.Unlock() +func (r *repository) DeleteRuleSet(srcID string) error { + r.knownRulesMutex.Lock() + defer r.knownRulesMutex.Unlock() // find all rules for the given src id - applicable := slicex.Filter(r.rules, func(r rule.Rule) bool { return r.SrcID() == srcID }) - - // remove them - r.removeRules(applicable) -} + applicable := slicex.Filter(r.knownRules, func(r rule.Rule) bool { return r.SrcID() == srcID }) -func (r *repository) addRules(rules []rule.Rule) { - for _, rul := range rules { - r.rules = append(r.rules, rul) + tmp := r.index.Clone() - r.logger.Debug().Str("_src", rul.SrcID()).Str("_id", rul.ID()).Msg("Rule added") + // remove them + if err := r.removeRulesFrom(tmp, applicable); err != nil { + return err } -} -func (r *repository) removeRules(rules []rule.Rule) { - // find all indexes for affected rules - var idxs []int + r.knownRules = slices.DeleteFunc(r.knownRules, func(r rule.Rule) bool { + return slices.Contains(applicable, r) + }) - for idx, rul := range r.rules { - for _, tbd := range rules { - if rul.SrcID() == tbd.SrcID() && rul.ID() == tbd.ID() { - idxs = append(idxs, idx) + r.rulesTreeMutex.Lock() + r.index = tmp + r.rulesTreeMutex.Unlock() - r.logger.Debug().Str("_src", rul.SrcID()).Str("_id", rul.ID()).Msg("Rule removed") + return nil +} + +func (r *repository) addRulesTo(tree *radixtree.Tree[rule.Route], rules []rule.Rule) error { + for _, rul := range rules { + for _, route := range rul.Routes() { + if err := tree.Add( + route.Path(), + route, + radixtree.WithBacktracking[rule.Route](rul.AllowsBacktracking()), + ); err != nil { + return errorchain.NewWithMessagef(heimdall.ErrInternal, "failed adding rule ID='%s'", rul.ID()). + CausedBy(err) } } } - // if all rules should be dropped, just create a new slice - if len(idxs) == len(r.rules) { - r.rules = nil - - return - } - - // move the elements from the end of the rules slice to the found positions - // and set the corresponding "emptied" values to nil - for i, idx := range idxs { - tailIdx := len(r.rules) - (1 + i) - - r.rules[idx] = r.rules[tailIdx] - - // the below re-slice preserves the capacity of the slice. - // this is required to avoid memory leaks - r.rules[tailIdx] = nil - } - - // re-slice - r.rules = r.rules[:len(r.rules)-len(idxs)] + return nil } -func (r *repository) replaceRules(rules []rule.Rule) { - for _, updated := range rules { - for idx, existing := range r.rules { - if updated.SrcID() == existing.SrcID() && existing.ID() == updated.ID() { - r.rules[idx] = updated - - r.logger.Debug(). - Str("_src", existing.SrcID()). - Str("_id", existing.ID()). - Msg("Rule updated") - - break +func (r *repository) removeRulesFrom(tree *radixtree.Tree[rule.Route], tbdRules []rule.Rule) error { + for _, rul := range tbdRules { + for _, route := range rul.Routes() { + if err := tree.Delete( + route.Path(), + radixtree.ValueMatcherFunc[rule.Route](func(route rule.Route) bool { + return route.Rule().SameAs(rul) + }), + ); err != nil { + return errorchain.NewWithMessagef(heimdall.ErrInternal, "failed deleting rule ID='%s'", rul.ID()). 
+ CausedBy(err) } } } + + return nil } diff --git a/internal/rules/repository_impl_test.go b/internal/rules/repository_impl_test.go index 9d84f17ee..6db22388d 100644 --- a/internal/rules/repository_impl_test.go +++ b/internal/rules/repository_impl_test.go @@ -18,45 +18,262 @@ package rules import ( "context" + "net/http" "net/url" "testing" - "time" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/dadrus/heimdall/internal/heimdall" - "github.com/dadrus/heimdall/internal/rules/event" - "github.com/dadrus/heimdall/internal/rules/patternmatcher" + mocks2 "github.com/dadrus/heimdall/internal/heimdall/mocks" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/rules/rule/mocks" "github.com/dadrus/heimdall/internal/x" + "github.com/dadrus/heimdall/internal/x/radixtree" ) -func TestRepositoryAddAndRemoveRulesFromSameRuleSet(t *testing.T) { +func TestRepositoryAddRuleSetWithoutViolation(t *testing.T) { t.Parallel() // GIVEN - repo := newRepository(nil, &ruleFactory{}, *zerolog.Ctx(context.Background())) + repo := newRepository(&ruleFactory{}).(*repository) //nolint: forcetypeassert + + rule1 := &ruleImpl{id: "1", srcID: "1"} + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/foo/1"}) + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/foo/2"}) + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/foo/3"}) + + rules := []rule.Rule{rule1} + + // WHEN + err := repo.AddRuleSet("1", rules) + + // THEN + require.NoError(t, err) + assert.Len(t, repo.knownRules, 1) + assert.False(t, repo.index.Empty()) + assert.ElementsMatch(t, repo.knownRules, rules) + + _, err = repo.index.Find("/foo/1", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) + _, err = repo.index.Find("/foo/2", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) + _, err = repo.index.Find("/foo/3", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) +} + +func TestRepositoryAddRuleSetWithViolation(t *testing.T) { + t.Parallel() + + // GIVEN + repo := newRepository(&ruleFactory{}).(*repository) //nolint: forcetypeassert + + rule1 := &ruleImpl{id: "1", srcID: "1"} + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/foo/1"}) + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/foo/2"}) + + rule2 := &ruleImpl{id: "2", srcID: "2"} + rule2.routes = append(rule2.routes, &routeImpl{rule: rule2, path: "/foo/1"}) + + rules1 := []rule.Rule{rule1} + rules2 := []rule.Rule{rule2} + + require.NoError(t, repo.AddRuleSet("1", rules1)) + + // WHEN + err := repo.AddRuleSet("2", rules2) + + // THEN + require.Error(t, err) + require.ErrorIs(t, err, radixtree.ErrConstraintsViolation) + + assert.Len(t, repo.knownRules, 1) + assert.False(t, repo.index.Empty()) + assert.ElementsMatch(t, repo.knownRules, rules1) + _, err = repo.index.Find("/foo/1", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) + _, err = repo.index.Find("/foo/1", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) +} + +func 
TestRepositoryRemoveRuleSet(t *testing.T) { + t.Parallel() + + // GIVEN + repo := newRepository(&ruleFactory{}).(*repository) //nolint: forcetypeassert + + rule1 := &ruleImpl{id: "1", srcID: "1"} + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/foo/1"}) + + rule2 := &ruleImpl{id: "2", srcID: "1"} + rule2.routes = append(rule2.routes, &routeImpl{rule: rule2, path: "/foo/2"}) + + rule3 := &ruleImpl{id: "3", srcID: "1"} + rule3.routes = append(rule3.routes, &routeImpl{rule: rule3, path: "/foo/4"}) + + rule4 := &ruleImpl{id: "4", srcID: "1"} + rule4.routes = append(rule4.routes, &routeImpl{rule: rule4, path: "/foo/4"}) + + rules := []rule.Rule{rule1, rule2, rule3, rule4} + + require.NoError(t, repo.AddRuleSet("1", rules)) + assert.Len(t, repo.knownRules, 4) + assert.False(t, repo.index.Empty()) // WHEN - repo.addRuleSet("bar", []rule.Rule{ - &ruleImpl{id: "1", srcID: "bar"}, - &ruleImpl{id: "2", srcID: "bar"}, - &ruleImpl{id: "3", srcID: "bar"}, - &ruleImpl{id: "4", srcID: "bar"}, - }) + err := repo.DeleteRuleSet("1") // THEN - assert.Len(t, repo.rules, 4) + require.NoError(t, err) + assert.Empty(t, repo.knownRules) + assert.True(t, repo.index.Empty()) +} + +func TestRepositoryRemoveRulesFromDifferentRuleSets(t *testing.T) { + t.Parallel() + + // GIVEN + repo := newRepository(&ruleFactory{}).(*repository) //nolint: forcetypeassert + + rule1 := &ruleImpl{id: "1", srcID: "bar"} + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/bar/1"}) + + rule2 := &ruleImpl{id: "3", srcID: "bar"} + rule2.routes = append(rule2.routes, &routeImpl{rule: rule2, path: "/bar/3"}) + + rule3 := &ruleImpl{id: "4", srcID: "bar"} + rule3.routes = append(rule3.routes, &routeImpl{rule: rule3, path: "/bar/4"}) + + rule4 := &ruleImpl{id: "2", srcID: "baz"} + rule4.routes = append(rule4.routes, &routeImpl{rule: rule4, path: "/baz/2"}) + + rule5 := &ruleImpl{id: "4", srcID: "foo"} + rule5.routes = append(rule5.routes, &routeImpl{rule: rule5, path: "/foo/4"}) + + rules1 := []rule.Rule{rule1, rule2, rule3} + rules2 := []rule.Rule{rule4} + rules3 := []rule.Rule{rule5} + + // WHEN + require.NoError(t, repo.AddRuleSet("bar", rules1)) + require.NoError(t, repo.AddRuleSet("baz", rules2)) + require.NoError(t, repo.AddRuleSet("foo", rules3)) + + // THEN + assert.Len(t, repo.knownRules, 5) + assert.False(t, repo.index.Empty()) + + // WHEN + err := repo.DeleteRuleSet("bar") + + // THEN + require.NoError(t, err) + assert.Len(t, repo.knownRules, 2) + assert.ElementsMatch(t, repo.knownRules, []rule.Rule{rules2[0], rules3[0]}) + + _, err = repo.index.Find("/bar/1", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + assert.Error(t, err) //nolint:testifylint + + _, err = repo.index.Find("/bar/3", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + assert.Error(t, err) //nolint:testifylint + + _, err = repo.index.Find("/bar/4", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + assert.Error(t, err) //nolint:testifylint + + _, err = repo.index.Find("/baz/2", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + assert.NoError(t, err) //nolint:testifylint + + _, err = repo.index.Find("/foo/4", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + assert.NoError(t, err) //nolint:testifylint + + // WHEN + err = repo.DeleteRuleSet("foo") + + // THEN + require.NoError(t, err) + 
assert.Len(t, repo.knownRules, 1) + assert.ElementsMatch(t, repo.knownRules, []rule.Rule{rules2[0]}) + + _, err = repo.index.Find("/foo/4", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + assert.Error(t, err) //nolint:testifylint + + _, err = repo.index.Find("/baz/2", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + assert.NoError(t, err) //nolint:testifylint + + // WHEN + err = repo.DeleteRuleSet("baz") + + // THEN + require.NoError(t, err) + assert.Empty(t, repo.knownRules) + assert.True(t, repo.index.Empty()) +} + +func TestRepositoryUpdateRuleSet(t *testing.T) { + t.Parallel() + + // GIVEN + repo := newRepository(&ruleFactory{}).(*repository) //nolint: forcetypeassert + + rule1 := &ruleImpl{id: "1", srcID: "1", hash: []byte{1}} + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/bar/1"}) + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/bar/1a"}) + + rule2 := &ruleImpl{id: "2", srcID: "1", hash: []byte{1}} + rule2.routes = append(rule2.routes, &routeImpl{rule: rule2, path: "/bar/2"}) + + rule3 := &ruleImpl{id: "3", srcID: "1", hash: []byte{1}} + rule3.routes = append(rule3.routes, &routeImpl{rule: rule3, path: "/bar/3"}) + + rule4 := &ruleImpl{id: "4", srcID: "1", hash: []byte{1}} + rule4.routes = append(rule4.routes, &routeImpl{rule: rule4, path: "/bar/4"}) + + initialRules := []rule.Rule{rule1, rule2, rule3, rule4} + + require.NoError(t, repo.AddRuleSet("1", initialRules)) + + // rule 1 changed: /bar/1a gone, /bar/1b added + rule1 = &ruleImpl{id: "1", srcID: "1", hash: []byte{2}} + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/bar/1"}) + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/bar/1b"}) + // rule with id 2 is deleted + // rule 3 changed: /bar/2 gone, /foo/3 and /foo/4 added + rule3 = &ruleImpl{id: "3", srcID: "1", hash: []byte{2}} + rule3.routes = append(rule3.routes, &routeImpl{rule: rule3, path: "/foo/3"}) + rule3.routes = append(rule3.routes, &routeImpl{rule: rule3, path: "/foo/4"}) + // rule 4 same as before + + updatedRules := []rule.Rule{rule1, rule3, rule4} // WHEN - repo.deleteRuleSet("bar") + err := repo.UpdateRuleSet("1", updatedRules) // THEN - assert.Empty(t, repo.rules) + require.NoError(t, err) + + assert.Len(t, repo.knownRules, 3) + assert.False(t, repo.index.Empty()) + + _, err = repo.index.Find("/bar/1", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) + _, err = repo.index.Find("/bar/1a", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.Error(t, err) + _, err = repo.index.Find("/bar/1b", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) + + _, err = repo.index.Find("/bar/2", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.Error(t, err) + + _, err = repo.index.Find("/bar/3", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.Error(t, err) + _, err = repo.index.Find("/foo/3", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) + _, err = repo.index.Find("/foo/4", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) + + _, err = 
repo.index.Find("/bar/4", radixtree.LookupMatcherFunc[rule.Route](func(_ rule.Route, _, _ []string) bool { return true })) + require.NoError(t, err) } func TestRepositoryFindRule(t *testing.T) { @@ -70,7 +287,7 @@ func TestRepositoryFindRule(t *testing.T) { assert func(t *testing.T, err error, rul rule.Rule) }{ { - uc: "no matching rule without default rule", + uc: "no matching rule", requestURL: &url.URL{Scheme: "http", Host: "foo.bar", Path: "/baz"}, configureFactory: func(t *testing.T, factory *mocks.FactoryMock) { t.Helper() @@ -85,7 +302,7 @@ func TestRepositoryFindRule(t *testing.T) { }, }, { - uc: "no matching rule with default rule", + uc: "matches default rule", requestURL: &url.URL{Scheme: "http", Host: "foo.bar", Path: "/baz"}, configureFactory: func(t *testing.T, factory *mocks.FactoryMock) { t.Helper() @@ -101,8 +318,8 @@ func TestRepositoryFindRule(t *testing.T) { }, }, { - uc: "matching rule", - requestURL: &url.URL{Scheme: "http", Host: "foo.bar", Path: "/baz"}, + uc: "matches upstream rule", + requestURL: &url.URL{Scheme: "http", Host: "foo.bar", Path: "/baz/bar"}, configureFactory: func(t *testing.T, factory *mocks.FactoryMock) { t.Helper() @@ -111,28 +328,11 @@ func TestRepositoryFindRule(t *testing.T) { addRules: func(t *testing.T, repo *repository) { t.Helper() - repo.rules = append(repo.rules, - &ruleImpl{ - id: "test1", - srcID: "bar", - urlMatcher: func() patternmatcher.PatternMatcher { - matcher, _ := patternmatcher.NewPatternMatcher("glob", - "http://heimdall.test.local/baz") - - return matcher - }(), - }, - &ruleImpl{ - id: "test2", - srcID: "baz", - urlMatcher: func() patternmatcher.PatternMatcher { - matcher, _ := patternmatcher.NewPatternMatcher("glob", - "http://foo.bar/baz") - - return matcher - }(), - }, - ) + rule1 := &ruleImpl{id: "test2", srcID: "baz", hash: []byte{1}} + rule1.routes = append(rule1.routes, &routeImpl{rule: rule1, path: "/baz/bar", matcher: compositeMatcher{}}) + + err := repo.AddRuleSet("baz", []rule.Rule{rule1}) + require.NoError(t, err) }, assert: func(t *testing.T, err error, rul rule.Rule) { t.Helper() @@ -156,206 +356,20 @@ func TestRepositoryFindRule(t *testing.T) { factory := mocks.NewFactoryMock(t) tc.configureFactory(t, factory) - repo := newRepository(nil, factory, *zerolog.Ctx(context.Background())) + repo := newRepository(factory).(*repository) //nolint: forcetypeassert addRules(t, repo) - // WHEN - rul, err := repo.FindRule(tc.requestURL) - - // THEN - tc.assert(t, err, rul) - }) - } -} - -func TestRepositoryAddAndRemoveRulesFromDifferentRuleSets(t *testing.T) { - t.Parallel() - - // GIVEN - repo := newRepository(nil, &ruleFactory{}, *zerolog.Ctx(context.Background())) - - // WHEN - repo.addRules([]rule.Rule{ - &ruleImpl{id: "1", srcID: "bar"}, - &ruleImpl{id: "2", srcID: "baz"}, - &ruleImpl{id: "3", srcID: "bar"}, - &ruleImpl{id: "4", srcID: "bar"}, - &ruleImpl{id: "4", srcID: "foo"}, - }) - - // THEN - assert.Len(t, repo.rules, 5) - - // WHEN - repo.deleteRuleSet("bar") - - // THEN - assert.Len(t, repo.rules, 2) - assert.ElementsMatch(t, repo.rules, []rule.Rule{ - &ruleImpl{id: "2", srcID: "baz"}, - &ruleImpl{id: "4", srcID: "foo"}, - }) - - // WHEN - repo.deleteRuleSet("foo") - - // THEN - assert.Len(t, repo.rules, 1) - assert.ElementsMatch(t, repo.rules, []rule.Rule{ - &ruleImpl{id: "2", srcID: "baz"}, - }) - - // WHEN - repo.deleteRuleSet("baz") - - // THEN - assert.Empty(t, repo.rules) -} - -func TestRepositoryRuleSetLifecycleManagement(t *testing.T) { - t.Parallel() - - for _, tc := range []struct { - uc string - 
events []event.RuleSetChanged - assert func(t *testing.T, repo *repository) - }{ - { - uc: "empty rule set definition", - events: []event.RuleSetChanged{{Source: "test", ChangeType: event.Create}}, - assert: func(t *testing.T, repo *repository) { - t.Helper() - - assert.Empty(t, repo.rules) - }, - }, - { - uc: "rule set with one rule", - events: []event.RuleSetChanged{ - { - Source: "test", - ChangeType: event.Create, - Rules: []rule.Rule{&ruleImpl{id: "rule:foo", srcID: "test"}}, - }, - }, - assert: func(t *testing.T, repo *repository) { - t.Helper() - - assert.Len(t, repo.rules, 1) - assert.Equal(t, &ruleImpl{id: "rule:foo", srcID: "test"}, repo.rules[0]) - }, - }, - { - uc: "multiple rule sets", - events: []event.RuleSetChanged{ - { - Source: "test1", - ChangeType: event.Create, - Rules: []rule.Rule{&ruleImpl{id: "rule:bar", srcID: "test1"}}, - }, - { - Source: "test2", - ChangeType: event.Create, - Rules: []rule.Rule{&ruleImpl{id: "rule:foo", srcID: "test2"}}, - }, - }, - assert: func(t *testing.T, repo *repository) { - t.Helper() - - assert.Len(t, repo.rules, 2) - assert.Equal(t, &ruleImpl{id: "rule:bar", srcID: "test1"}, repo.rules[0]) - assert.Equal(t, &ruleImpl{id: "rule:foo", srcID: "test2"}, repo.rules[1]) - }, - }, - { - uc: "multiple rule sets created and one of these deleted", - events: []event.RuleSetChanged{ - { - Source: "test1", - ChangeType: event.Create, - Rules: []rule.Rule{&ruleImpl{id: "rule:bar", srcID: "test1"}}, - }, - { - Source: "test2", - ChangeType: event.Create, - Rules: []rule.Rule{&ruleImpl{id: "rule:foo", srcID: "test2"}}, - }, - { - Source: "test2", - ChangeType: event.Remove, - }, - }, - assert: func(t *testing.T, repo *repository) { - t.Helper() - - assert.Len(t, repo.rules, 1) - assert.Equal(t, &ruleImpl{id: "rule:bar", srcID: "test1"}, repo.rules[0]) - }, - }, - { - uc: "multiple rule sets created and one updated", - events: []event.RuleSetChanged{ - { - Source: "test1", - ChangeType: event.Create, - Rules: []rule.Rule{&ruleImpl{id: "rule:bar", srcID: "test1"}}, - }, - { - Source: "test2", - ChangeType: event.Create, - Rules: []rule.Rule{ - &ruleImpl{id: "rule:bar", srcID: "test2", hash: []byte{1}}, - &ruleImpl{id: "rule:foo2", srcID: "test2", hash: []byte{2}}, - &ruleImpl{id: "rule:foo3", srcID: "test2", hash: []byte{3}}, - &ruleImpl{id: "rule:foo4", srcID: "test2", hash: []byte{4}}, - }, - }, - { - Source: "test2", - ChangeType: event.Update, - Rules: []rule.Rule{ - &ruleImpl{id: "rule:bar", srcID: "test2", hash: []byte{5}}, // updated - &ruleImpl{id: "rule:foo2", srcID: "test2", hash: []byte{2}}, // as before - // &ruleImpl{id: "rule:foo3", srcID: "test2", hash: []byte{3}}, // deleted - &ruleImpl{id: "rule:foo4", srcID: "test2", hash: []byte{4}}, // as before - }, - }, - }, - assert: func(t *testing.T, repo *repository) { - t.Helper() - - require.Len(t, repo.rules, 4) - assert.ElementsMatch(t, repo.rules, []rule.Rule{ - &ruleImpl{id: "rule:bar", srcID: "test1"}, - &ruleImpl{id: "rule:bar", srcID: "test2", hash: []byte{5}}, - &ruleImpl{id: "rule:foo2", srcID: "test2", hash: []byte{2}}, - &ruleImpl{id: "rule:foo4", srcID: "test2", hash: []byte{4}}, - }) - }, - }, - } { - t.Run("case="+tc.uc, func(t *testing.T) { - // GIVEN - ctx := context.Background() - - queue := make(event.RuleSetChangedEventQueue, 10) - defer close(queue) - - repo := newRepository(queue, &ruleFactory{}, log.Logger) - require.NoError(t, repo.Start(ctx)) - - defer repo.Stop(ctx) + req := &heimdall.Request{Method: http.MethodGet, URL: &heimdall.URL{URL: *tc.requestURL}} + ctx := 
mocks2.NewContextMock(t) + ctx.EXPECT().AppContext().Maybe().Return(context.TODO()) + ctx.EXPECT().Request().Return(req) // WHEN - for _, evt := range tc.events { - queue <- evt - } - - time.Sleep(100 * time.Millisecond) + rul, err := repo.FindRule(ctx) // THEN - tc.assert(t, repo) + tc.assert(t, err, rul) }) } } diff --git a/internal/rules/route_matcher.go b/internal/rules/route_matcher.go new file mode 100644 index 000000000..bb1b3a531 --- /dev/null +++ b/internal/rules/route_matcher.go @@ -0,0 +1,226 @@ +// Copyright 2024 Dimitrij Drus +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +package rules + +import ( + "errors" + "net/http" + "net/url" + "slices" + "strings" + + "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/rules/config" + "github.com/dadrus/heimdall/internal/x/errorchain" + "github.com/dadrus/heimdall/internal/x/slicex" +) + +var ( + ErrRequestSchemeMismatch = errors.New("request scheme mismatch") + ErrRequestMethodMismatch = errors.New("request method mismatch") + ErrRequestHostMismatch = errors.New("request host mismatch") + ErrRequestPathMismatch = errors.New("request path mismatch") +) + +type RouteMatcher interface { + Matches(request *heimdall.Request, keys, values []string) error +} + +type compositeMatcher []RouteMatcher + +func (c compositeMatcher) Matches(request *heimdall.Request, keys, values []string) error { + for _, matcher := range c { + if err := matcher.Matches(request, keys, values); err != nil { + return err + } + } + + return nil +} + +type schemeMatcher string + +func (s schemeMatcher) Matches(request *heimdall.Request, _, _ []string) error { + if len(s) != 0 && string(s) != request.URL.Scheme { + return errorchain.NewWithMessagef(ErrRequestSchemeMismatch, "expected '%s', got '%s'", s, request.URL.Scheme) + } + + return nil +} + +type methodMatcher []string + +func (m methodMatcher) Matches(request *heimdall.Request, _, _ []string) error { + if len(m) == 0 { + return nil + } + + if !slices.Contains(m, request.Method) { + return errorchain.NewWithMessagef(ErrRequestMethodMismatch, "'%s' is not expected", request.Method) + } + + return nil +} + +type hostMatcher struct { + typedMatcher +} + +func (m *hostMatcher) Matches(request *heimdall.Request, _, _ []string) error { + if !m.match(request.URL.Host) { + return errorchain.NewWithMessagef(ErrRequestHostMismatch, "'%s' is not expected", request.URL.Host) + } + + return nil +} + +type pathParamMatcher struct { + typedMatcher + + name string + slashHandling config.EncodedSlashesHandling +} + +func (m *pathParamMatcher) Matches(request *heimdall.Request, keys, values []string) error { + idx := slices.Index(keys, m.name) + if idx == -1 { + return errorchain.NewWithMessagef(ErrRequestPathMismatch, "path parameter '%s' is not expected", m.name) + } + + value := values[idx] + // URL.RawPath is set only if the original url contains url encoded parts + if len(request.URL.RawPath) != 0 { + 
switch m.slashHandling { + case config.EncodedSlashesOff: + if strings.Contains(request.URL.RawPath, "%2F") { + return errorchain.NewWithMessage(ErrRequestPathMismatch, + "request path contains encoded slashes which are not allowed") + } + case config.EncodedSlashesOn: + value, _ = url.PathUnescape(value) + default: + unescaped, _ := url.PathUnescape(strings.ReplaceAll(value, "%2F", "$$$escaped-slash$$$")) + value = strings.ReplaceAll(unescaped, "$$$escaped-slash$$$", "%2F") + } + } + + if !m.match(value) { + return errorchain.NewWithMessagef(ErrRequestPathMismatch, + "captured value '%s' for path parameter '%s' is not expected", value, m.name) + } + + return nil +} + +func createMethodMatcher(methods []string) (methodMatcher, error) { + if len(methods) == 0 { + return methodMatcher{}, nil + } + + if slices.Contains(methods, "ALL") { + methods = slices.DeleteFunc(methods, func(method string) bool { return method == "ALL" }) + + methods = append(methods, + http.MethodGet, http.MethodHead, http.MethodPost, http.MethodPut, http.MethodPatch, + http.MethodDelete, http.MethodConnect, http.MethodOptions, http.MethodTrace) + } + + slices.SortFunc(methods, strings.Compare) + + methods = slices.Compact(methods) + if res := slicex.Filter(methods, func(s string) bool { return len(s) == 0 }); len(res) != 0 { + return nil, errorchain.NewWithMessage(heimdall.ErrConfiguration, + "methods list contains empty values. "+ + "have you forgotten to put the corresponding value into braces?") + } + + tbr := slicex.Filter(methods, func(s string) bool { return strings.HasPrefix(s, "!") }) + methods = slicex.Subtract(methods, tbr) + tbr = slicex.Map[string, string](tbr, func(s string) string { return strings.TrimPrefix(s, "!") }) + + return slicex.Subtract(methods, tbr), nil +} + +func createHostMatcher(hosts []config.HostMatcher) (RouteMatcher, error) { + matchers := make(compositeMatcher, len(hosts)) + + for idx, host := range hosts { + var ( + tm typedMatcher + err error + ) + + switch host.Type { + case "glob": + tm, err = newGlobMatcher(host.Value, '.') + case "regex": + tm, err = newRegexMatcher(host.Value) + case "exact": + tm = newExactMatcher(host.Value) + default: + return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, + "unsupported host matching expression type '%s' at index %d", host.Type, idx) + } + + if err != nil { + return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, + "failed to compile host matching expression at index %d", idx).CausedBy(err) + } + + matchers[idx] = &hostMatcher{tm} + } + + return matchers, nil +} + +func createPathParamsMatcher( + params []config.ParameterMatcher, + esh config.EncodedSlashesHandling, +) (RouteMatcher, error) { + matchers := make(compositeMatcher, len(params)) + + for idx, param := range params { + var ( + tm typedMatcher + err error + ) + + switch param.Type { + case "glob": + tm, err = newGlobMatcher(param.Value, '/') + case "regex": + tm, err = newRegexMatcher(param.Value) + case "exact": + tm = newExactMatcher(param.Value) + default: + return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, + "unsupported path parameter expression type '%s' for parameter '%s' at index %d", + param.Type, param.Name, idx) + } + + if err != nil { + return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, + "failed to compile path params matching expression for parameter '%s' at index %d", + param.Name, idx). 
+ CausedBy(err) + } + + matchers[idx] = &pathParamMatcher{tm, param.Name, esh} + } + + return matchers, nil +} diff --git a/internal/rules/route_matcher_test.go b/internal/rules/route_matcher_test.go new file mode 100644 index 000000000..9ad4b047e --- /dev/null +++ b/internal/rules/route_matcher_test.go @@ -0,0 +1,533 @@ +// Copyright 2024 Dimitrij Drus +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +package rules + +import ( + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/dadrus/heimdall/internal/heimdall" + "github.com/dadrus/heimdall/internal/rules/config" +) + +func TestCreateMethodMatcher(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + configured []string + expected methodMatcher + shouldError bool + }{ + { + uc: "empty configuration", + expected: methodMatcher{}, + }, + { + uc: "empty method in list", + configured: []string{"FOO", ""}, + shouldError: true, + }, + { + uc: "duplicates should be removed", + configured: []string{"BAR", "BAZ", "BAZ", "FOO", "FOO", "ZAB"}, + expected: methodMatcher{"BAR", "BAZ", "FOO", "ZAB"}, + }, + { + uc: "only ALL configured", + configured: []string{"ALL"}, + expected: methodMatcher{ + http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, + http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace, + }, + }, + { + uc: "ALL without POST and TRACE", + configured: []string{"ALL", "!POST", "!TRACE"}, + expected: methodMatcher{ + http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, + http.MethodOptions, http.MethodPatch, http.MethodPut, + }, + }, + { + uc: "ALL with duplicates and without POST and TRACE", + configured: []string{"ALL", "GET", "!POST", "!TRACE", "!TRACE"}, + expected: methodMatcher{ + http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, + http.MethodOptions, http.MethodPatch, http.MethodPut, + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + // WHEN + res, err := createMethodMatcher(tc.configured) + + // THEN + if tc.shouldError { + require.Error(t, err) + } else { + require.Equal(t, tc.expected, res) + } + }) + } +} + +func TestCreateHostMatcher(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + conf []config.HostMatcher + assert func(t *testing.T, matcher RouteMatcher, err error) + }{ + { + uc: "empty configuration", + assert: func(t *testing.T, matcher RouteMatcher, err error) { + t.Helper() + + require.NoError(t, err) + assert.IsType(t, compositeMatcher{}, matcher) + assert.Empty(t, matcher) + }, + }, + { + uc: "valid glob expression", + conf: []config.HostMatcher{{Value: "/**", Type: "glob"}}, + assert: func(t *testing.T, matcher RouteMatcher, err error) { + t.Helper() + + require.NoError(t, err) + assert.IsType(t, compositeMatcher{}, matcher) + assert.Len(t, matcher, 1) + + hms := matcher.(compositeMatcher) + 
assert.IsType(t, &hostMatcher{}, hms[0]) + assert.IsType(t, &globMatcher{}, hms[0].(*hostMatcher).typedMatcher) + }, + }, + { + uc: "invalid glob expression", + conf: []config.HostMatcher{{Value: "!*][)(*", Type: "glob"}}, + assert: func(t *testing.T, _ RouteMatcher, err error) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "failed to compile host matching expression at index 0") + }, + }, + { + uc: "valid regex expression", + conf: []config.HostMatcher{{Value: ".*", Type: "regex"}}, + assert: func(t *testing.T, matcher RouteMatcher, err error) { + t.Helper() + + require.NoError(t, err) + assert.IsType(t, compositeMatcher{}, matcher) + assert.Len(t, matcher, 1) + + hms := matcher.(compositeMatcher) + assert.IsType(t, &hostMatcher{}, hms[0]) + assert.IsType(t, ®expMatcher{}, hms[0].(*hostMatcher).typedMatcher) + }, + }, + { + uc: "invalid regex expression", + conf: []config.HostMatcher{{Value: "?>?<*??", Type: "regex"}}, + assert: func(t *testing.T, _ RouteMatcher, err error) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "failed to compile host matching expression at index 0") + }, + }, + { + uc: "exact expression", + conf: []config.HostMatcher{{Value: "?>?<*??", Type: "exact"}}, + assert: func(t *testing.T, matcher RouteMatcher, err error) { + t.Helper() + + require.NoError(t, err) + assert.IsType(t, compositeMatcher{}, matcher) + assert.Len(t, matcher, 1) + + hms := matcher.(compositeMatcher) + assert.IsType(t, &hostMatcher{}, hms[0]) + assert.IsType(t, &exactMatcher{}, hms[0].(*hostMatcher).typedMatcher) + }, + }, + { + uc: "unsupported type", + conf: []config.HostMatcher{{Value: "foo", Type: "bar"}}, + assert: func(t *testing.T, _ RouteMatcher, err error) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "unsupported host matching expression type 'bar' at index 0") + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + hm, err := createHostMatcher(tc.conf) + + tc.assert(t, hm, err) + }) + } +} + +func TestCreatePathParamsMatcher(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + conf []config.ParameterMatcher + assert func(t *testing.T, matcher RouteMatcher, err error) + }{ + { + uc: "empty configuration", + assert: func(t *testing.T, matcher RouteMatcher, err error) { + t.Helper() + + require.NoError(t, err) + assert.IsType(t, compositeMatcher{}, matcher) + assert.Empty(t, matcher) + }, + }, + { + uc: "valid glob expression", + conf: []config.ParameterMatcher{{Name: "foo", Value: "/**", Type: "glob"}}, + assert: func(t *testing.T, matcher RouteMatcher, err error) { + t.Helper() + + require.NoError(t, err) + assert.IsType(t, compositeMatcher{}, matcher) + assert.Len(t, matcher, 1) + + hms := matcher.(compositeMatcher) + assert.IsType(t, &pathParamMatcher{}, hms[0]) + assert.IsType(t, &globMatcher{}, hms[0].(*pathParamMatcher).typedMatcher) + }, + }, + { + uc: "invalid glob expression", + conf: []config.ParameterMatcher{{Name: "foo", Value: "!*][)(*", Type: "glob"}}, + assert: func(t *testing.T, _ RouteMatcher, err error) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "failed to compile path params matching expression for parameter 'foo' at index 0") + }, + }, + { + uc: "valid regex expression", + conf: []config.ParameterMatcher{{Name: "foo", Value: 
".*", Type: "regex"}}, + assert: func(t *testing.T, matcher RouteMatcher, err error) { + t.Helper() + + require.NoError(t, err) + assert.IsType(t, compositeMatcher{}, matcher) + assert.Len(t, matcher, 1) + + hms := matcher.(compositeMatcher) + assert.IsType(t, &pathParamMatcher{}, hms[0]) + assert.IsType(t, ®expMatcher{}, hms[0].(*pathParamMatcher).typedMatcher) + }, + }, + { + uc: "invalid regex expression", + conf: []config.ParameterMatcher{{Name: "foo", Value: "?>?<*??", Type: "regex"}}, + assert: func(t *testing.T, _ RouteMatcher, err error) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "failed to compile path params matching expression for parameter 'foo' at index 0") + }, + }, + { + uc: "exact expression", + conf: []config.ParameterMatcher{{Name: "foo", Value: "?>?<*??", Type: "exact"}}, + assert: func(t *testing.T, matcher RouteMatcher, err error) { + t.Helper() + + require.NoError(t, err) + assert.IsType(t, compositeMatcher{}, matcher) + assert.Len(t, matcher, 1) + + hms := matcher.(compositeMatcher) + assert.IsType(t, &pathParamMatcher{}, hms[0]) + assert.IsType(t, &exactMatcher{}, hms[0].(*pathParamMatcher).typedMatcher) + }, + }, + { + uc: "unsupported type", + conf: []config.ParameterMatcher{{Name: "foo", Value: "foo", Type: "bar"}}, + assert: func(t *testing.T, _ RouteMatcher, err error) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "unsupported path parameter expression type 'bar' for parameter 'foo' at index 0") + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + pm, err := createPathParamsMatcher(tc.conf, config.EncodedSlashesOff) + + tc.assert(t, pm, err) + }) + } +} + +func TestSchemeMatcherMatches(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + matcher schemeMatcher + toMatch string + matches bool + }{ + {uc: "matches any schemes", matcher: schemeMatcher(""), toMatch: "foo", matches: true}, + {uc: "matches", matcher: schemeMatcher("http"), toMatch: "http", matches: true}, + {uc: "does not match", matcher: schemeMatcher("http"), toMatch: "https"}, + } { + t.Run(tc.uc, func(t *testing.T) { + err := tc.matcher.Matches( + &heimdall.Request{URL: &heimdall.URL{URL: url.URL{Scheme: tc.toMatch}}}, + nil, + nil, + ) + + if tc.matches { + require.NoError(t, err) + } else { + require.Error(t, err) + require.ErrorIs(t, err, ErrRequestSchemeMismatch) + } + }) + } +} + +func TestMethodMatcherMatches(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + matcher methodMatcher + toMatch string + matches bool + }{ + {uc: "matches any methods", matcher: methodMatcher{}, toMatch: "GET", matches: true}, + {uc: "matches", matcher: methodMatcher{"GET"}, toMatch: "GET", matches: true}, + {uc: "does not match", matcher: methodMatcher{"GET"}, toMatch: "POST"}, + } { + t.Run(tc.uc, func(t *testing.T) { + err := tc.matcher.Matches(&heimdall.Request{Method: tc.toMatch}, nil, nil) + + if tc.matches { + require.NoError(t, err) + } else { + require.Error(t, err) + require.ErrorIs(t, err, ErrRequestMethodMismatch) + } + }) + } +} + +func TestHostMatcherMatches(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + conf []config.HostMatcher + toMatch string + matches bool + }{ + {uc: "matches any host", conf: []config.HostMatcher{{Value: "**", Type: "glob"}}, toMatch: "foo.example.com", matches: true}, + {uc: "matches", conf: []config.HostMatcher{{Value: 
"example.com", Type: "exact"}}, toMatch: "example.com", matches: true}, + {uc: "does not match", conf: []config.HostMatcher{{Value: "^example.com", Type: "regex"}}, toMatch: "foo.example.com"}, + } { + t.Run(tc.uc, func(t *testing.T) { + hm, err := createHostMatcher(tc.conf) + require.NoError(t, err) + + err = hm.Matches(&heimdall.Request{URL: &heimdall.URL{URL: url.URL{Host: tc.toMatch}}}, nil, nil) + + if tc.matches { + require.NoError(t, err) + } else { + require.Error(t, err) + require.ErrorIs(t, err, ErrRequestHostMismatch) + } + }) + } +} + +func TestPathParamsMatcherMatches(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + conf []config.ParameterMatcher + slashHandling config.EncodedSlashesHandling + toMatch url.URL + keys []string + values []string + matches bool + }{ + { + uc: "parameter not present in keys", + conf: []config.ParameterMatcher{ + {Name: "foo", Type: "exact", Value: "bar"}, + }, + keys: []string{"bar"}, + values: []string{"baz"}, + }, + { + uc: "encoded slashes are not allowed", + conf: []config.ParameterMatcher{ + {Name: "foo", Type: "exact", Value: "bar%2Fbaz"}, + }, + slashHandling: config.EncodedSlashesOff, + keys: []string{"foo"}, + values: []string{"bar%2Fbaz"}, + toMatch: func() url.URL { + uri, err := url.Parse("http://example.com/bar%2Fbaz") + require.NoError(t, err) + + return *uri + }(), + }, + { + uc: "matches with path having allowed but not decoded encoded slashes", + conf: []config.ParameterMatcher{ + {Name: "foo", Type: "exact", Value: "bar%2Fbaz[id]"}, + }, + slashHandling: config.EncodedSlashesOnNoDecode, + keys: []string{"foo"}, + values: []string{"bar%2Fbaz%5Bid%5D"}, + toMatch: func() url.URL { + uri, err := url.Parse("http://example.com/bar%2Fbaz%5Bid%5D") + require.NoError(t, err) + + return *uri + }(), + matches: true, + }, + { + uc: "matches with path having allowed decoded slashes", + conf: []config.ParameterMatcher{ + {Name: "foo", Type: "exact", Value: "bar/baz[id]"}, + }, + slashHandling: config.EncodedSlashesOn, + keys: []string{"foo"}, + values: []string{"bar%2Fbaz%5Bid%5D"}, + toMatch: func() url.URL { + uri, err := url.Parse("http://example.com/foo%2Fbaz%5Bid%5D") + require.NoError(t, err) + + return *uri + }(), + matches: true, + }, + { + uc: "doesn't match", + conf: []config.ParameterMatcher{ + {Name: "foo", Type: "exact", Value: "bar"}, + }, + slashHandling: config.EncodedSlashesOn, + keys: []string{"foo"}, + values: []string{"baz"}, + toMatch: func() url.URL { + uri, err := url.Parse("http://example.com/bar") + require.NoError(t, err) + + return *uri + }(), + }, + } { + t.Run(tc.uc, func(t *testing.T) { + hm, err := createPathParamsMatcher(tc.conf, tc.slashHandling) + require.NoError(t, err) + + err = hm.Matches(&heimdall.Request{URL: &heimdall.URL{URL: tc.toMatch}}, tc.keys, tc.values) + + if tc.matches { + require.NoError(t, err) + } else { + require.Error(t, err) + require.ErrorIs(t, err, ErrRequestPathMismatch) + } + }) + } +} + +func TestCompositeMatcherMatches(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + matcher compositeMatcher + method string + scheme string + matches bool + }{ + { + uc: "matches anything", + matcher: compositeMatcher{}, + method: "GET", + scheme: "foo", + matches: true, + }, + { + uc: "matches", + matcher: compositeMatcher{methodMatcher{"GET"}, schemeMatcher("https")}, + method: "GET", + scheme: "https", + matches: true, + }, + { + uc: "does not match", + matcher: compositeMatcher{methodMatcher{"POST"}}, + method: "GET", + scheme: "https", + 
matches: false, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + err := tc.matcher.Matches( + &heimdall.Request{Method: tc.method, URL: &heimdall.URL{URL: url.URL{Scheme: tc.scheme}}}, + nil, + nil, + ) + + if tc.matches { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} diff --git a/internal/rules/rule/mocks/factory.go b/internal/rules/rule/mocks/factory.go index dae8918ff..af0296480 100644 --- a/internal/rules/rule/mocks/factory.go +++ b/internal/rules/rule/mocks/factory.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks @@ -26,6 +26,10 @@ func (_m *FactoryMock) EXPECT() *FactoryMock_Expecter { func (_m *FactoryMock) CreateRule(version string, srcID string, ruleConfig config.Rule) (rule.Rule, error) { ret := _m.Called(version, srcID, ruleConfig) + if len(ret) == 0 { + panic("no return value specified for CreateRule") + } + var r0 rule.Rule var r1 error if rf, ok := ret.Get(0).(func(string, string, config.Rule) (rule.Rule, error)); ok { @@ -82,6 +86,10 @@ func (_c *FactoryMock_CreateRule_Call) RunAndReturn(run func(string, string, con func (_m *FactoryMock) DefaultRule() rule.Rule { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for DefaultRule") + } + var r0 rule.Rule if rf, ok := ret.Get(0).(func() rule.Rule); ok { r0 = rf() @@ -125,6 +133,10 @@ func (_c *FactoryMock_DefaultRule_Call) RunAndReturn(run func() rule.Rule) *Fact func (_m *FactoryMock) HasDefaultRule() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for HasDefaultRule") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -162,13 +174,12 @@ func (_c *FactoryMock_HasDefaultRule_Call) RunAndReturn(run func() bool) *Factor return _c } -type mockConstructorTestingTNewFactoryMock interface { +// NewFactoryMock creates a new instance of FactoryMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFactoryMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewFactoryMock creates a new instance of FactoryMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewFactoryMock(t mockConstructorTestingTNewFactoryMock) *FactoryMock { +}) *FactoryMock { mock := &FactoryMock{} mock.Mock.Test(t) diff --git a/internal/rules/rule/mocks/repository.go b/internal/rules/rule/mocks/repository.go index d59c4edea..345b7d240 100644 --- a/internal/rules/rule/mocks/repository.go +++ b/internal/rules/rule/mocks/repository.go @@ -1,12 +1,12 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. 
package mocks import ( - url "net/url" + heimdall "github.com/dadrus/heimdall/internal/heimdall" + mock "github.com/stretchr/testify/mock" rule "github.com/dadrus/heimdall/internal/rules/rule" - mock "github.com/stretchr/testify/mock" ) // RepositoryMock is an autogenerated mock type for the Repository type @@ -22,25 +22,122 @@ func (_m *RepositoryMock) EXPECT() *RepositoryMock_Expecter { return &RepositoryMock_Expecter{mock: &_m.Mock} } -// FindRule provides a mock function with given fields: _a0 -func (_m *RepositoryMock) FindRule(_a0 *url.URL) (rule.Rule, error) { - ret := _m.Called(_a0) +// AddRuleSet provides a mock function with given fields: srcID, rules +func (_m *RepositoryMock) AddRuleSet(srcID string, rules []rule.Rule) error { + ret := _m.Called(srcID, rules) + + if len(ret) == 0 { + panic("no return value specified for AddRuleSet") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, []rule.Rule) error); ok { + r0 = rf(srcID, rules) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RepositoryMock_AddRuleSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddRuleSet' +type RepositoryMock_AddRuleSet_Call struct { + *mock.Call +} + +// AddRuleSet is a helper method to define mock.On call +// - srcID string +// - rules []rule.Rule +func (_e *RepositoryMock_Expecter) AddRuleSet(srcID interface{}, rules interface{}) *RepositoryMock_AddRuleSet_Call { + return &RepositoryMock_AddRuleSet_Call{Call: _e.mock.On("AddRuleSet", srcID, rules)} +} + +func (_c *RepositoryMock_AddRuleSet_Call) Run(run func(srcID string, rules []rule.Rule)) *RepositoryMock_AddRuleSet_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].([]rule.Rule)) + }) + return _c +} + +func (_c *RepositoryMock_AddRuleSet_Call) Return(_a0 error) *RepositoryMock_AddRuleSet_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RepositoryMock_AddRuleSet_Call) RunAndReturn(run func(string, []rule.Rule) error) *RepositoryMock_AddRuleSet_Call { + _c.Call.Return(run) + return _c +} + +// DeleteRuleSet provides a mock function with given fields: srcID +func (_m *RepositoryMock) DeleteRuleSet(srcID string) error { + ret := _m.Called(srcID) + + if len(ret) == 0 { + panic("no return value specified for DeleteRuleSet") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(srcID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RepositoryMock_DeleteRuleSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteRuleSet' +type RepositoryMock_DeleteRuleSet_Call struct { + *mock.Call +} + +// DeleteRuleSet is a helper method to define mock.On call +// - srcID string +func (_e *RepositoryMock_Expecter) DeleteRuleSet(srcID interface{}) *RepositoryMock_DeleteRuleSet_Call { + return &RepositoryMock_DeleteRuleSet_Call{Call: _e.mock.On("DeleteRuleSet", srcID)} +} + +func (_c *RepositoryMock_DeleteRuleSet_Call) Run(run func(srcID string)) *RepositoryMock_DeleteRuleSet_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *RepositoryMock_DeleteRuleSet_Call) Return(_a0 error) *RepositoryMock_DeleteRuleSet_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RepositoryMock_DeleteRuleSet_Call) RunAndReturn(run func(string) error) *RepositoryMock_DeleteRuleSet_Call { + _c.Call.Return(run) + return _c +} + +// FindRule provides a mock function with given fields: ctx +func (_m *RepositoryMock) 
FindRule(ctx heimdall.Context) (rule.Rule, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for FindRule") + } var r0 rule.Rule var r1 error - if rf, ok := ret.Get(0).(func(*url.URL) (rule.Rule, error)); ok { - return rf(_a0) + if rf, ok := ret.Get(0).(func(heimdall.Context) (rule.Rule, error)); ok { + return rf(ctx) } - if rf, ok := ret.Get(0).(func(*url.URL) rule.Rule); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(heimdall.Context) rule.Rule); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(rule.Rule) } } - if rf, ok := ret.Get(1).(func(*url.URL) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(heimdall.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -54,14 +151,14 @@ type RepositoryMock_FindRule_Call struct { } // FindRule is a helper method to define mock.On call -// - _a0 *url.URL -func (_e *RepositoryMock_Expecter) FindRule(_a0 interface{}) *RepositoryMock_FindRule_Call { - return &RepositoryMock_FindRule_Call{Call: _e.mock.On("FindRule", _a0)} +// - ctx heimdall.Context +func (_e *RepositoryMock_Expecter) FindRule(ctx interface{}) *RepositoryMock_FindRule_Call { + return &RepositoryMock_FindRule_Call{Call: _e.mock.On("FindRule", ctx)} } -func (_c *RepositoryMock_FindRule_Call) Run(run func(_a0 *url.URL)) *RepositoryMock_FindRule_Call { +func (_c *RepositoryMock_FindRule_Call) Run(run func(ctx heimdall.Context)) *RepositoryMock_FindRule_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*url.URL)) + run(args[0].(heimdall.Context)) }) return _c } @@ -71,18 +168,64 @@ func (_c *RepositoryMock_FindRule_Call) Return(_a0 rule.Rule, _a1 error) *Reposi return _c } -func (_c *RepositoryMock_FindRule_Call) RunAndReturn(run func(*url.URL) (rule.Rule, error)) *RepositoryMock_FindRule_Call { +func (_c *RepositoryMock_FindRule_Call) RunAndReturn(run func(heimdall.Context) (rule.Rule, error)) *RepositoryMock_FindRule_Call { _c.Call.Return(run) return _c } -type mockConstructorTestingTNewRepositoryMock interface { - mock.TestingT - Cleanup(func()) +// UpdateRuleSet provides a mock function with given fields: srcID, rules +func (_m *RepositoryMock) UpdateRuleSet(srcID string, rules []rule.Rule) error { + ret := _m.Called(srcID, rules) + + if len(ret) == 0 { + panic("no return value specified for UpdateRuleSet") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, []rule.Rule) error); ok { + r0 = rf(srcID, rules) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RepositoryMock_UpdateRuleSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateRuleSet' +type RepositoryMock_UpdateRuleSet_Call struct { + *mock.Call +} + +// UpdateRuleSet is a helper method to define mock.On call +// - srcID string +// - rules []rule.Rule +func (_e *RepositoryMock_Expecter) UpdateRuleSet(srcID interface{}, rules interface{}) *RepositoryMock_UpdateRuleSet_Call { + return &RepositoryMock_UpdateRuleSet_Call{Call: _e.mock.On("UpdateRuleSet", srcID, rules)} +} + +func (_c *RepositoryMock_UpdateRuleSet_Call) Run(run func(srcID string, rules []rule.Rule)) *RepositoryMock_UpdateRuleSet_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].([]rule.Rule)) + }) + return _c +} + +func (_c *RepositoryMock_UpdateRuleSet_Call) Return(_a0 error) *RepositoryMock_UpdateRuleSet_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RepositoryMock_UpdateRuleSet_Call) RunAndReturn(run func(string, []rule.Rule) error) 
*RepositoryMock_UpdateRuleSet_Call { + _c.Call.Return(run) + return _c } // NewRepositoryMock creates a new instance of RepositoryMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRepositoryMock(t mockConstructorTestingTNewRepositoryMock) *RepositoryMock { +// The first argument is typically a *testing.T value. +func NewRepositoryMock(t interface { + mock.TestingT + Cleanup(func()) +}) *RepositoryMock { mock := &RepositoryMock{} mock.Mock.Test(t) diff --git a/internal/rules/rule/mocks/rule.go b/internal/rules/rule/mocks/rule.go index 2c7dc6cb6..153234324 100644 --- a/internal/rules/rule/mocks/rule.go +++ b/internal/rules/rule/mocks/rule.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" rule "github.com/dadrus/heimdall/internal/rules/rule" - - url "net/url" ) // RuleMock is an autogenerated mock type for the Rule type @@ -24,17 +22,112 @@ func (_m *RuleMock) EXPECT() *RuleMock_Expecter { return &RuleMock_Expecter{mock: &_m.Mock} } -// Execute provides a mock function with given fields: _a0 -func (_m *RuleMock) Execute(_a0 heimdall.Context) (rule.Backend, error) { - ret := _m.Called(_a0) +// AllowsBacktracking provides a mock function with given fields: +func (_m *RuleMock) AllowsBacktracking() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AllowsBacktracking") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// RuleMock_AllowsBacktracking_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllowsBacktracking' +type RuleMock_AllowsBacktracking_Call struct { + *mock.Call +} + +// AllowsBacktracking is a helper method to define mock.On call +func (_e *RuleMock_Expecter) AllowsBacktracking() *RuleMock_AllowsBacktracking_Call { + return &RuleMock_AllowsBacktracking_Call{Call: _e.mock.On("AllowsBacktracking")} +} + +func (_c *RuleMock_AllowsBacktracking_Call) Run(run func()) *RuleMock_AllowsBacktracking_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *RuleMock_AllowsBacktracking_Call) Return(_a0 bool) *RuleMock_AllowsBacktracking_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RuleMock_AllowsBacktracking_Call) RunAndReturn(run func() bool) *RuleMock_AllowsBacktracking_Call { + _c.Call.Return(run) + return _c +} + +// EqualTo provides a mock function with given fields: other +func (_m *RuleMock) EqualTo(other rule.Rule) bool { + ret := _m.Called(other) + + if len(ret) == 0 { + panic("no return value specified for EqualTo") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(rule.Rule) bool); ok { + r0 = rf(other) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// RuleMock_EqualTo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EqualTo' +type RuleMock_EqualTo_Call struct { + *mock.Call +} + +// EqualTo is a helper method to define mock.On call +// - other rule.Rule +func (_e *RuleMock_Expecter) EqualTo(other interface{}) *RuleMock_EqualTo_Call { + return &RuleMock_EqualTo_Call{Call: _e.mock.On("EqualTo", other)} +} + +func (_c *RuleMock_EqualTo_Call) Run(run func(other rule.Rule)) *RuleMock_EqualTo_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(rule.Rule)) + }) + return _c 
+} + +func (_c *RuleMock_EqualTo_Call) Return(_a0 bool) *RuleMock_EqualTo_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RuleMock_EqualTo_Call) RunAndReturn(run func(rule.Rule) bool) *RuleMock_EqualTo_Call { + _c.Call.Return(run) + return _c +} + +// Execute provides a mock function with given fields: ctx +func (_m *RuleMock) Execute(ctx heimdall.Context) (rule.Backend, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Execute") + } var r0 rule.Backend var r1 error if rf, ok := ret.Get(0).(func(heimdall.Context) (rule.Backend, error)); ok { - return rf(_a0) + return rf(ctx) } if rf, ok := ret.Get(0).(func(heimdall.Context) rule.Backend); ok { - r0 = rf(_a0) + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(rule.Backend) @@ -42,7 +135,7 @@ func (_m *RuleMock) Execute(_a0 heimdall.Context) (rule.Backend, error) { } if rf, ok := ret.Get(1).(func(heimdall.Context) error); ok { - r1 = rf(_a0) + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -56,12 +149,12 @@ type RuleMock_Execute_Call struct { } // Execute is a helper method to define mock.On call -// - _a0 heimdall.Context -func (_e *RuleMock_Expecter) Execute(_a0 interface{}) *RuleMock_Execute_Call { - return &RuleMock_Execute_Call{Call: _e.mock.On("Execute", _a0)} +// - ctx heimdall.Context +func (_e *RuleMock_Expecter) Execute(ctx interface{}) *RuleMock_Execute_Call { + return &RuleMock_Execute_Call{Call: _e.mock.On("Execute", ctx)} } -func (_c *RuleMock_Execute_Call) Run(run func(_a0 heimdall.Context)) *RuleMock_Execute_Call { +func (_c *RuleMock_Execute_Call) Run(run func(ctx heimdall.Context)) *RuleMock_Execute_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(heimdall.Context)) }) @@ -82,6 +175,10 @@ func (_c *RuleMock_Execute_Call) RunAndReturn(run func(heimdall.Context) (rule.B func (_m *RuleMock) ID() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ID") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -119,55 +216,64 @@ func (_c *RuleMock_ID_Call) RunAndReturn(run func() string) *RuleMock_ID_Call { return _c } -// MatchesMethod provides a mock function with given fields: _a0 -func (_m *RuleMock) MatchesMethod(_a0 string) bool { - ret := _m.Called(_a0) +// Routes provides a mock function with given fields: +func (_m *RuleMock) Routes() []rule.Route { + ret := _m.Called() - var r0 bool - if rf, ok := ret.Get(0).(func(string) bool); ok { - r0 = rf(_a0) + if len(ret) == 0 { + panic("no return value specified for Routes") + } + + var r0 []rule.Route + if rf, ok := ret.Get(0).(func() []rule.Route); ok { + r0 = rf() } else { - r0 = ret.Get(0).(bool) + if ret.Get(0) != nil { + r0 = ret.Get(0).([]rule.Route) + } } return r0 } -// RuleMock_MatchesMethod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MatchesMethod' -type RuleMock_MatchesMethod_Call struct { +// RuleMock_Routes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Routes' +type RuleMock_Routes_Call struct { *mock.Call } -// MatchesMethod is a helper method to define mock.On call -// - _a0 string -func (_e *RuleMock_Expecter) MatchesMethod(_a0 interface{}) *RuleMock_MatchesMethod_Call { - return &RuleMock_MatchesMethod_Call{Call: _e.mock.On("MatchesMethod", _a0)} +// Routes is a helper method to define mock.On call +func (_e *RuleMock_Expecter) Routes() *RuleMock_Routes_Call { + return &RuleMock_Routes_Call{Call: _e.mock.On("Routes")} } -func (_c 
*RuleMock_MatchesMethod_Call) Run(run func(_a0 string)) *RuleMock_MatchesMethod_Call { +func (_c *RuleMock_Routes_Call) Run(run func()) *RuleMock_Routes_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) + run() }) return _c } -func (_c *RuleMock_MatchesMethod_Call) Return(_a0 bool) *RuleMock_MatchesMethod_Call { +func (_c *RuleMock_Routes_Call) Return(_a0 []rule.Route) *RuleMock_Routes_Call { _c.Call.Return(_a0) return _c } -func (_c *RuleMock_MatchesMethod_Call) RunAndReturn(run func(string) bool) *RuleMock_MatchesMethod_Call { +func (_c *RuleMock_Routes_Call) RunAndReturn(run func() []rule.Route) *RuleMock_Routes_Call { _c.Call.Return(run) return _c } -// MatchesURL provides a mock function with given fields: _a0 -func (_m *RuleMock) MatchesURL(_a0 *url.URL) bool { - ret := _m.Called(_a0) +// SameAs provides a mock function with given fields: other +func (_m *RuleMock) SameAs(other rule.Rule) bool { + ret := _m.Called(other) + + if len(ret) == 0 { + panic("no return value specified for SameAs") + } var r0 bool - if rf, ok := ret.Get(0).(func(*url.URL) bool); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(rule.Rule) bool); ok { + r0 = rf(other) } else { r0 = ret.Get(0).(bool) } @@ -175,30 +281,30 @@ func (_m *RuleMock) MatchesURL(_a0 *url.URL) bool { return r0 } -// RuleMock_MatchesURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MatchesURL' -type RuleMock_MatchesURL_Call struct { +// RuleMock_SameAs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SameAs' +type RuleMock_SameAs_Call struct { *mock.Call } -// MatchesURL is a helper method to define mock.On call -// - _a0 *url.URL -func (_e *RuleMock_Expecter) MatchesURL(_a0 interface{}) *RuleMock_MatchesURL_Call { - return &RuleMock_MatchesURL_Call{Call: _e.mock.On("MatchesURL", _a0)} +// SameAs is a helper method to define mock.On call +// - other rule.Rule +func (_e *RuleMock_Expecter) SameAs(other interface{}) *RuleMock_SameAs_Call { + return &RuleMock_SameAs_Call{Call: _e.mock.On("SameAs", other)} } -func (_c *RuleMock_MatchesURL_Call) Run(run func(_a0 *url.URL)) *RuleMock_MatchesURL_Call { +func (_c *RuleMock_SameAs_Call) Run(run func(other rule.Rule)) *RuleMock_SameAs_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*url.URL)) + run(args[0].(rule.Rule)) }) return _c } -func (_c *RuleMock_MatchesURL_Call) Return(_a0 bool) *RuleMock_MatchesURL_Call { +func (_c *RuleMock_SameAs_Call) Return(_a0 bool) *RuleMock_SameAs_Call { _c.Call.Return(_a0) return _c } -func (_c *RuleMock_MatchesURL_Call) RunAndReturn(run func(*url.URL) bool) *RuleMock_MatchesURL_Call { +func (_c *RuleMock_SameAs_Call) RunAndReturn(run func(rule.Rule) bool) *RuleMock_SameAs_Call { _c.Call.Return(run) return _c } @@ -207,6 +313,10 @@ func (_c *RuleMock_MatchesURL_Call) RunAndReturn(run func(*url.URL) bool) *RuleM func (_m *RuleMock) SrcID() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SrcID") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -244,13 +354,12 @@ func (_c *RuleMock_SrcID_Call) RunAndReturn(run func() string) *RuleMock_SrcID_C return _c } -type mockConstructorTestingTNewRuleMock interface { +// NewRuleMock creates a new instance of RuleMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRuleMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewRuleMock creates a new instance of RuleMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRuleMock(t mockConstructorTestingTNewRuleMock) *RuleMock { +}) *RuleMock { mock := &RuleMock{} mock.Mock.Test(t) diff --git a/internal/rules/rule/repository.go b/internal/rules/rule/repository.go index 34a0a187d..1108aa509 100644 --- a/internal/rules/rule/repository.go +++ b/internal/rules/rule/repository.go @@ -17,11 +17,15 @@ package rule import ( - "net/url" + "github.com/dadrus/heimdall/internal/heimdall" ) //go:generate mockery --name Repository --structname RepositoryMock type Repository interface { - FindRule(toMatch *url.URL) (Rule, error) + FindRule(ctx heimdall.Context) (Rule, error) + + AddRuleSet(srcID string, rules []Rule) error + UpdateRuleSet(srcID string, rules []Rule) error + DeleteRuleSet(srcID string) error } diff --git a/internal/rules/rule/route.go b/internal/rules/rule/route.go new file mode 100644 index 000000000..5512bf463 --- /dev/null +++ b/internal/rules/rule/route.go @@ -0,0 +1,9 @@ +package rule + +import "github.com/dadrus/heimdall/internal/heimdall" + +type Route interface { + Path() string + Matches(ctx heimdall.Context, keys, values []string) bool + Rule() Rule +} diff --git a/internal/rules/rule/rule.go b/internal/rules/rule/rule.go index d3d0aefa3..fe59916de 100644 --- a/internal/rules/rule/rule.go +++ b/internal/rules/rule/rule.go @@ -17,8 +17,6 @@ package rule import ( - "net/url" - "github.com/dadrus/heimdall/internal/heimdall" ) @@ -28,6 +26,8 @@ type Rule interface { ID() string SrcID() string Execute(ctx heimdall.Context) (Backend, error) - MatchesURL(match *url.URL) bool - MatchesMethod(method string) bool + Routes() []Route + SameAs(other Rule) bool + EqualTo(other Rule) bool + AllowsBacktracking() bool } diff --git a/internal/rules/rule_executor_impl.go b/internal/rules/rule_executor_impl.go index 3dac1d8c4..5b27c8583 100644 --- a/internal/rules/rule_executor_impl.go +++ b/internal/rules/rule_executor_impl.go @@ -21,7 +21,6 @@ import ( "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/rule" - "github.com/dadrus/heimdall/internal/x/errorchain" ) type ruleExecutor struct { @@ -33,24 +32,17 @@ func newRuleExecutor(repository rule.Repository) rule.Executor { } func (e *ruleExecutor) Execute(ctx heimdall.Context) (rule.Backend, error) { - req := ctx.Request() + request := ctx.Request() - //nolint:contextcheck zerolog.Ctx(ctx.AppContext()).Debug(). - Str("_method", req.Method). - Str("_url", req.URL.String()). + Str("_method", request.Method). + Str("_url", request.URL.String()). 
Msg("Analyzing request") - rul, err := e.r.FindRule(req.URL) + rul, err := e.r.FindRule(ctx) if err != nil { return nil, err } - method := ctx.Request().Method - if !rul.MatchesMethod(method) { - return nil, errorchain.NewWithMessagef(heimdall.ErrMethodNotAllowed, - "rule (id=%s, src=%s) doesn't match %s method", rul.ID(), rul.SrcID(), method) - } - return rul.Execute(ctx) } diff --git a/internal/rules/rule_executor_impl_test.go b/internal/rules/rule_executor_impl_test.go index 28e237688..e1f23ddd2 100644 --- a/internal/rules/rule_executor_impl_test.go +++ b/internal/rules/rule_executor_impl_test.go @@ -43,28 +43,16 @@ func TestRuleExecutorExecute(t *testing.T) { assertResponse func(t *testing.T, err error, response *http.Response) }{ { - uc: "no rules configured", + uc: "no matching rules", expErr: heimdall.ErrNoRuleFound, configureMocks: func(t *testing.T, ctx *mocks2.ContextMock, repo *mocks4.RepositoryMock, _ *mocks4.RuleMock) { t.Helper() - ctx.EXPECT().AppContext().Return(context.Background()) - ctx.EXPECT().Request().Return(&heimdall.Request{Method: http.MethodPost, URL: matchingURL}) - repo.EXPECT().FindRule(matchingURL).Return(nil, heimdall.ErrNoRuleFound) - }, - }, - { - uc: "rule doesn't match method", - expErr: heimdall.ErrMethodNotAllowed, - configureMocks: func(t *testing.T, ctx *mocks2.ContextMock, repo *mocks4.RepositoryMock, rule *mocks4.RuleMock) { - t.Helper() + req := &heimdall.Request{Method: http.MethodPost, URL: &heimdall.URL{URL: *matchingURL}} ctx.EXPECT().AppContext().Return(context.Background()) - ctx.EXPECT().Request().Return(&heimdall.Request{Method: http.MethodPost, URL: matchingURL}) - rule.EXPECT().MatchesMethod(http.MethodPost).Return(false) - rule.EXPECT().ID().Return("test_id") - rule.EXPECT().SrcID().Return("test_src") - repo.EXPECT().FindRule(matchingURL).Return(rule, nil) + ctx.EXPECT().Request().Return(req) + repo.EXPECT().FindRule(ctx).Return(nil, heimdall.ErrNoRuleFound) }, }, { @@ -73,11 +61,12 @@ func TestRuleExecutorExecute(t *testing.T) { configureMocks: func(t *testing.T, ctx *mocks2.ContextMock, repo *mocks4.RepositoryMock, rule *mocks4.RuleMock) { t.Helper() + req := &heimdall.Request{Method: http.MethodGet, URL: &heimdall.URL{URL: *matchingURL}} + ctx.EXPECT().AppContext().Return(context.Background()) - ctx.EXPECT().Request().Return(&heimdall.Request{Method: http.MethodGet, URL: matchingURL}) - rule.EXPECT().MatchesMethod(http.MethodGet).Return(true) + ctx.EXPECT().Request().Return(req) + repo.EXPECT().FindRule(ctx).Return(rule, nil) rule.EXPECT().Execute(ctx).Return(nil, heimdall.ErrAuthentication) - repo.EXPECT().FindRule(matchingURL).Return(rule, nil) }, }, { @@ -86,12 +75,12 @@ func TestRuleExecutorExecute(t *testing.T) { t.Helper() upstream := mocks4.NewBackendMock(t) + req := &heimdall.Request{Method: http.MethodGet, URL: &heimdall.URL{URL: *matchingURL}} ctx.EXPECT().AppContext().Return(context.Background()) - ctx.EXPECT().Request().Return(&heimdall.Request{Method: http.MethodGet, URL: matchingURL}) - rule.EXPECT().MatchesMethod(http.MethodGet).Return(true) + ctx.EXPECT().Request().Return(req) + repo.EXPECT().FindRule(ctx).Return(rule, nil) rule.EXPECT().Execute(ctx).Return(upstream, nil) - repo.EXPECT().FindRule(matchingURL).Return(rule, nil) }, }, } { diff --git a/internal/rules/rule_factory_impl.go b/internal/rules/rule_factory_impl.go index 30e0b2ee6..00ca464a1 100644 --- a/internal/rules/rule_factory_impl.go +++ b/internal/rules/rule_factory_impl.go @@ -17,29 +17,22 @@ package rules import ( - "crypto" "errors" "fmt" - "net/http" 
- "slices" - "strings" - "github.com/goccy/go-json" "github.com/rs/zerolog" "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/heimdall" config2 "github.com/dadrus/heimdall/internal/rules/config" "github.com/dadrus/heimdall/internal/rules/mechanisms" - "github.com/dadrus/heimdall/internal/rules/patternmatcher" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/errorchain" - "github.com/dadrus/heimdall/internal/x/slicex" ) func NewRuleFactory( - hf mechanisms.Factory, + hf mechanisms.MechanismFactory, conf *config.Configuration, mode config.OperationMode, logger zerolog.Logger, @@ -58,11 +51,101 @@ func NewRuleFactory( } type ruleFactory struct { - hf mechanisms.Factory - logger zerolog.Logger - defaultRule *ruleImpl - hasDefaultRule bool - mode config.OperationMode + hf mechanisms.MechanismFactory + logger zerolog.Logger + defaultRule *ruleImpl + hasDefaultRule bool + mode config.OperationMode + defaultBacktracking bool +} + +func (f *ruleFactory) DefaultRule() rule.Rule { return f.defaultRule } +func (f *ruleFactory) HasDefaultRule() bool { return f.hasDefaultRule } + +// nolint:cyclop,funlen +func (f *ruleFactory) CreateRule(version, srcID string, ruleConfig config2.Rule) (rule.Rule, error) { + if f.mode == config.ProxyMode && ruleConfig.Backend == nil { + return nil, errorchain.NewWithMessage(heimdall.ErrConfiguration, "proxy mode requires forward_to definition") + } + + slashesHandling := x.IfThenElse(len(ruleConfig.EncodedSlashesHandling) != 0, + ruleConfig.EncodedSlashesHandling, + config2.EncodedSlashesOff, + ) + + authenticators, subHandlers, finalizers, err := f.createExecutePipeline(version, ruleConfig.Execute) + if err != nil { + return nil, err + } + + errorHandlers, err := f.createOnErrorPipeline(version, ruleConfig.ErrorHandler) + if err != nil { + return nil, err + } + + var allowsBacktracking bool + + if f.defaultRule != nil { + authenticators = x.IfThenElse(len(authenticators) != 0, authenticators, f.defaultRule.sc) + subHandlers = x.IfThenElse(len(subHandlers) != 0, subHandlers, f.defaultRule.sh) + finalizers = x.IfThenElse(len(finalizers) != 0, finalizers, f.defaultRule.fi) + errorHandlers = x.IfThenElse(len(errorHandlers) != 0, errorHandlers, f.defaultRule.eh) + allowsBacktracking = x.IfThenElseExec(ruleConfig.Matcher.BacktrackingEnabled != nil, + func() bool { return *ruleConfig.Matcher.BacktrackingEnabled }, + func() bool { return f.defaultBacktracking }) + } + + if len(authenticators) == 0 { + return nil, errorchain.NewWithMessage(heimdall.ErrConfiguration, "no authenticator defined") + } + + hash, err := ruleConfig.Hash() + if err != nil { + return nil, err + } + + rul := &ruleImpl{ + id: ruleConfig.ID, + srcID: srcID, + slashesHandling: slashesHandling, + allowsBacktracking: allowsBacktracking, + backend: ruleConfig.Backend, + hash: hash, + sc: authenticators, + sh: subHandlers, + fi: finalizers, + eh: errorHandlers, + } + + mm, err := createMethodMatcher(ruleConfig.Matcher.Methods) + if err != nil { + return nil, err + } + + hm, err := createHostMatcher(ruleConfig.Matcher.Hosts) + if err != nil { + return nil, err + } + + sm := schemeMatcher(ruleConfig.Matcher.Scheme) + + for _, rc := range ruleConfig.Matcher.Routes { + ppm, err := createPathParamsMatcher(rc.PathParams, slashesHandling) + if err != nil { + return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, 
+ "failed creating route '%s'", rc.Path). + CausedBy(err) + } + + rul.routes = append(rul.routes, + &routeImpl{ + rule: rul, + path: rc.Path, + matcher: compositeMatcher{sm, mm, hm, ppm}, + }) + } + + return rul, nil } //nolint:funlen,gocognit,cyclop @@ -151,133 +234,6 @@ func (f *ruleFactory) createExecutePipeline( return authenticators, subjectHandlers, finalizers, nil } -func (f *ruleFactory) DefaultRule() rule.Rule { return f.defaultRule } -func (f *ruleFactory) HasDefaultRule() bool { return f.hasDefaultRule } - -//nolint:cyclop, funlen -func (f *ruleFactory) CreateRule(version, srcID string, ruleConfig config2.Rule) ( - rule.Rule, error, -) { - if len(ruleConfig.ID) == 0 { - return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "no ID defined for rule ID=%s from %s", ruleConfig.ID, srcID) - } - - if f.mode == config.ProxyMode { - if err := checkProxyModeApplicability(srcID, ruleConfig); err != nil { - return nil, err - } - } - - matcher, err := patternmatcher.NewPatternMatcher( - ruleConfig.RuleMatcher.Strategy, ruleConfig.RuleMatcher.URL) - if err != nil { - return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "bad URL pattern for %s strategy defined for rule ID=%s from %s", - ruleConfig.RuleMatcher.Strategy, ruleConfig.ID, srcID).CausedBy(err) - } - - authenticators, subHandlers, finalizers, err := f.createExecutePipeline(version, ruleConfig.Execute) - if err != nil { - return nil, err - } - - errorHandlers, err := f.createOnErrorPipeline(version, ruleConfig.ErrorHandler) - if err != nil { - return nil, err - } - - methods, err := expandHTTPMethods(ruleConfig.Methods) - if err != nil { - return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "failed to expand allowed HTTP methods for rule ID=%s from %s", ruleConfig.ID, srcID).CausedBy(err) - } - - if f.defaultRule != nil { - authenticators = x.IfThenElse(len(authenticators) != 0, authenticators, f.defaultRule.sc) - subHandlers = x.IfThenElse(len(subHandlers) != 0, subHandlers, f.defaultRule.sh) - finalizers = x.IfThenElse(len(finalizers) != 0, finalizers, f.defaultRule.fi) - errorHandlers = x.IfThenElse(len(errorHandlers) != 0, errorHandlers, f.defaultRule.eh) - methods = x.IfThenElse(len(methods) != 0, methods, f.defaultRule.methods) - } - - if len(authenticators) == 0 { - return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "no authenticator defined for rule ID=%s from %s", ruleConfig.ID, srcID) - } - - if len(methods) == 0 { - return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "no methods defined for rule ID=%s from %s", ruleConfig.ID, srcID) - } - - hash, err := f.createHash(ruleConfig) - if err != nil { - return nil, errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "failed to create hash for rule ID=%s from %s", ruleConfig.ID, srcID) - } - - return &ruleImpl{ - id: ruleConfig.ID, - encodedSlashesHandling: x.IfThenElse( - len(ruleConfig.EncodedSlashesHandling) != 0, - ruleConfig.EncodedSlashesHandling, - config2.EncodedSlashesOff, - ), - urlMatcher: matcher, - backend: ruleConfig.Backend, - methods: methods, - srcID: srcID, - isDefault: false, - hash: hash, - sc: authenticators, - sh: subHandlers, - fi: finalizers, - eh: errorHandlers, - }, nil -} - -func checkProxyModeApplicability(srcID string, ruleConfig config2.Rule) error { - if ruleConfig.Backend == nil { - return errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "heimdall is operated in proxy mode, but no forward_to is defined in rule ID=%s from %s", - ruleConfig.ID, srcID) - } - - if 
len(ruleConfig.Backend.Host) == 0 { - return errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "missing host definition in forward_to in rule ID=%s from %s", - ruleConfig.ID, srcID) - } - - urlRewriter := ruleConfig.Backend.URLRewriter - if urlRewriter == nil { - return nil - } - - if len(urlRewriter.Scheme) == 0 && - len(urlRewriter.PathPrefixToAdd) == 0 && - len(urlRewriter.PathPrefixToCut) == 0 && - len(urlRewriter.QueryParamsToRemove) == 0 { - return errorchain.NewWithMessagef(heimdall.ErrConfiguration, - "rewrite is defined in forward_to in rule ID=%s from %s, but is empty", ruleConfig.ID, srcID) - } - - return nil -} - -func (f *ruleFactory) createHash(ruleConfig config2.Rule) ([]byte, error) { - rawRuleConfig, err := json.Marshal(ruleConfig) - if err != nil { - return nil, err - } - - md := crypto.SHA256.New() - md.Write(rawRuleConfig) - - return md.Sum(nil), nil -} - func (f *ruleFactory) createOnErrorPipeline( version string, ehConfigs []config.MechanismConfig, @@ -288,18 +244,18 @@ func (f *ruleFactory) createOnErrorPipeline( id, found := ehStep["error_handler"] if found { conf := getConfig(ehStep["config"]) - condition := ehStep["if"] - if condition != nil { - conf["if"] = condition + condition, err := getExecutionCondition(ehStep["if"]) + if err != nil { + return nil, err } - eh, err := f.hf.CreateErrorHandler(version, id.(string), conf) + handler, err := f.hf.CreateErrorHandler(version, id.(string), conf) if err != nil { return nil, err } - errorHandlers = append(errorHandlers, eh) + errorHandlers = append(errorHandlers, &conditionalErrorHandler{h: handler, c: condition}) } else { return nil, errorchain.NewWithMessage(heimdall.ErrConfiguration, "unsupported configuration in error handler") @@ -340,57 +296,23 @@ func (f *ruleFactory) initWithDefaultRule(ruleConfig *config.DefaultRule, logger return errorchain.NewWithMessage(heimdall.ErrConfiguration, "no authenticator defined for default rule") } - methods, err := expandHTTPMethods(ruleConfig.Methods) - if err != nil { - return errorchain.NewWithMessagef(heimdall.ErrConfiguration, "failed to expand allowed HTTP methods"). - CausedBy(err) - } - - if len(methods) == 0 { - return errorchain.NewWithMessagef(heimdall.ErrConfiguration, "no methods defined for default rule") - } - f.defaultRule = &ruleImpl{ - id: "default", - encodedSlashesHandling: config2.EncodedSlashesOff, - methods: methods, - srcID: "config", - isDefault: true, - sc: authenticators, - sh: subHandlers, - fi: finalizers, - eh: errorHandlers, + id: "default", + slashesHandling: config2.EncodedSlashesOff, + srcID: "config", + isDefault: true, + sc: authenticators, + sh: subHandlers, + fi: finalizers, + eh: errorHandlers, } f.hasDefaultRule = true + f.defaultBacktracking = ruleConfig.BacktrackingEnabled return nil } -func expandHTTPMethods(methods []string) ([]string, error) { - if slices.Contains(methods, "ALL") { - methods = slices.DeleteFunc(methods, func(method string) bool { return method == "ALL" }) - - methods = append(methods, - http.MethodGet, http.MethodHead, http.MethodPost, http.MethodPut, http.MethodPatch, - http.MethodDelete, http.MethodConnect, http.MethodOptions, http.MethodTrace) - } - - slices.SortFunc(methods, strings.Compare) - - methods = slices.Compact(methods) - if res := slicex.Filter(methods, func(s string) bool { return len(s) == 0 }); len(res) != 0 { - return nil, errorchain.NewWithMessage(heimdall.ErrConfiguration, - "methods list contains empty values. 
have you forgotten to put the corresponding value into braces?") - } - - tbr := slicex.Filter(methods, func(s string) bool { return strings.HasPrefix(s, "!") }) - methods = slicex.Subtract(methods, tbr) - tbr = slicex.Map[string, string](tbr, func(s string) string { return strings.TrimPrefix(s, "!") }) - - return slicex.Subtract(methods, tbr), nil -} - type CheckFunc func() error var errHandlerNotFound = errors.New("handler not found") @@ -429,11 +351,12 @@ func getConfig(conf any) config.MechanismConfig { return nil } - if m, ok := conf.(map[string]any); ok { - return m + m, ok := conf.(map[string]any) + if !ok { + panic(fmt.Sprintf("unexpected type for config %T", conf)) } - panic(fmt.Sprintf("unexpected type for config %T", conf)) + return m } func getExecutionCondition(conf any) (executionCondition, error) { diff --git a/internal/rules/rule_factory_impl_test.go b/internal/rules/rule_factory_impl_test.go index 21fad4011..24a73d614 100644 --- a/internal/rules/rule_factory_impl_test.go +++ b/internal/rules/rule_factory_impl_test.go @@ -17,7 +17,6 @@ package rules import ( - "net/http" "net/url" "testing" @@ -46,7 +45,7 @@ func TestRuleFactoryNew(t *testing.T) { for _, tc := range []struct { uc string config *config.Configuration - configureMocks func(t *testing.T, mhf *mocks3.FactoryMock) + configureMocks func(t *testing.T, mhf *mocks3.MechanismFactoryMock) assert func(t *testing.T, err error, ruleFactory *ruleFactory) }{ { @@ -105,7 +104,7 @@ func TestRuleFactoryNew(t *testing.T) { }, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateContextualizer(mock.Anything, "bar", mock.Anything). @@ -129,7 +128,7 @@ func TestRuleFactoryNew(t *testing.T) { }, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateFinalizer(mock.Anything, "bar", mock.Anything).Return(nil, nil) @@ -149,7 +148,7 @@ func TestRuleFactoryNew(t *testing.T) { Execute: []config.MechanismConfig{{"authenticator": "foo"}}, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator(mock.Anything, "foo", mock.Anything).Return(nil, testsupport.ErrTestPurpose) @@ -171,7 +170,7 @@ func TestRuleFactoryNew(t *testing.T) { }, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateFinalizer(mock.Anything, "bar", mock.Anything).Return(nil, nil) @@ -191,7 +190,7 @@ func TestRuleFactoryNew(t *testing.T) { Execute: []config.MechanismConfig{{"authorizer": "foo"}}, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthorizer(mock.Anything, "foo", mock.Anything).Return(nil, testsupport.ErrTestPurpose) @@ -213,7 +212,7 @@ func TestRuleFactoryNew(t *testing.T) { }, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateFinalizer(mock.Anything, "bar", mock.Anything).Return(nil, nil) @@ -233,7 +232,7 @@ func TestRuleFactoryNew(t *testing.T) { Execute: []config.MechanismConfig{{"contextualizer": "foo"}}, }, }, - configureMocks: func(t 
*testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateContextualizer(mock.Anything, "foo", mock.Anything). @@ -253,7 +252,7 @@ func TestRuleFactoryNew(t *testing.T) { Execute: []config.MechanismConfig{{"finalizer": "foo"}}, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateFinalizer(mock.Anything, "foo", mock.Anything).Return(nil, testsupport.ErrTestPurpose) @@ -272,7 +271,7 @@ func TestRuleFactoryNew(t *testing.T) { ErrorHandler: []config.MechanismConfig{{"error_handler": "foo"}}, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateErrorHandler(mock.Anything, "foo", mock.Anything).Return(nil, testsupport.ErrTestPurpose) @@ -299,127 +298,6 @@ func TestRuleFactoryNew(t *testing.T) { require.ErrorContains(t, err, "no authenticator") }, }, - { - uc: "new factory with default rule, consisting of authenticator only", - config: &config.Configuration{ - Default: &config.DefaultRule{ - Execute: []config.MechanismConfig{ - {"authenticator": "bar"}, - }, - }, - }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { - t.Helper() - - mhf.EXPECT().CreateAuthenticator(mock.Anything, "bar", mock.Anything).Return(nil, nil) - }, - assert: func(t *testing.T, err error, _ *ruleFactory) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "no methods") - }, - }, - { - uc: "new factory with default rule, consisting of authorizer and contextualizer", - config: &config.Configuration{ - Default: &config.DefaultRule{ - Execute: []config.MechanismConfig{ - {"authenticator": "bar"}, - {"contextualizer": "baz"}, - }, - }, - }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { - t.Helper() - - mhf.EXPECT().CreateAuthenticator(mock.Anything, "bar", mock.Anything).Return(nil, nil) - mhf.EXPECT().CreateContextualizer(mock.Anything, "baz", mock.Anything).Return(nil, nil) - }, - assert: func(t *testing.T, err error, _ *ruleFactory) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "no methods") - }, - }, - { - uc: "new factory with default rule, consisting of authorizer, contextualizer and authorizer", - config: &config.Configuration{ - Default: &config.DefaultRule{ - Execute: []config.MechanismConfig{ - {"authenticator": "bar"}, - {"contextualizer": "baz"}, - {"authorizer": "zab"}, - }, - }, - }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { - t.Helper() - - mhf.EXPECT().CreateAuthenticator(mock.Anything, "bar", mock.Anything).Return(nil, nil) - mhf.EXPECT().CreateContextualizer(mock.Anything, "baz", mock.Anything).Return(nil, nil) - mhf.EXPECT().CreateAuthorizer(mock.Anything, "zab", mock.Anything).Return(nil, nil) - }, - assert: func(t *testing.T, err error, _ *ruleFactory) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "no methods") - }, - }, - { - uc: "new factory with default rule, consisting of authorizer and finalizer with error while expanding methods", - config: &config.Configuration{ - Default: &config.DefaultRule{ - Execute: []config.MechanismConfig{ - {"authenticator": "bar"}, - {"finalizer": "baz"}, - }, - Methods: 
[]string{"FOO", ""}, - }, - }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { - t.Helper() - - mhf.EXPECT().CreateAuthenticator(mock.Anything, "bar", mock.Anything).Return(nil, nil) - mhf.EXPECT().CreateFinalizer(mock.Anything, "baz", mock.Anything).Return(nil, nil) - }, - assert: func(t *testing.T, err error, _ *ruleFactory) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "failed to expand") - }, - }, - { - uc: "new factory with default rule, consisting of authorizer and finalizer without methods defined", - config: &config.Configuration{ - Default: &config.DefaultRule{ - Execute: []config.MechanismConfig{ - {"authenticator": "bar"}, - {"finalizer": "baz"}, - }, - }, - }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { - t.Helper() - - mhf.EXPECT().CreateAuthenticator(mock.Anything, "bar", mock.Anything).Return(nil, nil) - mhf.EXPECT().CreateFinalizer(mock.Anything, "baz", mock.Anything).Return(nil, nil) - }, - assert: func(t *testing.T, err error, _ *ruleFactory) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "no methods defined") - }, - }, { uc: "new factory with default rule, configured with all required elements", config: &config.Configuration{ @@ -427,10 +305,9 @@ func TestRuleFactoryNew(t *testing.T) { Execute: []config.MechanismConfig{ {"authenticator": "bar"}, }, - Methods: []string{"FOO"}, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator(mock.Anything, "bar", mock.Anything).Return(nil, nil) @@ -447,8 +324,7 @@ func TestRuleFactoryNew(t *testing.T) { assert.True(t, defRule.isDefault) assert.Equal(t, "default", defRule.id) assert.Equal(t, "config", defRule.srcID) - assert.Equal(t, config2.EncodedSlashesOff, defRule.encodedSlashesHandling) - assert.ElementsMatch(t, defRule.methods, []string{"FOO"}) + assert.Equal(t, config2.EncodedSlashesOff, defRule.slashesHandling) assert.Len(t, defRule.sc, 1) assert.Empty(t, defRule.sh) assert.Empty(t, defRule.fi) @@ -469,10 +345,9 @@ func TestRuleFactoryNew(t *testing.T) { {"error_handler": "foobar"}, {"error_handler": "barfoo"}, }, - Methods: []string{"FOO", "BAR"}, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator(mock.Anything, "bar", mock.Anything).Return(nil, nil) @@ -494,8 +369,7 @@ func TestRuleFactoryNew(t *testing.T) { assert.True(t, defRule.isDefault) assert.Equal(t, "default", defRule.id) assert.Equal(t, "config", defRule.srcID) - assert.Equal(t, config2.EncodedSlashesOff, defRule.encodedSlashesHandling) - assert.ElementsMatch(t, defRule.methods, []string{"FOO", "BAR"}) + assert.Equal(t, config2.EncodedSlashesOff, defRule.slashesHandling) assert.Len(t, defRule.sc, 1) assert.Len(t, defRule.sh, 2) assert.Len(t, defRule.fi, 1) @@ -507,9 +381,9 @@ func TestRuleFactoryNew(t *testing.T) { // GIVEN configureMocks := x.IfThenElse(tc.configureMocks != nil, tc.configureMocks, - func(t *testing.T, _ *mocks3.FactoryMock) { t.Helper() }) + func(t *testing.T, _ *mocks3.MechanismFactoryMock) { t.Helper() }) - handlerFactory := mocks3.NewFactoryMock(t) + handlerFactory := mocks3.NewMechanismFactoryMock(t) configureMocks(t, handlerFactory) // WHEN @@ -535,149 +409,73 @@ func TestRuleFactoryNew(t 
*testing.T) { func TestRuleFactoryCreateRule(t *testing.T) { t.Parallel() + trueValue := true + for _, tc := range []struct { uc string opMode config.OperationMode config config2.Rule defaultRule *ruleImpl - configureMocks func(t *testing.T, mhf *mocks3.FactoryMock) + configureMocks func(t *testing.T, mhf *mocks3.MechanismFactoryMock) assert func(t *testing.T, err error, rul *ruleImpl) }{ { - uc: "without default rule and with missing id", - config: config2.Rule{}, - assert: func(t *testing.T, err error, _ *ruleImpl) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "no ID defined") - }, - }, - { - uc: "in proxy mode, with id, but missing forward_to definition", + uc: "in proxy mode without forward_to definition", opMode: config.ProxyMode, - config: config2.Rule{ID: "foobar"}, - assert: func(t *testing.T, err error, _ *ruleImpl) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "no forward_to") + config: config2.Rule{ + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, }, - }, - { - uc: "in proxy mode, with id and empty forward_to definition", - opMode: config.ProxyMode, - config: config2.Rule{ID: "foobar", Backend: &config2.Backend{}}, assert: func(t *testing.T, err error, _ *ruleImpl) { t.Helper() require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "missing host") + assert.Contains(t, err.Error(), "requires forward_to") }, }, { - uc: "in proxy mode, with id and forward_to.host, but empty rewrite definition", - opMode: config.ProxyMode, + uc: "with error while creating method matcher", config: config2.Rule{ ID: "foobar", - Backend: &config2.Backend{ - Host: "foo.bar", - URLRewriter: &config2.URLRewriter{}, + Matcher: config2.Matcher{ + Routes: []config2.Route{{Path: "/foo/bar"}}, + Methods: []string{""}, + }, + Execute: []config.MechanismConfig{ + {"authenticator": "foo"}, }, }, - assert: func(t *testing.T, err error, _ *ruleImpl) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "rewrite is defined") - }, - }, - { - uc: "without default rule, with id, but without url", - config: config2.Rule{ID: "foobar"}, - assert: func(t *testing.T, err error, _ *ruleImpl) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "bad URL pattern") - }, - }, - { - uc: "without default rule, with id, but bad url pattern", - config: config2.Rule{ID: "foobar", RuleMatcher: config2.Matcher{URL: "?>?<*??"}}, - assert: func(t *testing.T, err error, _ *ruleImpl) { - t.Helper() - - require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "bad URL pattern") - }, - }, - { - uc: "with error while creating execute pipeline", - config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "regex"}, - Execute: []config.MechanismConfig{{"authenticator": "foo"}}, - }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { - t.Helper() - - mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(nil, testsupport.ErrTestPurpose) - }, - assert: func(t *testing.T, err error, _ *ruleImpl) { - t.Helper() - - require.Error(t, err) - assert.Equal(t, testsupport.ErrTestPurpose, err) - }, - }, - { - uc: "with error while 
creating on_error pipeline", - config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, - ErrorHandler: []config.MechanismConfig{{"error_handler": "foo"}}, - }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { - t.Helper() - - mhf.EXPECT().CreateErrorHandler("test", "foo", mock.Anything).Return(nil, testsupport.ErrTestPurpose) - }, - assert: func(t *testing.T, err error, _ *ruleImpl) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() - require.Error(t, err) - assert.Equal(t, testsupport.ErrTestPurpose, err) - }, - }, - { - uc: "without default rule and without any execute configuration", - config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "regex"}, + mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) }, assert: func(t *testing.T, err error, _ *ruleImpl) { t.Helper() require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) - assert.Contains(t, err.Error(), "no authenticator defined") + require.ErrorContains(t, err, "methods list contains empty values") }, }, { - uc: "without default rule and with only authenticator configured", + uc: "with error while creating route path params matcher", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, - Execute: []config.MechanismConfig{{"authenticator": "foo"}}, + ID: "foobar", + Matcher: config2.Matcher{ + Routes: []config2.Route{ + { + Path: "/foo/:bar", + PathParams: []config2.ParameterMatcher{{Name: "bar", Type: "foo", Value: "baz"}}, + }, + }, + }, + Execute: []config.MechanismConfig{ + {"authenticator": "foo"}, + }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) @@ -687,119 +485,96 @@ func TestRuleFactoryCreateRule(t *testing.T) { require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "no methods defined") + require.ErrorContains(t, err, "failed creating route '/foo/:bar'") }, }, { - uc: "without default rule and with only authenticator and contextualizer configured", + uc: "with error while creating host matcher", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, + ID: "foobar", + Matcher: config2.Matcher{ + Routes: []config2.Route{{Path: "/foo/bar"}}, + Hosts: []config2.HostMatcher{{Type: "regex", Value: "?>?<*??"}}, + }, Execute: []config.MechanismConfig{ {"authenticator": "foo"}, - {"contextualizer": "bar"}, }, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) - mhf.EXPECT().CreateContextualizer("test", "bar", mock.Anything).Return(&mocks5.ContextualizerMock{}, nil) }, assert: func(t *testing.T, err error, _ *ruleImpl) { t.Helper() require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "no methods defined") + require.ErrorContains(t, err, "failed to compile host matching expression") }, }, { - uc: "without default rule and with only authenticator, contextualizer and authorizer configured", + uc: "with 
error while creating execute pipeline", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "regex"}, - Execute: []config.MechanismConfig{ - {"authenticator": "foo"}, - {"contextualizer": "bar"}, - {"authorizer": "baz"}, - }, + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, + Execute: []config.MechanismConfig{{"authenticator": "foo"}}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() - mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) - mhf.EXPECT().CreateContextualizer("test", "bar", mock.Anything).Return(&mocks5.ContextualizerMock{}, nil) - mhf.EXPECT().CreateAuthorizer("test", "baz", mock.Anything).Return(&mocks4.AuthorizerMock{}, nil) + mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(nil, testsupport.ErrTestPurpose) }, assert: func(t *testing.T, err error, _ *ruleImpl) { t.Helper() require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "no methods defined") + assert.Equal(t, testsupport.ErrTestPurpose, err) }, }, { - uc: "without default rule and with authenticator and finalizer configured, but with error while expanding methods", + uc: "with error while creating on_error pipeline", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, - Execute: []config.MechanismConfig{ - {"authenticator": "foo"}, - {"finalizer": "bar"}, - }, - Methods: []string{"FOO", ""}, + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, + ErrorHandler: []config.MechanismConfig{{"error_handler": "foo"}}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() - mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) - mhf.EXPECT().CreateFinalizer("test", "bar", mock.Anything).Return(&mocks7.FinalizerMock{}, nil) + mhf.EXPECT().CreateErrorHandler("test", "foo", mock.Anything).Return(nil, testsupport.ErrTestPurpose) }, assert: func(t *testing.T, err error, _ *ruleImpl) { t.Helper() require.Error(t, err) - require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "failed to expand") + assert.Equal(t, testsupport.ErrTestPurpose, err) }, }, { - uc: "without default rule and with authenticator and finalizer configured, but without methods", + uc: "without default rule and without any execute configuration", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, - Execute: []config.MechanismConfig{ - {"authenticator": "foo"}, - {"finalizer": "bar"}, - }, - }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { - t.Helper() - - mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) - mhf.EXPECT().CreateFinalizer("test", "bar", mock.Anything).Return(&mocks7.FinalizerMock{}, nil) + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, }, assert: func(t *testing.T, err error, _ *ruleImpl) { t.Helper() require.Error(t, err) require.ErrorIs(t, err, heimdall.ErrConfiguration) - require.ErrorContains(t, err, "no methods defined") + assert.Contains(t, err.Error(), "no authenticator defined") }, }, { - uc: 
"without default rule but with minimum required configuration in decision mode", + uc: "without default rule and minimum required configuration in decision mode", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, Execute: []config.MechanismConfig{ {"authenticator": "foo"}, }, - Methods: []string{"FOO", "BAR"}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) @@ -813,9 +588,10 @@ func TestRuleFactoryCreateRule(t *testing.T) { assert.Equal(t, "test", rul.srcID) assert.False(t, rul.isDefault) assert.Equal(t, "foobar", rul.id) - assert.Equal(t, config2.EncodedSlashesOff, rul.encodedSlashesHandling) - assert.NotNil(t, rul.urlMatcher) - assert.ElementsMatch(t, rul.methods, []string{"FOO", "BAR"}) + assert.Equal(t, config2.EncodedSlashesOff, rul.slashesHandling) + assert.Len(t, rul.Routes(), 1) + assert.Equal(t, rul, rul.Routes()[0].Rule()) + assert.Equal(t, "/foo/bar", rul.Routes()[0].Path()) assert.Len(t, rul.sc, 1) assert.Empty(t, rul.sh) assert.Empty(t, rul.fi) @@ -823,18 +599,17 @@ func TestRuleFactoryCreateRule(t *testing.T) { }, }, { - uc: "without default rule but with minimum required configuration in proxy mode", + uc: "without default rule and minimum required configuration in proxy mode", opMode: config.ProxyMode, config: config2.Rule{ - ID: "foobar", - Backend: &config2.Backend{Host: "foo.bar"}, - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, + ID: "foobar", + Backend: &config2.Backend{Host: "foo.bar"}, + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, Execute: []config.MechanismConfig{ {"authenticator": "foo"}, }, - Methods: []string{"FOO", "BAR"}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) @@ -848,9 +623,10 @@ func TestRuleFactoryCreateRule(t *testing.T) { assert.Equal(t, "test", rul.srcID) assert.False(t, rul.isDefault) assert.Equal(t, "foobar", rul.id) - assert.Equal(t, config2.EncodedSlashesOff, rul.encodedSlashesHandling) - assert.NotNil(t, rul.urlMatcher) - assert.ElementsMatch(t, rul.methods, []string{"FOO", "BAR"}) + assert.Equal(t, config2.EncodedSlashesOff, rul.slashesHandling) + assert.Len(t, rul.Routes(), 1) + assert.Equal(t, rul, rul.Routes()[0].Rule()) + assert.Equal(t, "/foo/bar", rul.Routes()[0].Path()) assert.Len(t, rul.sc, 1) assert.Empty(t, rul.sh) assert.Empty(t, rul.fi) @@ -859,17 +635,16 @@ func TestRuleFactoryCreateRule(t *testing.T) { }, }, { - uc: "with default rule and with id and url only", + uc: "with default rule and regular rule with id and a single route only", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, }, defaultRule: &ruleImpl{ - methods: []string{"FOO"}, - sc: compositeSubjectCreator{&mocks.SubjectCreatorMock{}}, - sh: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, - fi: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, - eh: compositeErrorHandler{&mocks.ErrorHandlerMock{}}, + sc: 
compositeSubjectCreator{&mocks.SubjectCreatorMock{}}, + sh: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, + fi: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, + eh: compositeErrorHandler{&mocks.ErrorHandlerMock{}}, }, assert: func(t *testing.T, err error, rul *ruleImpl) { t.Helper() @@ -880,8 +655,9 @@ func TestRuleFactoryCreateRule(t *testing.T) { assert.Equal(t, "test", rul.srcID) assert.False(t, rul.isDefault) assert.Equal(t, "foobar", rul.id) - assert.NotNil(t, rul.urlMatcher) - assert.ElementsMatch(t, rul.methods, []string{"FOO"}) + assert.Len(t, rul.Routes(), 1) + assert.Equal(t, rul, rul.Routes()[0].Rule()) + assert.Equal(t, "/foo/bar", rul.Routes()[0].Path()) assert.Len(t, rul.sc, 1) assert.Len(t, rul.sh, 1) assert.Len(t, rul.fi, 1) @@ -889,11 +665,26 @@ func TestRuleFactoryCreateRule(t *testing.T) { }, }, { - uc: "with default rule and with all attributes defined by the rule itself in decision mode", + uc: "with default rule and with all attributes defined by the regular rule itself in decision mode", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, - EncodedSlashesHandling: config2.EncodedSlashesNoDecode, + ID: "foobar", + Matcher: config2.Matcher{ + Routes: []config2.Route{ + { + Path: "/foo/:resource", + PathParams: []config2.ParameterMatcher{{Name: "resource", Type: "regex", Value: "(bar|baz)"}}, + }, + { + Path: "/bar/:resource", + PathParams: []config2.ParameterMatcher{{Name: "resource", Type: "glob", Value: "{a,b}"}}, + }, + }, + BacktrackingEnabled: &trueValue, + Scheme: "https", + Methods: []string{"BAR", "BAZ"}, + Hosts: []config2.HostMatcher{{Type: "glob", Value: "**.example.com"}}, + }, + EncodedSlashesHandling: config2.EncodedSlashesOnNoDecode, Execute: []config.MechanismConfig{ {"authenticator": "foo"}, {"contextualizer": "bar"}, @@ -903,16 +694,14 @@ func TestRuleFactoryCreateRule(t *testing.T) { ErrorHandler: []config.MechanismConfig{ {"error_handler": "foo"}, }, - Methods: []string{"BAR", "BAZ"}, }, defaultRule: &ruleImpl{ - methods: []string{"FOO"}, - sc: compositeSubjectCreator{&mocks.SubjectCreatorMock{}}, - sh: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, - fi: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, - eh: compositeErrorHandler{&mocks.ErrorHandlerMock{}}, + sc: compositeSubjectCreator{&mocks.SubjectCreatorMock{}}, + sh: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, + fi: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, + eh: compositeErrorHandler{&mocks.ErrorHandlerMock{}}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything). 
@@ -935,9 +724,12 @@ func TestRuleFactoryCreateRule(t *testing.T) { assert.Equal(t, "test", rul.srcID) assert.False(t, rul.isDefault) assert.Equal(t, "foobar", rul.id) - assert.Equal(t, config2.EncodedSlashesNoDecode, rul.encodedSlashesHandling) - assert.NotNil(t, rul.urlMatcher) - assert.ElementsMatch(t, rul.methods, []string{"BAR", "BAZ"}) + assert.Equal(t, config2.EncodedSlashesOnNoDecode, rul.slashesHandling) + assert.Len(t, rul.Routes(), 2) + assert.Equal(t, rul, rul.Routes()[0].Rule()) + assert.Equal(t, "/foo/:resource", rul.Routes()[0].Path()) + assert.Equal(t, rul, rul.Routes()[1].Rule()) + assert.Equal(t, "/bar/:resource", rul.Routes()[1].Path()) // nil checks above mean the responses from the mockHandlerFactory are used // and not the values from the default rule @@ -956,8 +748,22 @@ func TestRuleFactoryCreateRule(t *testing.T) { uc: "with default rule and with all attributes defined by the rule itself in proxy mode", opMode: config.ProxyMode, config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, + ID: "foobar", + Matcher: config2.Matcher{ + Routes: []config2.Route{ + { + Path: "/foo/:resource", + PathParams: []config2.ParameterMatcher{{Name: "resource", Type: "regex", Value: "(bar|baz)"}}, + }, + { + Path: "/bar/:resource", + PathParams: []config2.ParameterMatcher{{Name: "resource", Type: "glob", Value: "{a,b}"}}, + }, + }, + Scheme: "https", + Methods: []string{"BAR", "BAZ"}, + Hosts: []config2.HostMatcher{{Type: "glob", Value: "**.example.com"}}, + }, EncodedSlashesHandling: config2.EncodedSlashesOn, Backend: &config2.Backend{ Host: "bar.foo", @@ -977,16 +783,14 @@ func TestRuleFactoryCreateRule(t *testing.T) { ErrorHandler: []config.MechanismConfig{ {"error_handler": "foo"}, }, - Methods: []string{"BAR", "BAZ"}, }, defaultRule: &ruleImpl{ - methods: []string{"FOO"}, - sc: compositeSubjectCreator{&mocks.SubjectCreatorMock{}}, - sh: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, - fi: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, - eh: compositeErrorHandler{&mocks.ErrorHandlerMock{}}, + sc: compositeSubjectCreator{&mocks.SubjectCreatorMock{}}, + sh: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, + fi: compositeSubjectHandler{&mocks.SubjectHandlerMock{}}, + eh: compositeErrorHandler{&mocks.ErrorHandlerMock{}}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything). 
@@ -1009,9 +813,12 @@ func TestRuleFactoryCreateRule(t *testing.T) { assert.Equal(t, "test", rul.srcID) assert.False(t, rul.isDefault) assert.Equal(t, "foobar", rul.id) - assert.Equal(t, config2.EncodedSlashesOn, rul.encodedSlashesHandling) - assert.NotNil(t, rul.urlMatcher) - assert.ElementsMatch(t, rul.methods, []string{"BAR", "BAZ"}) + assert.Equal(t, config2.EncodedSlashesOn, rul.slashesHandling) + assert.Len(t, rul.Routes(), 2) + assert.Equal(t, rul, rul.Routes()[0].Rule()) + assert.Equal(t, "/foo/:resource", rul.Routes()[0].Path()) + assert.Equal(t, rul, rul.Routes()[1].Rule()) + assert.Equal(t, "/bar/:resource", rul.Routes()[1].Path()) assert.Equal(t, "https://bar.foo/baz/bar?foo=bar", rul.backend.CreateURL(&url.URL{ Scheme: "http", Host: "foo.bar:8888", @@ -1034,17 +841,16 @@ func TestRuleFactoryCreateRule(t *testing.T) { }, }, { - uc: "with conditional execution configuration type error", + uc: "with conditional execution configuration type error in the regular pipeline", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, Execute: []config.MechanismConfig{ {"authenticator": "foo"}, {"finalizer": "bar", "if": 1}, }, - Methods: []string{"FOO"}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) @@ -1060,15 +866,14 @@ func TestRuleFactoryCreateRule(t *testing.T) { { uc: "with empty conditional execution configuration", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, Execute: []config.MechanismConfig{ {"authenticator": "foo"}, {"finalizer": "bar", "if": ""}, }, - Methods: []string{"FOO"}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything).Return(&mocks2.AuthenticatorMock{}, nil) @@ -1084,8 +889,8 @@ func TestRuleFactoryCreateRule(t *testing.T) { { uc: "with conditional execution for some mechanisms", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, Execute: []config.MechanismConfig{ {"authenticator": "foo"}, {"authorizer": "bar", "if": "false"}, @@ -1093,9 +898,8 @@ func TestRuleFactoryCreateRule(t *testing.T) { {"authorizer": "baz"}, {"finalizer": "bar", "if": "true"}, }, - Methods: []string{"FOO"}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything). 
@@ -1116,8 +920,9 @@ func TestRuleFactoryCreateRule(t *testing.T) { assert.Equal(t, "test", rul.srcID) assert.False(t, rul.isDefault) assert.Equal(t, "foobar", rul.id) - assert.NotNil(t, rul.urlMatcher) - assert.ElementsMatch(t, rul.methods, []string{"FOO"}) + assert.Len(t, rul.Routes(), 1) + assert.Equal(t, rul, rul.Routes()[0].Rule()) + assert.Equal(t, "/foo/bar", rul.Routes()[0].Path()) require.Len(t, rul.sc, 1) assert.NotNil(t, rul.sc[0]) @@ -1147,11 +952,47 @@ func TestRuleFactoryCreateRule(t *testing.T) { require.Empty(t, rul.eh) }, }, + { + uc: "with bad conditional expression in the error pipeline", + config: config2.Rule{ + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, + Execute: []config.MechanismConfig{ + {"authenticator": "foo"}, + {"authorizer": "bar"}, + {"finalizer": "baz"}, + }, + ErrorHandler: []config.MechanismConfig{ + {"error_handler": "foo", "if": "true", "config": map[string]any{}}, + {"error_handler": "bar", "if": 1, "config": map[string]any{}}, + }, + }, + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { + t.Helper() + + mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything). + Return(&mocks2.AuthenticatorMock{}, nil) + mhf.EXPECT().CreateAuthorizer("test", "bar", mock.Anything). + Return(&mocks4.AuthorizerMock{}, nil) + mhf.EXPECT().CreateFinalizer("test", "baz", mock.Anything). + Return(&mocks7.FinalizerMock{}, nil) + mhf.EXPECT().CreateErrorHandler("test", "foo", config.MechanismConfig{}).Return(&mocks6.ErrorHandlerMock{}, nil) + }, + assert: func(t *testing.T, err error, _ *ruleImpl) { + t.Helper() + + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrConfiguration) + require.ErrorContains(t, err, "unexpected type") + }, + }, { uc: "with conditional execution for error handler", config: config2.Rule{ - ID: "foobar", - RuleMatcher: config2.Matcher{URL: "http://foo.bar", Strategy: "glob"}, + ID: "foobar", + Matcher: config2.Matcher{Routes: []config2.Route{{Path: "/foo/bar"}}}, Execute: []config.MechanismConfig{ {"authenticator": "foo"}, {"authorizer": "bar"}, @@ -1161,9 +1002,8 @@ func TestRuleFactoryCreateRule(t *testing.T) { {"error_handler": "foo", "if": "true", "config": map[string]any{}}, {"error_handler": "bar", "if": "false", "config": map[string]any{}}, }, - Methods: []string{"FOO"}, }, - configureMocks: func(t *testing.T, mhf *mocks3.FactoryMock) { + configureMocks: func(t *testing.T, mhf *mocks3.MechanismFactoryMock) { t.Helper() mhf.EXPECT().CreateAuthenticator("test", "foo", mock.Anything). @@ -1172,12 +1012,8 @@ func TestRuleFactoryCreateRule(t *testing.T) { Return(&mocks4.AuthorizerMock{}, nil) mhf.EXPECT().CreateFinalizer("test", "baz", mock.Anything). 
Return(&mocks7.FinalizerMock{}, nil) - mhf.EXPECT().CreateErrorHandler("test", "foo", - mock.MatchedBy(func(conf map[string]any) bool { return conf["if"] == "true" }), - ).Return(&mocks6.ErrorHandlerMock{}, nil) - mhf.EXPECT().CreateErrorHandler("test", "bar", - mock.MatchedBy(func(conf map[string]any) bool { return conf["if"] == "false" }), - ).Return(&mocks6.ErrorHandlerMock{}, nil) + mhf.EXPECT().CreateErrorHandler("test", "foo", config.MechanismConfig{}).Return(&mocks6.ErrorHandlerMock{}, nil) + mhf.EXPECT().CreateErrorHandler("test", "bar", config.MechanismConfig{}).Return(&mocks6.ErrorHandlerMock{}, nil) }, assert: func(t *testing.T, err error, rul *ruleImpl) { t.Helper() @@ -1188,8 +1024,9 @@ func TestRuleFactoryCreateRule(t *testing.T) { assert.Equal(t, "test", rul.srcID) assert.False(t, rul.isDefault) assert.Equal(t, "foobar", rul.id) - assert.NotNil(t, rul.urlMatcher) - assert.ElementsMatch(t, rul.methods, []string{"FOO"}) + assert.Len(t, rul.Routes(), 1) + assert.Equal(t, rul, rul.Routes()[0].Rule()) + assert.Equal(t, "/foo/bar", rul.Routes()[0].Path()) require.Len(t, rul.sc, 1) assert.NotNil(t, rul.sc[0]) @@ -1214,9 +1051,9 @@ func TestRuleFactoryCreateRule(t *testing.T) { // GIVEN configureMocks := x.IfThenElse(tc.configureMocks != nil, tc.configureMocks, - func(t *testing.T, _ *mocks3.FactoryMock) { t.Helper() }) + func(t *testing.T, _ *mocks3.MechanismFactoryMock) { t.Helper() }) - handlerFactory := mocks3.NewFactoryMock(t) + handlerFactory := mocks3.NewMechanismFactoryMock(t) configureMocks(t, handlerFactory) factory := &ruleFactory{ @@ -1284,157 +1121,3 @@ func TestRuleFactoryConfigExtraction(t *testing.T) { }) } } - -func TestRuleFactoryProxyModeApplicability(t *testing.T) { - t.Parallel() - - for _, tc := range []struct { - uc string - ruleConfig config2.Rule - shouldError bool - }{ - { - uc: "no upstream url factory", - ruleConfig: config2.Rule{}, - shouldError: true, - }, - { - uc: "no host defined", - ruleConfig: config2.Rule{Backend: &config2.Backend{}}, - shouldError: true, - }, - { - uc: "with host but no rewrite options", - ruleConfig: config2.Rule{Backend: &config2.Backend{Host: "foo.bar"}}, - }, - { - uc: "with host and empty rewrite option", - ruleConfig: config2.Rule{ - Backend: &config2.Backend{ - Host: "foo.bar", - URLRewriter: &config2.URLRewriter{}, - }, - }, - shouldError: true, - }, - { - uc: "with host and scheme rewrite option", - ruleConfig: config2.Rule{ - Backend: &config2.Backend{ - Host: "foo.bar", - URLRewriter: &config2.URLRewriter{Scheme: "https"}, - }, - }, - }, - { - uc: "with host and strip path prefix rewrite option", - ruleConfig: config2.Rule{ - Backend: &config2.Backend{ - Host: "foo.bar", - URLRewriter: &config2.URLRewriter{PathPrefixToCut: "/foo"}, - }, - }, - }, - { - uc: "with host and add path prefix rewrite option", - ruleConfig: config2.Rule{ - Backend: &config2.Backend{ - Host: "foo.bar", - URLRewriter: &config2.URLRewriter{PathPrefixToAdd: "/foo"}, - }, - }, - }, - { - uc: "with host and empty strip query parameter rewrite option", - ruleConfig: config2.Rule{ - Backend: &config2.Backend{ - Host: "foo.bar", - URLRewriter: &config2.URLRewriter{QueryParamsToRemove: []string{}}, - }, - }, - shouldError: true, - }, - { - uc: "with host and strip query parameter rewrite option", - ruleConfig: config2.Rule{ - Backend: &config2.Backend{ - Host: "foo.bar", - URLRewriter: &config2.URLRewriter{QueryParamsToRemove: []string{"foo"}}, - }, - }, - }, - } { - t.Run(tc.uc, func(t *testing.T) { - // WHEN - err := 
checkProxyModeApplicability("test", tc.ruleConfig) - - // THEN - if tc.shouldError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestExpandHTTPMethods(t *testing.T) { - t.Parallel() - - for _, tc := range []struct { - uc string - configured []string - expected []string - shouldError bool - }{ - { - uc: "empty configuration", - }, - { - uc: "empty method in list", - configured: []string{"FOO", ""}, - shouldError: true, - }, - { - uc: "duplicates should be removed", - configured: []string{"BAR", "BAZ", "BAZ", "FOO", "FOO", "ZAB"}, - expected: []string{"BAR", "BAZ", "FOO", "ZAB"}, - }, - { - uc: "only ALL configured", - configured: []string{"ALL"}, - expected: []string{ - http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, - http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace, - }, - }, - { - uc: "ALL without POST and TRACE", - configured: []string{"ALL", "!POST", "!TRACE"}, - expected: []string{ - http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, - http.MethodOptions, http.MethodPatch, http.MethodPut, - }, - }, - { - uc: "ALL with duplicates and without POST and TRACE", - configured: []string{"ALL", "GET", "!POST", "!TRACE", "!TRACE"}, - expected: []string{ - http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, - http.MethodOptions, http.MethodPatch, http.MethodPut, - }, - }, - } { - t.Run(tc.uc, func(t *testing.T) { - // WHEN - res, err := expandHTTPMethods(tc.configured) - - // THEN - if tc.shouldError { - require.Error(t, err) - } else { - require.Equal(t, tc.expected, res) - } - }) - } -} diff --git a/internal/rules/rule_impl.go b/internal/rules/rule_impl.go index 7e882d072..6bbe34ec5 100644 --- a/internal/rules/rule_impl.go +++ b/internal/rules/rule_impl.go @@ -17,32 +17,31 @@ package rules import ( - "fmt" + "bytes" "net/url" - "slices" "strings" "github.com/rs/zerolog" "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/config" - "github.com/dadrus/heimdall/internal/rules/patternmatcher" "github.com/dadrus/heimdall/internal/rules/rule" + "github.com/dadrus/heimdall/internal/x/errorchain" ) type ruleImpl struct { - id string - encodedSlashesHandling config.EncodedSlashesHandling - urlMatcher patternmatcher.PatternMatcher - backend *config.Backend - methods []string - srcID string - isDefault bool - hash []byte - sc compositeSubjectCreator - sh compositeSubjectHandler - fi compositeSubjectHandler - eh compositeErrorHandler + id string + srcID string + isDefault bool + allowsBacktracking bool + hash []byte + routes []rule.Route + slashesHandling config.EncodedSlashesHandling + backend *config.Backend + sc compositeSubjectCreator + sh compositeSubjectHandler + fi compositeSubjectHandler + eh compositeErrorHandler } func (r *ruleImpl) Execute(ctx heimdall.Context) (rule.Backend, error) { @@ -54,6 +53,25 @@ func (r *ruleImpl) Execute(ctx heimdall.Context) (rule.Backend, error) { logger.Info().Str("_src", r.srcID).Str("_id", r.id).Msg("Executing rule") } + request := ctx.Request() + + switch r.slashesHandling { //nolint:exhaustive + case config.EncodedSlashesOn: + // unescape path + request.URL.RawPath = "" + case config.EncodedSlashesOff: + if strings.Contains(request.URL.RawPath, "%2F") { + return nil, errorchain.NewWithMessage(heimdall.ErrArgument, + "path contains encoded slash, which is not allowed") + } + } + + // unescape captures + captures := request.URL.Captures 
+ for k, v := range captures { + captures[k] = unescape(v, r.slashesHandling) + } + // authenticators sub, err := r.sc.Execute(ctx) if err != nil { @@ -73,54 +91,76 @@ func (r *ruleImpl) Execute(ctx heimdall.Context) (rule.Backend, error) { var upstream rule.Backend if r.backend != nil { - targetURL := *ctx.Request().URL - if r.encodedSlashesHandling == config.EncodedSlashesOn && len(targetURL.RawPath) != 0 { - targetURL.RawPath = "" - } - upstream = &backend{ - targetURL: r.backend.CreateURL(&targetURL), + targetURL: r.backend.CreateURL(&request.URL.URL), } } return upstream, nil } -func (r *ruleImpl) MatchesURL(requestURL *url.URL) bool { - var path string +func (r *ruleImpl) ID() string { return r.id } - switch r.encodedSlashesHandling { - case config.EncodedSlashesOff: - if strings.Contains(requestURL.RawPath, "%2F") { - return false - } +func (r *ruleImpl) SrcID() string { return r.srcID } - path = requestURL.Path - case config.EncodedSlashesNoDecode: - if len(requestURL.RawPath) != 0 { - path = strings.ReplaceAll(requestURL.RawPath, "%2F", "$$$escaped-slash$$$") - path, _ = url.PathUnescape(path) - path = strings.ReplaceAll(path, "$$$escaped-slash$$$", "%2F") +func (r *ruleImpl) SameAs(other rule.Rule) bool { + return r.ID() == other.ID() && r.SrcID() == other.SrcID() +} - break - } +func (r *ruleImpl) Routes() []rule.Route { return r.routes } - fallthrough - default: - path = requestURL.Path - } +func (r *ruleImpl) EqualTo(other rule.Rule) bool { + return r.ID() == other.ID() && + r.SrcID() == other.SrcID() && + bytes.Equal(r.hash, other.(*ruleImpl).hash) // nolint: forcetypeassert +} - return r.urlMatcher.Match(fmt.Sprintf("%s://%s%s", requestURL.Scheme, requestURL.Host, path)) +func (r *ruleImpl) AllowsBacktracking() bool { return r.allowsBacktracking } + +type routeImpl struct { + rule *ruleImpl + path string + matcher RouteMatcher } -func (r *ruleImpl) MatchesMethod(method string) bool { return slices.Contains(r.methods, method) } +func (r *routeImpl) Matches(ctx heimdall.Context, keys, values []string) bool { + logger := zerolog.Ctx(ctx.AppContext()).With(). + Str("_source", r.rule.srcID). + Str("_id", r.rule.id). + Str("route", r.path). 
+ Logger() -func (r *ruleImpl) ID() string { return r.id } + logger.Debug().Msg("Matching rule") -func (r *ruleImpl) SrcID() string { return r.srcID } + if err := r.matcher.Matches(ctx.Request(), keys, values); err != nil { + logger.Debug().Err(err).Msg("Request does not satisfy matching conditions") + + return false + } + + logger.Debug().Msg("Rule matched") + + return true +} + +func (r *routeImpl) Path() string { return r.path } + +func (r *routeImpl) Rule() rule.Rule { return r.rule } type backend struct { targetURL *url.URL } func (b *backend) URL() *url.URL { return b.targetURL } + +func unescape(value string, handling config.EncodedSlashesHandling) string { + if handling == config.EncodedSlashesOn { + unescaped, _ := url.PathUnescape(value) + + return unescaped + } + + unescaped, _ := url.PathUnescape(strings.ReplaceAll(value, "%2F", "$$$escaped-slash$$$")) + + return strings.ReplaceAll(unescaped, "$$$escaped-slash$$$", "%2F") +} diff --git a/internal/rules/rule_impl_test.go b/internal/rules/rule_impl_test.go index 4374fcb81..a1e3d9d17 100644 --- a/internal/rules/rule_impl_test.go +++ b/internal/rules/rule_impl_test.go @@ -29,206 +29,11 @@ import ( "github.com/dadrus/heimdall/internal/rules/config" "github.com/dadrus/heimdall/internal/rules/mechanisms/subject" "github.com/dadrus/heimdall/internal/rules/mocks" - "github.com/dadrus/heimdall/internal/rules/patternmatcher" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/x" "github.com/dadrus/heimdall/internal/x/testsupport" ) -func TestRuleMatchMethod(t *testing.T) { - t.Parallel() - - for _, tc := range []struct { - uc string - methods []string - toBeMatched string - assert func(t *testing.T, matched bool) - }{ - { - uc: "matches", - methods: []string{"FOO", "BAR"}, - toBeMatched: "BAR", - assert: func(t *testing.T, matched bool) { - t.Helper() - - assert.True(t, matched) - }, - }, - { - uc: "doesn't match", - methods: []string{"FOO", "BAR"}, - toBeMatched: "BAZ", - assert: func(t *testing.T, matched bool) { - t.Helper() - - assert.False(t, matched) - }, - }, - } { - t.Run("case="+tc.uc, func(t *testing.T) { - // GIVEN - rul := &ruleImpl{methods: tc.methods} - - // WHEN - matched := rul.MatchesMethod(tc.toBeMatched) - - // THEN - tc.assert(t, matched) - }) - } -} - -func TestRuleMatchURL(t *testing.T) { - t.Parallel() - - for _, tc := range []struct { - uc string - slashHandling config.EncodedSlashesHandling - matcher func(t *testing.T) patternmatcher.PatternMatcher - toBeMatched string - assert func(t *testing.T, matched bool) - }{ - { - uc: "matches", - matcher: func(t *testing.T) patternmatcher.PatternMatcher { - t.Helper() - - matcher, err := patternmatcher.NewPatternMatcher("glob", "http://foo.bar/baz") - require.NoError(t, err) - - return matcher - }, - toBeMatched: "http://foo.bar/baz", - assert: func(t *testing.T, matched bool) { - t.Helper() - - assert.True(t, matched) - }, - }, - { - uc: "matches with urlencoded path fragments", - matcher: func(t *testing.T) patternmatcher.PatternMatcher { - t.Helper() - - matcher, err := patternmatcher.NewPatternMatcher("glob", "http://foo.bar/[id]/baz") - require.NoError(t, err) - - return matcher - }, - toBeMatched: "http://foo.bar/%5Bid%5D/baz", - assert: func(t *testing.T, matched bool) { - t.Helper() - - assert.True(t, matched) - }, - }, - { - uc: "doesn't match with urlencoded slash in path", - matcher: func(t *testing.T) patternmatcher.PatternMatcher { - t.Helper() - - matcher, err := 
patternmatcher.NewPatternMatcher("glob", "http://foo.bar/foo%2Fbaz") - require.NoError(t, err) - - return matcher - }, - toBeMatched: "http://foo.bar/foo%2Fbaz", - assert: func(t *testing.T, matched bool) { - t.Helper() - - assert.False(t, matched) - }, - }, - { - uc: "matches with urlencoded slash in path if allowed with decoding", - slashHandling: config.EncodedSlashesOn, - matcher: func(t *testing.T) patternmatcher.PatternMatcher { - t.Helper() - - matcher, err := patternmatcher.NewPatternMatcher("glob", "http://foo.bar/foo/baz/[id]") - require.NoError(t, err) - - return matcher - }, - toBeMatched: "http://foo.bar/foo%2Fbaz/%5Bid%5D", - assert: func(t *testing.T, matched bool) { - t.Helper() - - assert.True(t, matched) - }, - }, - { - uc: "matches with urlencoded slash in path if allowed without decoding", - slashHandling: config.EncodedSlashesNoDecode, - matcher: func(t *testing.T) patternmatcher.PatternMatcher { - t.Helper() - - matcher, err := patternmatcher.NewPatternMatcher("glob", "http://foo.bar/foo%2Fbaz/[id]") - require.NoError(t, err) - - return matcher - }, - toBeMatched: "http://foo.bar/foo%2Fbaz/%5Bid%5D", - assert: func(t *testing.T, matched bool) { - t.Helper() - - assert.True(t, matched) - }, - }, - { - uc: "doesn't match", - matcher: func(t *testing.T) patternmatcher.PatternMatcher { - t.Helper() - - matcher, err := patternmatcher.NewPatternMatcher("glob", "http://foo.bar/baz") - require.NoError(t, err) - - return matcher - }, - toBeMatched: "https://foo.bar/baz", - assert: func(t *testing.T, matched bool) { - t.Helper() - - assert.False(t, matched) - }, - }, - { - uc: "query params are ignored while matching", - matcher: func(t *testing.T) patternmatcher.PatternMatcher { - t.Helper() - - matcher, err := patternmatcher.NewPatternMatcher("glob", "http://foo.bar/baz") - require.NoError(t, err) - - return matcher - }, - toBeMatched: "https://foo.bar/baz?foo=bar", - assert: func(t *testing.T, matched bool) { - t.Helper() - - assert.False(t, matched) - }, - }, - } { - t.Run("case="+tc.uc, func(t *testing.T) { - // GIVEN - rul := &ruleImpl{ - urlMatcher: tc.matcher(t), - encodedSlashesHandling: x.IfThenElse(len(tc.slashHandling) != 0, tc.slashHandling, config.EncodedSlashesOff), - } - - tbmu, err := url.Parse(tc.toBeMatched) - require.NoError(t, err) - - // WHEN - matched := rul.MatchesURL(tbmu) - - // THEN - tc.assert(t, matched) - }) - } -} - func TestRuleExecute(t *testing.T) { t.Parallel() @@ -244,7 +49,7 @@ func TestRuleExecute(t *testing.T) { finalizer *mocks.SubjectHandlerMock, errHandler *mocks.ErrorHandlerMock, ) - assert func(t *testing.T, err error, backend rule.Backend) + assert func(t *testing.T, err error, backend rule.Backend, captures map[string]string) }{ { uc: "authenticator fails, but error handler succeeds", @@ -254,12 +59,13 @@ func TestRuleExecute(t *testing.T) { ) { t.Helper() + ctx.EXPECT().Request().Return(&heimdall.Request{URL: &heimdall.URL{}}) + authenticator.EXPECT().Execute(ctx).Return(nil, testsupport.ErrTestPurpose) authenticator.EXPECT().IsFallbackOnErrorAllowed().Return(false) - errHandler.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(true) errHandler.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(nil) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, _ map[string]string) { t.Helper() require.NoError(t, err) @@ -274,12 +80,13 @@ func TestRuleExecute(t *testing.T) { ) { t.Helper() + ctx.EXPECT().Request().Return(&heimdall.Request{URL: 
&heimdall.URL{}}) + authenticator.EXPECT().Execute(ctx).Return(nil, testsupport.ErrTestPurpose) authenticator.EXPECT().IsFallbackOnErrorAllowed().Return(false) - errHandler.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(true) errHandler.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(testsupport.ErrTestPurpose2) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, _ map[string]string) { t.Helper() require.Error(t, err) @@ -295,15 +102,16 @@ func TestRuleExecute(t *testing.T) { ) { t.Helper() + ctx.EXPECT().Request().Return(&heimdall.Request{URL: &heimdall.URL{}}) + sub := &subject.Subject{ID: "Foo"} authenticator.EXPECT().Execute(ctx).Return(sub, nil) authorizer.EXPECT().Execute(ctx, sub).Return(testsupport.ErrTestPurpose) authorizer.EXPECT().ContinueOnError().Return(false) - errHandler.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(true) errHandler.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(nil) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, _ map[string]string) { t.Helper() require.NoError(t, err) @@ -318,15 +126,16 @@ func TestRuleExecute(t *testing.T) { ) { t.Helper() + ctx.EXPECT().Request().Return(&heimdall.Request{URL: &heimdall.URL{}}) + sub := &subject.Subject{ID: "Foo"} authenticator.EXPECT().Execute(ctx).Return(sub, nil) authorizer.EXPECT().Execute(ctx, sub).Return(testsupport.ErrTestPurpose) authorizer.EXPECT().ContinueOnError().Return(false) - errHandler.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(true) errHandler.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(testsupport.ErrTestPurpose2) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, _ map[string]string) { t.Helper() require.Error(t, err) @@ -342,16 +151,17 @@ func TestRuleExecute(t *testing.T) { ) { t.Helper() + ctx.EXPECT().Request().Return(&heimdall.Request{URL: &heimdall.URL{}}) + sub := &subject.Subject{ID: "Foo"} authenticator.EXPECT().Execute(ctx).Return(sub, nil) authorizer.EXPECT().Execute(ctx, sub).Return(nil) finalizer.EXPECT().Execute(ctx, sub).Return(testsupport.ErrTestPurpose) finalizer.EXPECT().ContinueOnError().Return(false) - errHandler.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(true) errHandler.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(nil) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, _ map[string]string) { t.Helper() require.NoError(t, err) @@ -366,16 +176,17 @@ func TestRuleExecute(t *testing.T) { ) { t.Helper() + ctx.EXPECT().Request().Return(&heimdall.Request{URL: &heimdall.URL{}}) + sub := &subject.Subject{ID: "Foo"} authenticator.EXPECT().Execute(ctx).Return(sub, nil) authorizer.EXPECT().Execute(ctx, sub).Return(nil) finalizer.EXPECT().Execute(ctx, sub).Return(testsupport.ErrTestPurpose) finalizer.EXPECT().ContinueOnError().Return(false) - errHandler.EXPECT().CanExecute(ctx, testsupport.ErrTestPurpose).Return(true) errHandler.EXPECT().Execute(ctx, testsupport.ErrTestPurpose).Return(testsupport.ErrTestPurpose2) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, _ map[string]string) { t.Helper() require.Error(t, err) @@ -384,32 +195,30 @@ func TestRuleExecute(t *testing.T) { }, }, { - uc: "all handler 
succeed with disallowed urlencoded slashes", + uc: "all handler succeed with disallowed urlencoded slashes", + slashHandling: config.EncodedSlashesOff, backend: &config.Backend{ Host: "foo.bar", }, - configureMocks: func(t *testing.T, ctx *heimdallmocks.ContextMock, authenticator *mocks.SubjectCreatorMock, - authorizer *mocks.SubjectHandlerMock, finalizer *mocks.SubjectHandlerMock, - _ *mocks.ErrorHandlerMock, + configureMocks: func(t *testing.T, ctx *heimdallmocks.ContextMock, _ *mocks.SubjectCreatorMock, + _ *mocks.SubjectHandlerMock, _ *mocks.SubjectHandlerMock, _ *mocks.ErrorHandlerMock, ) { t.Helper() - sub := &subject.Subject{ID: "Foo"} - - authenticator.EXPECT().Execute(ctx).Return(sub, nil) - authorizer.EXPECT().Execute(ctx, sub).Return(nil) - finalizer.EXPECT().Execute(ctx, sub).Return(nil) - - targetURL, _ := url.Parse("http://foo.local/api/v1/foo%5Bid%5D") - ctx.EXPECT().Request().Return(&heimdall.Request{URL: targetURL}) + targetURL, _ := url.Parse("http://foo.local/api%2Fv1/foo%5Bid%5D") + ctx.EXPECT().Request().Return(&heimdall.Request{ + URL: &heimdall.URL{ + URL: *targetURL, + Captures: map[string]string{"first": "api%2Fv1", "second": "foo%5Bid%5D"}, + }, + }) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, _ rule.Backend, _ map[string]string) { t.Helper() - require.NoError(t, err) - - expectedURL, _ := url.Parse("http://foo.bar/api/v1/foo%5Bid%5D") - assert.Equal(t, expectedURL, backend.URL()) + require.Error(t, err) + require.ErrorIs(t, err, heimdall.ErrArgument) + require.ErrorContains(t, err, "path contains encoded slash") }, }, { @@ -431,15 +240,24 @@ func TestRuleExecute(t *testing.T) { finalizer.EXPECT().Execute(ctx, sub).Return(nil) targetURL, _ := url.Parse("http://foo.local/api/v1/foo%5Bid%5D") - ctx.EXPECT().Request().Return(&heimdall.Request{URL: targetURL}) + ctx.EXPECT().Request().Return(&heimdall.Request{ + URL: &heimdall.URL{ + URL: *targetURL, + Captures: map[string]string{"first": "api", "second": "v1", "third": "foo%5Bid%5D"}, + }, + }) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, captures map[string]string) { t.Helper() require.NoError(t, err) expectedURL, _ := url.Parse("http://foo.bar/api/v1/foo%5Bid%5D") assert.Equal(t, expectedURL, backend.URL()) + + assert.Equal(t, "api", captures["first"]) + assert.Equal(t, "v1", captures["second"]) + assert.Equal(t, "foo[id]", captures["third"]) }, }, { @@ -461,20 +279,28 @@ func TestRuleExecute(t *testing.T) { finalizer.EXPECT().Execute(ctx, sub).Return(nil) targetURL, _ := url.Parse("http://foo.local/api%2Fv1/foo%5Bid%5D") - ctx.EXPECT().Request().Return(&heimdall.Request{URL: targetURL}) + ctx.EXPECT().Request().Return(&heimdall.Request{ + URL: &heimdall.URL{ + URL: *targetURL, + Captures: map[string]string{"first": "api%2Fv1", "second": "foo%5Bid%5D"}, + }, + }) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, captures map[string]string) { t.Helper() require.NoError(t, err) expectedURL, _ := url.Parse("http://foo.bar/api/v1/foo%5Bid%5D") assert.Equal(t, expectedURL, backend.URL()) + + assert.Equal(t, "api/v1", captures["first"]) + assert.Equal(t, "foo[id]", captures["second"]) }, }, { uc: "all handler succeed with urlencoded slashes on with urlencoded slash but without decoding it", - slashHandling: config.EncodedSlashesNoDecode, + slashHandling: config.EncodedSlashesOnNoDecode, backend: 
&config.Backend{ Host: "foo.bar", }, @@ -491,15 +317,23 @@ func TestRuleExecute(t *testing.T) { finalizer.EXPECT().Execute(ctx, sub).Return(nil) targetURL, _ := url.Parse("http://foo.local/api%2Fv1/foo%5Bid%5D") - ctx.EXPECT().Request().Return(&heimdall.Request{URL: targetURL}) + ctx.EXPECT().Request().Return(&heimdall.Request{ + URL: &heimdall.URL{ + URL: *targetURL, + Captures: map[string]string{"first": "api%2Fv1", "second": "foo%5Bid%5D"}, + }, + }) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, captures map[string]string) { t.Helper() require.NoError(t, err) expectedURL, _ := url.Parse("http://foo.bar/api%2Fv1/foo%5Bid%5D") assert.Equal(t, expectedURL, backend.URL()) + + assert.Equal(t, "api%2Fv1", captures["first"]) + assert.Equal(t, "foo[id]", captures["second"]) }, }, { @@ -521,9 +355,9 @@ func TestRuleExecute(t *testing.T) { finalizer.EXPECT().Execute(ctx, sub).Return(nil) targetURL, _ := url.Parse("http://foo.local/api/v1/foo") - ctx.EXPECT().Request().Return(&heimdall.Request{URL: targetURL}) + ctx.EXPECT().Request().Return(&heimdall.Request{URL: &heimdall.URL{URL: *targetURL}}) }, - assert: func(t *testing.T, err error, backend rule.Backend) { + assert: func(t *testing.T, err error, backend rule.Backend, _ map[string]string) { t.Helper() require.NoError(t, err) @@ -544,12 +378,12 @@ func TestRuleExecute(t *testing.T) { errHandler := mocks.NewErrorHandlerMock(t) rul := &ruleImpl{ - backend: tc.backend, - encodedSlashesHandling: x.IfThenElse(len(tc.slashHandling) != 0, tc.slashHandling, config.EncodedSlashesOff), - sc: compositeSubjectCreator{authenticator}, - sh: compositeSubjectHandler{authorizer}, - fi: compositeSubjectHandler{finalizer}, - eh: compositeErrorHandler{errHandler}, + backend: tc.backend, + slashesHandling: x.IfThenElse(len(tc.slashHandling) != 0, tc.slashHandling, config.EncodedSlashesOff), + sc: compositeSubjectCreator{authenticator}, + sh: compositeSubjectHandler{authorizer}, + fi: compositeSubjectHandler{finalizer}, + eh: compositeErrorHandler{errHandler}, } tc.configureMocks(t, ctx, authenticator, authorizer, finalizer, errHandler) @@ -558,7 +392,7 @@ func TestRuleExecute(t *testing.T) { upstream, err := rul.Execute(ctx) // THEN - tc.assert(t, err, upstream) + tc.assert(t, err, upstream, ctx.Request().URL.Captures) }) } } diff --git a/internal/rules/ruleset_processor_impl.go b/internal/rules/ruleset_processor_impl.go index 21462d1c7..2b409eba6 100644 --- a/internal/rules/ruleset_processor_impl.go +++ b/internal/rules/ruleset_processor_impl.go @@ -19,11 +19,8 @@ package rules import ( "errors" - "github.com/rs/zerolog" - "github.com/dadrus/heimdall/internal/heimdall" "github.com/dadrus/heimdall/internal/rules/config" - "github.com/dadrus/heimdall/internal/rules/event" "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/x/errorchain" ) @@ -31,18 +28,14 @@ import ( var ErrUnsupportedRuleSetVersion = errors.New("unsupported rule set version") type ruleSetProcessor struct { - q event.RuleSetChangedEventQueue + r rule.Repository f rule.Factory - l zerolog.Logger } -func NewRuleSetProcessor( - queue event.RuleSetChangedEventQueue, factory rule.Factory, logger zerolog.Logger, -) rule.SetProcessor { +func NewRuleSetProcessor(repository rule.Repository, factory rule.Factory) rule.SetProcessor { return &ruleSetProcessor{ - q: queue, + r: repository, f: factory, - l: logger, } } @@ -56,7 +49,8 @@ func (p 
*ruleSetProcessor) loadRules(ruleSet *config.RuleSet) ([]rule.Rule, erro for idx, rc := range ruleSet.Rules { rul, err := p.f.CreateRule(ruleSet.Version, ruleSet.Source, rc) if err != nil { - return nil, errorchain.NewWithMessage(heimdall.ErrInternal, "failed loading rule").CausedBy(err) + return nil, errorchain.NewWithMessagef(heimdall.ErrInternal, + "loading rule ID='%s' failed", rc.ID).CausedBy(err) } rules[idx] = rul @@ -75,16 +69,7 @@ func (p *ruleSetProcessor) OnCreated(ruleSet *config.RuleSet) error { return err } - evt := event.RuleSetChanged{ - Source: ruleSet.Source, - Name: ruleSet.Name, - Rules: rules, - ChangeType: event.Create, - } - - p.sendEvent(evt) - - return nil + return p.r.AddRuleSet(ruleSet.Source, rules) } func (p *ruleSetProcessor) OnUpdated(ruleSet *config.RuleSet) error { @@ -97,34 +82,9 @@ func (p *ruleSetProcessor) OnUpdated(ruleSet *config.RuleSet) error { return err } - evt := event.RuleSetChanged{ - Source: ruleSet.Source, - Name: ruleSet.Name, - Rules: rules, - ChangeType: event.Update, - } - - p.sendEvent(evt) - - return nil + return p.r.UpdateRuleSet(ruleSet.Source, rules) } func (p *ruleSetProcessor) OnDeleted(ruleSet *config.RuleSet) error { - evt := event.RuleSetChanged{ - Source: ruleSet.Source, - Name: ruleSet.Name, - ChangeType: event.Remove, - } - - p.sendEvent(evt) - - return nil -} - -func (p *ruleSetProcessor) sendEvent(evt event.RuleSetChanged) { - p.l.Info(). - Str("_src", evt.Source). - Str("_type", evt.ChangeType.String()). - Msg("Rule set changed") - p.q <- evt + return p.r.DeleteRuleSet(ruleSet.Source) } diff --git a/internal/rules/ruleset_processor_test.go b/internal/rules/ruleset_processor_test.go index a185877c6..839eb67e9 100644 --- a/internal/rules/ruleset_processor_test.go +++ b/internal/rules/ruleset_processor_test.go @@ -17,33 +17,32 @@ package rules import ( + "errors" "testing" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/dadrus/heimdall/internal/rules/config" - "github.com/dadrus/heimdall/internal/rules/event" + "github.com/dadrus/heimdall/internal/rules/rule" "github.com/dadrus/heimdall/internal/rules/rule/mocks" - "github.com/dadrus/heimdall/internal/x" - "github.com/dadrus/heimdall/internal/x/testsupport" ) func TestRuleSetProcessorOnCreated(t *testing.T) { t.Parallel() for _, tc := range []struct { - uc string - ruleset *config.RuleSet - configureFactory func(t *testing.T, mhf *mocks.FactoryMock) - assert func(t *testing.T, err error, queue event.RuleSetChangedEventQueue) + uc string + ruleset *config.RuleSet + configure func(t *testing.T, mhf *mocks.FactoryMock, repo *mocks.RepositoryMock) + assert func(t *testing.T, err error) }{ { - uc: "unsupported version", - ruleset: &config.RuleSet{Version: "foo"}, - assert: func(t *testing.T, err error, _ event.RuleSetChangedEventQueue) { + uc: "unsupported version", + ruleset: &config.RuleSet{Version: "foo"}, + configure: func(t *testing.T, _ *mocks.FactoryMock, _ *mocks.RepositoryMock) { t.Helper() }, + assert: func(t *testing.T, err error) { t.Helper() require.Error(t, err) @@ -53,18 +52,32 @@ func TestRuleSetProcessorOnCreated(t *testing.T) { { uc: "error while loading rule set", ruleset: &config.RuleSet{Version: config.CurrentRuleSetVersion, Rules: []config.Rule{{ID: "foo"}}}, - configureFactory: func(t *testing.T, mhf *mocks.FactoryMock) { + configure: func(t *testing.T, mhf *mocks.FactoryMock, 
_ *mocks.RepositoryMock) { t.Helper() - mhf.EXPECT().CreateRule(mock.Anything, mock.Anything, mock.Anything). - Return(nil, testsupport.ErrTestPurpose) + mhf.EXPECT().CreateRule(mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("test error")) }, - assert: func(t *testing.T, err error, _ event.RuleSetChangedEventQueue) { + assert: func(t *testing.T, err error) { t.Helper() require.Error(t, err) - require.ErrorIs(t, err, testsupport.ErrTestPurpose) - assert.Contains(t, err.Error(), "failed loading") + assert.Contains(t, err.Error(), "loading rule ID='foo' failed") + }, + }, + { + uc: "error while adding rule set", + ruleset: &config.RuleSet{Version: config.CurrentRuleSetVersion, Rules: []config.Rule{{ID: "foo"}}}, + configure: func(t *testing.T, mhf *mocks.FactoryMock, repo *mocks.RepositoryMock) { + t.Helper() + + mhf.EXPECT().CreateRule(config.CurrentRuleSetVersion, mock.Anything, mock.Anything).Return(&mocks.RuleMock{}, nil) + repo.EXPECT().AddRuleSet(mock.Anything, mock.Anything).Return(errors.New("test error")) + }, + assert: func(t *testing.T, err error) { + t.Helper() + + require.Error(t, err) + assert.Contains(t, err.Error(), "test error") }, }, { @@ -75,45 +88,37 @@ func TestRuleSetProcessorOnCreated(t *testing.T) { Name: "foobar", Rules: []config.Rule{{ID: "foo"}}, }, - configureFactory: func(t *testing.T, mhf *mocks.FactoryMock) { + configure: func(t *testing.T, mhf *mocks.FactoryMock, repo *mocks.RepositoryMock) { t.Helper() - mhf.EXPECT().CreateRule(config.CurrentRuleSetVersion, mock.Anything, mock.Anything).Return(&mocks.RuleMock{}, nil) + rul := &mocks.RuleMock{} + + mhf.EXPECT().CreateRule(config.CurrentRuleSetVersion, mock.Anything, mock.Anything).Return(rul, nil) + repo.EXPECT().AddRuleSet("test", mock.MatchedBy(func(rules []rule.Rule) bool { + return len(rules) == 1 && rules[0] == rul + })).Return(nil) }, - assert: func(t *testing.T, err error, queue event.RuleSetChangedEventQueue) { + assert: func(t *testing.T, err error) { t.Helper() require.NoError(t, err) - require.Len(t, queue, 1) - - evt := <-queue - require.Len(t, evt.Rules, 1) - assert.Equal(t, event.Create, evt.ChangeType) - assert.Equal(t, "test", evt.Source) - assert.Equal(t, "foobar", evt.Name) - - assert.Equal(t, &mocks.RuleMock{}, evt.Rules[0]) }, }, } { t.Run(tc.uc, func(t *testing.T) { - // GIVEM - configureFactory := x.IfThenElse(tc.configureFactory != nil, - tc.configureFactory, - func(t *testing.T, _ *mocks.FactoryMock) { t.Helper() }) - - queue := make(event.RuleSetChangedEventQueue, 10) - + // GIVEN factory := mocks.NewFactoryMock(t) - configureFactory(t, factory) + repo := mocks.NewRepositoryMock(t) + + tc.configure(t, factory, repo) - processor := NewRuleSetProcessor(queue, factory, log.Logger) + processor := NewRuleSetProcessor(repo, factory) // WHEN err := processor.OnCreated(tc.ruleset) // THEN - tc.assert(t, err, queue) + tc.assert(t, err) }) } } @@ -122,15 +127,18 @@ func TestRuleSetProcessorOnUpdated(t *testing.T) { t.Parallel() for _, tc := range []struct { - uc string - ruleset *config.RuleSet - configureFactory func(t *testing.T, mhf *mocks.FactoryMock) - assert func(t *testing.T, err error, queue event.RuleSetChangedEventQueue) + uc string + ruleset *config.RuleSet + configure func(t *testing.T, mhf *mocks.FactoryMock, repo *mocks.RepositoryMock) + assert func(t *testing.T, err error) }{ { uc: "unsupported version", ruleset: &config.RuleSet{Version: "foo"}, - assert: func(t *testing.T, err error, _ event.RuleSetChangedEventQueue) { + configure: func(t *testing.T, _ 
*mocks.FactoryMock, _ *mocks.RepositoryMock) { + t.Helper() + }, + assert: func(t *testing.T, err error) { t.Helper() require.Error(t, err) @@ -140,18 +148,32 @@ func TestRuleSetProcessorOnUpdated(t *testing.T) { { uc: "error while loading rule set", ruleset: &config.RuleSet{Version: config.CurrentRuleSetVersion, Rules: []config.Rule{{ID: "foo"}}}, - configureFactory: func(t *testing.T, mhf *mocks.FactoryMock) { + configure: func(t *testing.T, mhf *mocks.FactoryMock, _ *mocks.RepositoryMock) { + t.Helper() + + mhf.EXPECT().CreateRule(mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("test error")) + }, + assert: func(t *testing.T, err error) { + t.Helper() + + require.Error(t, err) + assert.Contains(t, err.Error(), "loading rule ID='foo' failed") + }, + }, + { + uc: "error while updating rule set", + ruleset: &config.RuleSet{Version: config.CurrentRuleSetVersion, Rules: []config.Rule{{ID: "foo"}}}, + configure: func(t *testing.T, mhf *mocks.FactoryMock, repo *mocks.RepositoryMock) { t.Helper() - mhf.EXPECT().CreateRule(mock.Anything, mock.Anything, mock.Anything). - Return(nil, testsupport.ErrTestPurpose) + mhf.EXPECT().CreateRule(mock.Anything, mock.Anything, mock.Anything).Return(&mocks.RuleMock{}, nil) + repo.EXPECT().UpdateRuleSet(mock.Anything, mock.Anything).Return(errors.New("test error")) }, - assert: func(t *testing.T, err error, _ event.RuleSetChangedEventQueue) { + assert: func(t *testing.T, err error) { t.Helper() require.Error(t, err) - require.ErrorIs(t, err, testsupport.ErrTestPurpose) - assert.Contains(t, err.Error(), "failed loading") + assert.Contains(t, err.Error(), "test error") }, }, { @@ -162,46 +184,37 @@ func TestRuleSetProcessorOnUpdated(t *testing.T) { Name: "foobar", Rules: []config.Rule{{ID: "foo"}}, }, - configureFactory: func(t *testing.T, mhf *mocks.FactoryMock) { + configure: func(t *testing.T, mhf *mocks.FactoryMock, repo *mocks.RepositoryMock) { t.Helper() - mhf.EXPECT().CreateRule(config.CurrentRuleSetVersion, mock.Anything, mock.Anything). 
- Return(&mocks.RuleMock{}, nil) + rul := &mocks.RuleMock{} + + mhf.EXPECT().CreateRule(config.CurrentRuleSetVersion, mock.Anything, mock.Anything).Return(rul, nil) + repo.EXPECT().UpdateRuleSet("test", mock.MatchedBy(func(rules []rule.Rule) bool { + return len(rules) == 1 && rules[0] == rul + })).Return(nil) }, - assert: func(t *testing.T, err error, queue event.RuleSetChangedEventQueue) { + assert: func(t *testing.T, err error) { t.Helper() require.NoError(t, err) - require.Len(t, queue, 1) - - evt := <-queue - require.Len(t, evt.Rules, 1) - assert.Equal(t, event.Update, evt.ChangeType) - assert.Equal(t, "test", evt.Source) - assert.Equal(t, "foobar", evt.Name) - - assert.Equal(t, &mocks.RuleMock{}, evt.Rules[0]) }, }, } { t.Run(tc.uc, func(t *testing.T) { // GIVEM - configureFactory := x.IfThenElse(tc.configureFactory != nil, - tc.configureFactory, - func(t *testing.T, _ *mocks.FactoryMock) { t.Helper() }) - - queue := make(event.RuleSetChangedEventQueue, 10) - factory := mocks.NewFactoryMock(t) - configureFactory(t, factory) + repo := mocks.NewRepositoryMock(t) + + tc.configure(t, factory, repo) - processor := NewRuleSetProcessor(queue, factory, log.Logger) + processor := NewRuleSetProcessor(repo, factory) // WHEN err := processor.OnUpdated(tc.ruleset) // THEN - tc.assert(t, err, queue) + tc.assert(t, err) }) } } @@ -210,10 +223,30 @@ func TestRuleSetProcessorOnDeleted(t *testing.T) { t.Parallel() for _, tc := range []struct { - uc string - ruleset *config.RuleSet - assert func(t *testing.T, err error, queue event.RuleSetChangedEventQueue) + uc string + ruleset *config.RuleSet + configure func(t *testing.T, repo *mocks.RepositoryMock) + assert func(t *testing.T, err error) }{ + { + uc: "failed removing rule set", + ruleset: &config.RuleSet{ + MetaData: config.MetaData{Source: "test"}, + Version: config.CurrentRuleSetVersion, + Name: "foobar", + }, + configure: func(t *testing.T, repo *mocks.RepositoryMock) { + t.Helper() + + repo.EXPECT().DeleteRuleSet("test").Return(errors.New("test error")) + }, + assert: func(t *testing.T, err error) { + t.Helper() + + require.Error(t, err) + require.ErrorContains(t, err, "test error") + }, + }, { uc: "successful", ruleset: &config.RuleSet{ @@ -221,29 +254,30 @@ func TestRuleSetProcessorOnDeleted(t *testing.T) { Version: config.CurrentRuleSetVersion, Name: "foobar", }, - assert: func(t *testing.T, err error, queue event.RuleSetChangedEventQueue) { + configure: func(t *testing.T, repo *mocks.RepositoryMock) { t.Helper() - require.NoError(t, err) - require.Len(t, queue, 1) + repo.EXPECT().DeleteRuleSet("test").Return(nil) + }, + assert: func(t *testing.T, err error) { + t.Helper() - evt := <-queue - assert.Equal(t, event.Remove, evt.ChangeType) - assert.Equal(t, "test", evt.Source) - assert.Equal(t, "foobar", evt.Name) + require.NoError(t, err) }, }, } { t.Run(tc.uc, func(t *testing.T) { // GIVEM - queue := make(event.RuleSetChangedEventQueue, 10) - processor := NewRuleSetProcessor(queue, mocks.NewFactoryMock(t), log.Logger) + repo := mocks.NewRepositoryMock(t) + tc.configure(t, repo) + + processor := NewRuleSetProcessor(repo, mocks.NewFactoryMock(t)) // WHEN err := processor.OnDeleted(tc.ruleset) // THEN - tc.assert(t, err, queue) + tc.assert(t, err) }) } } diff --git a/internal/rules/typed_matcher.go b/internal/rules/typed_matcher.go new file mode 100644 index 000000000..11cda713e --- /dev/null +++ b/internal/rules/typed_matcher.go @@ -0,0 +1,85 @@ +// Copyright 2024 Dimitrij Drus +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +package rules + +import ( + "errors" + "regexp" + + "github.com/gobwas/glob" +) + +var ( + ErrNoGlobPatternDefined = errors.New("no glob pattern defined") + ErrNoRegexPatternDefined = errors.New("no regex pattern defined") +) + +type ( + typedMatcher interface { + match(pattern string) bool + } + + globMatcher struct { + compiled glob.Glob + } + + regexpMatcher struct { + compiled *regexp.Regexp + } + + exactMatcher struct { + value string + } +) + +func (m *globMatcher) match(value string) bool { + return m.compiled.Match(value) +} + +func (m *regexpMatcher) match(matchAgainst string) bool { + return m.compiled.MatchString(matchAgainst) +} + +func (m *exactMatcher) match(value string) bool { return m.value == value } + +func newGlobMatcher(pattern string, separator rune) (typedMatcher, error) { + if len(pattern) == 0 { + return nil, ErrNoGlobPatternDefined + } + + compiled, err := glob.Compile(pattern, separator) + if err != nil { + return nil, err + } + + return &globMatcher{compiled: compiled}, nil +} + +func newRegexMatcher(pattern string) (typedMatcher, error) { + if len(pattern) == 0 { + return nil, ErrNoRegexPatternDefined + } + + compiled, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + + return ®expMatcher{compiled: compiled}, nil +} + +func newExactMatcher(value string) typedMatcher { return &exactMatcher{value: value} } diff --git a/internal/rules/typed_matcher_test.go b/internal/rules/typed_matcher_test.go new file mode 100644 index 000000000..a9d05a826 --- /dev/null +++ b/internal/rules/typed_matcher_test.go @@ -0,0 +1,173 @@ +// Copyright 2024 Dimitrij Drus +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package rules + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRegexPatternMatcher(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + expression string + matches string + assert func(t *testing.T, err error, matched bool) + }{ + { + uc: "with empty expression", + assert: func(t *testing.T, err error, _ bool) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, ErrNoRegexPatternDefined) + }, + }, + { + uc: "with bad regex expression", + expression: "?>?<*??", + assert: func(t *testing.T, err error, _ bool) { + t.Helper() + + require.Error(t, err) + assert.Contains(t, err.Error(), "error parsing regexp") + }, + }, + { + uc: "doesn't match", + expression: "^/foo/(bar|baz)/zab", + matches: "/foo/zab/zab", + assert: func(t *testing.T, err error, matched bool) { + t.Helper() + + require.NoError(t, err) + assert.False(t, matched) + }, + }, + { + uc: "successful", + expression: "^/foo/(bar|baz)/zab", + matches: "/foo/bar/zab", + assert: func(t *testing.T, err error, matched bool) { + t.Helper() + + require.NoError(t, err) + assert.True(t, matched) + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + var matched bool + + matcher, err := newRegexMatcher(tc.expression) + if matcher != nil { + matched = matcher.match(tc.matches) + } + + tc.assert(t, err, matched) + }) + } +} + +func TestGlobPatternMatcher(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + expression string + matches string + assert func(t *testing.T, err error, matched bool) + }{ + { + uc: "with empty expression", + assert: func(t *testing.T, err error, _ bool) { + t.Helper() + + require.Error(t, err) + require.ErrorIs(t, err, ErrNoGlobPatternDefined) + }, + }, + { + uc: "with bad glob expression", + expression: "!*][)(*", + assert: func(t *testing.T, err error, _ bool) { + t.Helper() + + require.Error(t, err) + assert.Contains(t, err.Error(), "unexpected end of input") + }, + }, + { + uc: "doesn't match", + expression: "{/**.foo,/**.bar}", + matches: "/foo.baz", + assert: func(t *testing.T, err error, matched bool) { + t.Helper() + + require.NoError(t, err) + assert.False(t, matched) + }, + }, + { + uc: "successful", + expression: "{/**.foo,/**.bar}", + matches: "/foo.bar", + assert: func(t *testing.T, err error, matched bool) { + t.Helper() + + require.NoError(t, err) + assert.True(t, matched) + }, + }, + } { + t.Run(tc.uc, func(t *testing.T) { + var matched bool + + matcher, err := newGlobMatcher(tc.expression, '/') + if matcher != nil { + matched = matcher.match(tc.matches) + } + + tc.assert(t, err, matched) + }) + } +} + +func TestExactMatcher(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + expression string + toMatch string + matches bool + }{ + {uc: "matches", expression: "foo", toMatch: "foo", matches: true}, + {uc: "doesn't match", expression: "foo", toMatch: "bar"}, + } { + t.Run(tc.uc, func(t *testing.T) { + matcher := newExactMatcher(tc.expression) + + matches := matcher.match(tc.toMatch) + assert.Equal(t, tc.matches, matches) + }) + } +} diff --git a/internal/signer/module.go b/internal/signer/module.go deleted file mode 100644 index a9df4bf76..000000000 --- a/internal/signer/module.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2022 Dimitrij Drus -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package signer - -import "go.uber.org/fx" - -// Module is used on app bootstrap. -// nolint: gochecknoglobals -var Module = fx.Options( - fx.Provide(NewJWTSigner), -) diff --git a/internal/validation/validation.go b/internal/validation/validation.go index b86a17c65..ed56c4db8 100644 --- a/internal/validation/validation.go +++ b/internal/validation/validation.go @@ -46,8 +46,19 @@ func init() { panic(err) } + getTagValue := func(tag reflect.StructTag) string { + for _, tagName := range []string{"mapstructure", "json", "yaml"} { + val := tag.Get(tagName) + if len(val) != 0 { + return val + } + } + + return "" + } + validate.RegisterTagNameFunc(func(fld reflect.StructField) string { - return "'" + strings.SplitN(fld.Tag.Get("mapstructure"), ",", 2)[0] + "'" // nolint: mnd + return "'" + strings.SplitN(getTagValue(fld.Tag), ",", 2)[0] + "'" // nolint: mnd }) } diff --git a/internal/watcher/module.go b/internal/watcher/module.go index 75fc74025..740bc70bd 100644 --- a/internal/watcher/module.go +++ b/internal/watcher/module.go @@ -35,7 +35,7 @@ var Module = fx.Options( return newWatcher(logger) } - return &noopWatcher{}, nil + return &NoopWatcher{}, nil }, // nolint: forcetypeassert fx.OnStart(func(ctx context.Context, w Watcher) error { diff --git a/internal/watcher/noop_watcher.go b/internal/watcher/noop_watcher.go index a5c10cb1a..65ad859bf 100644 --- a/internal/watcher/noop_watcher.go +++ b/internal/watcher/noop_watcher.go @@ -18,8 +18,8 @@ package watcher import "context" -type noopWatcher struct{} +type NoopWatcher struct{} -func (*noopWatcher) start(_ context.Context) {} -func (*noopWatcher) stop(_ context.Context) error { return nil } -func (*noopWatcher) Add(_ string, _ ChangeListener) error { return nil } +func (*NoopWatcher) start(_ context.Context) {} +func (*NoopWatcher) stop(_ context.Context) error { return nil } +func (*NoopWatcher) Add(_ string, _ ChangeListener) error { return nil } diff --git a/internal/x/opentelemetry/mocks/mock.go b/internal/x/opentelemetry/mocks/mock.go index f4b857a0a..9b90b043f 100644 --- a/internal/x/opentelemetry/mocks/mock.go +++ b/internal/x/opentelemetry/mocks/mock.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/bridge/opentracing/migration" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" diff --git a/internal/x/radixtree/matcher.go b/internal/x/radixtree/matcher.go new file mode 100644 index 000000000..622e838b6 --- /dev/null +++ b/internal/x/radixtree/matcher.go @@ -0,0 +1,33 @@ +package radixtree + +// LookupMatcher is used for additional checks while performing the lookup of values in the spanned tree. +type LookupMatcher[V any] interface { + // Match should return true if the value should be returned by the lookup. 
+ Match(value V, keys, values []string) bool +} + +// The LookupMatcherFunc type is an adapter to allow the use of ordinary functions as match functions. +// If f is a function with the appropriate signature, LookupMatcherFunc(f) is a [LookupMatcher] +// that calls f. +type LookupMatcherFunc[V any] func(value V, keys, values []string) bool + +// Match calls f(value, keys, values). +func (f LookupMatcherFunc[V]) Match(value V, keys, values []string) bool { + return f(value, keys, values) +} + +// ValueMatcher is used for additional checks while deleting values in the spanned tree. +type ValueMatcher[V any] interface { + // Match should return true if the value should be deleted from the tree. + Match(value V) bool +} + +// The ValueMatcherFunc type is an adapter to allow the use of ordinary functions as match functions. +// If f is a function with the appropriate signature, ValueMatcherFunc(f) is a [ValueMatcher] +// that calls f. +type ValueMatcherFunc[V any] func(value V) bool + +// Match calls f(value). +func (f ValueMatcherFunc[V]) Match(value V) bool { + return f(value) +} diff --git a/internal/x/radixtree/options.go b/internal/x/radixtree/options.go new file mode 100644 index 000000000..b3f85ba77 --- /dev/null +++ b/internal/x/radixtree/options.go @@ -0,0 +1,19 @@ +package radixtree + +type Option[V any] func(n *Tree[V]) + +func WithValuesConstraints[V any](constraints ConstraintsFunc[V]) Option[V] { + return func(n *Tree[V]) { + if constraints != nil { + n.canAdd = constraints + } + } +} + +type AddOption[V any] func(n *Tree[V]) + +func WithBacktracking[V any](flag bool) AddOption[V] { + return func(n *Tree[V]) { + n.backtrackingEnabled = flag + } +} diff --git a/internal/x/radixtree/options_test.go b/internal/x/radixtree/options_test.go new file mode 100644 index 000000000..9c3289f2a --- /dev/null +++ b/internal/x/radixtree/options_test.go @@ -0,0 +1,32 @@ +package radixtree + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestValuesConstrainedTree(t *testing.T) { + t.Parallel() + + // GIVEN + tree1 := New[string](WithValuesConstraints[string](func(oldValues []string, _ string) bool { + return len(oldValues) == 0 + })) + + tree2 := New[string]() + + err := tree1.Add("/foo", "bar") + require.NoError(t, err) + + err = tree2.Add("/foo", "bar") + require.NoError(t, err) + + // WHEN + err1 := tree1.Add("/foo", "bar") + err2 := tree2.Add("/foo", "bar") + + // THEN + require.Error(t, err1) + require.NoError(t, err2) +} diff --git a/internal/x/radixtree/package.go b/internal/x/radixtree/package.go new file mode 100644 index 000000000..20b63b6f0 --- /dev/null +++ b/internal/x/radixtree/package.go @@ -0,0 +1,7 @@ +/* +Package radixtree implements a tree lookup for values associated with +paths. + +This package is a fork of https://github.com/dimfeld/httptreemux.
+*/ +package radixtree diff --git a/internal/x/radixtree/tree.go b/internal/x/radixtree/tree.go new file mode 100644 index 000000000..bea7e7d37 --- /dev/null +++ b/internal/x/radixtree/tree.go @@ -0,0 +1,534 @@ +package radixtree + +import ( + "errors" + "fmt" + "slices" + "strings" +) + +var ( + ErrInvalidPath = errors.New("invalid path") + ErrNotFound = errors.New("not found") + ErrFailedToDelete = errors.New("failed to delete") + ErrConstraintsViolation = errors.New("constraints violation") +) + +type ( + ConstraintsFunc[V any] func(oldValues []V, newValue V) bool + + Entry[V any] struct { + Value V + Parameters map[string]string + } + + Tree[V any] struct { + path string + + priority int + + // The list of static children to check. + staticIndices []byte + staticChildren []*Tree[V] + + // If none of the above match, check the wildcard children + wildcardChild *Tree[V] + + // If none of the above match, then we use the catch-all, if applicable. + catchAllChild *Tree[V] + + isCatchAll bool + isWildcard bool + + values []V + wildcardKeys []string + + // global options + canAdd ConstraintsFunc[V] + + // node local options + backtrackingEnabled bool + } +) + +func New[V any](opts ...Option[V]) *Tree[V] { + root := &Tree[V]{ + canAdd: func(_ []V, _ V) bool { return true }, + } + + for _, opt := range opts { + opt(root) + } + + return root +} + +func (n *Tree[V]) sortStaticChildren(i int) { + for i > 0 && n.staticChildren[i].priority > n.staticChildren[i-1].priority { + n.staticChildren[i], n.staticChildren[i-1] = n.staticChildren[i-1], n.staticChildren[i] + n.staticIndices[i], n.staticIndices[i-1] = n.staticIndices[i-1], n.staticIndices[i] + + i-- + } +} + +func (n *Tree[V]) nextSeparator(path string) int { + if idx := strings.IndexByte(path, '/'); idx != -1 { + return idx + } + + return len(path) +} + +//nolint:funlen,gocognit,cyclop +func (n *Tree[V]) addNode(path string, wildcardKeys []string, inStaticToken bool) (*Tree[V], error) { + if len(path) == 0 { + // we have a leaf node + if len(wildcardKeys) != 0 { + // Ensure the current wildcard keys are the same as the old ones. 
+ if len(n.wildcardKeys) != 0 && !slices.Equal(n.wildcardKeys, wildcardKeys) { + return nil, fmt.Errorf("%w: %s is ambiguous - wildcard keys differ", ErrInvalidPath, path) + } + + n.wildcardKeys = wildcardKeys + } + + return n, nil + } + + token := path[0] + nextSlash := strings.IndexByte(path, '/') + + var ( + thisToken string + tokenEnd int + unescaped bool + ) + + switch { + case token == '/': + thisToken = "/" + tokenEnd = 1 + case nextSlash == -1: + thisToken = path + tokenEnd = len(path) + default: + thisToken = path[0:nextSlash] + tokenEnd = nextSlash + } + + remainingPath := path[tokenEnd:] + + if !inStaticToken { //nolint:nestif + switch token { + case '*': + thisToken = thisToken[1:] + + if nextSlash != -1 { + return nil, fmt.Errorf("%w: %s has '/' after a free wildcard", ErrInvalidPath, path) + } + + if n.catchAllChild == nil { + n.catchAllChild = &Tree[V]{ + path: thisToken, + isCatchAll: true, + } + + if len(n.values) == 0 { + n.backtrackingEnabled = true + } + } + + if path[1:] != n.catchAllChild.path { + return nil, fmt.Errorf("%w: free wildcard name in %s doesn't match %s", + ErrInvalidPath, path, n.catchAllChild.path) + } + + wildcardKeys = append(wildcardKeys, thisToken) + n.catchAllChild.wildcardKeys = wildcardKeys + + return n.catchAllChild, nil + case ':': + if n.wildcardChild == nil { + n.wildcardChild = &Tree[V]{path: "wildcard", isWildcard: true} + + if len(n.values) == 0 { + n.backtrackingEnabled = true + } + } + + return n.wildcardChild.addNode(remainingPath, append(wildcardKeys, thisToken[1:]), false) + } + } + + if !inStaticToken && + len(thisToken) >= 2 && + thisToken[0] == '\\' && + (thisToken[1] == '*' || thisToken[1] == ':' || thisToken[1] == '\\') { + // The token starts with a character escaped by a backslash. Drop the backslash. + token = thisToken[1] + thisToken = thisToken[1:] + unescaped = true + } + + for i, index := range n.staticIndices { + if token == index { + // Yes. Split it based on the common prefix of the existing + // node and the new one. + child, prefixSplit := n.splitCommonPrefix(i, thisToken) + child.priority++ + + n.sortStaticChildren(i) + + if unescaped { + // Account for the removed backslash. + prefixSplit++ + } + + // Ensure that the rest of this token is not mistaken for a wildcard + // if a prefix split occurs at a '*' or ':'. + return child.addNode(path[prefixSplit:], wildcardKeys, token != '/') + } + } + + child := &Tree[V]{path: thisToken} + + n.staticIndices = append(n.staticIndices, token) + n.staticChildren = append(n.staticChildren, child) + + if len(n.values) == 0 { + n.backtrackingEnabled = true + } + + // Ensure that the rest of this token is not mistaken for a wildcard + // if a prefix split occurs at a '*' or ':'.
+ return child.addNode(remainingPath, wildcardKeys, token != '/') +} + +//nolint:cyclop,funlen +func (n *Tree[V]) delNode(path string, matcher ValueMatcher[V]) bool { + pathLen := len(path) + if pathLen == 0 { + if len(n.values) == 0 { + return false + } + + oldSize := len(n.values) + n.values = slices.DeleteFunc(n.values, matcher.Match) + newSize := len(n.values) + + if newSize == 0 { + n.backtrackingEnabled = true + } + + return oldSize != newSize + } + + var ( + nextPath string + child *Tree[V] + ) + + token := path[0] + + switch token { + case ':': + if n.wildcardChild == nil { + return false + } + + child = n.wildcardChild + nextSeparator := n.nextSeparator(path) + nextPath = path[nextSeparator:] + case '*': + if n.catchAllChild == nil { + return false + } + + child = n.catchAllChild + nextPath = "" + } + + if child != nil { + if child.delNode(nextPath, matcher) { + if len(child.values) == 0 { + n.deleteChild(child, token) + } + + return true + } + + return false + } + + if len(path) >= 2 && + path[0] == '\\' && + (path[1] == '*' || path[1] == ':' || path[1] == '\\') { + // The token starts with a character escaped by a backslash. Drop the backslash. + token = path[1] + path = path[1:] + } + + for i, staticIndex := range n.staticIndices { + if token == staticIndex { + child = n.staticChildren[i] + childPathLen := len(child.path) + + if pathLen >= childPathLen && child.path == path[:childPathLen] && + child.delNode(path[childPathLen:], matcher) { + if len(child.values) == 0 { + n.deleteChild(child, token) + } + + return true + } + + break + } + } + + return false +} + +//nolint:cyclop +func (n *Tree[V]) deleteChild(child *Tree[V], token uint8) { + if len(child.staticIndices) == 1 && child.staticIndices[0] != '/' && child.path != "/" { + if len(child.staticChildren) == 1 { + grandChild := child.staticChildren[0] + grandChild.path = child.path + grandChild.path + *child = *grandChild + } + + // new leaf created + if len(child.values) != 0 { + return + } + } + + // Delete the child from the parent only if the child has no children + if len(child.staticIndices) == 0 && child.wildcardChild == nil && child.catchAllChild == nil { + switch { + case child.isWildcard: + n.wildcardChild = nil + case child.isCatchAll: + n.catchAllChild = nil + default: + n.delEdge(token) + } + } +} + +func (n *Tree[V]) delEdge(token byte) { + for i, index := range n.staticIndices { + if token == index { + n.staticChildren = append(n.staticChildren[:i], n.staticChildren[i+1:]...) + n.staticIndices = append(n.staticIndices[:i], n.staticIndices[i+1:]...) + + return + } + } +} + +//nolint:funlen,gocognit,cyclop +func (n *Tree[V]) findNode(path string, captures []string, matcher LookupMatcher[V]) (*Tree[V], int, []string, bool) { + var ( + found *Tree[V] + idx int + value V + ) + + backtrack := true + + pathLen := len(path) + if pathLen == 0 { + if len(n.values) == 0 { + return nil, 0, nil, true + } + + for idx, value = range n.values { + if match := matcher.Match(value, n.wildcardKeys, captures); match { + return n, idx, captures, false + } + } + + return nil, 0, nil, n.backtrackingEnabled + } + + // First see if this matches a static token. 
+ firstChar := path[0] + for i, staticIndex := range n.staticIndices { + if staticIndex == firstChar { + child := n.staticChildren[i] + childPathLen := len(child.path) + + if pathLen >= childPathLen && child.path == path[:childPathLen] { + nextPath := path[childPathLen:] + found, idx, captures, backtrack = child.findNode(nextPath, captures, matcher) + } + + break + } + } + + if found != nil || !backtrack { + return found, idx, captures, backtrack + } + + if n.wildcardChild != nil { //nolint:nestif + // Didn't find a static token, so check for a wildcard. + nextSeparator := n.nextSeparator(path) + thisToken := path[0:nextSeparator] + nextToken := path[nextSeparator:] + + if len(thisToken) > 0 { // Don't match on empty tokens. + var tmp []string + + found, idx, tmp, backtrack = n.wildcardChild.findNode(nextToken, append(captures, thisToken), matcher) + if found != nil { + return found, idx, tmp, backtrack + } else if !backtrack { + return nil, 0, nil, false + } + } + } + + if n.catchAllChild != nil { + // Hit the catchall, so just assign the whole remaining path. + for idx, value = range n.catchAllChild.values { + if match := matcher.Match(value, n.wildcardKeys, captures); match { + return n.catchAllChild, idx, append(captures, path), false + } + } + + return nil, 0, captures, n.backtrackingEnabled + } + + return nil, 0, captures, true +} + +func (n *Tree[V]) splitCommonPrefix(existingNodeIndex int, path string) (*Tree[V], int) { + childNode := n.staticChildren[existingNodeIndex] + + if strings.HasPrefix(path, childNode.path) { + // No split needs to be done. Rather, the new path shares the entire + // prefix with the existing node, so the new node is just a child of + // the existing one. Or the new path is the same as the existing path, + // which means that we just move on to the next token. Either way, + // this return accomplishes that + return childNode, len(childNode.path) + } + + // Find the length of the common prefix of the child node and the new path. + i := commonPrefixLen(childNode.path, path) + + commonPrefix := path[0:i] + childNode.path = childNode.path[i:] + + // Create a new intermediary node in the place of the existing node, with + // the existing node as a child. + newNode := &Tree[V]{ + path: commonPrefix, + priority: childNode.priority, + // Index is the first byte of the non-common part of the path. 
+ staticIndices: []byte{childNode.path[0]}, + staticChildren: []*Tree[V]{childNode}, + } + n.staticChildren[existingNodeIndex] = newNode + + return newNode, i +} + +func (n *Tree[V]) Find(path string, matcher LookupMatcher[V]) (*Entry[V], error) { + found, idx, params, _ := n.findNode(path, make([]string, 0, 3), matcher) + if found == nil { + return nil, fmt.Errorf("%w: %s", ErrNotFound, path) + } + + entry := &Entry[V]{ + Value: found.values[idx], + } + + entry.Parameters = make(map[string]string, len(params)) + + for i, param := range params { + key := found.wildcardKeys[i] + if key != "*" { + entry.Parameters[key] = param + } + } + + return entry, nil +} + +func (n *Tree[V]) Add(path string, value V, opts ...AddOption[V]) error { + node, err := n.addNode(path, nil, false) + if err != nil { + return err + } + + if !n.canAdd(node.values, value) { + return fmt.Errorf("%w: %s", ErrConstraintsViolation, path) + } + + for _, apply := range opts { + apply(node) + } + + node.values = append(node.values, value) + + return nil +} + +func (n *Tree[V]) Delete(path string, matcher ValueMatcher[V]) error { + if !n.delNode(path, matcher) { + return fmt.Errorf("%w: %s", ErrFailedToDelete, path) + } + + return nil +} + +func (n *Tree[V]) Empty() bool { + return len(n.values) == 0 && len(n.staticChildren) == 0 && n.wildcardChild == nil && n.catchAllChild == nil +} + +func (n *Tree[V]) Clone() *Tree[V] { + root := &Tree[V]{} + + n.cloneInto(root) + + return root +} + +func (n *Tree[V]) cloneInto(out *Tree[V]) { + *out = *n + + if len(n.wildcardKeys) != 0 { + out.wildcardKeys = slices.Clone(n.wildcardKeys) + } + + if len(n.values) != 0 { + out.values = slices.Clone(n.values) + } + + if n.catchAllChild != nil { + out.catchAllChild = &Tree[V]{} + n.catchAllChild.cloneInto(out.catchAllChild) + } + + if n.wildcardChild != nil { + out.wildcardChild = &Tree[V]{} + n.wildcardChild.cloneInto(out.wildcardChild) + } + + if len(n.staticChildren) != 0 { + out.staticIndices = slices.Clone(n.staticIndices) + out.staticChildren = make([]*Tree[V], len(n.staticChildren)) + + for idx, child := range n.staticChildren { + newChild := &Tree[V]{} + + child.cloneInto(newChild) + out.staticChildren[idx] = newChild + } + } +} diff --git a/internal/x/radixtree/tree_benchmark_test.go b/internal/x/radixtree/tree_benchmark_test.go new file mode 100644 index 000000000..4f90aa6e0 --- /dev/null +++ b/internal/x/radixtree/tree_benchmark_test.go @@ -0,0 +1,122 @@ +package radixtree + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func BenchmarkNodeSearchNoPaths(b *testing.B) { + tm := lookupMatcher[string](true) + tree := &Tree[string]{ + path: "/", + canAdd: func(_ []string, _ string) bool { return true }, + } + + b.ReportAllocs() + b.ResetTimer() + + for range b.N { + tree.findNode("", nil, tm) + } +} + +func BenchmarkNodeSearchRoot(b *testing.B) { + tm := lookupMatcher[string](true) + tree := &Tree[string]{ + path: "/", + canAdd: func(_ []string, _ string) bool { return true }, + } + + b.ReportAllocs() + b.ResetTimer() + + for range b.N { + tree.findNode("/", nil, tm) + } +} + +func BenchmarkNodeSearchOneStaticPath(b *testing.B) { + tm := lookupMatcher[string](true) + tree := &Tree[string]{ + path: "/", + canAdd: func(_ []string, _ string) bool { return true }, + } + + tree.Add("abc", "foo") + + b.ReportAllocs() + b.ResetTimer() + + for range b.N { + tree.findNode("abc", nil, tm) + } +} + +func BenchmarkNodeSearchOneLongStaticPath(b *testing.B) { + tm := lookupMatcher[string](true) + tree := &Tree[string]{ 
+ path: "/", + canAdd: func(_ []string, _ string) bool { return true }, + } + + tree.Add("foo/bar/baz", "foo") + + b.ReportAllocs() + b.ResetTimer() + + for range b.N { + tree.findNode("foo/bar/baz", nil, tm) + } +} + +func BenchmarkNodeSearchOneWildcardPath(b *testing.B) { + tm := lookupMatcher[string](true) + tree := &Tree[string]{ + path: "/", + canAdd: func(_ []string, _ string) bool { return true }, + } + + require.NoError(b, tree.Add(":abc", "foo")) + + b.ReportAllocs() + b.ResetTimer() + + for range b.N { + tree.findNode("abc", nil, tm) + } +} + +func BenchmarkNodeSearchOneLongWildcards(b *testing.B) { + tm := lookupMatcher[string](true) + tree := &Tree[string]{ + path: "/", + canAdd: func(_ []string, _ string) bool { return true }, + } + + tree.Add(":abc/:def/:ghi", "foo") + + b.ReportAllocs() + b.ResetTimer() + + for range b.N { + tree.findNode("abcdefghijklmnop/aaaabbbbccccddddeeeeffffgggg/hijkl", nil, tm) + } +} + +func BenchmarkNodeSearchOneFreeWildcard(b *testing.B) { + tm := lookupMatcher[string](true) + tree := &Tree[string]{ + path: "/", + canAdd: func(_ []string, _ string) bool { return true }, + } + + require.NoError(b, tree.Add("*abc", "foo")) + + b.ReportAllocs() + b.ResetTimer() + + for range b.N { + tree.findNode("foo", nil, tm) + } +} diff --git a/internal/x/radixtree/tree_test.go b/internal/x/radixtree/tree_test.go new file mode 100644 index 000000000..2b34b95b1 --- /dev/null +++ b/internal/x/radixtree/tree_test.go @@ -0,0 +1,426 @@ +package radixtree + +import ( + "slices" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" +) + +func lookupMatcher[V any](matches bool) LookupMatcherFunc[V] { + return func(_ V, _, _ []string) bool { return matches } +} + +func deleteMatcher[V any](matches bool) ValueMatcherFunc[V] { + return func(_ V) bool { return matches } +} + +func TestTreeSearch(t *testing.T) { + t.Parallel() + + // Setup & populate tree + tree := New[string]() + + for _, path := range []string{ + "/", + "/i", + "/i/:aaa", + "/images", + "/images/abc.jpg", + "/images/:imgname", + "/images/*path", + "/ima", + "/ima/:par", + "/images1", + "/images2", + "/apples", + "/app/les", + "/apples1", + "/appeasement", + "/appealing", + "/date/:year/:month", + "/date/:year/month", + "/date/:year/:month/abc", + "/date/:year/:month/:post", + "/date/:year/:month/*post", + "/:page", + "/:page/:index", + "/post/:post/page/:page", + "/plaster", + "/users/:pk/:related", + "/users/:id/updatePassword", + "/:something/abc", + "/:something/def", + "/something/**", + "/images/\\*path", + "/images/\\*patch", + "/date/\\:year/\\:month", + "/apples/ab:cde/:fg/*hi", + "/apples/ab*cde/:fg/*hi", + "/apples/ab\\*cde/:fg/*hi", + "/apples/ab*dde", + "/マ", + "/カ", + } { + err := tree.Add(path, path) + require.NoError(t, err) + } + + trueMatcher := lookupMatcher[string](true) + falseMatcher := lookupMatcher[string](false) + + for _, tc := range []struct { + path string + expPath string + expErr error + expParams map[string]string + matcher LookupMatcher[string] + }{ + {path: "/users/abc/updatePassword", expPath: "/users/:id/updatePassword", expParams: map[string]string{"id": "abc"}}, + {path: "/users/all/something", expPath: "/users/:pk/:related", expParams: map[string]string{"pk": "all", "related": "something"}}, + {path: "/aaa/abc", expPath: "/:something/abc", expParams: map[string]string{"something": "aaa"}}, + {path: "/aaa/def", expPath: "/:something/def", expParams: map[string]string{"something": "aaa"}}, + 
{path: "/paper", expPath: "/:page", expParams: map[string]string{"page": "paper"}}, + {path: "/", expPath: "/"}, + {path: "/i", expPath: "/i"}, + {path: "/images", expPath: "/images"}, + {path: "/images/abc.jpg", expPath: "/images/abc.jpg"}, + {path: "/images/something", expPath: "/images/:imgname", expParams: map[string]string{"imgname": "something"}}, + {path: "/images/long/path", expPath: "/images/*path", expParams: map[string]string{"path": "long/path"}}, + {path: "/images/long/path", matcher: falseMatcher, expErr: ErrNotFound}, + {path: "/images/even/longer/path", expPath: "/images/*path", expParams: map[string]string{"path": "even/longer/path"}}, + {path: "/ima", expPath: "/ima"}, + {path: "/apples", expPath: "/apples"}, + {path: "/app/les", expPath: "/app/les"}, + {path: "/abc", expPath: "/:page", expParams: map[string]string{"page": "abc"}}, + {path: "/abc/100", expPath: "/:page/:index", expParams: map[string]string{"page": "abc", "index": "100"}}, + {path: "/post/a/page/2", expPath: "/post/:post/page/:page", expParams: map[string]string{"post": "a", "page": "2"}}, + {path: "/date/2014/5", expPath: "/date/:year/:month", expParams: map[string]string{"year": "2014", "month": "5"}}, + {path: "/date/2014/month", expPath: "/date/:year/month", expParams: map[string]string{"year": "2014"}}, + {path: "/date/2014/5/abc", expPath: "/date/:year/:month/abc", expParams: map[string]string{"year": "2014", "month": "5"}}, + {path: "/date/2014/5/def", expPath: "/date/:year/:month/:post", expParams: map[string]string{"year": "2014", "month": "5", "post": "def"}}, + {path: "/date/2014/5/def/hij", expPath: "/date/:year/:month/*post", expParams: map[string]string{"year": "2014", "month": "5", "post": "def/hij"}}, + {path: "/date/2014/5/def/hij/", expPath: "/date/:year/:month/*post", expParams: map[string]string{"year": "2014", "month": "5", "post": "def/hij/"}}, + {path: "/date/2014/ab%2f", expPath: "/date/:year/:month", expParams: map[string]string{"year": "2014", "month": "ab%2f"}}, + {path: "/post/ab%2fdef/page/2%2f", expPath: "/post/:post/page/:page", expParams: map[string]string{"post": "ab%2fdef", "page": "2%2f"}}, + {path: "/ima/bcd/fgh", expErr: ErrNotFound}, + {path: "/date/2014//month", expErr: ErrNotFound}, + {path: "/date/2014/05/", expErr: ErrNotFound}, // Empty catchall should not match + {path: "/post//abc/page/2", expErr: ErrNotFound}, + {path: "/post/abc//page/2", expErr: ErrNotFound}, + {path: "/post/abc/page//2", expErr: ErrNotFound}, + {path: "//post/abc/page/2", expErr: ErrNotFound}, + {path: "//post//abc//page//2", expErr: ErrNotFound}, + {path: "/something/foo/bar", expPath: "/something/**", expParams: map[string]string{}}, + {path: "/images/*path", expPath: "/images/\\*path"}, + {path: "/images/*patch", expPath: "/images/\\*patch"}, + {path: "/date/:year/:month", expPath: "/date/\\:year/\\:month"}, + {path: "/apples/ab*cde/lala/baba/dada", expPath: "/apples/ab*cde/:fg/*hi", expParams: map[string]string{"fg": "lala", "hi": "baba/dada"}}, + {path: "/apples/ab\\*cde/lala/baba/dada", expPath: "/apples/ab\\*cde/:fg/*hi", expParams: map[string]string{"fg": "lala", "hi": "baba/dada"}}, + {path: "/apples/ab:cde/:fg/*hi", expPath: "/apples/ab:cde/:fg/*hi", expParams: map[string]string{"fg": ":fg", "hi": "*hi"}}, + {path: "/apples/ab*cde/:fg/*hi", expPath: "/apples/ab*cde/:fg/*hi", expParams: map[string]string{"fg": ":fg", "hi": "*hi"}}, + {path: "/apples/ab*cde/one/two/three", expPath: "/apples/ab*cde/:fg/*hi", expParams: map[string]string{"fg": "one", "hi": "two/three"}}, + {path: 
"/apples/ab*dde", expPath: "/apples/ab*dde"}, + {path: "/マ", expPath: "/マ"}, + {path: "/カ", expPath: "/カ"}, + } { + t.Run(tc.path, func(t *testing.T) { + var matcher LookupMatcher[string] + if tc.matcher == nil { + matcher = trueMatcher + } else { + matcher = tc.matcher + } + + entry, err := tree.Find(tc.path, matcher) + if tc.expErr != nil { + require.Error(t, err) + require.ErrorIs(t, err, tc.expErr) + + return + } + + require.NoError(t, err) + assert.Equalf(t, tc.expPath, entry.Value, "Path %s matched %s, expected %s", tc.path, entry.Value, tc.expPath) + + expParams := tc.expParams + if expParams == nil { + expParams = map[string]string{} + } + + assert.Equal(t, expParams, entry.Parameters, "Path %s expected parameters are %v, saw %v", tc.path, tc.expParams, entry.Parameters) + }) + } +} + +func TestTreeSearchWithBacktracking(t *testing.T) { + t.Parallel() + + // GIVEN + tree := New[string]() + + err := tree.Add("/date/:year/abc", "first", WithBacktracking[string](true)) + require.NoError(t, err) + + err = tree.Add("/date/**", "second") + require.NoError(t, err) + + // WHEN + entry, err := tree.Find("/date/2024/abc", + LookupMatcherFunc[string](func(value string, _, _ []string) bool { return value != "first" })) + + // THEN + require.NoError(t, err) + assert.Equal(t, "second", entry.Value) +} + +func TestTreeSearchWithoutBacktracking(t *testing.T) { + t.Parallel() + + // GIVEN + tree := New[string]() + + err := tree.Add("/date/:year/abc", "first") + require.NoError(t, err) + + err = tree.Add("/date/**", "second") + require.NoError(t, err) + + // WHEN + entry, err := tree.Find("/date/2024/abc", + LookupMatcherFunc[string](func(value string, _, _ []string) bool { + return value != "first" + })) + + // THEN + require.Error(t, err) + require.ErrorIs(t, err, ErrNotFound) + require.Nil(t, entry) +} + +func TestTreeAddPathDuplicates(t *testing.T) { + t.Parallel() + + tree := New[string]() + path := "/date/:year/:month/abc" + + err := tree.Add(path, "first") + require.NoError(t, err) + + err = tree.Add(path, "second") + require.NoError(t, err) + + entry, err := tree.Find("/date/2024/04/abc", + LookupMatcherFunc[string](func(value string, _, _ []string) bool { + return value == "first" + })) + require.NoError(t, err) + assert.Equal(t, "first", entry.Value) + assert.Equal(t, map[string]string{"year": "2024", "month": "04"}, entry.Parameters) + + entry, err = tree.Find("/date/2024/04/abc", + LookupMatcherFunc[string](func(value string, _, _ []string) bool { + return value == "second" + })) + require.NoError(t, err) + assert.Equal(t, "second", entry.Value) + assert.Equal(t, map[string]string{"year": "2024", "month": "04"}, entry.Parameters) +} + +func TestTreeAddPath(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + uc string + paths []string + shouldFail bool + }{ + {"slash after catch-all", []string{"/abc/*path/"}, true}, + {"path segment after catch-all", []string{"/abc/*path/def"}, true}, + {"conflicting catch-alls", []string{"/abc/*path", "/abc/*paths"}, true}, + {"ambiguous wildcards", []string{"/abc/:foo/:bar", "/abc/:oof/:rab"}, true}, + {"multiple path segments without wildcard", []string{"/", "/i", "/images", "/images/abc.jpg"}, false}, + {"multiple path segments with wildcard", []string{"/i", "/i/:aaa", "/images/:imgname", "/:images/*path", "/ima", "/ima/:par", "/images1"}, false}, + {"multiple wildcards", []string{"/date/:year/:month", "/date/:year/month", "/date/:year/:month/:post"}, false}, + {"escaped : at the beginning of path segment", []string{"/abc/\\:cd"}, 
false}, + {"escaped * at the beginning of path segment", []string{"/abc/\\*cd"}, false}, + {": in middle of path segment", []string{"/abc/ab:cd"}, false}, + {": in middle of path segment with existing path", []string{"/abc/ab", "/abc/ab:cd"}, false}, + {"* in middle of path segment", []string{"/abc/ab*cd"}, false}, + {"* in middle of path segment with existing path", []string{"/abc/ab", "/abc/ab*cd"}, false}, + {"katakana /マ", []string{"/マ"}, false}, + {"katakana /カ", []string{"/カ"}, false}, + } { + t.Run(tc.uc, func(t *testing.T) { + tree := New[string]() + + var err error + + for _, path := range tc.paths { + err = tree.Add(path, path) + if err != nil { + break + } + } + + if tc.shouldFail { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestTreeDeleteStaticPaths(t *testing.T) { + t.Parallel() + + paths := []string{ + "/apples", + "/app/les", + "/abc", + "/abc/100", + "/aaa/abc", + "/aaa/def", + "/args", + "/app/les/and/bananas", + "/app/les/or/bananas", + } + + tree := New[int]() + + for idx, path := range paths { + err := tree.Add(path, idx) + require.NoError(t, err) + } + + for i := len(paths) - 1; i >= 0; i-- { + err := tree.Delete(paths[i], deleteMatcher[int](true)) + require.NoError(t, err) + + err = tree.Delete(paths[i], deleteMatcher[int](true)) + require.Error(t, err) + } +} + +func TestTreeDeleteStaticAndWildcardPaths(t *testing.T) { + t.Parallel() + + paths := []string{ + "/:foo/bar", + "/:foo/:bar/baz", + "/apples", + "/app/awesome/:id", + "/app/:name/:id", + "/app/awesome", + "/abc", + "/abc/:les", + "/abc/:les/bananas", + } + + tree := New[int]() + + for idx, path := range paths { + err := tree.Add(path, idx+1) + require.NoError(t, err) + } + + var deletedPaths []string + + for i := len(paths) - 1; i >= 0; i-- { + tbdPath := paths[i] + + err := tree.Delete(tbdPath, deleteMatcher[int](true)) + require.NoErrorf(t, err, "Should be able to delete %s", paths[i]) + + err = tree.Delete(tbdPath, deleteMatcher[int](true)) + require.Errorf(t, err, "Should not be able to delete %s", paths[i]) + + deletedPaths = append(deletedPaths, tbdPath) + + for idx, path := range paths { + entry, err := tree.Find(path, lookupMatcher[int](true)) + + if slices.Contains(deletedPaths, path) { + require.Errorf(t, err, "Should not be able to find %s after deleting %s", path, tbdPath) + } else { + require.NoErrorf(t, err, "Should be able to find %s after deleting %s", path, tbdPath) + assert.Equal(t, idx+1, entry.Value) + } + } + } +} + +func TestTreeDeleteMixedPaths(t *testing.T) { + t.Parallel() + + paths := []string{ + "/foo/*bar", + "/:foo/:bar/baz", + "/apples", + "/app/awesome/:id", + "/app/:name/:id", + "/app/*awesome", + "/abc/cba", + "/abc/:les", + "/abc/les/bananas", + "/abc/\\:les/bananas", + "/abc/:les/bananas", + "/abc/:les/\\*all", + "/abc/:les/*all", + "/abb/\\:ba/*all", + "/abb/:ba/*all", + "/abb/\\*all", + "/abb/*all", + } + + tree := New[int]() + + for idx, path := range paths { + err := tree.Add(path, idx+1) + require.NoError(t, err) + } + + for i := len(paths) - 1; i >= 0; i-- { + tbdPath := paths[i] + + err := tree.Delete(tbdPath, deleteMatcher[int](true)) + require.NoErrorf(t, err, "Should be able to delete %s", paths[i]) + + err = tree.Delete(tbdPath, deleteMatcher[int](true)) + require.Errorf(t, err, "Should not be able to delete %s", paths[i]) + } + + require.True(t, tree.Empty()) +} + +func TestTreeClone(t *testing.T) { + t.Parallel() + + tree := New[string]() + paths := map[string]string{ + "/abc/bca/bbb": "/abc/bca/bbb", + "/abb/abc/bbb": 
"/abb/abc/bbb", + "/**": "/foo", + "/abc/*foo": "/abc/bar/baz", + "/:foo/abc": "/bar/abc", + "/:foo/:bar/**": "/bar/baz/foo", + "/:foo/:bar/abc": "/bar/baz/abc", + } + + for expr, path := range paths { + require.NoError(t, tree.Add(expr, path)) + } + + clone := tree.Clone() + + for _, path := range maps.Values(paths) { + entry, err := clone.Find(path, + LookupMatcherFunc[string](func(_ string, _, _ []string) bool { return true })) + + require.NoError(t, err) + assert.Equal(t, path, entry.Value) + } +} diff --git a/internal/x/radixtree/utils.go b/internal/x/radixtree/utils.go new file mode 100644 index 000000000..00475af37 --- /dev/null +++ b/internal/x/radixtree/utils.go @@ -0,0 +1,10 @@ +package radixtree + +func commonPrefixLen(a, b string) int { + n := 0 + for n < len(a) && n < len(b) && a[n] == b[n] { + n++ + } + + return n +} diff --git a/internal/x/tlsx/certificate_supplier.go b/internal/x/tlsx/certificate_supplier.go new file mode 100644 index 000000000..0454d2f4b --- /dev/null +++ b/internal/x/tlsx/certificate_supplier.go @@ -0,0 +1,13 @@ +package tlsx + +import "crypto/x509" + +type certificateSupplier struct { + name string + ks *keyStore +} + +func (c *certificateSupplier) Name() string { return c.name } +func (c *certificateSupplier) Certificates() []*x509.Certificate { + return c.ks.activeCertificateChain() +} diff --git a/internal/x/tlsx/key_store.go b/internal/x/tlsx/key_store.go index 3ef4dde63..bd090dbca 100644 --- a/internal/x/tlsx/key_store.go +++ b/internal/x/tlsx/key_store.go @@ -18,6 +18,7 @@ package tlsx import ( "crypto/tls" + "crypto/x509" "sync" "github.com/rs/zerolog" @@ -38,8 +39,9 @@ type keyStore struct { password string keyID string - tlsCert *tls.Certificate - mut sync.Mutex + tlsCert *tls.Certificate + certChain []*x509.Certificate + mut sync.RWMutex } func newTLSKeyStore(path, keyID, password string) (*keyStore, error) { @@ -88,17 +90,25 @@ func (cr *keyStore) load() error { cr.mut.Lock() cr.tlsCert = &cert + cr.certChain = entry.CertChain cr.mut.Unlock() return nil } +func (cr *keyStore) activeCertificateChain() []*x509.Certificate { + cr.mut.RLock() + defer cr.mut.RUnlock() + + return cr.certChain +} + func (cr *keyStore) certificate(cc compatibilityChecker) (*tls.Certificate, error) { var cert *tls.Certificate - cr.mut.Lock() + cr.mut.RLock() cert = cr.tlsCert - cr.mut.Unlock() + cr.mut.RUnlock() if err := cc.SupportsCertificate(cert); err != nil { return nil, err diff --git a/internal/x/tlsx/mock_compatibility_checker_test.go b/internal/x/tlsx/mock_compatibility_checker_test.go index 78f7bdb9d..b237fdc2a 100644 --- a/internal/x/tlsx/mock_compatibility_checker_test.go +++ b/internal/x/tlsx/mock_compatibility_checker_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.23.1. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package tlsx @@ -25,6 +25,10 @@ func (_m *compatibilityCheckerMock) EXPECT() *compatibilityCheckerMock_Expecter func (_m *compatibilityCheckerMock) SupportsCertificate(c *tls.Certificate) error { ret := _m.Called(c) + if len(ret) == 0 { + panic("no return value specified for SupportsCertificate") + } + var r0 error if rf, ok := ret.Get(0).(func(*tls.Certificate) error); ok { r0 = rf(c) @@ -63,13 +67,12 @@ func (_c *compatibilityCheckerMock_SupportsCertificate_Call) RunAndReturn(run fu return _c } -type mockConstructorTestingTnewCompatibilityCheckerMock interface { +// newCompatibilityCheckerMock creates a new instance of compatibilityCheckerMock. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newCompatibilityCheckerMock(t interface { mock.TestingT Cleanup(func()) -} - -// newCompatibilityCheckerMock creates a new instance of compatibilityCheckerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func newCompatibilityCheckerMock(t mockConstructorTestingTnewCompatibilityCheckerMock) *compatibilityCheckerMock { +}) *compatibilityCheckerMock { mock := &compatibilityCheckerMock{} mock.Mock.Test(t) diff --git a/internal/x/tlsx/options.go b/internal/x/tlsx/options.go index e3e4a83d0..270e6d37f 100644 --- a/internal/x/tlsx/options.go +++ b/internal/x/tlsx/options.go @@ -16,12 +16,30 @@ package tlsx -import "github.com/dadrus/heimdall/internal/watcher" +import ( + "github.com/dadrus/heimdall/internal/otel/metrics/certificate" + "github.com/dadrus/heimdall/internal/watcher" +) + +type noopObserver struct{} + +func (*noopObserver) Add(_ certificate.Supplier) {} +func (*noopObserver) Start() error { return nil } type options struct { - serverAuthRequired bool - clientAuthRequired bool - secretsWatcher watcher.Watcher + name string + serverAuthRequired bool + clientAuthRequired bool + secretsWatcher watcher.Watcher + certificateObserver certificate.Observer +} + +func newOptions() *options { + return &options{ + name: "unknown", + secretsWatcher: &watcher.NoopWatcher{}, + certificateObserver: &noopObserver{}, + } } type Option func(*options) @@ -40,6 +58,17 @@ func WithClientAuthentication(flag bool) Option { func WithSecretsWatcher(cw watcher.Watcher) Option { return func(o *options) { - o.secretsWatcher = cw + if cw != nil { + o.secretsWatcher = cw + } + } +} + +func WithCertificateObserver(name string, co certificate.Observer) Option { + return func(o *options) { + if co != nil { + o.name = name + o.certificateObserver = co + } } } diff --git a/internal/x/tlsx/tls.go b/internal/x/tlsx/tls.go index d178b3aef..23e60b839 100644 --- a/internal/x/tlsx/tls.go +++ b/internal/x/tlsx/tls.go @@ -25,13 +25,13 @@ import ( func ToTLSConfig(tlsCfg *config.TLS, opts ...Option) (*tls.Config, error) { var ( - args options - ks *keyStore - err error + ks *keyStore + err error ) + args := newOptions() for _, opt := range opts { - opt(&args) + opt(args) } if args.serverAuthRequired || args.clientAuthRequired { @@ -39,11 +39,11 @@ func ToTLSConfig(tlsCfg *config.TLS, opts ...Option) (*tls.Config, error) { return nil, err } - if args.secretsWatcher != nil { - if err = args.secretsWatcher.Add(ks.path, ks); err != nil { - return nil, err - } + if err = args.secretsWatcher.Add(ks.path, ks); err != nil { + return nil, err } + + args.certificateObserver.Add(&certificateSupplier{name: args.name, ks: ks}) } // nolint:gosec diff --git a/internal/x/tlsx/tls_test.go b/internal/x/tlsx/tls_test.go index bf37afe6f..cec198dab 100644 --- a/internal/x/tlsx/tls_test.go +++ b/internal/x/tlsx/tls_test.go @@ -36,6 +36,7 @@ import ( "github.com/dadrus/heimdall/internal/config" "github.com/dadrus/heimdall/internal/heimdall" + mocks2 "github.com/dadrus/heimdall/internal/otel/metrics/certificate/mocks" "github.com/dadrus/heimdall/internal/watcher/mocks" "github.com/dadrus/heimdall/internal/x/pkix/pemx" "github.com/dadrus/heimdall/internal/x/testsupport" @@ -80,14 +81,14 @@ func TestToTLSConfig(t *testing.T) { for _, tc := range 
[]struct { uc string - conf func(t *testing.T, wm *mocks.WatcherMock) config.TLS + conf func(t *testing.T, wm *mocks.WatcherMock, co *mocks2.ObserverMock) config.TLS serverAuth bool clientAuth bool assert func(t *testing.T, err error, conf *tls.Config) }{ { uc: "empty config", - conf: func(t *testing.T, _ *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, _ *mocks.WatcherMock, _ *mocks2.ObserverMock) config.TLS { t.Helper() return config.TLS{} @@ -108,7 +109,7 @@ func TestToTLSConfig(t *testing.T) { { uc: "empty config, but requires server auth", serverAuth: true, - conf: func(t *testing.T, _ *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, _ *mocks.WatcherMock, _ *mocks2.ObserverMock) config.TLS { t.Helper() return config.TLS{} @@ -124,7 +125,7 @@ func TestToTLSConfig(t *testing.T) { { uc: "fails due to not existent key store for TLS usage", serverAuth: true, - conf: func(t *testing.T, _ *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, _ *mocks.WatcherMock, _ *mocks2.ObserverMock) config.TLS { t.Helper() return config.TLS{KeyStore: config.KeyStore{Path: "/no/such/file"}} @@ -140,7 +141,7 @@ func TestToTLSConfig(t *testing.T) { { uc: "fails due to not existent key for the given key id for TLS usage", serverAuth: true, - conf: func(t *testing.T, _ *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, _ *mocks.WatcherMock, _ *mocks2.ObserverMock) config.TLS { t.Helper() return config.TLS{ @@ -160,7 +161,7 @@ func TestToTLSConfig(t *testing.T) { { uc: "fails due to not present certificates for the given key id", serverAuth: true, - conf: func(t *testing.T, _ *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, _ *mocks.WatcherMock, _ *mocks2.ObserverMock) config.TLS { t.Helper() return config.TLS{ @@ -180,7 +181,7 @@ func TestToTLSConfig(t *testing.T) { { uc: "fails due to failing watcher registration", serverAuth: true, - conf: func(t *testing.T, wm *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, wm *mocks.WatcherMock, _ *mocks2.ObserverMock) config.TLS { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(errors.New("test error")) @@ -200,10 +201,19 @@ func TestToTLSConfig(t *testing.T) { { uc: "successful with default key for TLS server auth", serverAuth: true, - conf: func(t *testing.T, wm *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, wm *mocks.WatcherMock, co *mocks2.ObserverMock) config.TLS { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) + co.EXPECT().Add(mock.MatchedBy(func(sup *certificateSupplier) bool { + assert.Equal(t, "test", sup.Name()) + + certs := sup.Certificates() + assert.Len(t, certs, 1) + assert.Equal(t, cert, certs[0]) + + return true + })) return config.TLS{ KeyStore: config.KeyStore{Path: pemFile.Name()}, @@ -226,10 +236,19 @@ func TestToTLSConfig(t *testing.T) { { uc: "successful with default key for TLS client auth", clientAuth: true, - conf: func(t *testing.T, wm *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, wm *mocks.WatcherMock, co *mocks2.ObserverMock) config.TLS { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) + co.EXPECT().Add(mock.MatchedBy(func(sup *certificateSupplier) bool { + assert.Equal(t, "test", sup.Name()) + + certs := sup.Certificates() + assert.Len(t, certs, 1) + assert.Equal(t, cert, certs[0]) + + return true + })) return config.TLS{ KeyStore: config.KeyStore{Path: pemFile.Name()}, @@ -252,10 +271,19 @@ func TestToTLSConfig(t *testing.T) { { uc: "successful with specified key id for TLS server auth", 
serverAuth: true, - conf: func(t *testing.T, wm *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, wm *mocks.WatcherMock, co *mocks2.ObserverMock) config.TLS { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) + co.EXPECT().Add(mock.MatchedBy(func(sup *certificateSupplier) bool { + assert.Equal(t, "test", sup.Name()) + + certs := sup.Certificates() + assert.Len(t, certs, 1) + assert.Equal(t, cert, certs[0]) + + return true + })) return config.TLS{ KeyStore: config.KeyStore{Path: pemFile.Name()}, @@ -279,10 +307,19 @@ func TestToTLSConfig(t *testing.T) { { uc: "successful with specified key id for TLS client auth", clientAuth: true, - conf: func(t *testing.T, wm *mocks.WatcherMock) config.TLS { + conf: func(t *testing.T, wm *mocks.WatcherMock, co *mocks2.ObserverMock) config.TLS { t.Helper() wm.EXPECT().Add(mock.Anything, mock.Anything).Return(nil) + co.EXPECT().Add(mock.MatchedBy(func(sup *certificateSupplier) bool { + assert.Equal(t, "test", sup.Name()) + + certs := sup.Certificates() + assert.Len(t, certs, 1) + assert.Equal(t, cert, certs[0]) + + return true + })) return config.TLS{ KeyStore: config.KeyStore{Path: pemFile.Name()}, @@ -307,14 +344,16 @@ func TestToTLSConfig(t *testing.T) { t.Run(tc.uc, func(t *testing.T) { // WHEN wm := mocks.NewWatcherMock(t) + om := mocks2.NewObserverMock(t) - tlsCfg := tc.conf(t, wm) + tlsCfg := tc.conf(t, wm, om) conf, err := ToTLSConfig( &tlsCfg, WithServerAuthentication(tc.serverAuth), WithClientAuthentication(tc.clientAuth), WithSecretsWatcher(wm), + WithCertificateObserver("test", om), ) // THEN diff --git a/osv-scanner.toml b/osv-scanner.toml new file mode 100644 index 000000000..31e7744a6 --- /dev/null +++ b/osv-scanner.toml @@ -0,0 +1,3 @@ +[[IgnoredVulns]] +id = "GO-2022-0646" +reason = "The access to the S3 bucket happens in read-only mode without making use of the EncryptionClient." 
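As an illustration of the reworked tlsx API (a minimal sketch, not taken from this patch): ToTLSConfig now falls back to watcher.NoopWatcher and a no-op certificate observer when the corresponding options are omitted or nil, so a caller might wire it up roughly as follows. Only identifiers visible in this diff are used; the key store path is a hypothetical placeholder.

package main

import (
	"log"

	"github.com/dadrus/heimdall/internal/config"
	"github.com/dadrus/heimdall/internal/watcher"
	"github.com/dadrus/heimdall/internal/x/tlsx"
)

func main() {
	// Hypothetical key store location - replace with a real PEM file.
	tlsCfg := config.TLS{KeyStore: config.KeyStore{Path: "/etc/heimdall/keystore.pem"}}

	// WithSecretsWatcher and WithCertificateObserver are optional; when omitted
	// (or passed nil), the no-op defaults set up by newOptions() are used.
	// tlsx.WithCertificateObserver("<service name>", observer) could additionally
	// be passed to expose the active certificate chain to a metrics observer.
	conf, err := tlsx.ToTLSConfig(&tlsCfg,
		tlsx.WithServerAuthentication(true),
		tlsx.WithSecretsWatcher(&watcher.NoopWatcher{}),
	)
	if err != nil {
		log.Fatal(err)
	}

	_ = conf // e.g. assign to http.Server.TLSConfig
}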
diff --git a/schema/config.schema.json b/schema/config.schema.json index c7e08b0e9..45fea00f5 100644 --- a/schema/config.schema.json +++ b/schema/config.schema.json @@ -551,9 +551,6 @@ "authorization_error": { "$ref": "#/definitions/responseOverride" }, - "method_error": { - "$ref": "#/definitions/responseOverride" - }, "communication_error": { "$ref": "#/definitions/responseOverride" }, @@ -716,80 +713,6 @@ } } }, - "ruleSetEndpointConfiguration": { - "description": "Endpoint to load rule sets from", - "type": "object", - "additionalProperties": false, - "required": [ - "url" - ], - "properties": { - "url": { - "description": "The URL to communicate to.", - "type": "string", - "format": "uri", - "examples": [ - "https://session-store-host" - ] - }, - "headers": { - "description": "The HTTP headers to be send to the endpoint", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "minLength": 0, - "uniqueItems": true, - "default": [] - }, - "retry": { - "description": "How the implementation should behave when trying to access the configured endpoint", - "type": "object", - "additionalProperties": false, - "properties": { - "give_up_after": { - "description": "When the implementation should finally give up, if the endpoint is not answering.", - "type": "string", - "default": "1s", - "pattern": "^[0-9]+(ns|us|ms|s|m|h)$" - }, - "max_delay": { - "description": "How long the implementation should wait between the attempts", - "type": "string", - "pattern": "^[0-9]+(ns|us|ms|s|m|h)$", - "default": "100ms" - } - } - }, - "auth": { - "description": "How to authenticate against the endpoint", - "type": "object", - "oneOf": [ - { - "$ref": "#/definitions/endpointAuthApiKeyProperties" - }, - { - "$ref": "#/definitions/endpointAuthBasicAuthProperties" - }, - { - "$ref": "#/definitions/endpointAuth2ClientCredentialsProperties" - } - ] - }, - "rule_path_match_prefix": { - "description": "The path prefix to be checked in each rule retrieved from the endpoint", - "type": "string", - "examples": [ - "/foo/bar" - ] - }, - "enable_http_cache": { - "description": "Enables or disables http cache usage according to RFC 7234", - "type": "boolean", - "default": true - } - } - }, "endpointConfiguration": { "description": "Endpoint to to communicate to", "anyOf": [ @@ -1725,9 +1648,29 @@ "type": "object", "additionalProperties": false, "required": [ - "claims" + "signer" ], "properties": { + "signer": { + "description": "Configures signer options for issued JWTs.", + "type": "object", + "additionalProperties": false, + "required": [ "key_store" ], + "properties": { + "name": { + "description": "The name of the signer (string or URL). 
Used for the 'iss' claim in the issued JWTs", + "type": "string", + "default": "heimdall" + }, + "key_store": { + "$ref": "#/definitions/keyStore" + }, + "key_id": { + "description": "The key id referencing the entry in the key store.", + "type": "string" + } + } + }, "claims": { "description": "Custom claims, which should be included into the JWT.", "type": "string" @@ -1906,7 +1849,6 @@ "required": [ "id", "type", - "if", "config" ], "properties": { @@ -1917,13 +1859,6 @@ "description": "The unique id of the error handler to be used in the rule definition", "type": "string" }, - "if": { - "description": "Condition, when this error handler should be executed", - "type": "string", - "examples": [ - "type(Error) == authentication_error" - ] - }, "config": { "type": "object", "additionalProperties": false, @@ -1944,7 +1879,6 @@ "required": [ "id", "type", - "if", "config" ], "properties": { @@ -1955,13 +1889,6 @@ "description": "The unique id of the error handler to be used in the rule definition", "type": "string" }, - "if": { - "description": "Condition, when this error handler should be executed", - "type": "string", - "examples": [ - "type(Error) == authentication_error" - ] - }, "config": { "type": "object", "additionalProperties": false, @@ -2027,7 +1954,7 @@ "type": "array", "additionalItems": false, "items": { - "$ref": "#/definitions/ruleSetEndpointConfiguration" + "$ref": "#/definitions/endpointConfiguration" } }, "watch_interval": { @@ -2078,13 +2005,6 @@ "prefix": { "description": "Indicates that only blobs with a key starting with this prefix should be retrieved", "type": "string" - }, - "rule_path_match_prefix": { - "description": "The path prefix to be checked in each url pattern of each rule retrieved from the bucket", - "type": "string", - "examples": [ - "/foo/bar" - ] } } } @@ -2477,25 +2397,6 @@ } } }, - "signer": { - "description": "Configures signer options for issued JWTs.", - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "description": "The name of the signer (string or URL). Used for the 'iss' claim in the issued JWTs", - "type": "string", - "default": "heimdall" - }, - "key_store": { - "$ref": "#/definitions/keyStore" - }, - "key_id": { - "description": "The key id referencing the entry in the key store.", - "type": "string" - } - } - }, "mechanisms": { "$ref": "#/definitions/mechanismDefinitions" }, @@ -2523,18 +2424,10 @@ "type": "object", "additionalProperties": false, "properties": { - "methods": { - "description": "Allowed HTTP methods for any endpoint", - "type": "array", - "additionalItems": false, - "uniqueItems": true, - "items": { - "type": "string" - }, - "examples": [ - "GET", - "POST" - ] + "backtracking_enabled": { + "description": "Enables or disables backtracking while matching the rules globally. Defaults to false.", + "type": "boolean", + "default": false }, "execute": { "description": "The mechanisms to execute (authenticators, authorizers, etc)",