Use absolute timestamps when dealing with expiry from Redis #1543

Workflow file for this run

---
name: e2e tests
on:
  push:
    branches:
      - '*'
  pull_request:
    branches:
      - '*'
jobs:
  limits_file_watcher:
    name: Limits File Watcher in k8s environment
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            limitador-testing
          tags: |
            type=raw,value=latest
      - name: Build Limitador Image
        id: build-image
        uses: docker/build-push-action@v5
        with:
          context: .
          tags: ${{ steps.meta.outputs.tags }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          load: true
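      # `load: true` exports the built image to the runner's local Docker daemon,
      # which is what lets the later `kind load docker-image` step copy the tag
      # into the kind cluster nodes.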
      - name: Create k8s Kind Cluster
        uses: helm/kind-action@v1.8.0 # NOTE: the pinned tag is assumed; the original version reference was garbled
        with:
          version: v0.20.0
          config: limitador-server/script/kind-cluster.yaml
          cluster_name: limitador-local
          wait: 120s
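      # The cluster topology comes from kind-cluster.yaml; the node selector used
      # further down (kubernetes.io/hostname!=kind-control-plane) implies the
      # config defines at least one worker node whose InternalIP exposes the
      # NodePort service.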
      - name: Check cluster info
        run: |
          kubectl cluster-info dump
      - name: Load limitador docker image
        run: |
          kind load docker-image ${{ steps.meta.outputs.tags }} -n limitador-local
      - name: Deploy limitador
        run: |
          kubectl apply -f limitador-server/e2e/file-watcher/configmap.yaml
          kubectl apply -f limitador-server/e2e/file-watcher/deployment.yaml
          kubectl apply -f limitador-server/e2e/file-watcher/service.yaml
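      # configmap.yaml is expected to carry the limits file that the deployment
      # mounts and Limitador watches. A rough sketch of a limit for the `test`
      # namespace matching the max_value == 1000 check below (assumed shape, not
      # the literal file contents):
      #   - namespace: test
      #     max_value: 1000
      #     seconds: 60
      #     conditions: []
      #     variables: []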
      - name: Watch limitador pod resource (background)
        timeout-minutes: 5
        run: |
          limitador-server/e2e/file-watcher/watch-pod-resource.sh </dev/null 2>/dev/null &
      - name: Wait for limitador deployment availability
        run: |
          # use 'wait' to check for Available status in .status.conditions[]
          kubectl wait deployment limitador --for condition=Available=True --timeout=300s
      - name: Read limits from HTTP endpoint
        uses: nick-fields/retry@v2
        with:
          timeout_seconds: 10
          max_attempts: 10
          command: |
            ip=$(kubectl get nodes -lkubernetes.io/hostname!=kind-control-plane -ojsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}')
            port=$(kubectl get service limitador -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
            curl -s "http://${ip}:${port}/limits/test" | tee output-limits.json
            jq --exit-status '.[] | select(.max_value == 1000)' output-limits.json
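      # The jq filter assumes GET /limits/test returns a JSON array of limit
      # objects, each carrying a max_value field, roughly:
      #   [{"namespace": "test", "max_value": 1000, "seconds": 60, ...}]
      # --exit-status makes jq exit non-zero (so the retry action retries) until
      # a limit with max_value == 1000 shows up.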
      - name: Update limit in the configmap
        run: |
          kubectl apply -f limitador-server/e2e/file-watcher/configmap_updated.yaml
          # Little trick to force kubelet to update the local file
          kubectl annotate $(kubectl get pods -l app=limitador -o name) bla=bla1
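      # Background for the trick above: kubelet only refreshes ConfigMap-backed
      # volumes periodically, so the updated limits file can take a while to land
      # in the container. Mutating the pod, e.g. by adding a throwaway annotation,
      # prompts an earlier pod sync so Limitador's file watcher sees the new
      # contents sooner.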
      - name: Watch limitador pod env (background)
        timeout-minutes: 5
        run: |
          limitador-server/e2e/file-watcher/watch-pod-env.sh </dev/null 2>/dev/null &
      - name: Read new limits from HTTP endpoint
        uses: nick-fields/retry@v2
        with:
          timeout_seconds: 10
          max_attempts: 10
          retry_wait_seconds: 30
          command: |
            ip=$(kubectl get nodes -lkubernetes.io/hostname!=kind-control-plane -ojsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}')
            port=$(kubectl get service limitador -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
            curl -s "http://${ip}:${port}/limits/test" | tee output-limits-new.json
            jq --exit-status '.[] | select(.max_value == 2000)' output-limits-new.json