Prow-Config GCP

Authenticate

gcloud auth login
gcloud config set project k8s-infra-ii-sandbox
gcloud auth application-default login
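
A quick sanity check that the expected account and project are active before continuing:

gcloud auth list
gcloud config get-value project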

Prepare

# run from the Terraform directory for this cluster (the same one used in Teardown)
cd clusters/projects/k8s-infra-ii-sandbox
terraform init

Apply

terraform apply -var "cluster_name=ii-sandbox-${SHARINGIO_PAIR_NAME}"

Get credentials

gcloud container clusters get-credentials ii-sandbox-${SHARINGIO_PAIR_NAME} --region us-central1
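
Verify that kubectl now points at the new cluster:

kubectl config current-context
kubectl get nodes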

Deploy

Namespaces

NAMESPACES=(helm-operator nginx-ingress prow prow-workloads distribution registry-k8s-io-envoy)
for ns in "${NAMESPACES[@]}"; do
  kubectl get ns "$ns" > /dev/null 2>&1 || kubectl create ns "$ns"
done

Helm-Operator

helm repo add fluxcd https://charts.fluxcd.io
kubectl apply -f https://raw.githubusercontent.com/fluxcd/helm-operator/v1.2.0/deploy/crds.yaml
helm upgrade -i \
    helm-operator \
    --namespace helm-operator \
    --set helm.versions=v3 \
    fluxcd/helm-operator
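
Wait for the operator to come up before creating any HelmReleases (the Deployment name follows the release name used above):

kubectl -n helm-operator rollout status deploy/helm-operator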

nginx-ingress

apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: nginx-ingress
  namespace: nginx-ingress
spec:
  chart:
    repository: https://kubernetes.github.io/ingress-nginx
    name: ingress-nginx
    version: 3.30.0
  values:
    controller:
      service:
        externalTrafficPolicy: Local
      publishService:
        enabled: true
      autoscaling:
        enabled: true
        minReplicas: 3
        maxReplicas: 5
        targetCPUUtilizationPercentage: 80
      minAvailable: 3
      metrics:
        enabled: true
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app.kubernetes.io/component
                    operator: In
                    values:
                      - controller
              topologyKey: "kubernetes.io/hostname"
Save the manifest above as nginx-ingress.yaml, then apply it:

kubectl apply -f nginx-ingress.yaml
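
The DNS step below needs the controller Service's external IP; a quick way to watch for it (using the Service name referenced later in this doc, which depends on the release and chart names):

kubectl -n nginx-ingress get svc nginx-ingress-nginx-ingress-controller -w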

cert-manager

kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml
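
Give cert-manager a moment to become ready before creating issuers:

kubectl -n cert-manager wait --for=condition=Available deployment --all --timeout=300s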

DNS

apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
  name: wildcard.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}-pair-sharing-io
  namespace: powerdns
spec:
  endpoints:
  - dnsName: "*.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}"
    recordTTL: 60
    recordType: A
    targets:
    - ${LOAD_BALANCER_IP}
Save the manifest above as dnsendpoint.yaml, then fetch the ingress load balancer IP and apply:

export LOAD_BALANCER_IP=$(kubectl -n nginx-ingress get svc nginx-ingress-nginx-ingress-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')
envsubst < dnsendpoint.yaml | kubectl --context in-cluster apply -f -
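
Once applied, the wildcard record should resolve to the load balancer IP; any label under the wildcard works as a probe (propagation can take up to the 60s TTL):

dig +short test.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}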

Certificate + cluster issuer

apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    email: ${GIT_AUTHOR_EMAIL}
    privateKeySecretRef:
      name: letsencrypt-prod
    server: https://acme-v02.api.letsencrypt.org/directory
    solvers:
    - http01:
        ingress:
          class: nginx
      selector:
        dnsNames:
        - prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
        - distribution.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
        - envoy.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
        - envoy-admin.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
        - artifacts.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
        - demo.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
        - reveal-multiplex.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: letsencrypt-prod
  namespace: prow
spec:
  commonName: prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  dnsNames:
  - prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt-prod
  secretName: letsencrypt-prod
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: letsencrypt-prod
  namespace: distribution
spec:
  commonName: distribution.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  dnsNames:
  - distribution.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt-prod
  secretName: letsencrypt-prod
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: letsencrypt-prod
  namespace: registry-k8s-io-envoy
spec:
  commonName: envoy.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  dnsNames:
  - envoy.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  - envoy-admin.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt-prod
  secretName: letsencrypt-prod
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: letsencrypt-prod
  namespace: default
spec:
  dnsNames:
  - artifactserver.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  - demo.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  - reveal-multiplex.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt-prod
  secretName: letsencrypt-prod
Save the ClusterIssuer above as cluster-issuer.yaml and the Certificates as cert.yaml, then apply both:

envsubst < cluster-issuer.yaml | kubectl apply -f -
envsubst < cert.yaml | kubectl apply -f -
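
cert-manager should bring each Certificate to Ready=True once the HTTP-01 challenges complete; a quick status check:

kubectl get clusterissuer letsencrypt-prod
kubectl get certificate -A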

Humacs

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: humacs-home-ii
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: humacs
spec:
  chart:
    git: https://github.com/humacs/humacs
    path: chart/humacs
    ref: 5878f1722291e08df707a6444d941eb146905405
  releaseName: humacs
  values:
    initContainers:
      - name: humacs-home-ii-fix-permissions
        image: alpine:3.12
        command:
          - sh
          - -c
          - chown 1000:1000 -R /home/ii && chown 1000 /run/containerd/containerd.sock
        volumeMounts:
          - mountPath: /home/ii
            name: home-ii
          - name: run-containerd-containerd-sock
            mountPath: /run/containerd/containerd.sock
    extraEnvVars:
      - name: SHARINGIO_PAIR_USER
        value: ${SHARINGIO_PAIR_USER}
      - name: SHARINGIO_PAIR_LOAD_BALANCER_IP
        value: ${LOAD_BALANCER_IP}
      - name: HUMACS_DEBUG
        value: "true"
      - name: REINIT_HOME_FOLDER
        value: "true"
      - name: SHARINGIO_PAIR_BASE_DNS_NAME
        value: ${SHARINGIO_PAIR_BASE_DNS_NAME}
      - name: SHARINGIO_PAIR_BASE_DNS_NAME_SVC_ING_RECONCILER_OVERRIDE
        value: ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
      - name: CONTAINER_RUNTIME_ENDPOINT
        value: unix:///run/containerd/containerd.sock
      - name: CONTAINER_ADDRESS
        value: /run/containerd/containerd.sock
      - name: CONTAINERD_NAMESPACE
        value: k8s.io
      - name: K8S_NODE
        valueFrom:
          fieldRef:
            fieldPath: spec.nodeName
    extraVolumeMounts:
      - mountPath: /home/ii
        name: home-ii
      - mountPath: /var/run/host
        name: host
      - name: run-containerd-containerd-sock
        mountPath: /run/containerd/containerd.sock
    extraVolumes:
      - name: home-ii
        persistentVolumeClaim:
          claimName: humacs-home-ii
      - hostPath:
          path: /
        name: host
      - name: run-containerd-containerd-sock
        hostPath:
          path: /run/containerd/containerd.sock
    image:
      repository: registry.gitlab.com/humacs/humacs/ii
      tag: latest-main
      pullPolicy: Always
    options:
      gitEmail: ${GIT_AUTHOR_EMAIL}
      gitName: ${GIT_AUTHOR_NAME}
      hostDockerSocket: true
      hostTmp: false
      profile: ""
      repos:
        - https://github.com/cncf-infra/prow-config
        - https://github.com/kubernetes/test-infra
        - https://github.com/kubernetes/k8s.io
        - https://github.com/sharingio/.sharing.io
      timezone: Pacific/Auckland
Save the manifest above as humacs.yaml, then apply it with the load balancer IP exported:

export LOAD_BALANCER_IP=$(kubectl -n nginx-ingress get svc nginx-ingress-nginx-ingress-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')
envsubst < humacs.yaml | kubectl -n default apply -f -
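
Humacs runs as a StatefulSet, so the Pod is humacs-0 (as used in the next step); wait for it before exec'ing in:

kubectl -n default wait --for=condition=Ready pod/humacs-0 --timeout=600s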

Get the tmate session

kubectl -n default exec -it humacs-0 -- tmate -S /tmp/ii.default.target.iisocket display -p "#{tmate_ssh} # #{tmate_web} $(date) #{client_tty}@#{host}"

go-http-server

apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: public-html
  namespace: default
spec:
  chart:
    git: https://gitlab.com/safesurfer/go-http-server
    path: deployments/go-http-server
    ref: 1.2.0
  releaseName: public-html
  values:
    extraVolumeMounts:
    - mountPath: /home/ii
      name: humacs-home-ii
    - mountPath: /var/run/host
      name: host
    extraVolumes:
    - name: humacs-home-ii
      persistentVolumeClaim:
        claimName: humacs-home-ii
    - hostPath:
        path: /
      name: host
    image:
      tag: 1.2.0
    ingress:
      enabled: true
      hosts:
      - host: demo.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
        paths:
        - /
      realIPHeader: X-Real-Ip
      tls:
      - hosts:
        - demo.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
        secretName: letsencrypt-prod
    serveFolder: /home/ii/public_html
    vuejsHistoryMode: false
Save the manifest above as go-http-server.yaml, then apply it:

envsubst < go-http-server.yaml | kubectl apply -f -
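
Once the certificate for demo.* is issued, the server should respond over HTTPS (it serves /home/ii/public_html, so an empty folder may return 404):

curl -I https://demo.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/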

Prow

apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: prow
  namespace: prow
spec:
  chart:
    git: https://github.com/cncf-infra/prow-config
    path: charts/prow
    ref: 5928f3bf17a0d38272255b97e5dfd37e4ca5af79
  releaseName: prow
  values:
    podNamespace: prow-workloads
    githubFromSecretRef:
      enabled: true
      oauth:
        name: prow-github-oauth
      hmac:
        name: prow-github-hmac
      cookie:
        name: prow-github-cookie
      oauthConfig:
        name: prow-github-oauth-config

    ingress:
      certmanager:
        enabled: false
      hosts:
        - host: prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
      tls:
        - secretName: letsencrypt-prod
          hosts:
            - prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}

    configFromConfigMap:
      enabled: true
      name: prow-config

    pluginsFromConfigMap:
      enabled: true
      name: prow-plugins
Save the manifest above as prow.yaml, then apply it:

envsubst < prow.yaml | kubectl apply -f -
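
helm-operator reconciles the HelmRelease in the background; check that the release deployed and the Prow components are coming up:

kubectl -n prow get helmrelease prow
kubectl -n prow get pods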

Create the cookie

COOKIE=$(openssl rand -base64 32)
GIT_ROOT=$(git rev-parse --show-toplevel)
kubectl \
    -n prow \
    create secret generic prow-github-cookie \
    --from-literal=secret="$COOKIE" \
    --dry-run=client -o yaml \
    | kubectl apply -f -

Create the hmac

HMAC=$(openssl rand -hex 20)
GIT_ROOT=$(git rev-parse --show-toplevel)
kubectl \
    -n prow \
    create secret generic prow-github-hmac \
    --from-literal=hmac=$HMAC \
    --dry-run=client -o yaml \
    | kubectl apply -f -

Create the OAuth

client_id: ${OAUTH_CLIENT_ID}
client_secret: ${OAUTH_CLIENT_SECRET}
redirect_url: https://prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/github-login/redirect
final_redirect_url: https://prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/pr
read -p "Prow OAuth app id    : " OAUTH_CLIENT_ID
read -p "Prow OAuth app secret: " OAUTH_CLIENT_SECRET
kubectl \
    -n prow \
    create secret generic prow-github-oauth \
    --from-literal=oauth="$GITHUB_TOKEN" \
    --dry-run=client -o yaml \
    | kubectl apply -f -

Create OAuth config

export OAUTH_CLIENT_ID OAUTH_CLIENT_SECRET
kubectl \
    -n prow \
    create secret generic prow-github-oauth-config \
    --from-file=secret=<(envsubst < ./github-oauth-template.yaml) \
    --dry-run=client -o yaml \
    | kubectl apply -f -
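
All four GitHub secrets referenced by the HelmRelease should now exist:

kubectl -n prow get secret prow-github-cookie prow-github-hmac prow-github-oauth prow-github-oauth-config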

Prow Config

log_level: debug
prowjob_namespace: prow
pod_namespace: prow-workloads
managed_webhooks:
  respect_legacy_global_token: true
  org_repo_config:
    ${SHARINGIO_PAIR_USER}/prow-config:
      token_created_after: 2020-06-24T00:10:00Z
in_repo_config:
  enabled:
    '*': true
deck:
  spyglass:
    lenses:
      - lens:
          name: metadata
        required_files:
          - started.json|finished.json
      - lens:
          config:
          name: buildlog
        required_files:
          - build-log.txt
      - lens:
          name: junit
        required_files:
          - .*/junit.*\.xml
      - lens:
          name: podinfo
        required_files:
          - podinfo.json
plank:
  job_url_template: |
    {{if .Spec.Refs}}
      {{if eq .Spec.Refs.Org "kubernetes-security"}}https://console.cloud.google.com/storage/browser/kubernetes-security-prow/{{else}}https://prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/view/gcs/kubernetes-jenkins/{{end}}
    {{else}}https://prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/view/gcs/kubernetes-jenkins/{{end}}
    {{if eq .Spec.Type "presubmit"}}pr-logs/pull{{else if eq .Spec.Type "batch"}}pr-logs/pull{{else}}logs{{end}}
    {{if .Spec.Refs}}
      {{if ne .Spec.Refs.Org ""}}{{if ne .Spec.Refs.Org "kubernetes"}}/{{if and (eq .Spec.Refs.Org "kubernetes-sigs") (ne .Spec.Refs.Repo "poseidon")}}sigs.k8s.io{{else}}{{.Spec.Refs.Org}}{{end}}_{{.Spec.Refs.Repo}}{{else if ne .Spec.Refs.Repo "kubernetes"}}/{{.Spec.Refs.Repo}}{{end}}{{end}}{{end}}{{if eq .Spec.Type "presubmit"}}/{{with index .Spec.Refs.Pulls 0}}{{.Number}}{{end}}{{else if eq .Spec.Type "batch"}}/batch{{end}}/{{.Spec.Job}}/{{.Status.BuildID}}/
  report_templates:
    '*': '[Full PR test history](https://prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/pr-history?org={{.Spec.Refs.Org}}&repo={{.Spec.Refs.Repo}}&pr={{with index .Spec.Refs.Pulls 0}}{{.Number}}{{end}}). [Your PR dashboard](https://prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/pr?query=is:pr+state:open+author:{{with index .Spec.Refs.Pulls 0}}{{.Author}}{{end}}).'
  job_url_prefix_config:
    '*': https://prow.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/view/
  default_decoration_configs:
    '*':
      gcs_configuration:
        bucket: s3://prow-logs
        path_strategy: explicit
      # secret must be set to RELEASE_NAME-s3-credentials
      s3_credentials_secret: prow-${SHARINGIO_PAIR_NAME}-s3-credentials
      utility_images:
        clonerefs: gcr.io/k8s-prow/clonerefs:v20210504-af1ac03335
        entrypoint: gcr.io/k8s-prow/entrypoint:v20210504-af1ac03335
        initupload: gcr.io/k8s-prow/initupload:v20210504-af1ac03335
        sidecar: gcr.io/k8s-prow/sidecar:v20210504-af1ac03335
decorate_all_jobs: true
periodics:
  - interval: 10m
    agent: kubernetes
    name: echo-test
    decorate: true
    spec:
      containers:
        - image: alpine
          command:
            - /bin/date
Save the config above as prow-config-template.yaml, then render and load it:

envsubst < ./prow-config-template.yaml > ./prow-config-ii-sandbox.yaml
kubectl -n prow \
    create configmap prow-config \
    --from-file=config\.yaml=$PWD/prow-config-ii-sandbox.yaml \
    --dry-run=client \
    -o yaml \
    | kubectl apply -f -

Prow Plugins

plugins:
  "${SHARINGIO_PAIR_USER}/*":
    plugins:
      - approve
      - assign
      - cat
      - dog
      - goose
      - hold
      - label
      - lgtm
      - lifecycle
      - owners-label
      - pony
      - shrug
      - size
      - skip
      - trigger
      - verify-owners
      - wip
      - yuks
Save the plugin config above as prow-plugins-template.yaml, then render and load it:

envsubst < ./prow-plugins-template.yaml > ./prow-plugins-ii-sandbox.yaml
kubectl -n prow \
    create configmap prow-plugins \
    --from-file=plugins\.yaml=./prow-plugins-ii-sandbox.yaml \
    --dry-run=client \
    -o yaml \
    | kubectl apply -f -
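
Before restarting the rollout, the rendered files can be validated with Prow's checkconfig tool; a sketch, assuming a kubernetes/test-infra checkout is available (the paths are assumptions):

# run from a kubernetes/test-infra checkout; adjust paths to the rendered files
go run ./prow/cmd/checkconfig \
    --config-path=./prow-config-ii-sandbox.yaml \
    --plugin-config=./prow-plugins-ii-sandbox.yaml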

Restart the rollout

kubectl -n prow rollout restart $(kubectl -n prow get deployments -o=jsonpath='{range .items[*]}{.kind}/{.metadata.name} {end}')

Run Prow workloads on the GKE cluster from a Pair instance’s Prow

Get gencred

# run from a kubernetes/test-infra checkout
go install ./gencred

Generate a Kubeconfig for use in Prow

set -x
CRED_NEW_CONTEXT=default
CRED_EXISTING_CONTEXT=gke_k8s-infra-ii-sandbox_us-central1_ii-sandbox-${SHARINGIO_PAIR_NAME}
CRED_SERVICEACCOUNT=humacs
CRED_OUTPUT=/tmp/prow-gke-humacs-kubeconfig-admin
gencred \
    -n $CRED_NEW_CONTEXT \
    --context $CRED_EXISTING_CONTEXT \
    -s $CRED_SERVICEACCOUNT \
    --overwrite \
    -o $CRED_OUTPUT
ls -alh $CRED_OUTPUT
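
A quick smoke test that the generated kubeconfig can reach the cluster:

kubectl --kubeconfig "$CRED_OUTPUT" get nodes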

TODO: Patch existing Prow HelmRelease

Create Kubeconfig secret for GKE cluster

kubectl -n prow-${SHARINGIO_PAIR_NAME} create secret generic kubeconfig-gke --from-file=config=/tmp/prow-gke-humacs-kubeconfig-admin --dry-run=client -o yaml | kubectl --context in-cluster apply -f -

Distribution

Install Distribution (with fs)

Create basic auth htpasswd:

kubectl -n distribution create secret generic distribution-auth --from-literal=htpasswd="$(htpasswd -Bbn distribution 'Distritest1234!')"

Configure the Distribution deployment:

apiVersion: v1
kind: Namespace
metadata:
  name: distribution
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: distribution-config
data:
  config.yml: |
    version: 0.1
    log:
      accesslog:
        disabled: true
      level: debug
      fields:
        service: registry
        environment: development
    # auth:
    #   htpasswd:
    #     realm: basic-realm
    #     path: /etc/docker/registry/htpasswd
    storage:
      delete:
        enabled: true
      filesystem:
        rootdirectory: /var/lib/registry
      maintenance:
        uploadpurging:
          enabled: false
    http:
      addr: :5000
      secret: registry-k8s-io-registry-k8s-io
      debug:
        addr: :5001
        prometheus:
          enabled: true
          path: /metrics
        headers:
          X-Content-Type-Options: [nosniff]
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3
    proxy:
      remoteurl: https://k8s.gcr.io
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: distribution-data
  namespace: distribution
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: distribution
  namespace: distribution
spec:
  replicas: 1
  selector:
    matchLabels:
      app: distribution
  template:
    metadata:
      labels:
        app: distribution
    spec:
      containers:
      - name: distribution
        image: registry:2.7.1
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 10m
            memory: 30Mi
          requests:
            cpu: 10m
            memory: 30Mi
        ports:
          - containerPort: 5000
        env:
          - name: TZ
            value: "Pacific/Auckland"
        volumeMounts:
          - name: distribution-data
            mountPath: /var/lib/registry
          - name: distribution-config
            mountPath: /etc/docker/registry/config.yml
            subPath: config.yml
          - name: distribution-auth
            mountPath: /etc/docker/registry/htpasswd
            subPath: htpasswd
        readinessProbe:
          tcpSocket:
            port: 5000
          initialDelaySeconds: 2
          periodSeconds: 10
        livenessProbe:
          tcpSocket:
            port: 5000
          initialDelaySeconds: 1
          periodSeconds: 20
      volumes:
        - name: distribution-data
          persistentVolumeClaim:
            claimName: distribution-data
        - name: distribution-config
          configMap:
            name: distribution-config
        - name: distribution-auth
          secret:
            secretName: distribution-auth
---
apiVersion: v1
kind: Service
metadata:
  name: distribution
  namespace: distribution
spec:
  ports:
  - port: 5000
    targetPort: 5000
  selector:
    app: distribution
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: distribution
  namespace: distribution
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
  tls:
    - hosts:
      - distribution.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
      secretName: letsencrypt-prod
  rules:
  - host: distribution.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
    http:
      paths:
      - path: /
        backend:
          serviceName: distribution
          servicePort: 5000

TODO: ensure that the registry doesn’t require authentication to pull from

Save the manifest above as distribution-fs.yaml, then install Distribution:

envsubst < distribution-fs.yaml | kubectl -n distribution apply -f -

Restart the deployment rollout if needed:

kubectl -n distribution rollout restart deployment/distribution

Log into the registry:

echo 'Distritest1234!' | docker login distribution.ii-sandbox.$SHARINGIO_PAIR_BASE_DNS_NAME -u distribution --password-stdin
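
The registry is a pull-through cache of k8s.gcr.io, so its catalog fills as images are pulled; the Distribution HTTP API can confirm auth and reachability:

curl -u 'distribution:Distritest1234!' https://distribution.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/v2/_catalog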

Envoy

envoy-config.yaml

node:
  id: web_service
  cluster: web_service

dynamic_resources:
  lds_config:
    path: /var/lib/envoy/lds.yaml

static_resources:
  clusters:
  - name: web_service
    connect_timeout: 0.25s
    type: LOGICAL_DNS
    lb_policy: round_robin
    load_assignment:
      cluster_name: web_service
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: k8s.io
                port_value: 443
admin:
  access_log_path: /dev/null
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9003

envoy-lds.yaml

resources:
- "@type": type.googleapis.com/envoy.config.listener.v3.Listener
  name: listener_0
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 10000
  filter_chains:
  - filters:
    - name: envoy.http_connection_manager
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
        stat_prefix: ingress_http
        route_config:
          name: local_route
          virtual_hosts:
          - name: local_service
            domains:
            - "*"
            routes:
            - match:
                prefix: "/"
              route:
                cluster: web_service
        http_filters:
          - name: envoy.filters.http.lua
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua
              inline_code: |
                local reg1 = "k8s.gcr.io"
                local reg2 = "distribution.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}"
                local reg2WithIP = "${HUMACS_POD_IP}"
                function envoy_on_request(request_handle)
                  local reg = reg1
                  remoteAddr = request_handle:headers():get("x-real-ip")
                  if remoteAddr == reg2WithIP then
                    request_handle:logInfo("remoteAddr: "..reg2WithIP)
                    reg = reg2
                  end
                  request_handle:logInfo("REG: "..reg)
                  request_handle:logInfo("REMOTEADDR: "..remoteAddr)
                  if request_handle:headers():get(":method") == "GET" then
                    request_handle:respond(
                      {
                        [":status"] = "302",
                        ["location"] = "https://"..reg..request_handle:headers():get(":path"),
                        ["Content-Type"] = "text/html; charset=utf-8",
                        [":authority"] = "web_service"
                      },
                      '<a href="'.."https://"..reg..request_handle:headers():get(":path")..'">'.."302".."</a>.\n")
                  end
                end
          - name: envoy.filters.http.router
            typed_config: {}

Apply configuration

export HUMACS_POD_IP=$(kubectl -n default get pod humacs-0 -o=jsonpath='{.status.podIP}')

kubectl -n registry-k8s-io-envoy create configmap envoy-config --from-file=envoy\.yaml=envoy-config.yaml --dry-run=client -o yaml | kubectl apply -f -
kubectl -n registry-k8s-io-envoy create configmap envoy-config-lds --from-file=lds\.yaml=<(envsubst < envoy-lds.yaml) --dry-run=client -o yaml | kubectl apply -f -

Deploying Envoy

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: registry-k8s-io-envoy
  name: registry-k8s-io-envoy
  namespace: registry-k8s-io-envoy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry-k8s-io-envoy
  template:
    metadata:
      labels:
        app: registry-k8s-io-envoy
    spec:
      containers:
      - name: envoy
        command:
        - /usr/local/bin/envoy
        - -c
        - /etc/envoy.yaml
        - -l
        - debug
        resources:
          limits:
            cpu: 10m
            memory: 30Mi
          requests:
            cpu: 10m
            memory: 30Mi
        image: envoyproxy/envoy:v1.18.2
        volumeMounts:
          - name: envoy-config
            mountPath: /etc/envoy.yaml
            subPath: envoy.yaml
          - name: envoy-config-lds
            mountPath: /var/lib/envoy/
        ports:
          - name: http
            containerPort: 10000
      volumes:
      - name: envoy-config
        configMap:
          name: envoy-config
      - name: envoy-config-lds
        configMap:
          name: envoy-config-lds
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: registry-k8s-io-envoy
  name: registry-k8s-io-envoy
  namespace: registry-k8s-io-envoy
spec:
  ports:
  - name: registry-k8s-io
    port: 10000
    protocol: TCP
    targetPort: 10000
  - name: registry-k8s-io-admin
    port: 9003
    protocol: TCP
    targetPort: 9003
  selector:
    app: registry-k8s-io-envoy
  type: ClusterIP
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: registry-k8s-io-envoy
  namespace: registry-k8s-io-envoy
spec:
  rules:
  - host: envoy.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
    http:
      paths:
      - backend:
          serviceName: registry-k8s-io-envoy
          servicePort: 10000
        path: /
        pathType: ImplementationSpecific
  tls:
  - hosts:
    - envoy.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
    secretName: letsencrypt-prod

Deploy Envoy

Save the manifest above as envoy.yaml, then apply it:

envsubst < envoy.yaml | kubectl apply -f -
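
GET requests should be 302-redirected by the Lua filter, to k8s.gcr.io by default or to the local Distribution when the request originates from the Humacs Pod IP; a quick probe:

curl -sI https://envoy.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/v2/ | grep -i '^location'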

Restart Envoy

kubectl -n registry-k8s-io-envoy rollout restart deployment/registry-k8s-io-envoy

Autoscale Envoy

kubectl -n registry-k8s-io-envoy autoscale deployment/registry-k8s-io-envoy --max=30

Delete Envoy

kubectl delete -f envoy.yaml

ArtifactServer

apiVersion: v1
kind: Namespace
metadata:
  name: artifactserver
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: artifactserver
data:
  config.yaml: |
    backends:
      local-distribution:
        host: distribution.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
        pathPrefix: /
        conditions:
          headers:
            # Humacs Pod IP on the ii-sandbox GKE cluster
            X-Real-Ip: ${HUMACS_POD_IP}
      k8s.gcr.io:
        host: k8s.gcr.io
        pathPrefix: /
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: artifactserver
  namespace: artifactserver
  labels:
    app: artifactserver
spec:
  replicas: 2
  selector:
    matchLabels:
      app: artifactserver
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: artifactserver
    spec:
      terminationGracePeriodSeconds: 30
      containers:
        - name: artifactserver
          image: justinsb/artifactserver:latest
          ports:
            - containerPort: 8080
              protocol: TCP
          resources:
            requests:
              cpu: 0.1
              memory: 256Mi
            limits:
              memory: 256Mi
          livenessProbe:
            httpGet:
              path: /_/healthz
              port: 8080
            initialDelaySeconds: 3
            timeoutSeconds: 2
            failureThreshold: 2
---
apiVersion: v1
kind: Service
metadata:
  name: artifactserver
  namespace: artifactserver
  labels:
    app: artifactserver
spec:
  selector:
    app: artifactserver
  ports:
    - name: http
      port: 80
      targetPort: 8080
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
  name: artifactserver
  namespace: artifactserver
spec:
  rules:
  - host: artifacts.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
    http:
      paths:
      - backend:
          serviceName: artifactserver
          servicePort: 8080
        path: /
        pathType: ImplementationSpecific
  tls:
  - hosts:
    - artifacts.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
    secretName: letsencrypt-prod

Install ArtifactServer

Save the manifest above as artifactserver.yaml, then apply it and tail the logs:

export HUMACS_POD_IP=$(kubectl -n default get pod humacs-0 -o=jsonpath='{.status.podIP}')
envsubst < artifactserver.yaml | kubectl apply -f -
kubectl -n artifactserver logs -l app=artifactserver -f --tail=100 --prefix

ArtifactsServer on Pair

Configuration for ArtifactsServer

backends:
  kops:
    host: kubeupv2.s3.amazonaws.com
    conditions:
      paths:
        - /kops/
  local-distribution:
    host: distribution.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}
    conditions:
      headers:
        # Humacs Pod IP on the ii-sandbox GKE cluster
        X-Real-Ip:
          - ${HUMACS_POD_IP}
  k8s.gcr.io:
    host: k8s.gcr.io

Bring up ArtifactsServer based on this PR

export HUMACS_POD_IP=$(kubectl -n default get pod humacs-0 -o=jsonpath='{.status.podIP}')

cd ~/kubernetes/k8s.io/artifactserver
git remote add BobyMCbobs https://github.com/ii/k8s.io
git fetch BobyMCbobs
git checkout update-artifactserver-with-conditions-and-config-file
echo "https://artifactserver.${SHARINGIO_PAIR_BASE_DNS_NAME}"
go run ./cmd/artifactserver --config=<(envsubst < /tmp/artifactserver.yaml)
curl https://artifactserver.ii-sandbox.${SHARINGIO_PAIR_BASE_DNS_NAME}/v2/

Copy presentation back from the Humacs GKE Pod

kubectl -n default cp -c humacs humacs-0:/home/ii/registry-k8s-io-demo.org $(git rev-parse --show-toplevel)/docs/presentations/k8s-infra-wg-2021-05-27/registry-k8s-io-demo.org

SSH key forward

NODE_NAME=$(kubectl -n default get pod humacs-0 -o=jsonpath='{.spec.nodeName}')
gcloud compute ssh --ssh-flag="-aT" $NODE_NAME

Teardown

Delete all the things in cluster

kubectl -n default delete -f humacs.yaml
kubectl delete -f nginx-ingress.yaml

Destroy the cluster

cd clusters/projects/k8s-infra-ii-sandbox
terraform destroy