amd64: drupal:10.2.12-php8.2-apache-bullseye (c0aaa026e47470d2d8cb8be0eca6abe29cc341e06cfa2d9ee3a063a9cb8f9a2f) #69665
Workflow file for this run

name: Build
on:
  workflow_dispatch:
    inputs:
      buildId:
        required: true
        type: string
      bashbrewArch:
        required: true
        type: choice
        options:
          - amd64
          - i386
          - windows-amd64
      firstTag: # informational only, because "run-name" can't be set to a useful value otherwise
        type: string
      windowsVersion:
        type: choice
        options:
          - '' # without this, it's technically "required" 🙃
          - 2022
          - 2019
run-name: '${{ inputs.bashbrewArch }}: ${{ inputs.firstTag }} (${{ inputs.buildId }})'
permissions:
  contents: read
  actions: write # for https://github.com/andymckay/cancel-action (see usage below)
  id-token: write # for AWS KMS signing (see usage below)
concurrency:
  group: ${{ github.event.inputs.buildId }}
  cancel-in-progress: false
defaults:
  run:
    shell: 'bash -Eeuo pipefail -x {0}'
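    # -E: functions and subshells inherit the ERR trap; -e: exit on any error;
    # -u: error on unset variables; -o pipefail: a pipeline fails if any stage fails;
    # -x: trace each command as it runs (handy in CI logs)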
env:
  BUILD_ID: ${{ inputs.buildId }}
  BASHBREW_ARCH: ${{ inputs.bashbrewArch }}
jobs:
  build:
    name: Build ${{ inputs.buildId }}
    runs-on: ${{ inputs.bashbrewArch == 'windows-amd64' && format('windows-{0}', inputs.windowsVersion) || 'ubuntu-latest' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: true
      # TODO on Linux, install Tianon's Docker builds (switch off "ubuntu-latest" to pin to something closer to something we publish Debian builds for OR just run Docker-in-Docker and use GITHUB_ENV to set DOCKER_HOST to a suitable value)
      - uses: ./.doi/.github/workflows/.bashbrew
        with:
          # skip building bashbrew from source; we download a release binary below instead (which is much faster)
          build: none # this will set BASHBREW_VERSION for us
          # TODO improve the bashbrew action to download a release binary instead of building from source ("build: download", perhaps?)
      - name: Tools
        run: |
          mkdir .gha-bin
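          # appending to $GITHUB_PATH puts .gha-bin on PATH for *subsequent* steps
          # (not this one -- hence the explicit ".gha-bin/..." invocations below)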
echo "$PWD/.gha-bin" >> "$GITHUB_PATH"
ext=''
if [ "$BASHBREW_ARCH" = 'windows-amd64' ]; then # TODO should we run "bashbrew-host-arch.sh" here instead?
ext='.exe'
fi
_download() {
# prefer wget, but "windows-2019" doesn't have it, so fall back to curl
local target="$1"; shift
local url="$1"; shift
if command -v wget > /dev/null; then
wget --timeout=5 -O "$target" "$url" --progress=dot:giga
else
curl -fL -o "$target" "$url"
fi
}
# https://github.com/docker-library/bashbrew/releases
[ -n "$BASHBREW_VERSION" ]
_download ".gha-bin/bashbrew$ext" "https://github.com/docker-library/bashbrew/releases/download/$BASHBREW_VERSION/bashbrew-$BASHBREW_ARCH$ext"
chmod +x ".gha-bin/bashbrew$ext"
".gha-bin/bashbrew$ext" --version
# https://doi-janky.infosiftr.net/job/wip/job/crane
_download ".gha-bin/crane$ext" "https://doi-janky.infosiftr.net/job/wip/job/crane/lastSuccessfulBuild/artifact/crane-$BASHBREW_ARCH$ext"
# TODO checksum verification ("checksums.txt")
chmod +x ".gha-bin/crane$ext"
".gha-bin/crane$ext" version
      - name: JSON
        id: json
        run: |
          json="$(
            jq -L.scripts '
              include "meta";
              include "doi";
              .[env.BUILD_ID]
              | select(needs_build and .build.arch == env.BASHBREW_ARCH) # sanity check
              | .commands = commands
              | .shouldSign = build_should_sign
            ' builds.json
          )"
          [ -n "$json" ]
          {
            EOJSON="EOJSON-$RANDOM-$RANDOM-$RANDOM"
            echo "json<<$EOJSON"
            cat <<<"$json"
            echo "$EOJSON"
          } | tee -a "$GITHUB_ENV" "$GITHUB_OUTPUT" > /dev/null
          mkdir build
      - name: Check
        id: check
        run: |
          img="$(jq <<<"$json" -r '.build.img')"
          if crane digest "$img"; then
            echo >&2 "error: '$img' already exists! cowardly refusing to overwrite it"
            echo 'cancel=exists' >> "$GITHUB_OUTPUT"
          else
            echo 'cancel=' >> "$GITHUB_OUTPUT"
          fi
      - name: Cancel If Built
        if: steps.check.outputs.cancel == 'exists'
        uses: andymckay/cancel-action@435124153eb37d6a62a29d053a7e449652f89d51 # https://github.com/andymckay/cancel-action/commits/HEAD
        # https://github.com/andymckay/cancel-action/issues/12
      - name: Spin Wheels If Built (waiting for cancellation)
        if: steps.check.outputs.cancel == 'exists'
        run: |
          while true; do
            echo 'Waiting for build cancellation...'
            sleep 30
          done
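          # unreachable unless the loop somehow exits; fail loudly rather than succeed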
          exit 1
      - name: Pull
        run: |
          cd build
          shell="$(jq <<<"$json" -r '.commands.pull')"
          eval "$shell"
      - name: Build
        run: |
          cd build
          shell="$(jq <<<"$json" -r '.commands.build')"
          if grep <<<"$shell" -q ' buildx '; then
            bk="$(../.doi/.bin/bashbrew-buildkit-env-setup.sh)"
bk="$(jq <<<"$bk" -r 'to_entries | map(.key + "=" + .value | @sh) | "export " + join(" ")')"
eval "$bk"
fi
eval "$shell"
      # TODO signing prototype (see above where "shouldSign" is populated)
      - name: Configure AWS (for signing)
        if: fromJSON(steps.json.outputs.json).shouldSign
        # https://github.com/aws-actions/configure-aws-credentials/releases
        uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1
        with:
          aws-region: ${{ github.ref_name == 'main' && secrets.AWS_KMS_PROD_REGION || secrets.AWS_KMS_STAGE_REGION }}
          role-to-assume: ${{ github.ref_name == 'main' && secrets.AWS_KMS_PROD_ROLE_ARN || secrets.AWS_KMS_STAGE_ROLE_ARN }}
          # TODO figure out if there's some way we could make our secrets ternaries here more DRY without major headaches 🙈
      - name: Sign
        if: fromJSON(steps.json.outputs.json).shouldSign
        env:
          AWS_KMS_REGION: ${{ github.ref_name == 'main' && secrets.AWS_KMS_PROD_REGION || secrets.AWS_KMS_STAGE_REGION }}
          AWS_KMS_KEY_ARN: ${{ github.ref_name == 'main' && secrets.AWS_KMS_PROD_KEY_ARN || secrets.AWS_KMS_STAGE_KEY_ARN }}
        run: |
          cd build
          args=(
            --interactive
            --rm
            --read-only
            --workdir /tmp # see "--tmpfs" below (TODO the signer currently uses PWD as TMPDIR -- something to fix in the future so we can drop this --workdir and only keep --tmpfs, perhaps adding --env TMPDIR=/tmp if necessary)
          )
          if [ -t 0 ] && [ -t 1 ]; then
            args+=( --tty )
          fi
          user="$(id -u)"
          args+=( --tmpfs "/tmp:uid=$user" )
          user+=":$(id -g)"
          args+=( --user "$user" )
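          # "${!AWS_@}" expands to the *names* of all set shell variables beginning with
          # "AWS_" (bash indirect expansion); "${awsEnvs[@]/#/--env=}" then prefixes each
          # name, so the container inherits exactly the AWS_* environment
          # (for docker run, "--env=NAME" with no value copies NAME from the host env)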
          awsEnvs=( "${!AWS_@}" )
          args+=( "${awsEnvs[@]/#/--env=}" )
          # some very light assumption verification (see TODO in --mount below)
          validate-oci-layout() {
            local dir="$1"
            jq -s '
              if length != 1 then
                error("unexpected \"oci-layout\" document count: \(length)")
              else .[0] end
              | if .imageLayoutVersion != "1.0.0" then
                error("unsupported imageLayoutVersion: \(.imageLayoutVersion)")
              else . end
            ' "$dir/oci-layout" || return "$?"
            jq -s '
              if length != 1 then
                error("unexpected \"index.json\" document count: \(length)")
              else .[0] end
              | if .schemaVersion != 2 then
                error("unsupported schemaVersion: \(.schemaVersion)")
              else . end
              | if .mediaType != "application/vnd.oci.image.index.v1+json" and .mediaType then # TODO drop the second half of this validation: https://github.com/moby/buildkit/issues/4595
                error("unsupported index mediaType: \(.mediaType)")
              else . end
              | if .manifests | length != 1 then
                error("expected only one manifests entry, not \(.manifests | length)")
              else . end
              | .manifests[0] |= (
                if .mediaType != "application/vnd.oci.image.index.v1+json" then
                  error("unsupported descriptor mediaType: \(.mediaType)")
                else . end
                # TODO validate .digest somehow (`crane validate`?) - would also be good to validate all descriptors recursively
                | if .size < 0 then
                  error("invalid descriptor size: \(.size)")
                else . end
              )
            ' "$dir/index.json" || return "$?"
            local manifest
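            # an OCI layout stores blobs at "blobs/<algorithm>/<hex>", so a digest like
            # "sha256:abcd..." maps to the file "blobs/sha256/abcd..."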
manifest="$dir/blobs/$(jq -r '.manifests[0].digest | sub(":"; "/")' "$dir/index.json")" || return "$?"
            jq -s '
              if length != 1 then
                error("unexpected image index document count: \(length)")
              else .[0] end
              | if .schemaVersion != 2 then
                error("unsupported schemaVersion: \(.schemaVersion)")
              else . end
              | if .mediaType != "application/vnd.oci.image.index.v1+json" then
                error("unsupported image index mediaType: \(.mediaType)")
              else . end
              # TODO more validation?
            ' "$manifest" || return "$?"
          }
          validate-oci-layout temp
          mkdir signed
          args+=(
            --mount "type=bind,src=$PWD/temp,dst=/doi-build/unsigned" # TODO this currently assumes normalized_builder == "buildkit" and !should_use_docker_buildx_driver -- we need to factor that in later (although this signs the attestations, not the image, so buildkit/buildx is the only builder whose output we *can* sign right now)
            --mount "type=bind,src=$PWD/signed,dst=/doi-build/signed"
            # https://explore.ggcr.dev/?repo=docker/image-signer-verifier
            docker/image-signer-verifier:0.3.3@sha256:a5351e6495596429bacea85fbf8f41a77ce7237c26c74fd7c3b94c3e6d409c82
            sign
            --envelope-style oci-content-descriptor
            --aws_region "$AWS_KMS_REGION"
            --aws_arn "awskms:///$AWS_KMS_KEY_ARN"
            --input oci:///doi-build/unsigned
            --output oci:///doi-build/signed
          )
          docker run "${args[@]}"
          validate-oci-layout signed
          # TODO validate that "signed" still has all the original layer blobs from "temp" (ie, that the attestation manifest *just* has some new layers and everything else is unchanged)
          rm -rf temp
          mv signed temp
      - name: Push
        env:
          DOCKER_HUB_USERNAME: ${{ secrets.DOCKER_HUB_USERNAME }}
          DOCKER_HUB_PASSWORD: ${{ secrets.DOCKER_HUB_PASSWORD }}
        run: |
          export DOCKER_CONFIG="$PWD/.docker"
          mkdir "$DOCKER_CONFIG"
          trap 'find "$DOCKER_CONFIG" -type f -exec shred -fuvz "{}" + || :; rm -rf "$DOCKER_CONFIG"' EXIT
          docker login --username "$DOCKER_HUB_USERNAME" --password-stdin <<<"$DOCKER_HUB_PASSWORD"
          unset DOCKER_HUB_USERNAME DOCKER_HUB_PASSWORD
          cd build
          shell="$(jq <<<"$json" -r '.commands.push')"
          eval "$shell"