diff --git a/.cirrus.yml b/.cirrus.yml index 4215feba..adc757eb 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -9,7 +9,7 @@ env: # No need to go crazy, but grab enough to cover most PRs CIRRUS_CLONE_DEPTH: 50 # Version of packer to use when building images - PACKER_VERSION: &PACKER_VERSION "1.7.0" + PACKER_VERSION: &PACKER_VERSION "1.8.0" # Unique suffix label to use for all images produced by _this_ run (build) IMG_SFX: "${CIRRUS_BUILD_ID}" @@ -32,8 +32,8 @@ validate_task: - "make clean" - "make help" - "make image_builder/gce.json" - - "make base_images/gce.json" - - "make cache_images/gce.json" + - "make base_images/cloud.json" + - "make cache_images/cloud.json" image_builder_task: @@ -138,10 +138,11 @@ base_images_task: skip: *ci_docs_tooling depends_on: - container_images + - image_builder # Packer needs time to clean up partially created VM images auto_cancellation: $CI != "true" stateful: true - timeout_in: 30m + timeout_in: 45m # Cannot use a container for this task, virt required for fedora image conversion gce_instance: <<: *ibi_vm @@ -157,11 +158,18 @@ base_images_task: - <<: *base_image env: PACKER_BUILDS: "prior-fedora" + - <<: *base_image + env: + PACKER_BUILDS: "fedora-aws" + - <<: *base_image + env: + PACKER_BUILDS: "fedora-aws-arm64" - <<: *base_image env: PACKER_BUILDS: "ubuntu" env: GAC_JSON: ENCRYPTED[7fba7fb26ab568ae39f799ab58a476123206576b0135b3d1019117c6d682391370c801e149f29324ff4b50133012aed9] + AWS_INI: ENCRYPTED[4cd69097cd29a9899e51acf3bbacceeb83cb5c907d272ca1e2a8ccd515b03f2368a0680870c0d120fc32bc578bb0a930] script: "ci/make_base_images.sh" manifest_artifacts: path: base_images/manifest.json @@ -174,11 +182,12 @@ cache_images_task: only_if: *is_pr skip: *ci_docs_tooling depends_on: + - image_builder - base_images # Packer needs time to clean up partially created VM images auto_cancellation: $CI != "true" stateful: true - timeout_in: 40m + timeout_in: 45m container: dockerfile: "image_builder/Containerfile" cpu: 2 @@ -201,12 +210,19 @@ cache_images_task: PACKER_BUILDS: "fedora-podman-py" - <<: *cache_image env: - PACKER_BUILDS: "ubuntu" + PACKER_BUILDS: "fedora-aws" + - <<: *cache_image + env: + PACKER_BUILDS: "fedora-netavark-aws" - <<: *cache_image env: PACKER_BUILDS: "build-push" + - <<: *cache_image + env: + PACKER_BUILDS: "ubuntu" env: GAC_JSON: ENCRYPTED[7fba7fb26ab568ae39f799ab58a476123206576b0135b3d1019117c6d682391370c801e149f29324ff4b50133012aed9] + AWS_INI: ENCRYPTED[4cd69097cd29a9899e51acf3bbacceeb83cb5c907d272ca1e2a8ccd515b03f2368a0680870c0d120fc32bc578bb0a930] script: "ci/make_cache_images.sh" manifest_artifacts: path: cache_images/manifest.json @@ -220,6 +236,7 @@ imgts_task: only_if: *is_pr skip: *ci_docs_tooling depends_on: + - base_images - cache_images container: image: 'quay.io/libpod/imgts:c$IMG_SFX' diff --git a/Makefile b/Makefile index ab228cd8..d0f698ed 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,10 @@ if_ci_else = $(if $(findstring true,$(CI)),$(1),$(2)) # VM images, and storage objects. export GAC_FILEPATH +# Ditto for AWS credentials (INI file) with access to create VMs and images. +# ref: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-where +export AWS_SHARED_CREDENTIALS_FILE + PACKER_LOG ?= # Uncomment the following to enable additional logging from packer. 
#override PACKER_LOG := 1 @@ -82,11 +86,13 @@ ci_debug: $(_TEMPDIR)/ci_debug.tar ## Build and enter container for local develo /usr/bin/podman run -it --rm \ --security-opt label=disable \ -v $(_MKFILE_DIR):$(_MKFILE_DIR) -w $(_MKFILE_DIR) \ - -v $(_TEMPDIR):$(_TEMPDIR):Z \ - -v $(call err_if_empty,GAC_FILEPATH):$(GAC_FILEPATH):Z \ + -v $(_TEMPDIR):$(_TEMPDIR) \ + -v $(call err_if_empty,GAC_FILEPATH):$(GAC_FILEPATH) \ + -v $(call err_if_empty,AWS_SHARED_CREDENTIALS_FILE):$(AWS_SHARED_CREDENTIALS_FILE) \ -e PACKER_INSTALL_DIR=/usr/local/bin \ -e PACKER_VERSION=$(call err_if_empty,PACKER_VERSION) \ - -e GAC_FILEPATH=$(call err_if_empty,GAC_FILEPATH) \ + -e GAC_FILEPATH=$(GAC_FILEPATH) \ + -e AWS_SHARED_CREDENTIALS_FILE=$(AWS_SHARED_CREDENTIALS_FILE) \ -e TEMPDIR=$(_TEMPDIR) \ docker-archive:$< @@ -102,7 +108,7 @@ define podman_build podman save --quiet -o $(1) $(2) endef -$(_TEMPDIR)/ci_debug.tar: $(_TEMPDIR)/.cache/fedora ci/Containerfile ci/install_packages.txt ci/install_packages.sh lib.sh +$(_TEMPDIR)/ci_debug.tar: $(_TEMPDIR)/.cache/fedora $(wildcard ci/*) $(call podman_build,$@,ci_debug,ci,fedora) $(_TEMPDIR): @@ -150,13 +156,17 @@ $(_TEMPDIR)/user-data: $(_TEMPDIR) $(_TEMPDIR)/cidata.ssh.pub $(_TEMPDIR)/cidata .PHONY: cidata cidata: $(_TEMPDIR)/user-data $(_TEMPDIR)/meta-data +# First argument is the path to the template JSON, second +# argument is the path to AWS_SHARED_CREDENTIALS_FILE +# when required. N/B: GAC_FILEPATH is always required. define packer_build env PACKER_CACHE_DIR="$(_TEMPDIR)" \ + AWS_SHARED_CREDENTIALS_FILE="$(2)" \ + GAC_FILEPATH="$(call err_if_empty,GAC_FILEPATH)" \ CHECKPOINT_DISABLE=1 \ $(PACKER_INSTALL_DIR)/packer build \ -force \ -var TEMPDIR="$(_TEMPDIR)" \ - -var GAC_FILEPATH="$(call err_if_empty,GAC_FILEPATH)" \ $(if $(PACKER_BUILDS),-only=$(PACKER_BUILDS)) \ $(if $(IMG_SFX),-var IMG_SFX=$(IMG_SFX)) \ $(if $(DEBUG_NESTED_VM),-var TTYDEV=$(shell tty),-var TTYDEV=/dev/null) \ @@ -167,7 +177,7 @@ endef .PHONY: image_builder image_builder: image_builder/manifest.json ## Create image-building image and import into GCE (needed for making all other images) image_builder/manifest.json: image_builder/gce.json image_builder/setup.sh lib.sh systemd_banish.sh $(PACKER_INSTALL_DIR)/packer - $(call packer_build,$<) + $(call packer_build,$<,) # Note: We assume this repo is checked out somewhere under the caller's # home-dir for bind-mounting purposes. 
Otherwise possibly necessary @@ -177,29 +187,31 @@ image_builder/manifest.json: image_builder/gce.json image_builder/setup.sh lib.s image_builder_debug: $(_TEMPDIR)/image_builder_debug.tar ## Build and enter container for local development/debugging of targets requiring packer + virtualization /usr/bin/podman run -it --rm \ --security-opt label=disable -v $$HOME:$$HOME -w $(_MKFILE_DIR) \ - -v $(_TEMPDIR):$(_TEMPDIR):Z \ - -v $(call err_if_empty,GAC_FILEPATH):$(GAC_FILEPATH):Z \ + -v $(_TEMPDIR):$(_TEMPDIR) \ + -v $(call err_if_empty,GAC_FILEPATH):$(GAC_FILEPATH) \ + -v $(call err_if_empty,AWS_SHARED_CREDENTIALS_FILE):$(AWS_SHARED_CREDENTIALS_FILE) \ -v /dev/kvm:/dev/kvm \ -e PACKER_INSTALL_DIR=/usr/local/bin \ -e PACKER_VERSION=$(call err_if_empty,PACKER_VERSION) \ -e IMG_SFX=$(call err_if_empty,IMG_SFX) \ - -e GAC_FILEPATH=$(call err_if_empty,GAC_FILEPATH) \ + -e GAC_FILEPATH=$(GAC_FILEPATH) \ + -e AWS_SHARED_CREDENTIALS_FILE=$(AWS_SHARED_CREDENTIALS_FILE) \ docker-archive:$< -$(_TEMPDIR)/image_builder_debug.tar: $(_TEMPDIR)/.cache/centos image_builder/Containerfile image_builder/install_packages.txt ci/install_packages.sh lib.sh +$(_TEMPDIR)/image_builder_debug.tar: $(_TEMPDIR)/.cache/centos $(wildcard image_builder/*) $(call podman_build,$@,image_builder_debug,image_builder,centos) .PHONY: base_images # This needs to run in a virt/nested-virt capable environment base_images: base_images/manifest.json ## Create, prepare, and import base-level images into GCE. Optionally, set PACKER_BUILDS= to select builder(s). -base_images/manifest.json: base_images/gce.json base_images/fedora_base-setup.sh cidata $(_TEMPDIR)/cidata.ssh $(PACKER_INSTALL_DIR)/packer - $(call packer_build,$<) +base_images/manifest.json: base_images/cloud.json $(wildcard base_images/*.sh) cidata $(_TEMPDIR)/cidata.ssh $(PACKER_INSTALL_DIR)/packer + $(call packer_build,base_images/cloud.json,$(call err_if_empty,AWS_SHARED_CREDENTIALS_FILE)) .PHONY: cache_images cache_images: cache_images/manifest.json ## Create, prepare, and import top-level images into GCE. Optionally, set PACKER_BUILDS= to select builder(s). -cache_images/manifest.json: cache_images/gce.json $(wildcard cache_images/*.sh) $(PACKER_INSTALL_DIR)/packer - $(call packer_build,$<) +cache_images/manifest.json: cache_images/cloud.json $(wildcard cache_images/*.sh) $(PACKER_INSTALL_DIR)/packer + $(call packer_build,cache_images/cloud.json,$(call err_if_empty,AWS_SHARED_CREDENTIALS_FILE)) override _fedora_podman_release := $(file < podman/fedora_release) override _prior-fedora_podman_release := $(file < podman/prior-fedora_release) diff --git a/README.md b/README.md index d8613332..eeb6a0a4 100644 --- a/README.md +++ b/README.md @@ -47,9 +47,11 @@ step**](README.md#the-last-part-first-overview-step-4). However, all steps are listed below for completeness. For more information on the overall process of importing custom GCE VM -Images, please [refer to the documentation](https://cloud.google.com/compute/docs/import/import-existing-image). For more information on the primary tool -(*packer*) used for this process, please [see it's -documentation](https://www.packer.io/docs). +Images, please [refer to the documentation](https://cloud.google.com/compute/docs/import/import-existing-image). For references to the latest pre-built AWS +EC2 Fedora AMIs, see [the +upstream cloud page](https://alt.fedoraproject.org/cloud/). +For more information on the primary tool (*packer*) used for this process, +please [see its documentation page](https://www.packer.io/docs). 1.
[Build and import a VM image](README.md#the-image-builder-image-overview-step-1) with necessary packages and metadata for @@ -68,17 +70,18 @@ documentation](https://www.packer.io/docs). Use this VM to [build and then import base-level VM image](README.md#the-base-images-overview-step-3) for supported platforms - (Fedora or Ubuntu; as of this writing). In other words, convert - generic distribution provided VM Images, into a form capable of being - booted as *GCE VMs*. In parallel, build Fedora and Ubuntu container - images and push them to ``quay.io/libpod/_podman`` + (Fedora or Ubuntu; as of this writing). For GCE use, convert the + generic distribution provided QCOW files into bootable *GCE VMs*. For + AWS, boot the pre-built AMIs, add minimal tooling, and save them as + private, non-expiring AMIs. In parallel, build Fedora and Ubuntu + container images and push them to ``quay.io/libpod/_podman`` -4. [Boot a *GCE VM* from each image produced in step +4. [Boot *VMs* from each image produced in step 3](README.md#the-last-part-first-overview-step-4). - Execute necessary - scripts to customize image for use by containers-project automation. - In other words, install packages and scripts needed for Future incarnations - of the VM to run automated tests. + Execute the necessary scripts to customize images for use by + various containers-project automation. Mainly this involves + installing build & test dependencies, but also includes some + kernel and systemd services configuration. ## The last part first (overview step 4) @@ -88,7 +91,7 @@ a.k.a. ***Cache Images*** These are the VM Images actually used by other repositories for automated testing. So, assuming you just need to update packages or tweak the list, [start here](README.md#process). Though be aware, this repository does not -yet perform any testing of the images. That's your secondary responsibility, +perform much/any testing of the images. That's your secondary responsibility, see step 4 below. **Notes:** @@ -121,17 +124,20 @@ see step 4 below. 1. After you make your script changes, push to a PR. They will be validated and linted before VM image production begins. -2. The name of all output images will share a common suffix (*image ID*). +2. The name of all output GCE images will share a common suffix (*image ID*). Assuming a successful image-build, a [github-action](.github/workflows/pr_image_id.yml) will post the new *image ID* as a comment in the PR. If this automation breaks, you may need to [figure the ID out the hard - way](README.md#Looking-up-an-image-ID). + way](README.md#Looking-up-an-image-ID). For AWS EC2 images, every one + will have a unique AMI ID assigned. You'll need to + [look these up separately](README.md#Looking-up-an-image-ID) + until the github action is updated. 3. Go over to whatever other containers/repository needed the image update. Open the `.cirrus.yml` file, and find the 'env' line referencing the *image - ID*. It will likely be named `IMAGE_SUFFIX:` or something similar. - Paste in the *image ID*. + ID* and/or *AMI*. It will likely be named `IMAGE_SUFFIX:` or something + similar. Paste in the *image ID* or *AMI*. 4. Open up a PR with this change, and push it. Once all tests pass and you're satisfied with the image changes, ask somebody to review/approve both @@ -143,25 +149,29 @@ see step 4 below. ### Looking up an image ID: ### -An *image ID* is simplya big number prefixed by the letter 'c'. You may +A GCE *image ID* is simply a big number prefixed by the letter 'c'.
You may need to look it up in a PR for example, if [the automated comment posting github-action](.github/workflows/pr_image_id.yml) -fails. +fails. For AWS EC2 images, you'll need to look up the AMI ID (string) for each +cache-image produced. -1. In a PR, find and click one of the `View more details on Cirrus CI` - links (bottom of the *Checks* tab in github). Any **Cirrus-CI** task - will do, it doesn't matter which you pick. +1. In a PR, find and click the build task for the image you're interested in. + Near the top of the Cirrus-CI WebUI will be a section labeled 'Artifacts'. -2. Toward the top of the page, is a button labeled *VIEW ALL TASKS*. - Click this button. +2. Click the `manifest` artifact. -3. Look at the URL in your browser, it will be of the form - `https://cirrus-ci.com/build/`. Copy-paste (or otherwise - record in stone) the **big number**, you'll need it for the next step. +3. Click the `cache_images` folder. -4. The new *image ID* is formed by prefixing the **big number** with the - the letter *"c*". For example, if the url was `http://.../12345` - the *image ID* would be `c12345`. +4. Click the `manifest.json` file; it should open in your browser window. + +5. For *GCE images*, look at the `artifact_id` field. It will end in a + `c`. This is the ID for + this one specific image. **Every AWS image will have a unique AMI ID** + (unlike the shared ID for GCE images). ## The image-builder image (overview step 1) @@ -173,6 +183,11 @@ to perform these steps within automation, a dedicated VM image is needed which itself has been prepared with the necessary incantations, packages, configuration, and magic license keys. +The Fedora project does provide AWS Elastic Compute Cloud (EC2) images for all +supported releases (the two most recent ones). These are ready to go, but +have deprecation times set on them. This is no good for direct use, as the +image may get deleted if not used frequently enough. So copies must be made. + For normal day-to-day use, this process should not need to be modified or maintained much. However, on the off-chance that's ever not true, here is an overview of the process followed **by automation** to produce the @@ -260,26 +275,35 @@ VM Images in GCE depend upon certain google-specific systemd-services to be running on boot. Additionally, in order to import external OS images, google needs a specific partition and archive file layout. Lastly, importing images must be done indirectly, through [Google Cloud -Storage (GCS)](https://cloud.google.com/storage/docs/introduction). As with -the image-builder image, this process is mainly orchestrated by Packer: +Storage (GCS)](https://cloud.google.com/storage/docs/introduction). + +VM images in AWS EC2 are basically ready to go as-is. Only a copy needs to +be made to remove the deprecation metadata and install some basic +automation tooling libraries used by nearly all downstream automation. + +As with the image-builder image, this process is mainly orchestrated by +Packer: 1. A GCE VM is booted from the image-builder image, produced in *overview step 1*. 2. On the image-builder VM, the (upstream) generic-cloud images for each distribution are downloaded and verified. *This is very networking-intense.* -3. The image-builder VM then boots (nested) KVM VMs for the downloaded +3. For GCE, the image-builder VM then boots (nested) KVM VMs for the downloaded images. These local VMs are then updated, installed, and prepared with the necessary packages and services as described above.
*This is very disk and CPU intense*. +4. For AWS, the pre-built Fedora project AMIs are simply booted in EC2. + 4. All the automation-deities pray with us, that the nested VMs setup correctly and completely. Debugging them can be incredibly difficult and painful. -5. Packer (running on the image-builder VM), shuts down the nested VMs, +5. Packer (running on the image-builder VM) shuts down the VMs, and performs the import/conversion process. Creating compressed tarballs, - uploading to GCS, then importing into GCP VM images. + uploading to GCS, then importing into GCP VM images. AWS EC2 instances + are snapshotted, and an AMI is created from the snapshot. 7. Packer deletes the VM, and writes the freshly created image name and other metadata details into a `image_builder/manifest.json` file for reference. @@ -290,27 +314,28 @@ the image-builder image, this process is mainly orchestrated by Packer: ## VM Image lifecycle management -There is no built-in mechanism for removing disused VM images in GCP. Nor is -there any built-in tracking information, recording which VM images are -currently being used by one or more containers-repository automation. -Three containers and two asynchronous processes are responsible for tracking -and preventing infinite-growth of the VM image count. +There is no built-in mechanism for removing disused VM images. In GCE, there +isn't any built-in tracking information, recording which VM images are +currently being used by one or more containers-repository automation. In AWS, +'Last launched' metadata is recorded automatically. Three containers and +two asynchronous processes are responsible for tracking and preventing +infinite-growth of the VM image count. * `imgts` Runs as part of automation for every repository, every time any VM is utilized. It records the usage details, along with a timestamp - into the utilized VM image "labels" (metadata). Failure to update + into the GCE VM image "labels" (metadata). Failure to update metadata is considered critical, and the task will fail to prompt immediate corrective action by automation maintainers. -* `imgobsolete` is triggered periodically by cron *only* on this - repository. It scans through all VM Images, filtering any which +* `imgobsolete` is triggered periodically by cirrus-cron *only* on this + repository. It scans through all GCE VM Images, filtering any which haven't been used within the last 30 days (according to `imgts` updated labels). Identified images are deprecated by marking them `obsolete` in GCE. This status blocks them from being used, but does not actually remove them. * `imgprune` also runs periodically, immediately following `imgobsolete`. - It scans all currently obsolete images, filtering any which were + It scans all currently obsolete GCE images, filtering any which were deprecated more than 30 days ago (according to deprecation metadata). Images which have been obsolete for more than 30 days, are permanently removed. @@ -320,8 +345,8 @@ and preventing infinite-growth of the VM image count. Because the entire automated build process is containerized, it may easily be performed locally on your laptop/workstation. However, this process will -still involve interfacing with GCP and GCS. Therefore, you must be in possession -of a *Google Application Credentials* (GAC) JSON file. +still involve interfacing with GCE and AWS. Therefore, you must be in possession +of a *Google Application Credentials* (GAC) JSON and AWS credentials INI file.
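For the AWS side, the INI file follows the standard shared-credentials layout used by the AWS CLI and Packer. A minimal sketch of setting one up locally is shown below; the file path, profile name, and key values are placeholders, not project defaults:

```bash
# Sketch only: create a local AWS shared-credentials file and point the
# build tooling at it. Path, profile name, and key values are placeholders.
install -d -m 0700 "$HOME/.aws"
cat > "$HOME/.aws/automation_images_credentials" <<'EOF'
[default]
aws_access_key_id = AKIAXXXXXXXXXXXXXXXX
aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
EOF
chmod 0600 "$HOME/.aws/automation_images_credentials"
export AWS_SHARED_CREDENTIALS_FILE="$HOME/.aws/automation_images_credentials"
```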
The GAC JSON file should represent a service account (contrasted to a user account, which always uses OAuth2). The name of the service account doesn't matter, @@ -333,20 +358,30 @@ but it must have the following roles granted to it: * Storage Admin * Storage Object Admin -Somebody familiar with Google IAM will need to provide you with the GAC JSON -file and ensure correct service account configuration. Having this file +The service account for AWS may be personal or a shared account. It must have +one of the following (custom) IAM policies enabled: + +* Admin +* Packer + +Somebody familiar with Google and AWS IAM will need to provide you with the +credential files and ensure correct account configuration. Having these files stored *in your home directory* on your laptop/workstation, the process of producing images proceeds as follows: 1. Invent some unique identity suffix for your images. It may contain (***only***) lowercase letters, numbers and dashes; nothing else. Some suggestions - of useful values would be your name and todays date. If you manage to screw + of useful values would be your name and today's date. If you manage to screw this up somehow, stern errors will be presented without causing any real harm. 2. Ensure you have podman installed, and lots of available network and CPU resources (i.e. turn off YouTube, shut down background VMs and other hungry tasks). Build the image-builder container image, by executing - ``make image_builder_debug GAC_FILEPATH= IMG_SFX=`` + ``` + make image_builder_debug GAC_FILEPATH= \ + AWS_SHARED_CREDENTIALS_FILE= \ + IMG_SFX= + ``` 3. You will be dropped into a debugging container, inside a volume-mount of the repository root. This container is practically identical to the VM @@ -371,7 +406,7 @@ producing images proceeds as follows: as before, packer will force-overwrite any broken/partially created images automatically. -6. Produce the GCE VM Cache Images, equivalent to the operations outlined +6. Produce the VM Cache Images, equivalent to the operations outlined in *overview step 3*. Execute the following command (still within the debug image-builder container): ``make cache_images``. diff --git a/base_images/gce.yml b/base_images/cloud.yml similarity index 62% rename from base_images/gce.yml rename to base_images/cloud.yml index 09815f2a..995ca284 100644 --- a/base_images/gce.yml +++ b/base_images/cloud.yml @@ -6,14 +6,13 @@ variables: # Empty value means it must be passed in on command-line # Pre-existing google storage bucket w/ very short lifecycle enabled XFERBUCKET: "packer-import" # Required path to service account credentials file - GAC_FILEPATH: + GAC_FILEPATH: "{{env `GAC_FILEPATH`}}" # Required for presenting output from qemu builders TTYDEV: # Required for 'make clean' support and not clobbering a memory-backed /tmp TEMPDIR: - # Naming suffix for images to prevent clashes (default to timestamp) - # N/B: There are length/character limitations in GCE for image names - IMG_SFX: '{{ timestamp }}' + # Naming suffix for images to prevent clashes + IMG_SFX: # BIG-FAT-WARNING: When updating the image names and/or URLs below, # ensure the distro version numbers contained in the `podman/*_release` @@ -25,6 +24,11 @@ variables: # Empty value means it must be passed in on command-line # Use the most recent image based on this family name. UBUNTU_BASE_FAMILY: 'ubuntu-2204-lts' + # AWS base image in 'US East (N.
Virginia)' region + # from https://alt.fedoraproject.org/cloud + FEDORAPROJECT_AMI: "ami-08b7bda26f4071b80" + FEDORAPROJECT_AARCH64_AMI: "ami-01925eb0821988986" + # Latest Fedora release (qcow and CHECKSUM) download URLs # N/B: There are Fedora-Cloud...GCP.tar.gz images available, however # as of this comment, they lack the cloud-init package which is @@ -37,11 +41,31 @@ variables: # Empty value means it must be passed in on command-line PRIOR_FEDORA_IMAGE_URL: "https://dl.fedoraproject.org/pub/fedora/linux/releases/35/Cloud/x86_64/images/Fedora-Cloud-Base-35-1.2.x86_64.qcow2" -# Don't leak sensitive values in error messages / output -sensitive-variables: - - 'GAC_FILEPATH' - builders: + - name: 'ubuntu' + type: 'googlecompute' + # Prefix IMG_SFX with "b" so this is never confused with a cache_image name + image_name: '{{build_name}}-b{{user `IMG_SFX`}}' + image_family: '{{build_name}}-base' + image_description: 'Built in https://cirrus-ci.com/build/{{user `IMG_SFX`}}' + source_image_family: '{{user `UBUNTU_BASE_FAMILY`}}' + source_image_project_id: 'ubuntu-os-cloud' + project_id: '{{user `GCP_PROJECT_ID`}}' + # Can't use env. var for this, googlecompute-import only supports filepath + account_file: '{{user `GAC_FILEPATH`}}' + zone: 'us-central1-a' + disk_size: 20 + # Identify the instance + labels: &imgcpylabels + sfx: '{{user `IMG_SFX`}}' + src: '{{user `UBUNTU_BASE_FAMILY`}}' + stage: 'base' + arch: 'x86_64' + # Gotcha: https://www.packer.io/docs/builders/googlecompute#gotchas + ssh_username: 'packer' + temporary_key_pair_type: ed25519 + ssh_clear_authorized_keys: true + - &qemu_virt name: 'fedora' type: 'qemu' @@ -88,26 +112,66 @@ builders: iso_url: '{{user `PRIOR_FEDORA_IMAGE_URL`}}' iso_checksum: 'file:{{user `PRIOR_FEDORA_CSUM_URL`}}' - - name: 'ubuntu' - type: 'googlecompute' - # Prefix IMG_SFX with "b" so this is never confused with a cache_image name - image_name: 'ubuntu-b{{user `IMG_SFX`}}' - image_family: '{{build_name}}-base' - source_image_family: '{{user `UBUNTU_BASE_FAMILY`}}' - source_image_project_id: 'ubuntu-os-cloud' - project_id: '{{user `GCP_PROJECT_ID`}}' - # Can't use env. var for this, googlecompute-import only supports filepath - account_file: '{{user `GAC_FILEPATH`}}' - zone: 'us-central1-a' - disk_size: 20 - # Identify the instance - labels: - sfx: '{{user `IMG_SFX`}}' - src: '{{user `UBUNTU_BASE_FAMILY`}}' - # Gotcha: https://www.packer.io/docs/builders/googlecompute#gotchas - ssh_username: 'packer' - temporary_key_pair_type: ed25519 + # ref: https://www.packer.io/plugins/builders/amazon/ebs + - &fedora-aws + name: 'fedora-aws' + type: 'amazon-ebs' + source_ami: '{{user `FEDORAPROJECT_AMI`}}' + instance_type: 'm5zn.metal' + # In case of packer problem or ungraceful exit, don't wait for shutdown. + # This doesn't always work properly, sometimes leaving EC2 instances in + # a 'stopped' instead of terminated state :( + shutdown_behavior: 'terminate' + # If something goes wrong, remove the broken AMI. 
+ force_deregister: true # Remove AMI with same name if exists + force_delete_snapshot: true # Also remove snapshots of force-removed AMI + # Required for network access, must be the 'default' group used by Cirrus-CI + security_group_id: "sg-042c75677872ef81c" + # Prefix IMG_SFX with "b" so this is never confused with a cache_image + ami_name: 'fedora-aws-b{{user `IMG_SFX`}}' + ami_description: 'Built in https://cirrus-ci.com/build/{{user `IMG_SFX`}}' + ebs_optimized: true + launch_block_device_mappings: + - device_name: '/dev/sda1' + volume_size: 20 + volume_type: 'gp2' + delete_on_termination: true + # These are critical and used by security-policy to enforce instance launch limits. + + tags: &awstags + <<: *imgcpylabels + # EC2 expects "Name" to be capitalized + Name: 'fedora-aws-b{{user `IMG_SFX`}}' + src: '{{user `FEDORAPROJECT_AMI`}}' + automation: 'true' + run_tags: *awstags + run_volume_tags: *awstags + snapshot_tags: *awstags + # Docs are wrong; specifying the Account ID is required to make AMIs private. + # This is necessary for security - The CI service accounts are not permitted + # to use AMIs from any other account, including public ones. The Account + # ID is hard-coded here out of expediency, since passing in more packer args + # from the command-line (in Makefile) is non-trivial. + ami_users: ["449134212816"] + ssh_username: 'fedora' ssh_clear_authorized_keys: true + # N/B: Requires Packer >= 1.8.0 + # https://github.com/hashicorp/packer/issues/10074#issuecomment-1070469367 + temporary_key_pair_type: 'ed25519' + + - <<: *fedora-aws + name: 'fedora-aws-arm64' + source_ami: '{{user `FEDORAPROJECT_AARCH64_AMI`}}' + instance_type: 't4g.medium' # arm64 type + ami_name: 'fedora-aws-arm64-b{{user `IMG_SFX`}}' # must be unique + tags: &awsarm64tags + <<: *awstags + src: '{{user `FEDORAPROJECT_AARCH64_AMI`}}' + arch: 'arm64' + run_tags: *awsarm64tags + run_volume_tags: *awsarm64tags + snapshot_tags: *awsarm64tags + provisioners: # Ubuntu images come bundled with GCE integrations provisioned - type: 'shell' @@ -119,7 +183,7 @@ provisioners: # Ubuntu images come bundled with GCE integrations provisioned source: '{{ pwd }}/' destination: '/tmp/automation_images/' - - only: ['fedora', 'prior-fedora'] + - except: ['ubuntu'] type: 'shell' inline: - 'set -e' @@ -133,8 +197,8 @@ provisioners: # Ubuntu images come bundled with GCE integrations provisioned post-processors: # Must be double-nested to guarantee execution order - - - type: "compress" - only: ['fedora', 'prior-fedora'] + - - only: ['fedora', 'prior-fedora'] + type: "compress" output: '{{ user `TEMPDIR` }}/{{build_name}}/disk.raw.tar.gz' format: '.tar.gz' compression_level: 9 @@ -149,15 +213,20 @@ post-processors: image_family: '{{build_name}}-base' # Can't save the url in an image_label image_description: '{{user `FEDORA_IMAGE_URL`}}' - image_labels: - sfx: '{{user `IMG_SFX`}}' + image_labels: &importlabels + <<: *imgcpylabels + src: 'fedoraproject' - <<: *gcp_import only: ['prior-fedora'] image_name: "prior-fedora-b{{user `IMG_SFX`}}" image_family: '{{build_name}}-base' image_description: '{{user `PRIOR_FEDORA_IMAGE_URL`}}' + # This is critical, especially for the aws builders. + # Producing the cache-images from these base images + # needs to look up the runtime-produced AMI ID.
- type: 'manifest' output: 'base_images/manifest.json' # Collected by Cirrus-CI strip_path: true custom_data: IMG_SFX: '{{ user `IMG_SFX` }}' + STAGE: base diff --git a/base_images/fedora_base-setup.sh b/base_images/fedora_base-setup.sh index e200d000..89f57f7e 100644 --- a/base_images/fedora_base-setup.sh +++ b/base_images/fedora_base-setup.sh @@ -21,26 +21,32 @@ source "$REPO_DIRPATH/lib.sh" # with rpm debugging. # Ref: https://github.com/rpm-software-management/rpm/commit/8cbe8baf9c3ff4754369bcd29441df14ecc6889d declare -a PKGS -PKGS=(rng-tools git coreutils) +PKGS=(rng-tools git coreutils cloud-init) XSELINUX= if ((CONTAINER)); then if ((OS_RELEASE_VER<35)); then XSELINUX="--exclude=selinux*" fi -else - PKGS+=(google-compute-engine-oslogin) - if ((OS_RELEASE_VER<35)); then - PKGS+=(google-compute-engine-tools) - else - PKGS+=(google-compute-engine-guest-configs) +else # A VM + # Packer defines this automatically for us + # shellcheck disable=SC2154 + if [[ "$PACKER_BUILD_NAME" =~ "aws" ]]; then + echo "WARN: AWS EC2 Instance Connect not supported on Fedora, use cloud-init." + else # GCP image + PKGS+=(google-compute-engine-oslogin) + if ((OS_RELEASE_VER<35)); then + PKGS+=(google-compute-engine-tools) + else + PKGS+=(google-compute-engine-guest-configs) + fi fi fi -dnf -y update $XSELINUX -dnf -y install $XSELINUX "${PKGS[@]}" +$SUDO dnf -y update $XSELINUX +$SUDO dnf -y install $XSELINUX "${PKGS[@]}" if ! ((CONTAINER)); then - systemctl enable rngd + $SUDO systemctl enable rngd fi install_automation_tooling @@ -57,24 +63,27 @@ if ! ((CONTAINER)); then # vs google-network-daemon.service. Fix this with a custom # cloud-init service file. CLOUD_SERVICE_PATH="systemd/system/cloud-init.service" - echo "$sourcemsg" > /etc/$CLOUD_SERVICE_PATH - cat $SCRIPT_DIRPATH/fedora-cloud-init.service >> /etc/$CLOUD_SERVICE_PATH + echo -e "$sourcemsg" | $SUDO tee /etc/$CLOUD_SERVICE_PATH + cat $SCRIPT_DIRPATH/fedora-cloud-init.service | \ + $SUDO tee -a /etc/$CLOUD_SERVICE_PATH fi - echo "Setting GCP startup service (for Cirrus-CI agent) SELinux unconfined" - # ref: https://cloud.google.com/compute/docs/startupscript # The mechanism used by Cirrus-CI to execute tasks on the system is through an - # "agent" process launched as a GCP startup-script (from the metadata service). + # "agent" process launched as a GCP VM startup-script (from 'user-data'). # This agent is responsible for cloning the repository and executing all task # scripts and other operations. Therefor, on SELinux-enforcing systems, the # service must be labeled properly to ensure it's child processes can # run with the proper contexts. METADATA_SERVICE_CTX=unconfined_u:unconfined_r:unconfined_t:s0 - METADATA_SERVICE_PATH=systemd/system/google-startup-scripts.service - echo "$sourcemsg" > /etc/$METADATA_SERVICE_PATH - sed -r -e \ - "s/^Type=oneshot/Type=oneshot\nSELinuxContext=$METADATA_SERVICE_CTX/" \ - /lib/$METADATA_SERVICE_PATH >> /etc/$METADATA_SERVICE_PATH + if [[ ! 
"$PACKER_BUILD_NAME" =~ "aws" ]]; then # GCP Image + echo "Setting GCP startup service (for Cirrus-CI agent) SELinux unconfined" + # ref: https://cloud.google.com/compute/docs/startupscript + METADATA_SERVICE_PATH=systemd/system/google-startup-scripts.service + echo "$sourcemsg" | $SUDO tee -a /etc/$METADATA_SERVICE_PATH + sed -r -e \ + "s/^Type=oneshot/Type=oneshot\nSELinuxContext=$METADATA_SERVICE_CTX/" \ + /lib/$METADATA_SERVICE_PATH | $SUDO tee -a /etc/$METADATA_SERVICE_PATH + fi fi if [[ "$OS_RELEASE_ID" == "fedora" ]] && ((OS_RELEASE_VER>=33)); then diff --git a/cache_images/build-push_setup.sh b/cache_images/build-push_setup.sh deleted file mode 100644 index b2407e50..00000000 --- a/cache_images/build-push_setup.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# This script is called by packer on the subject fedora VM, to setup the podman -# build/test environment. It's not intended to be used outside of this context. - -set -e - -SCRIPT_FILEPATH=$(realpath "$0") -SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") -REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") - -# Run as quickly as possible after boot -/bin/bash $REPO_DIRPATH/systemd_banish.sh - -# shellcheck source=./lib.sh -source "$REPO_DIRPATH/lib.sh" - -# packer and/or a --build-arg define this envar value uniformly -# for both VM and container image build workflows. -req_env_vars PACKER_BUILD_NAME - -bash $SCRIPT_DIRPATH/build-push_packaging.sh - -# Registers qemu emulation for non-native execution -$SUDO systemctl enable systemd-binfmt - -# Pre-populate container storage with multi-arch base images -for arch in amd64 s390x ppc64le arm64; do - msg "Caching latest $arch fedora image..." - $SUDO podman pull --quiet --arch=$arch \ - registry.fedoraproject.org/fedora:$OS_RELEASE_VER -done - -finalize - -echo "SUCCESS!" diff --git a/cache_images/cloud.yml b/cache_images/cloud.yml new file mode 100644 index 00000000..c6299282 --- /dev/null +++ b/cache_images/cloud.yml @@ -0,0 +1,169 @@ +--- + +variables: # Empty value means it must be passed in on command-line + # For produced images, to prevent name clashes + IMG_SFX: + + # Required for ssh private key access + TEMPDIR: + + # GCE Project ID where images will be produced + GCP_PROJECT_ID: "libpod-218412" + + # Required path to service account credentials file + GAC_FILEPATH: "{{env `GAC_FILEPATH`}}" + + +builders: + - &gce_hosted_image + name: 'ubuntu' + type: 'googlecompute' + # N/B: This implies base images always built with same IMG_SFX. + source_image: '{{ build_name }}-b{{user `IMG_SFX`}}' + # Prefix IMG_SFX with "c" so this is never confused with a base_image name + image_name: '{{ build_name }}-c{{user `IMG_SFX`}}' + image_family: '{{ build_name }}-cache' + project_id: '{{user `GCP_PROJECT_ID`}}' + account_file: '{{user `GAC_FILEPATH`}}' + zone: 'us-central1-a' + disk_size: 20 # REQUIRED: Runtime allocation > this value + disable_default_service_account: true + labels: # For the VM + sfx: '{{user `IMG_SFX`}}' + src: '{{ build_name }}-b{{user `IMG_SFX` }}' + stage: cache + ssh_username: packer # arbitrary, packer will create & setup w/ temp. 
keypair + ssh_pty: 'true' + temporary_key_pair_type: ed25519 + ssh_clear_authorized_keys: true + # Permit running nested VMs to support specialized testing + image_licenses: ["projects/vm-options/global/licenses/enable-vmx"] + + - <<: *gce_hosted_image + name: 'fedora' + + - <<: *gce_hosted_image + name: 'prior-fedora' + + - &aux_fed_img + <<: *gce_hosted_image + name: 'build-push' + source_image: 'fedora-b{{user `IMG_SFX`}}' + source_image_family: 'fedora-base' + + - <<: *aux_fed_img + name: 'fedora-podman-py' + + - <<: *aux_fed_img + name: 'fedora-netavark' + + # ref: https://www.packer.io/plugins/builders/amazon/ebs + - &fedora-aws + name: 'fedora-aws' + type: 'amazon-ebs' + instance_type: 'm5zn.metal' + source_ami_filter: # Will fail if >1 or no AMI found + owners: + # Docs are wrong; specifying the Account ID is required to make AMIs private. + # The Account ID is hard-coded here out of expediency, since passing in + # more packer args from the command-line (in Makefile) is non-trivial. + - &accountid '449134212816' + # It's necessary to 'search' for the base-image by these criteria. If + # more than one image is found, Packer will fail the build (and display + # the conflicting AMI IDs). + filters: &ami_filters + architecture: 'x86_64' + image-type: 'machine' + is-public: 'false' + name: '{{build_name}}-b{{user `IMG_SFX`}}' + root-device-type: 'ebs' + state: 'available' + virtualization-type: 'hvm' + # In case of packer problem or ungraceful exit, don't wait for shutdown. + # This doesn't always work properly, sometimes leaving EC2 instances in + # a 'stopped' instead of terminated state :( + shutdown_behavior: 'terminate' + # If something goes wrong, remove the broken AMI. + force_deregister: true # Remove AMI with same name if exists + force_delete_snapshot: true # Also remove snapshots of force-removed AMI + # Required for network access, must be the 'default' group used by Cirrus-CI + security_group_id: "sg-042c75677872ef81c" + # Prefix IMG_SFX with "c" so this is never confused with a base_image + ami_name: '{{build_name}}-c{{user `IMG_SFX`}}' + ami_description: 'Built in https://cirrus-ci.com/build/{{user `IMG_SFX`}}' + ebs_optimized: true + launch_block_device_mappings: + - device_name: '/dev/sda1' + volume_size: 200 + volume_type: 'gp2' + delete_on_termination: true + # These are critical and used by security-policy to enforce instance launch limits.
+ tags: &tags + # EC2 expects "Name" tag to be capitalized + Name: '{{build_name}}-c{{user `IMG_SFX`}}' + sfx: '{{user `IMG_SFX`}}' + src: '{{.SourceAMI}}' # Generated AMI ID looked up at runtime + automation: 'true' + stage: 'cache' + arch: 'x86_64' + run_tags: *tags + run_volume_tags: *tags + snapshot_tags: *tags + # Also required to make AMI private + ami_users: + - *accountid + ssh_username: 'root' + ssh_clear_authorized_keys: true + # N/B: Requires Packer >= 1.8.0 + # https://github.com/hashicorp/packer/issues/10074#issuecomment-1070469367 + temporary_key_pair_type: 'ed25519' + + - <<: *fedora-aws + name: 'fedora-netavark-aws' + source_ami_filter: + owners: + - *accountid + filters: + <<: *ami_filters + architecture: 'arm64' + name: 'fedora-aws-arm64-b{{user `IMG_SFX`}}' + instance_type: 't4g.medium' # arm64 type + tags: &netavark_tags + <<: *tags + Name: '{{build_name}}-c{{user `IMG_SFX`}}' + arch: 'arm64' + run_tags: *netavark_tags + run_volume_tags: *netavark_tags + snapshot_tags: *netavark_tags + +provisioners: + - type: 'shell' + inline: + - 'set -e' + - 'mkdir -p /tmp/automation_images' + + - type: 'file' + source: '{{ pwd }}/' + destination: "/tmp/automation_images" + + - except: ['ubuntu'] + type: 'shell' + inline: + - 'set -e' + - '/bin/bash /tmp/automation_images/cache_images/fedora_setup.sh' + + - only: ['ubuntu'] + type: 'shell' + inline: + - 'set -e' + - '/bin/bash /tmp/automation_images/cache_images/ubuntu_setup.sh' + +post-processors: + # This is critical for human interaction. Contents will be used + # to provide the image names and IDs to the user's PR. + - - type: 'manifest' # writes packer-manifest.json + output: 'cache_images/manifest.json' + strip_path: true + custom_data: + IMG_SFX: '{{ user `IMG_SFX` }}' + STAGE: 'cache' diff --git a/cache_images/fedora_packaging.sh b/cache_images/fedora_packaging.sh index 90c1f298..361b3680 100644 --- a/cache_images/fedora_packaging.sh +++ b/cache_images/fedora_packaging.sh @@ -181,6 +181,7 @@ DOWNLOAD_PACKAGES=(\ parallel podman-docker podman-plugins + podman-gvproxy python3-pytest python3-virtualenv ) diff --git a/cache_images/fedora_setup.sh b/cache_images/fedora_setup.sh index 09b1fa6a..3d77ceb8 100644 --- a/cache_images/fedora_setup.sh +++ b/cache_images/fedora_setup.sh @@ -26,6 +26,15 @@ if [[ "$PACKER_BUILD_NAME" =~ "netavark" ]]; then bash $SCRIPT_DIRPATH/fedora-netavark_packaging.sh elif [[ "$PACKER_BUILD_NAME" =~ "podman-py" ]]; then bash $SCRIPT_DIRPATH/fedora-podman-py_packaging.sh +elif [[ "$PACKER_BUILD_NAME" =~ "build-push" ]]; then + bash $SCRIPT_DIRPATH/build-push_packaging.sh + # Registers qemu emulation for non-native execution + $SUDO systemctl enable systemd-binfmt + for arch in amd64 s390x ppc64le arm64; do + msg "Caching latest $arch fedora image..."
+ $SUDO podman pull --quiet --arch=$arch \ + registry.fedoraproject.org/fedora:$OS_RELEASE_VER + done else bash $SCRIPT_DIRPATH/fedora_packaging.sh fi diff --git a/cache_images/gce.yml b/cache_images/gce.yml deleted file mode 100644 index 7adac20a..00000000 --- a/cache_images/gce.yml +++ /dev/null @@ -1,94 +0,0 @@ ---- - -variables: - # GCE Project ID where images will be produced - GCP_PROJECT_ID: "libpod-218412" - # Required path to service account credentials file - GAC_FILEPATH: - # For produced images - IMG_SFX: '{{ timestamp }}' - - # Required for ssh private key access - TEMPDIR: - - -# Don't leak sensitive values in error messages / output -sensitive-variables: - - 'GAC_FILEPATH' - -builders: - - &gce_hosted_image - name: 'ubuntu' - type: 'googlecompute' - # N/B: This implies base images always built with same IMG_SFX. - source_image: '{{ build_name }}-b{{user `IMG_SFX`}}' - # Prefix IMG_SFX with "c" so this is never confused with a base_image name - image_name: '{{ build_name }}-c{{user `IMG_SFX`}}' - image_family: '{{ build_name }}-cache' - project_id: '{{user `GCP_PROJECT_ID`}}' - account_file: '{{user `GAC_FILEPATH`}}' - zone: 'us-central1-a' - disk_size: 20 # REQUIRED: Runtime allocation > this value - disable_default_service_account: true - labels: # For the VM - sfx: '{{user `IMG_SFX`}}' - src: '{{ build_name }}-b{{user `IMG_SFX` }}' - ssh_username: packer # arbitrary, packer will create & setup w/ temp. keypair - ssh_pty: 'true' - temporary_key_pair_type: ed25519 - ssh_clear_authorized_keys: true - # Permit running nested VM's to support specialized testing - image_licenses: ["projects/vm-options/global/licenses/enable-vmx"] - - - <<: *gce_hosted_image - name: 'fedora' - - - <<: *gce_hosted_image - name: 'prior-fedora' - - - &aux_fed_img - <<: *gce_hosted_image - name: 'build-push' - source_image: 'fedora-b{{user `IMG_SFX`}}' - source_image_family: 'fedora-base' - - - <<: *aux_fed_img - name: 'fedora-podman-py' - - - <<: *aux_fed_img - name: 'fedora-netavark' - -provisioners: - - type: 'shell' - inline: - - 'set -e' - - 'mkdir -p /tmp/automation_images' - - - type: 'file' - source: '{{ pwd }}/' - destination: "/tmp/automation_images" - - - only: ['fedora', 'prior-fedora', 'fedora-netavark', 'fedora-podman-py'] - type: 'shell' - inline: - - 'set -e' - - '/bin/bash /tmp/automation_images/cache_images/fedora_setup.sh' - - - only: ['ubuntu'] - type: 'shell' - inline: - - 'set -e' - - '/bin/bash /tmp/automation_images/cache_images/ubuntu_setup.sh' - - - only: ['build-push'] - type: 'shell' - inline: - - 'set -e' - - '/bin/bash /tmp/automation_images/cache_images/build-push_setup.sh' - -post-processors: - - - type: 'manifest' # writes packer-manifest.json - output: 'cache_images/manifest.json' - strip_path: true - custom_data: - IMG_SFX: '{{ user `IMG_SFX` }}' diff --git a/ci/make_base_images.sh b/ci/make_base_images.sh index 0fa2bcd3..4c2e8d6e 100755 --- a/ci/make_base_images.sh +++ b/ci/make_base_images.sh @@ -20,6 +20,7 @@ elif [[ -z "$IMG_SFX" ]] || [[ -z "$PACKER_BUILDS" ]]; then fi set_gac_filepath +set_aws_filepath set -exo pipefail cd "$REPO_DIRPATH" diff --git a/ci/make_cache_images.sh b/ci/make_cache_images.sh index 9e88923d..f57f172a 100755 --- a/ci/make_cache_images.sh +++ b/ci/make_cache_images.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -eo pipefail + # This script is intended to be used by Cirrus-CI, from the container # built by the ContainerFile in this directory. Use of this script # in any other context/environment is unlikely to function as intended. 
@@ -19,8 +21,9 @@ elif [[ -z "$IMG_SFX" ]] || [[ -z "$PACKER_BUILDS" ]]; then fi set_gac_filepath +set_aws_filepath -set -exo pipefail +set -x cd "$REPO_DIRPATH" export IMG_SFX=$IMG_SFX export PACKER_BUILDS=$PACKER_BUILDS diff --git a/image_builder/Containerfile b/image_builder/Containerfile index 3fd53500..1409ef20 100644 --- a/image_builder/Containerfile +++ b/image_builder/Containerfile @@ -1,7 +1,8 @@ -# This dockerfile mimics locally, the environment used for -# making base_images. It's intended to be created and used -# by the relevant target in the Makefile at the root of this -# repository. +# This Containerfile produces the environment necessary for +# building CI VM images. It's intended to be created and used +# by humans or CI, to build targets defined in the Makefile +# at the root of this repository. It should be built with +# the repository root as the context directory. ARG CENTOS_RELEASE=stream8 FROM quay.io/centos/centos:${CENTOS_RELEASE} diff --git a/image_builder/gce.yml b/image_builder/gce.yml index 40668f4e..b521e2bd 100644 --- a/image_builder/gce.yml +++ b/image_builder/gce.yml @@ -3,17 +3,17 @@ variables: # GCE Project ID where images will be produced GCP_PROJECT_ID: "libpod-218412" + # Pre-existing google storage bucket w/ very short lifecycle enabled XFERBUCKET: "packer-import" + # Required path to service account credentials file - GAC_FILEPATH: + GAC_FILEPATH: "{{env `GAC_FILEPATH`}}" + # Naming suffix for images to prevent clashes (default to timestamp) # N/B: There are length/character limitations in GCE for image names IMG_SFX: '{{ timestamp }}' -# Don't leak sensitive values in error messages / output -sensitive-variables: - - 'GAC_FILEPATH' builders: - name: 'image-builder' diff --git a/image_builder/install_packages.sh b/image_builder/install_packages.sh index 7ec3aa4a..3fee5191 100644 --- a/image_builder/install_packages.sh +++ b/image_builder/install_packages.sh @@ -21,18 +21,8 @@ source "$REPO_DIRPATH/lib.sh" [[ -n "$PACKER_VERSION" ]] || \ die "Expecting a non-empty \$PACKER_VERSION value" -set -x - dnf update -y - dnf -y install epel-release - dnf mark remove $(rpm -qa | grep -Ev '(gpg-pubkey)|(dnf)|(sudo)') - - dnf install -y $(<"$INST_PKGS_FP") -set +x - -# Only for containers do we care about saving every ounce of disk-space -if (("${CONTAINER:-0}")); then - dnf mark install dnf yum $(<"$INST_PKGS_FP") - dnf autoremove -y -fi +dnf update -y +dnf -y install epel-release +dnf install -y $(<"$INST_PKGS_FP") install_automation_tooling diff --git a/image_builder/setup.sh b/image_builder/setup.sh index e1f356d0..91a654a6 100644 --- a/image_builder/setup.sh +++ b/image_builder/setup.sh @@ -12,7 +12,9 @@ SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") # Run as quickly as possible after boot -/bin/bash $REPO_DIRPATH/systemd_banish.sh +# unless building a container +((CONTAINER)) || \ + /bin/bash $REPO_DIRPATH/systemd_banish.sh # shellcheck source=./lib.sh source "$REPO_DIRPATH/lib.sh" @@ -21,14 +23,18 @@ PACKER_VERSION=$(bash $REPO_DIRPATH/get_packer_version.sh) $SUDO env PACKER_VERSION=$PACKER_VERSION \ /bin/bash "$SCRIPT_DIRPATH/install_packages.sh" -$SUDO systemctl enable rngd +# Unnecessary inside a container +if ! 
((CONTAINER)); then + $SUDO systemctl enable rngd -$SUDO tee /etc/modprobe.d/kvm-nested.conf <0)); then + rm -f "$GAC_FILEPATH" + fi + if ((${#AWS_SHARED_CREDENTIALS_FILE}>0)); then + rm -f "$AWS_SHARED_CREDENTIALS_FILE" + fi +} + # This function may only/ever be used within Cirrus-CI set_gac_filepath() { # shellcheck disable=SC2154 if [[ -z "$CI" ]] || [[ "$CI" != "true" ]] || [[ "$CIRRUS_CI" != "$CI" ]]; then die "Unexpected \$CI=$CI and/or \$CIRRUS_CI=$CIRRUS_CI" - elif ((${#GAC_JSON}<=0)); then + elif ((${#GAC_JSON}<=2)); then die "Required (secret) \$GAC_JSON value appears to be empty" elif grep -iq "ENCRYPTED" <<<"$GAC_JSON"; then die "Decrpytion of \$GAC_JSON failed." @@ -93,11 +106,30 @@ set_gac_filepath() { set +x; GAC_FILEPATH=$(mktemp -p '' '.XXXXXXXX.') export GAC_FILEPATH - trap "rm -f $GAC_FILEPATH" EXIT + trap clear_cred_files EXIT echo "$GAC_JSON" > "$GAC_FILEPATH" unset GAC_JSON; } +# This function may only/ever be used within Cirrus-CI +set_aws_filepath() { + # shellcheck disable=SC2154 + if [[ -z "$CI" ]] || [[ "$CI" != "true" ]] || [[ "$CIRRUS_CI" != "$CI" ]]; then + die "Unexpected \$CI=$CI and/or \$CIRRUS_CI=$CIRRUS_CI" + elif ((${#AWS_INI}<=2)); then + die "Required (secret) \$AWS_INI value appears to be empty" + elif grep -iq "ENCRYPTED" <<<"$AWS_INI"; then + die "Decrpytion of \$AWS_INI failed." + fi + set +x; + # Magic filename packer is sensitive to + AWS_SHARED_CREDENTIALS_FILE=$(mktemp -p '' '.XXXXXXXX.') + export AWS_SHARED_CREDENTIALS_FILE + trap clear_cred_files EXIT + echo "$AWS_INI" > "$AWS_SHARED_CREDENTIALS_FILE" + unset AWS_INI; +} + # print a space-separated list of labels when run under Cirrus-CI for a PR get_pr_labels() { req_env_vars CIRRUS_CI CIRRUS_PR CIRRUS_REPO_CLONE_TOKEN
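Once a build's `manifest.json` artifact has been downloaded (see the "Looking up an image ID" section above), the per-builder image names and AMI IDs can be pulled out with `jq`. This is only a sketch, assuming Packer's usual manifest post-processor layout where each entry in `builds[]` carries `name`, `builder_type`, and `artifact_id` (for `amazon-ebs` builds the latter typically looks like `<region>:<AMI ID>`):

```bash
# Sketch: list every build recorded in a downloaded cache_images/manifest.json
jq -r '.builds[] | "\(.name) \(.builder_type) \(.artifact_id)"' \
    cache_images/manifest.json

# Only the AWS AMI IDs, assuming the builder_type field is "amazon-ebs"
jq -r '.builds[] | select(.builder_type == "amazon-ebs") | .artifact_id' \
    cache_images/manifest.json
```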