From f37d1d2175445a3c8d2a5eb25748c274433083b0 Mon Sep 17 00:00:00 2001
From: Chris Evich
Date: Tue, 3 Nov 2020 15:10:31 -0500
Subject: [PATCH 1/3] Use ping from alpine

As of this commit, on Fedora 33, running a container without
`CAP_NET_ADMIN` and `CAP_NET_RAW` requires setting
`net.ipv4.ping_group_range` in order for the `ping` command to work
inside the container.  However, not all images' `ping` commands are
created equal.  For whatever reason, the busybox version in the busybox
container image does not function.  Switch to the Alpine image's
busybox ping, which seems to work fine.

Signed-off-by: Chris Evich
---
 test/e2e/pod_infra_container_test.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/test/e2e/pod_infra_container_test.go b/test/e2e/pod_infra_container_test.go
index 797d51c335..7ec36b2f82 100644
--- a/test/e2e/pod_infra_container_test.go
+++ b/test/e2e/pod_infra_container_test.go
@@ -383,12 +383,14 @@ var _ = Describe("Podman pod create", func() {
         podID := session.OutputToString()

         // verify we can add a host to the infra's /etc/hosts
-        session = podmanTest.Podman([]string{"run", "--pod", podID, "--add-host", "foobar:127.0.0.1", BB, "ping", "-c", "1", "foobar"})
+        // N/B: Using alpine for ping, since BB ping throws
+        //      permission denied error as of Fedora 33.
+        session = podmanTest.Podman([]string{"run", "--pod", podID, "--add-host", "foobar:127.0.0.1", ALPINE, "ping", "-c", "1", "foobar"})
         session.WaitWithDefaultTimeout()
         Expect(session.ExitCode()).To(Equal(0))

         // verify we can see the other hosts of infra's /etc/hosts
-        session = podmanTest.Podman([]string{"run", "--pod", podID, BB, "ping", "-c", "1", "foobar"})
+        session = podmanTest.Podman([]string{"run", "--pod", podID, ALPINE, "ping", "-c", "1", "foobar"})
         session.WaitWithDefaultTimeout()
         Expect(session.ExitCode()).To(Equal(0))
     })

From 55a1aecc741391335795ff46283bb08a20710b2e Mon Sep 17 00:00:00 2001
From: Chris Evich
Date: Wed, 4 Nov 2020 09:33:14 -0500
Subject: [PATCH 2/3] Cirrus: Simplify artifact collection

On several occasions, fatal task failures were observed during the
upload of artifacts after an otherwise successful test run.  Prior to
this commit, most tasks were storing both logs and binary artifacts.
Avoid the major inconvenience of a possible upload failure by only
collecting binary artifacts when necessary.

Signed-off-by: Chris Evich
---
 .cirrus.yml | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/.cirrus.yml b/.cirrus.yml
index b75e99184f..306ca58c53 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -189,17 +189,13 @@ build_task:
     clone_script: &noop mkdir -p $CIRRUS_WORKING_DIR
     setup_script: *setup
     main_script: *main
-    always: &artifacts
+    always: &binary_artifacts
         gosrc_artifacts:
             path: ./*  # Grab everything in top-level $GOSRC
             type: application/octet-stream
         binary_artifacts:
             path: ./bin/*
             type: application/octet-stream
-        # Required for `contrib/cirrus/logformatter` to work properly
-        html_artifacts:
-            path: ./*.html
-            type: text/html


 # Confirm the result of building on at least one platform appears sane.
@@ -228,7 +224,6 @@ validate_task:
     clone_script: *noop
     setup_script: *setup
     main_script: *main
-    always: *artifacts


 # Exercise the "libpod" API with a small set of common
@@ -248,7 +243,6 @@ bindings_task:
     clone_script: *noop  # Comes from cache
     setup_script: *setup
     main_script: *main
-    always: *artifacts


 # Build the "libpod" API documentation `swagger.yaml` for eventual
@@ -267,7 +261,7 @@ swagger_task:
     clone_script: *full_clone  # build-cache not available to container tasks
     setup_script: *setup
     main_script: *main
-    always: *artifacts
+    always: *binary_artifacts


 endpoint_task:
@@ -285,7 +279,6 @@ endpoint_task:
     clone_script: *full_clone  # build-cache not available to container tasks
     setup_script: *setup
     main_script: *main
-    always: *artifacts


 # Check that all included go modules from other sources match
@@ -304,7 +297,6 @@ vendor_task:
     clone_script: *full_clone  # build-cache not available to container tasks
     setup_script: *setup
     main_script: *main
-    always: *artifacts


 # There are several other important variations of podman which
@@ -335,7 +327,8 @@ alt_build_task:
             ALT_NAME: 'Build varlink-binaries'
     setup_script: *setup
     main_script: *main
-    always: *artifacts
+    always: *binary_artifacts
+

 # Confirm building a statically-linked binary is successful
 static_alt_build_task:
@@ -364,7 +357,7 @@ static_alt_build_task:
         fingerprint_script: cat nix/*
     setup_script: *setup
     main_script: *main
-    always: *artifacts
+    always: *binary_artifacts


 # Confirm building the remote client, natively on a Mac OS-X VM.
@@ -385,7 +378,7 @@ osx_alt_build_task:
         - brew install go-md2man
         - make podman-remote-darwin
         - make install-podman-remote-darwin-docs
-    always: *artifacts
+    always: *binary_artifacts


 # This task is a stub: In the future it will be used to verify
@@ -405,7 +398,6 @@ docker-py_test_task:
     clone_script: *noop  # Comes from cache
     setup_script: *setup
     main_script: *main
-    always: *artifacts


 # Does exactly what it says, execute the podman unit-tests on all primary
@@ -424,7 +416,6 @@ unit_test_task:
         gopath_cache: *ro_gopath_cache
     setup_script: *setup
     main_script: *main
-    always: *artifacts


 apiv2_test_task:
@@ -441,7 +432,10 @@ apiv2_test_task:
     setup_script: *setup
     main_script: *main
     always: &logs_artifacts
-        <<: *artifacts
+        # Required for `contrib/cirrus/logformatter` to work properly
+        html_artifacts:
+            path: ./*.html
+            type: text/html
         package_versions_script: '$SCRIPT_BASE/logcollector.sh packages'
         ginkgo_node_logs_script: '$SCRIPT_BASE/logcollector.sh ginkgo'
         df_script: '$SCRIPT_BASE/logcollector.sh df'
@@ -515,6 +509,7 @@ container_integration_test_task:
     main_script: *main
     always: *logs_artifacts

+
 # Execute most integration tests as a regular (non-root) user.
 rootless_integration_test_task:
     name: *std_name_fmt
@@ -584,6 +579,7 @@ rootless_system_test_task:
     main_script: *main
     always: *logs_artifacts

+
 # This task is critical. It updates the "last-used by" timestamp stored
 # in metadata for all VM images. This mechanism functions in tandem with
 # an out-of-band pruning operation to remove disused VM images.
@@ -665,7 +661,8 @@ release_task:
     clone_script: *noop  # Comes from cache
     setup_script: *setup
     main_script: *main
-    always: *artifacts
+    always: *binary_artifacts
+

 # When preparing to release a new version, this task may be manually
 # activated at the PR stage to verify the code is in a proper state.
@@ -686,4 +683,4 @@ release_test_task:
     clone_script: *noop  # Comes from cache
     setup_script: *setup
     main_script: *main
-    always: *artifacts
+    always: *binary_artifacts

From 46498331a3ed6778664389e3a3c4cf5b0f658c66 Mon Sep 17 00:00:00 2001
From: Chris Evich
Date: Tue, 20 Oct 2020 12:01:13 -0400
Subject: [PATCH 3/3] Cirrus: Use F33beta VM image

Includes a disk-space increase for all Fedora images to accommodate the
static-build job's disk-space requirements.  That job leans heavily on
the task cache, which was previously failing to restore early in the
Cirrus-CI task setup due to disk-space limitations.

Also simplify .cirrus.yml slightly by removing an unnecessary
directory-change step from the setup and runner scripts.

Signed-off-by: Chris Evich
---
 .cirrus.yml                         | 16 ++++++----------
 contrib/cirrus/lib.sh               | 11 ++++++-----
 contrib/cirrus/setup_environment.sh |  7 ++++---
 3 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/.cirrus.yml b/.cirrus.yml
index 306ca58c53..1a109f5ba2 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -21,13 +21,13 @@ env:
     ####
     #### Cache-image names to test with (double-quotes around names are critical)
     ####
-    FEDORA_NAME: "fedora-32"
-    PRIOR_FEDORA_NAME: "fedora-31"
+    FEDORA_NAME: "fedora-33"
+    PRIOR_FEDORA_NAME: "fedora-32"
     UBUNTU_NAME: "ubuntu-20"
     PRIOR_UBUNTU_NAME: "ubuntu-19"

     # Google-cloud VM Images
-    IMAGE_SUFFIX: "c4948709391728640"
+    IMAGE_SUFFIX: "c6323493627232256"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
     UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
@@ -74,12 +74,8 @@ ext_svc_check_task:
     env:
         TEST_FLAVOR: ext_svc
         CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
-    setup_script: &setup
-        - 'cd $GOSRC/$SCRIPT_BASE || exit 1'
-        - './setup_environment.sh'
-    main_script: &main
-        - 'cd $GOSRC/$SCRIPT_BASE || exit 1'
-        - './runner.sh'
+    setup_script: &setup '$GOSRC/$SCRIPT_BASE/setup_environment.sh'
+    main_script: &main '$GOSRC/$SCRIPT_BASE/runner.sh'


 # Execute some quick checks to confirm this YAML file and all
@@ -339,7 +335,7 @@ static_alt_build_task:
         - build
     # Community-maintained task, may fail on occasion. If so, uncomment
     # the next line and file an issue with details about the failure.
-    # allow_failures: $CI == $CI
+    allow_failures: $CI == $CI
     gce_instance: *bigvm
     env:
         <<: *stdenvars
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 050fb16f39..04e8a3c1cb 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -10,6 +10,9 @@ set -a
 # handling of the (otherwise) default shell setup is non-uniform. Rather
 # than attempt to workaround differences, simply force-load/set required
 # items every time this library is utilized.
+_waserrexit=0
+if [[ "$SHELLOPTS" =~ errexit ]]; then _waserrexit=1; fi
+set +e  # Assumed in F33 for setting global vars
 source /etc/profile
 source /etc/environment
 if [[ -r "/etc/ci_environment" ]]; then source /etc/ci_environment; fi
@@ -18,6 +21,7 @@ HOME="$(getent passwd $USER | cut -d : -f 6)"
 # Some platforms set and make this read-only
 [[ -n "$UID" ]] || \
     UID=$(getent passwd $USER | cut -d : -f 3)
+if ((_waserrexit)); then set -e; fi

 # During VM Image build, the 'containers/automation' installation
 # was performed. The final step of installation sets the library
 # default shell profile depending on distribution.
 # shellcheck disable=SC2154
 if [[ -n "$AUTOMATION_LIB_PATH" ]]; then
-    for libname in defaults anchors console_output utils; do
-        # There's no way shellcheck can process this location
-        # shellcheck disable=SC1090
-        source $AUTOMATION_LIB_PATH/${libname}.sh
-    done
+    # shellcheck source=/usr/share/automation/lib/common_lib.sh
+    source $AUTOMATION_LIB_PATH/common_lib.sh
 else
     (
     echo "WARNING: It does not appear that containers/automation was installed."
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 8ccbd95d9b..da175cc054 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -99,11 +99,12 @@ fi
 case "$OS_RELEASE_ID" in
     ubuntu*) ;;
     fedora*)
-        if ((CONTAINER==0)); then  # Not yet running inside a container
+        if ((CONTAINER==0)); then
             msg "Configuring / Expanding host storage."
             # VM is setup to allow flexibility in testing alternate storage.
             # For general use, simply make use of all available space.
-            ooe.sh bash "$SCRIPT_BASE/add_second_partition.sh"
+            bash "$SCRIPT_BASE/add_second_partition.sh"
+            $SCRIPT_BASE/logcollector.sh df

             # All SELinux distros need this for systemd-in-a-container
             msg "Enabling container_manage_cgroup"
@@ -215,4 +216,4 @@ echo -e "\n# End of global variable definitions" \
     >> /etc/ci_environment

 msg "Global CI Environment vars.:"
-cat /etc/ci_environment | sort | indent
+grep -Ev '^#' /etc/ci_environment | sort | indent
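
For anyone who wants to reproduce the `ping` behavior described in PATCH 1/3
outside the e2e suite, here is a minimal sketch. It assumes a Fedora 33 host
with podman installed and network access to pull the public busybox and
alpine images; the sysctl value at the end is only an example of widening
`net.ipv4.ping_group_range`, not something any of these patches change.

#!/usr/bin/env bash
# Sketch: compare busybox vs. alpine ping in containers lacking
# CAP_NET_ADMIN/CAP_NET_RAW, then widen net.ipv4.ping_group_range so
# unprivileged ICMP echo (ping) is permitted for all groups.
set -euo pipefail

# Show the GID range currently allowed to open unprivileged ping sockets.
sysctl net.ipv4.ping_group_range

# Per the commit message, this is expected to fail on Fedora 33.
podman run --rm --cap-drop=NET_ADMIN --cap-drop=NET_RAW \
    docker.io/library/busybox ping -c 1 127.0.0.1 || \
    echo "busybox ping failed (expected)"

# Alpine's busybox ping reportedly works under the same conditions.
podman run --rm --cap-drop=NET_ADMIN --cap-drop=NET_RAW \
    docker.io/library/alpine ping -c 1 127.0.0.1

# Example only: allow every group (GID 0 through the maximum) to use
# ping sockets, which is how ping can work without CAP_NET_RAW at all.
sudo sysctl -w 'net.ipv4.ping_group_range=0 2147483647'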
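
The `_waserrexit` guard added to contrib/cirrus/lib.sh in PATCH 3/3 is an
instance of a common bash pattern: record whether the caller had `errexit`
enabled, disable it while sourcing files that are not errexit-safe, then
restore the original setting. Below is a stand-alone sketch of that pattern;
the file list is illustrative only, not a claim about everything lib.sh
sources.

#!/usr/bin/env bash
# Sketch of the save / disable / restore errexit pattern from lib.sh.
set -e   # the caller may or may not be running with errexit enabled

# SHELLOPTS is a colon-separated list of currently enabled 'set -o' options.
_waserrexit=0
if [[ "$SHELLOPTS" =~ errexit ]]; then _waserrexit=1; fi

set +e   # tolerate non-zero exit codes while sourcing environment files
for f in /etc/profile /etc/environment; do
    # shellcheck disable=SC1090
    [[ -r "$f" ]] && source "$f"
done

# Re-enable errexit only if it was enabled when we started.
if ((_waserrexit)); then set -e; fi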