# zfs-tests-object-store.yml (forked from openzfs/zfs)
name: zfs-tests-object-store
on:
  push:
  pull_request:
jobs:
  tests-object-store:
    runs-on: ubuntu-20.04
    steps:
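    # Check out the pull request's head commit rather than the merge commit
    # GitHub generates, so failures map to the exact revision under review.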
    - uses: actions/checkout@v2
      with:
        ref: ${{ github.event.pull_request.head.sha }}
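    # Install the build prerequisites listed in the repository's
    # build-dependencies.txt.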
    - name: Install dependencies
      run: |
        sudo apt-get update
        sudo xargs --arg-file=${{ github.workspace }}/.github/workflows/build-dependencies.txt apt-get install -qq
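    # Generate the autotools build system (configure script and Makefile
    # templates).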
    - name: Autogen.sh
      run: |
        sh autogen.sh
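    # Configure a debug build so assertions stay enabled and debug
    # information is available if a test trips a panic.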
    - name: Configure
      run: |
        ./configure --enable-debug --enable-debuginfo
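    # Build Debian packages for the userland utilities and the kernel
    # modules rather than installing directly from the build tree.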
    - name: Make
      run: |
        make --no-print-directory -s pkg-utils pkg-kmod
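    # Install the freshly built packages and load the new zfs module.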
    - name: Install
      run: |
        sudo dpkg -i *.deb
        # Update the order of directories to search for modules; otherwise
        # Ubuntu will load the kernel-shipped ones.
        sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
        sudo depmod
        sudo modprobe zfs
        # Workaround to provide additional free space for testing.
        # https://github.com/actions/virtual-environments/issues/2840
        sudo rm -rf /usr/share/dotnet
        sudo rm -rf /opt/ghc
        sudo rm -rf "/usr/local/share/boost"
        sudo rm -rf "$AGENT_TOOLSDIRECTORY"
    - name: Setup minio
      run: |
        docker run -d -p 9000:9000 --name minio --network=host \
          -e "MINIO_ACCESS_KEY=minioadmin" \
          -e "MINIO_SECRET_KEY=minioadmin" \
          -v /tmp/data:/data \
          -v /tmp/config:/root/.minio \
          minio/minio server /data
        export AWS_EC2_METADATA_DISABLED=true
        aws configure set aws_access_key_id minioadmin && aws configure set aws_secret_access_key minioadmin
        aws --endpoint-url http://127.0.0.1:9000/ s3 mb s3://testbucket
        sudo cp -r $HOME/.aws /root
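    # Run the ZFS Test Suite against the object store. The ZTS_* variables
    # select the s3 backend and point it at the MinIO endpoint started
    # above; the region value is required by the client but arbitrary for
    # MinIO.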
    - name: Tests
      run: |
        # The GitHub CI package build doesn't enable any ZFS services by
        # default. Since the object agent service is available on the test
        # VM, ZTS tries to use it. Because the agent depends on the
        # zfs-mount service, we enable that here; if we don't, the agent
        # will fail to start and the workflow will hang.
        sudo systemctl enable --now zfs-mount
        /usr/share/zfs/zfs-tests.sh -v -s 3G -r object_store
      env:
        LD_LIBRARY_PATH: /lib/x86_64-linux-gnu
        ZTS_OBJECT_STORE: s3
        ZTS_BUCKET_NAME: testbucket
        ZTS_OBJECT_ENDPOINT: http://127.0.0.1:9000
        ZTS_REGION: us-west-2
        AWS_ACCESS_KEY_ID: minioadmin
        AWS_SECRET_ACCESS_KEY: minioadmin
        AWS_EC2_METADATA_DISABLED: true
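    # On failure or cancellation, gather kernel messages, syslog, and the
    # object agent's logs into the results directory for upload.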
    - name: Prepare artifacts
      if: failure() || cancelled()
      run: |
        RESULTS_PATH=$(readlink -f /var/tmp/test_results/current)
        sudo dmesg > $RESULTS_PATH/dmesg
        sudo cp /var/log/syslog $RESULTS_PATH/
        # Stop the zfs-object-agent so it stops writing into /var/log/zoa
        # while the logs are archived.
        sudo systemctl stop zfs-object-agent.service || true
        # tar may exit with status 1 if files change while the archive is
        # being created, so treat that as success and let the remaining
        # commands run.
        sudo tar czvf $RESULTS_PATH/zoa_logs.tgz /var/log/zoa || true
        sudo chmod +r $RESULTS_PATH/*
        # Replace ':' in directory names; actions/upload-artifact doesn't
        # support it.
        for f in $(find $RESULTS_PATH -name '*:*'); do mv "$f" "${f//:/__}"; done
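    # Upload everything under the timestamped results directory.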
    - uses: actions/upload-artifact@v2
      if: failure() || cancelled()
      with:
        name: Test logs
        path: /var/tmp/test_results/20*/
        if-no-files-found: ignore