From 0a90c66f2f255d6ac5abc7c234d4d5c0391652da Mon Sep 17 00:00:00 2001
From: Wesley Pettit
Date: Thu, 17 Sep 2020 21:54:01 -0700
Subject: [PATCH 1/5] Add integration tests for out_s3

---
 integ/integ.sh                                | 45 +++++++++++++++++++
 integ/s3-logger/Dockerfile                    |  8 ++++
 integ/s3-logger/logscript.sh                  | 20 +++++++++
 integ/s3/validate-and-clean.go                | 33 +++++++++-----
 .../docker-compose.validate-and-clean-s3.yml  |  1 +
 .../docker-compose.validate-and-clean-s3.yml  |  1 +
 integ/test_s3/docker-compose.test.yml         | 34 ++++++++++++++
 .../docker-compose.validate-and-clean-s3.yml  | 17 +++++++
 integ/test_s3/fluent-bit.conf                 | 25 +++++++++++
 9 files changed, 174 insertions(+), 10 deletions(-)
 create mode 100644 integ/s3-logger/Dockerfile
 create mode 100644 integ/s3-logger/logscript.sh
 create mode 100644 integ/test_s3/docker-compose.test.yml
 create mode 100644 integ/test_s3/docker-compose.validate-and-clean-s3.yml
 create mode 100644 integ/test_s3/fluent-bit.conf

diff --git a/integ/integ.sh b/integ/integ.sh
index 4a69c1d78..679195d3b 100755
--- a/integ/integ.sh
+++ b/integ/integ.sh
@@ -78,6 +78,30 @@ test_firehose() {
     fi
 }
 
+test_s3() {
+    # Generate log data that will be stored in the S3 bucket
+    docker-compose --file ./integ/test_s3/docker-compose.test.yml build
+    docker-compose --file ./integ/test_s3/docker-compose.test.yml up --abort-on-container-exit
+
+    # Give the last uploads time to complete before running the validation test
+    sleep 20
+
+    # Create a marker file; the validator deletes it only if validation succeeds
+    mkdir -p ./integ/out
+    touch ./integ/out/s3-test
+
+    export S3_ACTION="validate"
+    docker-compose --file ./integ/test_s3/docker-compose.validate-and-clean-s3.yml build
+    docker-compose --file ./integ/test_s3/docker-compose.validate-and-clean-s3.yml up --abort-on-container-exit
+
+    if [ -f ./integ/out/s3-test ]; then
+        # if the file still exists, the test failed
+        echo "Test failed for S3."
+        exit 1
+    fi
+}
+
 clean_s3() {
     validate_or_clean_s3 clean
 }
@@ -113,6 +137,7 @@ fi
 if [ "${1}" = "kinesis" ]; then
     export S3_PREFIX="kinesis-test"
     export TEST_FILE="kinesis-test"
+    export EXPECTED_EVENTS_LEN="1000"
     source ./integ/resources/create_test_resources.sh
     source ./integ/resources/setup_test_environment.sh
 
@@ -122,12 +147,23 @@ fi
 if [ "${1}" = "firehose" ]; then
     export S3_PREFIX="firehose-test"
     export TEST_FILE="firehose-test"
+    export EXPECTED_EVENTS_LEN="1000"
     source ./integ/resources/create_test_resources.sh
     source ./integ/resources/setup_test_environment.sh
 
     clean_s3 && test_firehose
 fi
 
+if [ "${1}" = "s3" ]; then
+    export S3_PREFIX="fluent-bit-logs"
+    export TEST_FILE="s3-test"
+    export EXPECTED_EVENTS_LEN="7717"
+    source ./integ/resources/create_test_resources.sh
+    source ./integ/resources/setup_test_environment.sh
+
+    clean_s3 && test_s3
+fi
+
 if [ "${1}" = "clean-s3" ]; then
     source ./integ/resources/setup_test_environment.sh
     clean_s3
@@ -155,10 +191,19 @@ if [ "${1}" = "cicd" ]; then
     source ./integ/resources/setup_test_environment.sh
     export S3_PREFIX="kinesis-test"
     export TEST_FILE="kinesis-test"
+    export EXPECTED_EVENTS_LEN="1000"
     clean_s3 && test_kinesis
     export S3_PREFIX="firehose-test"
     export TEST_FILE="firehose-test"
     clean_s3 && test_firehose
+
+    export S3_PREFIX="fluent-bit-logs"
+    export TEST_FILE="s3-test"
+    export EXPECTED_EVENTS_LEN="7717"
+    source ./integ/resources/create_test_resources.sh
+    source ./integ/resources/setup_test_environment.sh
+
+    clean_s3 && test_s3
 fi
 
 if [ "${1}" = "delete" ]; then
diff --git a/integ/s3-logger/Dockerfile b/integ/s3-logger/Dockerfile
new file mode 100644
index 000000000..93e14cbba
--- /dev/null
+++ b/integ/s3-logger/Dockerfile
@@ -0,0 +1,8 @@
+FROM amazonlinux
+
+RUN yum upgrade -y
+RUN yum install -y openssl
+
+COPY logscript.sh /
+
+CMD ["bash", "/logscript.sh"]
diff --git a/integ/s3-logger/logscript.sh b/integ/s3-logger/logscript.sh
new file mode 100644
index 000000000..47b0bd4ea
--- /dev/null
+++ b/integ/s3-logger/logscript.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Writes 7717 unique log lines
+# intermixed with 1 KB lines of random data.
+# These logs are written over a little more than a minute,
+# because we want to test S3's local buffering.
+# Why 7717? It's a prime number. And I like that.
+# Finally, we sleep for 90s to ensure the upload timeout hits for the last chunk,
+# then exit.
+
+for i in {0..7716}
+do
+    echo $i
+    openssl rand -base64 1000 | tr '\n' '-' && echo ""
+    sleep 0.0001
+done
+
+sleep 90
+
+exit 0
diff --git a/integ/s3/validate-and-clean.go b/integ/s3/validate-and-clean.go
index 777a787d8..ea7db6b78 100644
--- a/integ/s3/validate-and-clean.go
+++ b/integ/s3/validate-and-clean.go
@@ -16,11 +16,12 @@ import (
 )
 
 const (
-    envAWSRegion = "AWS_REGION"
-    envS3Bucket  = "S3_BUCKET_NAME"
-    envS3Action  = "S3_ACTION"
-    envS3Prefix  = "S3_PREFIX"
-    envTestFile  = "TEST_FILE"
+    envAWSRegion       = "AWS_REGION"
+    envS3Bucket        = "S3_BUCKET_NAME"
+    envS3Action        = "S3_ACTION"
+    envS3Prefix        = "S3_PREFIX"
+    envTestFile        = "TEST_FILE"
+    envExpectedLogsLen = "EXPECTED_EVENTS_LEN"
 )
 
 type Message struct {
@@ -48,6 +49,15 @@ func main() {
         exitErrorf("[TEST FAILURE] test verfication file name required. Set the value for environment variable- %s", envTestFile)
     }
 
+    expectedEventsLen := os.Getenv(envExpectedLogsLen)
+    if expectedEventsLen == "" {
+        exitErrorf("[TEST FAILURE] number of expected log events required. Set the value for environment variable- %s", envExpectedLogsLen)
+    }
+    numEvents, conversionError := strconv.Atoi(expectedEventsLen)
+    if conversionError != nil {
+        exitErrorf("[TEST FAILURE] string to int conversion error for EXPECTED_EVENTS_LEN: %v", conversionError)
+    }
+
     s3Client, err := getS3Client(region)
     if err != nil {
         exitErrorf("[TEST FAILURE] Unable to create new S3 client: %v", err)
@@ -57,7 +67,7 @@ func main() {
     if s3Action == "validate" {
         // Validate the data on the s3 bucket
         getS3ObjectsResponse := getS3Objects(s3Client, bucket, prefix)
-        validate(s3Client, getS3ObjectsResponse, bucket, testFile)
+        validate(s3Client, getS3ObjectsResponse, bucket, testFile, numEvents)
     } else {
         // Clean the s3 bucket-- delete all objects
         deleteS3Objects(s3Client, bucket, prefix)
@@ -97,8 +107,8 @@ func getS3Objects(s3Client *s3.S3, bucket string, prefix string) *s3.ListObjects
 // Validates the log messages. Our log producer is designed to send 1000 integers [0 - 999].
 // Both of the Kinesis Streams and Kinesis Firehose try to send each log maintaining the "at least once" policy.
 // To validate, we need to make sure all the valid numbers [0 - 999] are stored at least once.
-func validate(s3Client *s3.S3, response *s3.ListObjectsV2Output, bucket string, testFile string) {
-    logCounter := make([]int, 1000)
+func validate(s3Client *s3.S3, response *s3.ListObjectsV2Output, bucket string, testFile string, numEvents int) {
+    logCounter := make([]int, numEvents)
     for index := range logCounter {
         logCounter[index] = 1
     }
@@ -121,6 +131,9 @@ func validate(s3Client *s3.S3, response *s3.ListObjectsV2Output, bucket string,
         if d == "" {
             continue
         }
+        if len(d) > 500 {
+            continue
+        }
 
         var message Message
 
@@ -134,8 +147,8 @@ func validate(s3Client *s3.S3, response *s3.ListObjectsV2Output, bucket string,
             exitErrorf("[TEST FAILURE] String to Int convertion Error:", convertionError)
         }
 
-        if number < 0 || number >= 1000 {
-            exitErrorf("[TEST FAILURE] Invalid number: %d found. Expected value in range (0 - 999)", number)
+        if number < 0 || number >= numEvents {
+            exitErrorf("[TEST FAILURE] Invalid number: %d found. Expected value in range (0 - %d)", number, numEvents)
         }
 
         logCounter[number] = 0
diff --git a/integ/test_firehose/docker-compose.validate-and-clean-s3.yml b/integ/test_firehose/docker-compose.validate-and-clean-s3.yml
index fa551be76..d9695dd86 100644
--- a/integ/test_firehose/docker-compose.validate-and-clean-s3.yml
+++ b/integ/test_firehose/docker-compose.validate-and-clean-s3.yml
@@ -12,5 +12,6 @@ services:
       - "S3_ACTION=${S3_ACTION}"
       - "S3_PREFIX=${S3_PREFIX}"
       - "TEST_FILE=${TEST_FILE}"
+      - "EXPECTED_EVENTS_LEN=${EXPECTED_EVENTS_LEN}"
     volumes:
       - ${PROJECT_ROOT}/integ/out:/out
diff --git a/integ/test_kinesis/docker-compose.validate-and-clean-s3.yml b/integ/test_kinesis/docker-compose.validate-and-clean-s3.yml
index fa551be76..d9695dd86 100644
--- a/integ/test_kinesis/docker-compose.validate-and-clean-s3.yml
+++ b/integ/test_kinesis/docker-compose.validate-and-clean-s3.yml
@@ -12,5 +12,6 @@ services:
       - "S3_ACTION=${S3_ACTION}"
       - "S3_PREFIX=${S3_PREFIX}"
       - "TEST_FILE=${TEST_FILE}"
+      - "EXPECTED_EVENTS_LEN=${EXPECTED_EVENTS_LEN}"
     volumes:
       - ${PROJECT_ROOT}/integ/out:/out
diff --git a/integ/test_s3/docker-compose.test.yml b/integ/test_s3/docker-compose.test.yml
new file mode 100644
index 000000000..2246e4bca
--- /dev/null
+++ b/integ/test_s3/docker-compose.test.yml
@@ -0,0 +1,34 @@
+version: "2"
+
+services:
+  fluent-bit:
+    image: amazon/aws-for-fluent-bit:latest
+    environment:
+      - "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}"
+      - "AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}"
+      - "AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}"
+      - "S3_BUCKET_NAME=${S3_BUCKET_NAME}"
+      - FLB_LOG_LEVEL=debug
+    volumes:
+      - /var/run/:/var/run
+      - ./:/fluent-bit/etc/
+  logger-multipart-test:
+    build: ${PROJECT_ROOT}/integ/s3-logger
+    depends_on:
+      - fluent-bit
+    logging:
+      driver: fluentd
+      options:
+        tag: multipart-upload-test
+        fluentd-address: unix:///var/run/fluent.sock
+        fluentd-async-connect: "true"
+  logger-put-object-test:
+    build: ${PROJECT_ROOT}/integ/s3-logger
+    depends_on:
+      - fluent-bit
+    logging:
+      driver: fluentd
+      options:
+        tag: put-object-test
+        fluentd-address: unix:///var/run/fluent.sock
+        fluentd-async-connect: "true"
diff --git a/integ/test_s3/docker-compose.validate-and-clean-s3.yml b/integ/test_s3/docker-compose.validate-and-clean-s3.yml
new file mode 100644
index 000000000..4c68b7a31
--- /dev/null
+++ b/integ/test_s3/docker-compose.validate-and-clean-s3.yml
@@ -0,0 +1,17 @@
+version: "2"
+
+services:
+  validate-s3:
+    build: ${PROJECT_ROOT}/integ/s3
+    environment:
+      - "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}"
+      - "AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}"
+      - "AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}"
+      - "AWS_REGION=${AWS_REGION}"
+      - "S3_BUCKET_NAME=${S3_BUCKET_NAME}"
+      - "S3_ACTION=${S3_ACTION}"
+      - "S3_PREFIX=${S3_PREFIX}"
+      - "EXPECTED_EVENTS_LEN=${EXPECTED_EVENTS_LEN}"
+      - "TEST_FILE=${TEST_FILE}"
+    volumes:
+      - ${PROJECT_ROOT}/integ/out:/out
diff --git a/integ/test_s3/fluent-bit.conf b/integ/test_s3/fluent-bit.conf
new file mode 100644
index 000000000..db57698c6
--- /dev/null
+++ b/integ/test_s3/fluent-bit.conf
@@ -0,0 +1,25 @@
+[SERVICE]
+    Log_Level debug
+
+[INPUT]
+    Name forward
+    unix_path /var/run/fluent.sock
+
+[OUTPUT]
+    Name s3
+    Match multipart*
+    bucket ${S3_BUCKET_NAME}
+    region us-west-2
+    total_file_size 100M
+    upload_timeout 1
+    chunk_buffer_path /fluent-bit/s3/one/
+
+[OUTPUT]
+    Name s3
+    Match put-object*
+    bucket ${S3_BUCKET_NAME}
+    region us-west-2
+    total_file_size 1M
+    use_put_object On
+    upload_timeout 1
+    chunk_buffer_path /fluent-bit/s3/two/
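Patch 1 parameterizes the validator with EXPECTED_EVENTS_LEN, and its new length check (skipping any line over 500 characters) is what filters out the roughly 1 KB random-padding lines the logger interleaves with the 7717 numbered lines. When a run fails, it can help to inspect the bucket by hand before re-running the validator. A rough sketch with the AWS CLI (assuming it is installed and picks up the same credentials and environment variables the compose files use; <key> stands in for a real key taken from the listing):

    # list everything Fluent Bit uploaded under the test prefix
    aws s3 ls "s3://${S3_BUCKET_NAME}/${S3_PREFIX}/" --recursive

    # stream one object to stdout and count the log lines it carries
    aws s3 cp "s3://${S3_BUCKET_NAME}/<key>" - | wc -l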
From a739b6b6f50e041bb12d6d591a2fda8bb762dd49 Mon Sep 17 00:00:00 2001
From: Wesley Pettit
Date: Sun, 27 Sep 2020 16:55:30 -0700
Subject: [PATCH 2/5] S3 integ tests

---
 integ/integ.sh                                | 26 ++++++++++++++---
 integ/out/expected-metric-name                |  1 +
 integ/out/s3-test                             |  0
 integ/test_s3/docker-compose.test.yml         |  4 +--
 ... docker-compose.validate-s3-multipart.yml} |  4 +--
 .../docker-compose.validate-s3-putobject.yml  | 17 ++++++++++
 integ/test_s3/fluent-bit.conf                 |  6 +++--
 tests/.gitignore                              |  2 ++
 8 files changed, 50 insertions(+), 10 deletions(-)
 create mode 100644 integ/out/expected-metric-name
 create mode 100644 integ/out/s3-test
 rename integ/test_s3/{docker-compose.validate-and-clean-s3.yml => docker-compose.validate-s3-multipart.yml} (87%)
 create mode 100644 integ/test_s3/docker-compose.validate-s3-putobject.yml
 create mode 100644 tests/.gitignore

diff --git a/integ/integ.sh b/integ/integ.sh
index 679195d3b..027d70fda 100755
--- a/integ/integ.sh
+++ b/integ/integ.sh
@@ -79,6 +79,11 @@ test_firehose() {
 }
 
 test_s3() {
+    # different S3 prefix for each test
+    export S3_PREFIX_PUT_OBJECT="logs/putobject"
+    export S3_PREFIX_MULTIPART="logs/multipart"
+    # Tag is used in the s3 keys; each test run has a unique (random) tag
+    export TAG=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 10)
     # Generate log data that will be stored in the S3 bucket
     docker-compose --file ./integ/test_s3/docker-compose.test.yml build
     docker-compose --file ./integ/test_s3/docker-compose.test.yml up --abort-on-container-exit
@@ -91,8 +96,22 @@ test_s3() {
     touch ./integ/out/s3-test
 
     export S3_ACTION="validate"
-    docker-compose --file ./integ/test_s3/docker-compose.validate-and-clean-s3.yml build
-    docker-compose --file ./integ/test_s3/docker-compose.validate-and-clean-s3.yml up --abort-on-container-exit
+    docker-compose --file ./integ/test_s3/docker-compose.validate-s3-multipart.yml build
+    docker-compose --file ./integ/test_s3/docker-compose.validate-s3-multipart.yml up --abort-on-container-exit
+
+    if [ -f ./integ/out/s3-test ]; then
+        # if the file still exists, the test failed
+        echo "Test failed for S3."
+        exit 1
+    fi
+
+    # Create a marker file; the validator deletes it only if validation succeeds
+    mkdir -p ./integ/out
+    touch ./integ/out/s3-test
+
+    export S3_ACTION="validate"
+    docker-compose --file ./integ/test_s3/docker-compose.validate-s3-putobject.yml build
+    docker-compose --file ./integ/test_s3/docker-compose.validate-s3-putobject.yml up --abort-on-container-exit
 
     if [ -f ./integ/out/s3-test ]; then
         # if the file still exists, the test failed
@@ -155,7 +174,7 @@ fi
 if [ "${1}" = "s3" ]; then
-    export S3_PREFIX="fluent-bit-logs"
+    export S3_PREFIX="logs"
     export TEST_FILE="s3-test"
     export EXPECTED_EVENTS_LEN="7717"
     source ./integ/resources/create_test_resources.sh
     source ./integ/resources/setup_test_environment.sh
@@ -197,7 +216,6 @@ if [ "${1}" = "cicd" ]; then
     export TEST_FILE="firehose-test"
     clean_s3 && test_firehose
 
-    export S3_PREFIX="fluent-bit-logs"
     export TEST_FILE="s3-test"
     export EXPECTED_EVENTS_LEN="7717"
     source ./integ/resources/create_test_resources.sh
diff --git a/integ/out/expected-metric-name b/integ/out/expected-metric-name
new file mode 100644
index 000000000..328d1d0c6
--- /dev/null
+++ b/integ/out/expected-metric-name
@@ -0,0 +1 @@
+fluent-bit-integ-test-23465
diff --git a/integ/out/s3-test b/integ/out/s3-test
new file mode 100644
index 000000000..e69de29bb
diff --git a/integ/test_s3/docker-compose.test.yml b/integ/test_s3/docker-compose.test.yml
index 2246e4bca..8402eab16 100644
--- a/integ/test_s3/docker-compose.test.yml
+++ b/integ/test_s3/docker-compose.test.yml
@@ -19,7 +19,7 @@ services:
     logging:
       driver: fluentd
       options:
-        tag: multipart-upload-test
+        tag: "multipart-upload-test-${TAG}"
         fluentd-address: unix:///var/run/fluent.sock
         fluentd-async-connect: "true"
   logger-put-object-test:
@@ -29,6 +29,6 @@ services:
     logging:
       driver: fluentd
       options:
-        tag: put-object-test
+        tag: "put-object-test-${TAG}"
         fluentd-address: unix:///var/run/fluent.sock
         fluentd-async-connect: "true"
diff --git a/integ/test_s3/docker-compose.validate-and-clean-s3.yml b/integ/test_s3/docker-compose.validate-s3-multipart.yml
similarity index 87%
rename from integ/test_s3/docker-compose.validate-and-clean-s3.yml
rename to integ/test_s3/docker-compose.validate-s3-multipart.yml
index 4c68b7a31..feea3eacb 100644
--- a/integ/test_s3/docker-compose.validate-and-clean-s3.yml
+++ b/integ/test_s3/docker-compose.validate-s3-multipart.yml
@@ -1,7 +1,7 @@
 version: "2"
 
 services:
-  validate-s3:
+  validate-s3-multipart:
     build: ${PROJECT_ROOT}/integ/s3
     environment:
       - "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}"
@@ -10,7 +10,7 @@ services:
       - "AWS_REGION=${AWS_REGION}"
       - "S3_BUCKET_NAME=${S3_BUCKET_NAME}"
       - "S3_ACTION=${S3_ACTION}"
-      - "S3_PREFIX=${S3_PREFIX}"
+      - "S3_PREFIX=${S3_PREFIX_MULTIPART}"
       - "EXPECTED_EVENTS_LEN=${EXPECTED_EVENTS_LEN}"
       - "TEST_FILE=${TEST_FILE}"
     volumes:
diff --git a/integ/test_s3/docker-compose.validate-s3-putobject.yml b/integ/test_s3/docker-compose.validate-s3-putobject.yml
new file mode 100644
index 000000000..f6e83b4aa
--- /dev/null
+++ b/integ/test_s3/docker-compose.validate-s3-putobject.yml
@@ -0,0 +1,17 @@
+version: "2"
+
+services:
+  validate-s3-put-object:
+    build: ${PROJECT_ROOT}/integ/s3
+    environment:
+      - "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}"
+      - "AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}"
+      - "AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}"
+      - "AWS_REGION=${AWS_REGION}"
+      - "S3_BUCKET_NAME=${S3_BUCKET_NAME}"
+      - "S3_ACTION=${S3_ACTION}"
+      - "S3_PREFIX=${S3_PREFIX_PUT_OBJECT}"
+      - "EXPECTED_EVENTS_LEN=${EXPECTED_EVENTS_LEN}"
+      - "TEST_FILE=${TEST_FILE}"
+    volumes:
+      - ${PROJECT_ROOT}/integ/out:/out
diff --git a/integ/test_s3/fluent-bit.conf b/integ/test_s3/fluent-bit.conf
index db57698c6..8b2d9ae4b 100644
--- a/integ/test_s3/fluent-bit.conf
+++ b/integ/test_s3/fluent-bit.conf
@@ -11,8 +11,9 @@
     bucket ${S3_BUCKET_NAME}
     region us-west-2
     total_file_size 100M
+    s3_key_format /logs/multipart/$TAG[1]/logs/$TAG/%Y/%m/%d/%H/%M/%S
+    s3_key_format_tag_delimiters .-
     upload_timeout 1
-    chunk_buffer_path /fluent-bit/s3/one/
 
 [OUTPUT]
     Name s3
@@ -20,6 +21,7 @@
     bucket ${S3_BUCKET_NAME}
     region us-west-2
     total_file_size 1M
+    s3_key_format /logs/put$TAG[1]/logs/$TAG/%Y/%m/%d/%H/%M/%S
+    s3_key_format_tag_delimiters .-
     use_put_object On
     upload_timeout 1
-    chunk_buffer_path /fluent-bit/s3/two/
diff --git a/tests/.gitignore b/tests/.gitignore
new file mode 100644
index 000000000..8c0580437
--- /dev/null
+++ b/tests/.gitignore
@@ -0,0 +1,2 @@
+bin
+integ/out
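A worked example of how the new key formats line up with the validation prefixes, under my reading of Fluent Bit's $TAG[n] splitting (so treat the exact expansion as a sketch). Say TAG=abc123: the put-object container then logs under the tag put-object-test-abc123, the delimiters .- split that tag into put, object, test, and abc123, and $TAG[1] is object, so

    /logs/put$TAG[1]/logs/$TAG/%Y/%m/%d/%H/%M/%S

expands to something like

    /logs/putobject/logs/put-object-test-abc123/2020/09/27/16/55/30

which falls under the S3_PREFIX_PUT_OBJECT="logs/putobject" that the validator scans. The multipart output works the same way: $TAG[1] is upload, so its objects land under logs/multipart/upload/..., matching S3_PREFIX_MULTIPART="logs/multipart".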
From c65c615d1ac63ff0311bb8139cc1fdd167f72be8 Mon Sep 17 00:00:00 2001
From: Wesley Pettit
Date: Sun, 11 Oct 2020 23:11:48 -0700
Subject: [PATCH 3/5] Add integration tests for new S3 plugin

Signed-off-by: Wesley Pettit
---
 integ/integ.sh                        |  5 +++--
 integ/out/expected-metric-name        |  2 +-
 integ/s3-logger/logscript.sh          |  2 +-
 integ/test_s3/docker-compose.test.yml |  1 +
 integ/test_s3/fluent-bit.conf         | 10 ++++++----
 5 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/integ/integ.sh b/integ/integ.sh
index 027d70fda..4d8ca9679 100755
--- a/integ/integ.sh
+++ b/integ/integ.sh
@@ -80,8 +80,9 @@ test_firehose() {
 
 test_s3() {
     # different S3 prefix for each test
-    export S3_PREFIX_PUT_OBJECT="logs/putobject"
-    export S3_PREFIX_MULTIPART="logs/multipart"
+    export ARCHITECTURE=$(uname -m)
+    export S3_PREFIX_PUT_OBJECT="logs/${ARCHITECTURE}/putobject"
+    export S3_PREFIX_MULTIPART="logs/${ARCHITECTURE}/logs/multipart"
     # Tag is used in the s3 keys; each test run has a unique (random) tag
     export TAG=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 10)
     # Generate log data that will be stored in the S3 bucket
diff --git a/integ/out/expected-metric-name b/integ/out/expected-metric-name
index 328d1d0c6..736dc8745 100644
--- a/integ/out/expected-metric-name
+++ b/integ/out/expected-metric-name
@@ -1 +1 @@
-fluent-bit-integ-test-23465
+fluent-bit-integ-test-1264
diff --git a/integ/s3-logger/logscript.sh b/integ/s3-logger/logscript.sh
index 47b0bd4ea..8f9a570e5 100644
--- a/integ/s3-logger/logscript.sh
+++ b/integ/s3-logger/logscript.sh
@@ -15,6 +15,6 @@ do
     sleep 0.0001
 done
 
-sleep 90
+sleep 200
 
 exit 0
diff --git a/integ/test_s3/docker-compose.test.yml b/integ/test_s3/docker-compose.test.yml
index 8402eab16..635476ee7 100644
--- a/integ/test_s3/docker-compose.test.yml
+++ b/integ/test_s3/docker-compose.test.yml
@@ -8,6 +8,7 @@ services:
       - "AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}"
       - "AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}"
       - "S3_BUCKET_NAME=${S3_BUCKET_NAME}"
+      - "ARCHITECTURE=${ARCHITECTURE}"
       - FLB_LOG_LEVEL=debug
     volumes:
       - /var/run/:/var/run
diff --git a/integ/test_s3/fluent-bit.conf b/integ/test_s3/fluent-bit.conf
index 8b2d9ae4b..6bfaddc6c 100644
--- a/integ/test_s3/fluent-bit.conf
+++ b/integ/test_s3/fluent-bit.conf
@@ -10,18 +10,20 @@
     Match multipart*
     bucket ${S3_BUCKET_NAME}
     region us-west-2
+    store_dir /fluent-bit/buffer
     total_file_size 100M
-    s3_key_format /logs/multipart/$TAG[1]/logs/$TAG/%Y/%m/%d/%H/%M/%S
+    s3_key_format /logs/${ARCHITECTURE}/multipart/$TAG[1]/logs/$TAG/%Y/%m/%d/%H/%M/%S
     s3_key_format_tag_delimiters .-
-    upload_timeout 1
+    upload_timeout 2m
 
 [OUTPUT]
     Name s3
     Match put-object*
     bucket ${S3_BUCKET_NAME}
     region us-west-2
+    store_dir /fluent-bit/buffer
     total_file_size 1M
-    s3_key_format /logs/put$TAG[1]/logs/$TAG/%Y/%m/%d/%H/%M/%S
+    s3_key_format /logs/${ARCHITECTURE}/put$TAG[1]/logs/$TAG/%Y/%m/%d/%H/%M/%S
     s3_key_format_tag_delimiters .-
     use_put_object On
-    upload_timeout 1
+    upload_timeout 30s
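With the architecture baked into the key format by this patch, a single bucket can hold x86_64 and aarch64 runs side by side; on an x86_64 host, for example, the multipart objects land under roughly

    logs/x86_64/multipart/upload/logs/multipart-upload-test-abc123/...

(same illustrative TAG expansion as above). Note that the multipart prefix exported here, logs/${ARCHITECTURE}/logs/multipart, carries an extra logs/ segment that the key format never produces, so the validator would scan an empty prefix; patch 5 below fixes exactly that.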
From 2008d4b04bd170979f81283583c13e128461a026 Mon Sep 17 00:00:00 2001
From: Wesley Pettit
Date: Sun, 11 Oct 2020 23:30:57 -0700
Subject: [PATCH 4/5] Fix cicd target for integ tests

---
 integ/integ.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/integ/integ.sh b/integ/integ.sh
index 4d8ca9679..4b65e5c4d 100755
--- a/integ/integ.sh
+++ b/integ/integ.sh
@@ -217,6 +217,7 @@ if [ "${1}" = "cicd" ]; then
     export TEST_FILE="firehose-test"
     clean_s3 && test_firehose
 
+    export S3_PREFIX="logs"
     export TEST_FILE="s3-test"
     export EXPECTED_EVENTS_LEN="7717"
     source ./integ/resources/create_test_resources.sh

From dee80895db767cebc01959af3c664f749879e5b5 Mon Sep 17 00:00:00 2001
From: Wesley Pettit
Date: Mon, 12 Oct 2020 13:11:56 -0700
Subject: [PATCH 5/5] Fix multipart upload prefix for S3 test

---
 integ/integ.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/integ/integ.sh b/integ/integ.sh
index 4b65e5c4d..23a2d7769 100755
--- a/integ/integ.sh
+++ b/integ/integ.sh
@@ -82,7 +82,7 @@ test_s3() {
     # different S3 prefix for each test
     export ARCHITECTURE=$(uname -m)
     export S3_PREFIX_PUT_OBJECT="logs/${ARCHITECTURE}/putobject"
-    export S3_PREFIX_MULTIPART="logs/${ARCHITECTURE}/logs/multipart"
+    export S3_PREFIX_MULTIPART="logs/${ARCHITECTURE}/multipart"
     # Tag is used in the s3 keys; each test run has a unique (random) tag
     export TAG=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 10)
     # Generate log data that will be stored in the S3 bucket
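Finally, a sketch of how the finished series gets exercised (the entry points are the integ/integ.sh branches shown above; AWS credentials and the scripts under integ/resources/ are assumed to be in place, since neither is part of this series):

    # run only the S3 test: create resources, clean the bucket, generate, validate
    ./integ/integ.sh s3

    # run the whole suite the way the pipeline does
    ./integ/integ.sh cicd

    # delete leftover objects under the current S3_PREFIX
    ./integ/integ.sh clean-s3

    # and, presumably, tear down the test resources themselves
    ./integ/integ.sh delete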