-
Notifications
You must be signed in to change notification settings - Fork 133
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add integration tests for out_s3 #75
Changes from all commits
0a90c66
a739b6b
c65c615
2008d4b
dee8089
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
fluent-bit-integ-test-1264 |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,8 @@ | ||
FROM amazonlinux | ||
|
||
RUN yum upgrade -y | ||
RUN yum install -y openssl | ||
|
||
COPY logscript.sh / | ||
|
||
CMD ["bash", "/logscript.sh"] |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,20 @@ | ||
#!/bin/bash | ||
|
||
# Writes 7717 unique log lines | ||
# intermixed with 1 KB lines of random data | ||
# these logs are written over the course of a little over 1 minute | ||
# because we want to test S3's local buffering | ||
# Why 7717? It's a prime number. And I like that. | ||
# Finally we sleep for 200s- ensuring the upload timeout hits for the last chunk | ||
# then exit | ||
|
||
for i in {0..7716} | ||
do | ||
echo $i | ||
openssl rand -base64 1000 | tr '\n' '-' && echo "" | ||
sleep 0.0001 | ||
done | ||
|
||
sleep 200 | ||
|
||
exit 0 |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -16,11 +16,12 @@ import ( | |
) | ||
|
||
const ( | ||
envAWSRegion = "AWS_REGION" | ||
envS3Bucket = "S3_BUCKET_NAME" | ||
envS3Action = "S3_ACTION" | ||
envS3Prefix = "S3_PREFIX" | ||
envTestFile = "TEST_FILE" | ||
envAWSRegion = "AWS_REGION" | ||
envS3Bucket = "S3_BUCKET_NAME" | ||
envS3Action = "S3_ACTION" | ||
envS3Prefix = "S3_PREFIX" | ||
envTestFile = "TEST_FILE" | ||
envExpectedLogsLen = "EXPECTED_EVENTS_LEN" | ||
) | ||
|
||
type Message struct { | ||
|
@@ -48,6 +49,15 @@ func main() { | |
exitErrorf("[TEST FAILURE] test verfication file name required. Set the value for environment variable- %s", envTestFile) | ||
} | ||
|
||
expectedEventsLen := os.Getenv(envExpectedLogsLen) | ||
if expectedEventsLen == "" { | ||
exitErrorf("[TEST FAILURE] number of expected log events required. Set the value for environment variable- %s", envExpectedLogsLen) | ||
} | ||
numEvents, convertionError := strconv.Atoi(expectedEventsLen) | ||
if convertionError != nil { | ||
exitErrorf("[TEST FAILURE] String to Int convertion Error for EXPECTED_EVENTS_LEN:", convertionError) | ||
} | ||
|
||
s3Client, err := getS3Client(region) | ||
if err != nil { | ||
exitErrorf("[TEST FAILURE] Unable to create new S3 client: %v", err) | ||
|
@@ -57,7 +67,7 @@ func main() { | |
if s3Action == "validate" { | ||
// Validate the data on the s3 bucket | ||
getS3ObjectsResponse := getS3Objects(s3Client, bucket, prefix) | ||
validate(s3Client, getS3ObjectsResponse, bucket, testFile) | ||
validate(s3Client, getS3ObjectsResponse, bucket, testFile, numEvents) | ||
} else { | ||
// Clean the s3 bucket-- delete all objects | ||
deleteS3Objects(s3Client, bucket, prefix) | ||
|
@@ -97,8 +107,8 @@ func getS3Objects(s3Client *s3.S3, bucket string, prefix string) *s3.ListObjects | |
// Validates the log messages. Our log producer is designed to send numEvents unique integers [0 - numEvents-1]. | ||
// The S3 output tries to send each log maintaining the "at least once" policy. | ||
// To validate, we need to make sure all the valid numbers [0 - numEvents-1] are stored at least once. | ||
func validate(s3Client *s3.S3, response *s3.ListObjectsV2Output, bucket string, testFile string) { | ||
logCounter := make([]int, 1000) | ||
func validate(s3Client *s3.S3, response *s3.ListObjectsV2Output, bucket string, testFile string, numEvents int) { | ||
logCounter := make([]int, numEvents) | ||
for index := range logCounter { | ||
logCounter[index] = 1 | ||
} | ||
|
@@ -121,6 +131,9 @@ func validate(s3Client *s3.S3, response *s3.ListObjectsV2Output, bucket string, | |
if d == "" { | ||
continue | ||
} | ||
if len(d) > 500 { | ||
continue | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Maybe I am missing something. Why do we need this condition and the upper limit is 500? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Look at the new S3 logger, it spits out log lines that are just numbers, and also large lines of 1KB random data (because S3 can only send data in chunks of a few MB). I wanted to make it so that the code didn't waste time trying to process those lines with random data. 500 is an arbitrary number between the size of each type of log event. The integer lines that we count will be smaller and the random data lines will be larger. |
||
} | ||
|
||
var message Message | ||
|
||
|
@@ -134,8 +147,8 @@ func validate(s3Client *s3.S3, response *s3.ListObjectsV2Output, bucket string, | |
exitErrorf("[TEST FAILURE] String to Int convertion Error:", convertionError) | ||
} | ||
|
||
if number < 0 || number >= 1000 { | ||
exitErrorf("[TEST FAILURE] Invalid number: %d found. Expected value in range (0 - 999)", number) | ||
if number < 0 || number >= numEvents { | ||
exitErrorf("[TEST FAILURE] Invalid number: %d found. Expected value in range (0 - %d)", number, numEvents) | ||
} | ||
|
||
logCounter[number] = 0 | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,35 @@ | ||
version: "2" | ||
|
||
services: | ||
fluent-bit: | ||
image: amazon/aws-for-fluent-bit:latest | ||
environment: | ||
- "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}" | ||
- "AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}" | ||
- "AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}" | ||
- "S3_BUCKET_NAME=${S3_BUCKET_NAME}" | ||
- "ARCHITECTURE=${ARCHITECTURE}" | ||
- FLB_LOG_LEVEL=debug | ||
volumes: | ||
- /var/run/:/var/run | ||
- ./:/fluent-bit/etc/ | ||
logger-multipart-test: | ||
build: ${PROJECT_ROOT}/integ/s3-logger | ||
depends_on: | ||
- fluent-bit | ||
logging: | ||
driver: fluentd | ||
options: | ||
tag: "multipart-upload-test-${TAG}" | ||
fluentd-address: unix:///var/run/fluent.sock | ||
fluentd-async-connect: "true" | ||
logger-put-object-test: | ||
build: ${PROJECT_ROOT}/integ/s3-logger | ||
depends_on: | ||
- fluent-bit | ||
logging: | ||
driver: fluentd | ||
options: | ||
tag: "put-object-test-${TAG}" | ||
fluentd-address: unix:///var/run/fluent.sock | ||
fluentd-async-connect: "true" |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,17 @@ | ||
version: "2" | ||
|
||
services: | ||
validate-s3-multipart: | ||
build: ${PROJECT_ROOT}/integ/s3 | ||
environment: | ||
- "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}" | ||
- "AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}" | ||
- "AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}" | ||
- "AWS_REGION=${AWS_REGION}" | ||
- "S3_BUCKET_NAME=${S3_BUCKET_NAME}" | ||
- "S3_ACTION=${S3_ACTION}" | ||
- "S3_PREFIX=${S3_PREFIX_MULTIPART}" | ||
- "EXPECTED_EVENTS_LEN=${EXPECTED_EVENTS_LEN}" | ||
- "TEST_FILE=${TEST_FILE}" | ||
volumes: | ||
- ${PROJECT_ROOT}/integ/out:/out |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,17 @@ | ||
version: "2" | ||
|
||
services: | ||
validate-s3-put-object: | ||
build: ${PROJECT_ROOT}/integ/s3 | ||
environment: | ||
- "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}" | ||
- "AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}" | ||
- "AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}" | ||
- "AWS_REGION=${AWS_REGION}" | ||
- "S3_BUCKET_NAME=${S3_BUCKET_NAME}" | ||
- "S3_ACTION=${S3_ACTION}" | ||
- "S3_PREFIX=${S3_PREFIX_PUT_OBJECT}" | ||
- "EXPECTED_EVENTS_LEN=${EXPECTED_EVENTS_LEN}" | ||
- "TEST_FILE=${TEST_FILE}" | ||
volumes: | ||
- ${PROJECT_ROOT}/integ/out:/out |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,29 @@ | ||
[SERVICE] | ||
Log_Level debug | ||
|
||
[INPUT] | ||
Name forward | ||
unix_path /var/run/fluent.sock | ||
|
||
[OUTPUT] | ||
Name s3 | ||
Match multipart* | ||
bucket ${S3_BUCKET_NAME} | ||
region us-west-2 | ||
store_dir /fluent-bit/buffer | ||
total_file_size 100M | ||
s3_key_format /logs/${ARCHITECTURE}/multipart/$TAG[1]/logs/$TAG/%Y/%m/%d/%H/%M/%S | ||
s3_key_format_tag_delimiters .- | ||
upload_timeout 2m | ||
|
||
[OUTPUT] | ||
Name s3 | ||
Match put-object* | ||
bucket ${S3_BUCKET_NAME} | ||
region us-west-2 | ||
store_dir /fluent-bit/buffer | ||
total_file_size 1M | ||
s3_key_format /logs/${ARCHITECTURE}/put$TAG[1]/logs/$TAG/%Y/%m/%d/%H/%M/%S | ||
s3_key_format_tag_delimiters .- | ||
use_put_object On | ||
upload_timeout 30s |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
bin | ||
integ/out |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
lol- I was searching for it.