Skip to content

Commit

Permalink
Improve the corrupted block test (#1147)
Browse files Browse the repository at this point in the history
## Description of change

Just test improvements. Addresses review comments from
#1139.

Relevant issues: N/A.

## Does this change impact existing behavior?

No.

## Does this change need a changelog entry in any of the crates?

No.

---

By submitting this pull request, I confirm that my contribution is made
under the terms of the Apache 2.0 license and I agree to the terms of
the [Developer Certificate of Origin
(DCO)](https://developercertificate.org/).

---------

Signed-off-by: Vlad Volodkin <[email protected]>
Co-authored-by: Vlad Volodkin <[email protected]>
  • Loading branch information
vladem and Vlad Volodkin authored Nov 20, 2024
1 parent 84c3e54 commit 87ce33f
Showing 1 changed file with 20 additions and 22 deletions.
42 changes: 20 additions & 22 deletions mountpoint-s3/tests/fuse_tests/cache_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,26 +15,25 @@ use std::time::Duration;
use tempfile::TempDir;
use test_case::test_case;

#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
use crate::common::s3::{get_express_bucket, get_standard_bucket};
#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
use mountpoint_s3::data_cache::{build_prefix, get_s3_key, BlockIndex, ExpressDataCache};
#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
use mountpoint_s3::object::ObjectId;
#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
use mountpoint_s3_client::types::{PutObjectSingleParams, UploadChecksum};
#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
use mountpoint_s3_client::ObjectClient;
#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
use mountpoint_s3_crt::checksums::crc32c;

const CACHE_BLOCK_SIZE: u64 = 1024 * 1024;
const CLIENT_PART_SIZE: usize = 8 * 1024 * 1024;

/// A test that checks that an invalid block may not be served from the cache
#[tokio::test]
#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
async fn express_invalid_block_read() {
use mountpoint_s3_client::types::{PutObjectSingleParams, UploadChecksum};
use mountpoint_s3_crt::checksums::crc32c;

let bucket = get_standard_bucket();
let cache_bucket = get_express_bucket();
let prefix = get_test_prefix("express_invalid_block_read");
Expand All @@ -50,7 +49,7 @@ async fn express_invalid_block_read() {
let (mount_point, _session) = mount_bucket(client.clone(), cache.clone(), &bucket, &prefix);

// Put an object to the mounted bucket
let object_key = generate_unprefixed_key(&prefix, "key", 100);
let object_key = "key";
let full_object_key = format!("{prefix}{object_key}");
let object_data = "object_data";
let result = client
Expand All @@ -60,7 +59,7 @@ async fn express_invalid_block_read() {
let object_etag = result.etag.into_inner();

// Read data twice, expect cache hits and no errors
let path = mount_point.path().join(&object_key);
let path = mount_point.path().join(object_key);

let put_block_count = cache.put_block_count();
let read = fs::read(&path).expect("read should succeed");
Expand All @@ -74,7 +73,7 @@ async fn express_invalid_block_read() {
assert!(cache.get_block_hit_count() > 0, "reads should result in a cache hit");

// Corrupt the cache block by replacing it with an object holding no metadata
let object_id = get_object_id(&prefix, &object_key, &object_etag);
let object_id = get_object_id(&prefix, object_key, &object_etag);
let block_key = get_express_cache_block_key(&bucket, &object_id, 0);
let corrupted_block = "corrupted_block";
let checksum = crc32c::checksum(corrupted_block.as_bytes());
Expand All @@ -84,8 +83,8 @@ async fn express_invalid_block_read() {
.await
.expect("put object must succeed");

// Expect a successfull read from the source bucket. We expect cache errors being recorded because of the corrupted block.
let path = mount_point.path().join(&object_key);
// Expect a successful read from the source bucket. We expect cache errors being recorded because of the corrupted block.
let path = mount_point.path().join(object_key);
let read = fs::read(&path).expect("read should succeed");
assert_eq!(read, object_data.as_bytes());
assert!(
Expand All @@ -98,7 +97,7 @@ async fn express_invalid_block_read() {
#[test_case("£", 100, 1024; "non-ascii key")]
#[test_case("key", 1024, 1024; "long key")]
#[test_case("key", 100, 1024 * 1024; "big file")]
#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
fn express_cache_write_read(key_suffix: &str, key_size: usize, object_size: usize) {
let client = create_crt_client(CLIENT_PART_SIZE, CLIENT_PART_SIZE, Default::default());
let bucket_name = get_standard_bucket();
Expand All @@ -120,7 +119,6 @@ fn express_cache_write_read(key_suffix: &str, key_size: usize, object_size: usiz
#[test_case("£", 100, 1024; "non-ascii key")]
#[test_case("key", 1024, 1024; "long key")]
#[test_case("key", 100, 1024 * 1024; "big file")]
#[cfg(feature = "s3_tests")]
fn disk_cache_write_read(key_suffix: &str, key_size: usize, object_size: usize) {
let cache_dir = tempfile::tempdir().unwrap();
let cache_config = DiskDataCacheConfig {
Expand All @@ -144,7 +142,7 @@ fn disk_cache_write_read(key_suffix: &str, key_size: usize, object_size: usize)
}

#[tokio::test]
#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
async fn express_cache_verify_fail_non_express() {
use mountpoint_s3_client::error::ObjectClientError;
use mountpoint_s3_client::S3RequestError::ResponseError;
Expand All @@ -168,7 +166,7 @@ async fn express_cache_verify_fail_non_express() {
}

#[tokio::test]
#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
async fn express_cache_verify_fail_forbidden() {
use crate::common::creds::get_scoped_down_credentials;
use mountpoint_s3_client::config::S3ClientAuthConfig;
Expand Down Expand Up @@ -230,7 +228,7 @@ fn cache_write_read_base<Cache>(
let (mount_point, _session) = mount_bucket(client, cache.clone(), bucket, &prefix);

// Write an object, no caching happens yet
let key = generate_unprefixed_key(&prefix, key_suffix, key_size);
let key = get_random_key(&prefix, key_suffix, key_size);
let path = mount_point.path().join(&key);
let written = random_binary_data(object_size);
fs::write(&path, &written).expect("write should succeed");
Expand Down Expand Up @@ -265,7 +263,7 @@ fn random_binary_data(size_in_bytes: usize) -> Vec<u8> {

/// Creates a random key which has a size of at least `min_size_in_bytes`
/// The `key_prefix` is not included in the return value.
fn generate_unprefixed_key(key_prefix: &str, key_suffix: &str, min_size_in_bytes: usize) -> String {
fn get_random_key(key_prefix: &str, key_suffix: &str, min_size_in_bytes: usize) -> String {
let random_suffix: u64 = rand::thread_rng().gen();
let last_key_part = format!("{key_suffix}{random_suffix}"); // part of the key after all the "/"
let full_key = format!("{key_prefix}{last_key_part}");
Expand Down Expand Up @@ -293,12 +291,12 @@ where
(mount_point, session)
}

#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
/// Builds the [`ObjectId`] for an object in the source bucket: the full key
/// (prefix + key) paired with its ETag.
fn get_object_id(prefix: &str, key: &str, etag: &str) -> ObjectId {
ObjectId::new(format!("{prefix}{key}"), etag.into())
}

#[cfg(all(feature = "s3_tests", feature = "s3express_tests"))]
#[cfg(feature = "s3express_tests")]
fn get_express_cache_block_key(bucket: &str, cache_key: &ObjectId, block_idx: BlockIndex) -> String {
let block_key_prefix = build_prefix(bucket, CACHE_BLOCK_SIZE);
get_s3_key(&block_key_prefix, cache_key, block_idx)
Expand Down

0 comments on commit 87ce33f

Please sign in to comment.