Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

refactor: remove unused unit tests in pegasus_write_service_impl_test #674

Merged
merged 2 commits
Jan 8, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
115 changes: 0 additions & 115 deletions src/server/test/pegasus_write_service_impl_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,6 @@
#include "server/pegasus_write_service_impl.h"
#include "message_utils.h"

#include <dsn/utility/defer.h>

namespace pegasus {
namespace server {
extern const int FAIL_DB_GET;
Expand All @@ -44,33 +42,6 @@ class pegasus_write_service_impl_test : public pegasus_server_test_base
_rocksdb_wrapper = _write_impl->_rocksdb_wrapper.get();
}

// Fetches the raw record stored under `raw_key` straight from rocksdb and
// returns the timestamp component of its timetag.
uint64_t read_timestamp_from(dsn::string_view raw_key)
{
    std::string stored_value;
    const rocksdb::Status status = _write_impl->_db->Get(
        _write_impl->_rd_opts, utils::to_rocksdb_slice(raw_key), &stored_value);

    // Decode the timetag from the stored value, then strip it down to the
    // timestamp part.
    const uint64_t timetag =
        pegasus_extract_timetag(_write_impl->_pegasus_data_version, stored_value);
    return extract_timestamp_from_timetag(timetag);
}

// Restarts the test server with duplication enabled.
// Tears down the replica created by the fixture, rebuilds it with
// app_info.duplicating = true, and re-runs SetUp() so members such as
// _write_impl point at the new server. The statement order matters:
// the server must stop before its replica is destroyed.
void set_app_duplicating()
{
// shut down the current server before destroying the replica it uses
_server->stop(false);
dsn::replication::destroy_replica(_replica);

// rebuild the replica, identical except that duplication is turned on
dsn::app_info app_info;
app_info.app_type = "pegasus";
app_info.duplicating = true;
_replica =
dsn::replication::create_test_replica(_replica_stub, _gpid, app_info, "./", false);
_server = dsn::make_unique<pegasus_server_impl>(_replica);

// re-run fixture initialization against the new server/replica
SetUp();
}

int db_get(dsn::string_view raw_key, db_get_context *get_ctx)
{
return _rocksdb_wrapper->get(raw_key, get_ctx);
Expand All @@ -88,92 +59,6 @@ class pegasus_write_service_impl_test : public pegasus_server_test_base
}
};

// End-to-end check of timetag-based conflict resolution for duplicated
// writes: a stored record is overwritten only when the incoming
// (timestamp, cluster_id) timetag compares greater than the stored one,
// and replaying an already-applied write still succeeds.
TEST_F(pegasus_write_service_impl_test, put_verify_timetag)
{
// duplication must be on for timetags to be written and verified
set_app_duplicating();

dsn::blob raw_key;
pegasus::pegasus_generate_key(
raw_key, dsn::string_view("hash_key"), dsn::string_view("sort_key"));
std::string value = "value";
int64_t decree = 10;

/// insert timestamp 10
uint64_t timestamp = 10;
auto ctx = db_write_context::create(decree, timestamp);
ASSERT_EQ(0, _write_impl->db_write_batch_put_ctx(ctx, raw_key, value, 0));
ASSERT_EQ(0, _write_impl->db_write(ctx.decree));
_write_impl->clear_up_batch_states(decree, 0);
// the stored record should now carry the timestamp we just wrote
ASSERT_EQ(read_timestamp_from(raw_key), timestamp);

/// insert timestamp 15, which overwrites the previous record
timestamp = 15;
ctx = db_write_context::create(decree, timestamp);
ASSERT_EQ(0, _write_impl->db_write_batch_put_ctx(ctx, raw_key, value, 0));
ASSERT_EQ(0, _write_impl->db_write(ctx.decree));
_write_impl->clear_up_batch_states(decree, 0);
ASSERT_EQ(read_timestamp_from(raw_key), timestamp);

/// insert timestamp 15 from remote, which will overwrite the previous record,
/// since its cluster id is larger (current cluster_id=1)
timestamp = 15;
ctx.remote_timetag = pegasus::generate_timetag(timestamp, 2, false);
ctx.verify_timetag = true;
ASSERT_EQ(0, _write_impl->db_write_batch_put_ctx(ctx, raw_key, value + "_new", 0));
ASSERT_EQ(0, _write_impl->db_write(ctx.decree));
_write_impl->clear_up_batch_states(decree, 0);
ASSERT_EQ(read_timestamp_from(raw_key), timestamp);
// confirm the user value was actually replaced, not just the timetag
std::string raw_value;
dsn::blob user_value;
rocksdb::Status s =
_write_impl->_db->Get(_write_impl->_rd_opts, utils::to_rocksdb_slice(raw_key), &raw_value);
pegasus_extract_user_data(_write_impl->_pegasus_data_version, std::move(raw_value), user_value);
ASSERT_EQ(user_value.to_string(), "value_new");

// write retry
// replaying the same remote write must still return success
ASSERT_EQ(0, _write_impl->db_write_batch_put_ctx(ctx, raw_key, value + "_new", 0));
ASSERT_EQ(0, _write_impl->db_write(ctx.decree));
_write_impl->clear_up_batch_states(decree, 0);

/// insert timestamp 16 from local, which will overwrite the remote record,
/// since its timestamp is larger
timestamp = 16;
ctx = db_write_context::create(decree, timestamp);
ASSERT_EQ(0, _write_impl->db_write_batch_put_ctx(ctx, raw_key, value, 0));
ASSERT_EQ(0, _write_impl->db_write(ctx.decree));
_write_impl->clear_up_batch_states(decree, 0);
ASSERT_EQ(read_timestamp_from(raw_key), timestamp);

// write retry
// replaying the same local write must still return success
ASSERT_EQ(0, _write_impl->db_write_batch_put_ctx(ctx, raw_key, value, 0));
ASSERT_EQ(0, _write_impl->db_write(ctx.decree));
_write_impl->clear_up_batch_states(decree, 0);
}

// verify timetag on data version v0
// Timetag verification must be skipped on data version 0 (old format has no
// timetag): a fail point is injected into db_get, so if
// db_write_batch_put_ctx ever invokes db_get, this test fails.
TEST_F(pegasus_write_service_impl_test, verify_timetag_compatible_with_version_0)
{
    dsn::fail::setup();
    // BUGFIX: teardown via RAII. ASSERT_EQ is a fatal gtest assertion that
    // returns from the test body on failure, which previously skipped the
    // trailing dsn::fail::teardown() and leaked the fail-point configuration
    // into subsequent tests.
    auto cleanup = dsn::defer([]() { dsn::fail::teardown(); });

    dsn::fail::cfg("db_get", "100%1*return()");
    // if db_write_batch_put_ctx invokes db_get, this test must fail.

    const_cast<uint32_t &>(_write_impl->_pegasus_data_version) = 0; // old version

    dsn::blob raw_key;
    pegasus::pegasus_generate_key(
        raw_key, dsn::string_view("hash_key"), dsn::string_view("sort_key"));
    std::string value = "value";
    int64_t decree = 10;
    uint64_t timestamp = 10;

    // duplicated write with verify_timetag=true; must succeed without
    // consulting db_get on the old data version
    auto ctx = db_write_context::create_duplicate(decree, timestamp, true);
    ASSERT_EQ(0, _write_impl->db_write_batch_put_ctx(ctx, raw_key, value, 0));
    ASSERT_EQ(0, _write_impl->db_write(ctx.decree));
    _write_impl->clear_up_batch_states(decree, 0);
}

class incr_test : public pegasus_write_service_impl_test
{
public:
Expand Down