From 2b0afee766ef3d1ac03eb635b92cfa402c0536f4 Mon Sep 17 00:00:00 2001 From: Tobias Grieger Date: Thu, 13 Oct 2022 14:34:47 +0200 Subject: [PATCH] kvnemesis: uniquely identify all versions https://cockroachlabs.slack.com/archives/C02FTAH8JCT/p1666704951072499 https://cockroachlabs.slack.com/archives/C02FTAH8JCT/p1666729571728869?thread_ts=1666705742.466619&cid=C02FTAH8JCT This is essentially a v2 of kvnemesis. Fixes #69642. Epic: none Release note: None --- pkg/BUILD.bazel | 6 + pkg/kv/batch.go | 5 + pkg/kv/kvclient/kvcoord/dist_sender.go | 26 +- pkg/kv/kvclient/kvcoord/testing_knobs.go | 11 +- pkg/kv/kvclient/kvcoord/transport.go | 14 +- pkg/kv/kvnemesis/BUILD.bazel | 12 +- pkg/kv/kvnemesis/applier.go | 60 +- pkg/kv/kvnemesis/applier_test.go | 188 +- pkg/kv/kvnemesis/doc.go | 62 +- pkg/kv/kvnemesis/engine.go | 81 +- pkg/kv/kvnemesis/engine_test.go | 22 +- pkg/kv/kvnemesis/env.go | 11 +- pkg/kv/kvnemesis/generator.go | 206 +- pkg/kv/kvnemesis/generator_test.go | 21 +- pkg/kv/kvnemesis/kvnemesis.go | 32 +- pkg/kv/kvnemesis/kvnemesis_test.go | 132 +- pkg/kv/kvnemesis/kvnemesisutil/BUILD.bazel | 14 + pkg/kv/kvnemesis/kvnemesisutil/context.go | 27 + pkg/kv/kvnemesis/kvnemesisutil/seq.go | 21 + pkg/kv/kvnemesis/operations.go | 107 +- pkg/kv/kvnemesis/operations.proto | 26 +- pkg/kv/kvnemesis/operations_test.go | 55 +- pkg/kv/kvnemesis/seq_tracker.go | 88 + pkg/kv/kvnemesis/testdata/TestApplier/batch | 8 + .../testdata/TestApplier/batch-mixed | 12 + .../testdata/TestApplier/batch-mixed-err | 10 + pkg/kv/kvnemesis/testdata/TestApplier/del | 3 + pkg/kv/kvnemesis/testdata/TestApplier/del-err | 3 + .../kvnemesis/testdata/TestApplier/delrange | 3 + .../testdata/TestApplier/delrange-err | 3 + pkg/kv/kvnemesis/testdata/TestApplier/get | 3 + pkg/kv/kvnemesis/testdata/TestApplier/get-err | 3 + .../testdata/TestApplier/get-for-update | 3 + pkg/kv/kvnemesis/testdata/TestApplier/merge | 3 + .../testdata/TestApplier/merge-again | 3 + pkg/kv/kvnemesis/testdata/TestApplier/put | 3 + 
pkg/kv/kvnemesis/testdata/TestApplier/put-err | 3 + pkg/kv/kvnemesis/testdata/TestApplier/rscan | 3 + .../kvnemesis/testdata/TestApplier/rscan-err | 3 + .../testdata/TestApplier/rscan-for-update | 3 + .../testdata/TestApplier/rscan-for-update-err | 3 + pkg/kv/kvnemesis/testdata/TestApplier/scan | 3 + .../testdata/TestApplier/scan-for-update | 3 + .../testdata/TestApplier/scan-for-update-err | 3 + pkg/kv/kvnemesis/testdata/TestApplier/split | 3 + .../testdata/TestApplier/split-again | 3 + .../kvnemesis/testdata/TestApplier/transfer | 3 + .../testdata/TestApplier/transfer-again | 3 + .../testdata/TestApplier/txn-commit-batch | 10 + .../testdata/TestApplier/txn-commit-mixed | 12 + .../testdata/TestApplier/txn-delrange | 6 + pkg/kv/kvnemesis/testdata/TestApplier/txn-err | 6 + .../kvnemesis/testdata/TestApplier/txn-error | 6 + .../testdata/TestApplier/txn-rollback | 6 + pkg/kv/kvnemesis/testdata/TestApplier/zcfg | 3 + .../kvnemesis/testdata/TestApplier/zcfg-again | 3 + .../kvnemesis/testdata/TestEngine/output.txt | 11 + .../kvnemesis/testdata/TestOperationsFormat/0 | 3 + .../kvnemesis/testdata/TestOperationsFormat/1 | 3 + .../kvnemesis/testdata/TestOperationsFormat/2 | 9 + .../kvnemesis/testdata/TestOperationsFormat/3 | 14 + .../ambiguous_del-del_transaction_committed | 8 + ...el-del_transaction_committed_but_wrong_seq | 9 + .../ambiguous_put-del_transaction_committed | 9 + ...saction_committed_but_has_validation_error | 10 + ...biguous_put-del_transaction_did_not_commit | 7 + .../ambiguous_put-put_transaction_committed | 9 + ...saction_committed_but_has_validation_error | 10 + ...biguous_put-put_transaction_did_not_commit | 7 + .../TestValidate/batch_of_reads_after_writes | 13 + .../batch_of_reads_after_writes_and_deletes | 17 + ...er_writes_and_deletes_returning_tombstones | 19 + ..._writes_and_deletes_returning_wrong_values | 18 + ...writes_and_deletes_with_valid_time_overlap | 17 + ..._reads_after_writes_returning_wrong_values | 14 + 
...reads_after_writes_with_empty_time_overlap | 14 + .../TestValidate/batch_of_scans_after_writes | 13 + ..._scans_after_writes_returning_wrong_values | 14 + ...s_after_writes_with_non-empty_time_overlap | 14 + .../TestValidate/batch_of_touching_rangedels | 10 + .../batch_of_two_overlapping_rangedels | 10 + .../batch_with_two_deletes_of_same_key | 9 + .../testdata/TestValidate/no_ops_and_no_kvs | 2 + .../no_ops_with_unexpected_delete | 4 + .../TestValidate/no_ops_with_unexpected_write | 4 + .../one_ambiguous_delete_with_failed_write | 3 + ...iled_write_before_a_later_committed_delete | 5 + ...one_ambiguous_delete_with_successful_write | 4 + .../one_ambiguous_put_with_failed_write | 3 + .../one_ambiguous_put_with_successful_write | 4 + .../one_batch_delete_with_missing_write | 8 + .../one_batch_delete_with_successful_write | 8 + .../one_batch_put_with_missing_write | 8 + .../one_batch_put_with_successful_write | 8 + .../one_delete_with_expected_write | 4 + ...ter_write_transaction_with_shadowed_delete | 15 + .../one_delete_with_missing_write | 4 + .../TestValidate/one_deleterange_after_write | 9 + ...one_deleterange_after_write_extra_deletion | 11 + .../one_deleterange_after_write_missing_write | 9 + ...eterange_after_write_returning_wrong_value | 10 + ...terange_after_write_with_spurious_deletion | 10 + .../TestValidate/one_deleterange_after_writes | 15 + .../one_deleterange_after_writes_and_delete | 18 + ...rectly_deleting_keys_outside_span_boundary | 12 + ...rites_returning_keys_outside_span_boundary | 12 + ...eleterange_after_writes_with_missing_write | 16 + ...r_writes_with_write_timestamp_disagreement | 16 + .../TestValidate/one_deleterange_before_write | 5 + ...terange_before_write_returning_wrong_value | 6 + .../TestValidate/one_put_with_expected_write | 4 + .../TestValidate/one_put_with_missing_write | 4 + .../TestValidate/one_read_after_write | 5 + .../one_read_after_write_and_delete | 7 + ...after_write_and_delete_returning_tombstone | 7 + 
...one_read_after_write_returning_wrong_value | 6 + .../TestValidate/one_read_before_delete | 5 + .../TestValidate/one_read_before_write | 5 + .../one_read_before_write_and_delete | 7 + ...ne_read_before_write_returning_wrong_value | 5 + .../one_read_in_between_write_and_delete | 7 + .../TestValidate/one_read_in_between_writes | 7 + ...ryable_delete_with_write_correctly_missing | 3 + ...able_delete_with_write_incorrectly_present | 5 + ...retryable_put_with_write_correctly_missing | 3 + ...tryable_put_with_write_incorrectly_present | 5 + ...verse_scan_after_write_returning_extra_key | 8 + ...rse_scan_after_write_returning_missing_key | 8 + .../one_reverse_scan_after_writes | 7 + ...er_writes_returning_results_in_wrong_order | 8 + ...es_returning_results_outside_scan_boundary | 10 + .../TestValidate/one_scan_after_write | 5 + .../one_scan_after_write_returning_extra_key | 8 + ...one_scan_after_write_returning_missing_key | 8 + ...one_scan_after_write_returning_wrong_value | 6 + .../TestValidate/one_scan_after_writes | 7 + .../one_scan_after_writes_and_delete | 11 + ...er_writes_and_delete_returning_missing_key | 20 + ...er_writes_returning_results_in_wrong_order | 8 + ...es_returning_results_outside_scan_boundary | 10 + .../TestValidate/one_scan_before_write | 5 + ...ne_scan_before_write_returning_wrong_value | 6 + .../TestValidate/one_scan_in_between_writes | 7 + ...after_write_and_delete_returning_extra_key | 13 + ...ete_with_write_on_another_key_after_delete | 11 + ...l_deleterange_followed_by_put_after_writes | 11 + ...r_writes_with_write_timestamp_disagreement | 12 + ...l_put_shadowed_by_deleterange_after_writes | 11 + ...r_writes_with_write_timestamp_disagreement | 12 + ...transactional_put_with_correct_commit_time | 7 + ...ansactional_put_with_incorrect_commit_time | 8 + ...scan_followed_by_delete_outside_time_range | 13 + ..._scan_followed_by_delete_within_time_range | 12 + ..._committed_delete_with_first_write_missing | 9 + 
...committed_delete_with_second_write_missing | 9 + ...y_committed_delete_with_the_correct_writes | 7 + ...d_delete_with_write_timestamp_disagreement | 10 + ...lly_committed_put_with_first_write_missing | 9 + ...ly_committed_put_with_second_write_missing | 9 + ...ally_committed_put_with_the_correct_writes | 7 + ...tted_put_with_write_timestamp_disagreement | 10 + ..._batch_delete_with_write_correctly_missing | 10 + ...ack_batch_put_with_write_correctly_missing | 10 + ...d_back_delete_with_write_correctly_missing | 6 + ...back_delete_with_write_incorrectly_present | 8 + ...lled_back_put_with_write_correctly_missing | 6 + ...ed_back_put_with_write_incorrectly_present | 8 + .../TestValidate/rangedel_with_range_split | 6 + .../TestValidate/read_before_rangedel | 7 + .../TestValidate/single_mvcc_rangedel | 4 + .../single_mvcc_rangedel_after_put | 6 + .../single_mvcc_rangedel_before_put | 6 + ...ansaction_with_incorrect_read_after_delete | 12 + ...ransaction_with_incorrect_read_after_write | 10 + ...nsaction_with_incorrect_read_before_delete | 12 + ...ansaction_with_incorrect_read_before_write | 10 + ...ransaction_with_incorrect_scan_after_write | 10 + ...ansaction_with_incorrect_scan_before_write | 10 + ...nsaction_with_read_before_and_after_delete | 11 + ...ansaction_with_read_before_and_after_write | 9 + ...ansaction_with_scan_before_and_after_write | 9 + ...nal_read_and_write_with_empty_time_overlap | 13 + ...read_and_write_with_non-empty_time_overlap | 12 + ...writes_and_deletes_with_empty_time_overlap | 20 + ...es_and_deletes_with_non-empty_time_overlap | 16 + ...eletes_after_write_with_empty_time_overlap | 16 + ...es_after_write_with_non-empty_time_overlap | 15 + ..._reads_one_missing_with_empty_time_overlap | 14 + ...ds_one_missing_with_non-empty_time_overlap | 13 + ...ransactional_reads_with_empty_time_overlap | 16 + ...actional_reads_with_non-empty_time_overlap | 15 + ...nal_scan_and_write_with_empty_time_overlap | 13 + 
...scan_and_write_with_non-empty_time_overlap | 12 + ...scans_after_delete_with_empty_time_overlap | 16 + ...s_after_delete_with_non-empty_time_overlap | 17 + ..._scans_one_missing_with_empty_time_overlap | 14 + ...ns_one_missing_with_non-empty_time_overlap | 13 + ...ransactional_scans_with_empty_time_overlap | 16 + ...actional_scans_with_non-empty_time_overlap | 15 + .../TestValidate/two_overlapping_rangedels | 8 + ...nal_deletes_with_out_of_order_commit_times | 13 + ...tionally_committed_deletes_of_the_same_key | 8 + ...d_deletes_of_the_same_key_with_extra_write | 10 + ...te_ops_of_the_same_key_with_incorrect_read | 12 + ..._put_delete_ops_of_the_same_key_with_reads | 11 + ...sactionally_committed_puts_of_the_same_key | 8 + ...tted_puts_of_the_same_key_with_extra_write | 10 + ..._committed_puts_of_the_same_key_with_reads | 11 + ..._committed_puts_of_the_same_key_with_scans | 14 + ...ommitted_writes_delete_put_of_the_same_key | 8 + ...ommitted_writes_put_delete_of_the_same_key | 8 + ...ut_delete_of_the_same_key_with_extra_write | 10 + pkg/kv/kvnemesis/validator.go | 891 ++++--- pkg/kv/kvnemesis/validator_test.go | 2037 ++++++++--------- pkg/kv/kvnemesis/watcher.go | 130 +- pkg/kv/kvserver/BUILD.bazel | 1 + pkg/kv/kvserver/batcheval/cmd_scan.go | 1 + pkg/kv/kvserver/kvserverbase/base.go | 2 +- pkg/kv/kvserver/rangefeed/catchup_scan.go | 14 +- pkg/kv/kvserver/replica_evaluate.go | 5 + pkg/kv/kvserver/replica_raft.go | 2 +- pkg/kv/kvserver/replica_rangefeed.go | 79 +- pkg/kv/kvserver/testing_knobs.go | 7 + pkg/roachpb/BUILD.bazel | 2 + pkg/roachpb/api.go | 1 + pkg/roachpb/api.proto | 6 + pkg/roachpb/api_test.go | 18 + pkg/roachpb/span_group.go | 6 +- pkg/roachprod/prometheus/prometheus_test.go | 14 +- .../testdata/multipleScrapeNodes.txt | 19 + .../prometheus/testdata/usingMakeCommands.txt | 28 + pkg/storage/BUILD.bazel | 1 + pkg/storage/enginepb/BUILD.bazel | 1 + pkg/storage/enginepb/mvcc3.go | 7 +- pkg/storage/enginepb/mvcc3.proto | 6 + 
pkg/storage/enginepb/mvcc3_test.go | 1 + pkg/storage/mvcc.go | 64 +- pkg/storage/mvcc_test.go | 19 +- pkg/storage/mvcc_value.go | 6 +- pkg/storage/point_synthesizing_iter.go | 12 +- .../lint/passes/fmtsafe/functions.go | 2 + pkg/util/buildutil/BUILD.bazel | 6 +- pkg/util/buildutil/crdb_test_off.go | 31 + pkg/util/buildutil/crdb_test_on.go | 4 + pkg/util/buildutil/crdb_test_test.go | 7 +- pkg/util/buildutil/testingint/BUILD.bazel | 23 + .../buildutil/testingint/testing_int64.go | 68 + .../testingint/testing_int64_test.go | 37 + 248 files changed, 4537 insertions(+), 1894 deletions(-) create mode 100644 pkg/kv/kvnemesis/kvnemesisutil/BUILD.bazel create mode 100644 pkg/kv/kvnemesis/kvnemesisutil/context.go create mode 100644 pkg/kv/kvnemesis/kvnemesisutil/seq.go create mode 100644 pkg/kv/kvnemesis/seq_tracker.go create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/batch create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/batch-mixed create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/batch-mixed-err create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/del create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/del-err create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/delrange create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/delrange-err create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/get create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/get-err create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/get-for-update create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/merge create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/merge-again create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/put create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/put-err create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/rscan create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/rscan-err create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/rscan-for-update create mode 100644 
pkg/kv/kvnemesis/testdata/TestApplier/rscan-for-update-err create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/scan create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/scan-for-update create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/scan-for-update-err create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/split create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/split-again create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/transfer create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/transfer-again create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/txn-commit-batch create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/txn-commit-mixed create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/txn-delrange create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/txn-err create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/txn-error create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/txn-rollback create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/zcfg create mode 100644 pkg/kv/kvnemesis/testdata/TestApplier/zcfg-again create mode 100644 pkg/kv/kvnemesis/testdata/TestEngine/output.txt create mode 100644 pkg/kv/kvnemesis/testdata/TestOperationsFormat/0 create mode 100644 pkg/kv/kvnemesis/testdata/TestOperationsFormat/1 create mode 100644 pkg/kv/kvnemesis/testdata/TestOperationsFormat/2 create mode 100644 pkg/kv/kvnemesis/testdata/TestOperationsFormat/3 create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_del-del_transaction_committed create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_del-del_transaction_committed_but_wrong_seq create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_committed create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_committed_but_has_validation_error create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_did_not_commit create mode 100644 
pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_committed create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_committed_but_has_validation_error create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_did_not_commit create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_returning_tombstones create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_returning_wrong_values create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_with_valid_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_returning_wrong_values create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes_returning_wrong_values create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes_with_non-empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_touching_rangedels create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_of_two_overlapping_rangedels create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/batch_with_two_deletes_of_same_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/no_ops_and_no_kvs create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/no_ops_with_unexpected_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/no_ops_with_unexpected_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_failed_write 
create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_failed_write_before_a_later_committed_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_successful_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_put_with_failed_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_put_with_successful_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_batch_delete_with_missing_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_batch_delete_with_successful_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_batch_put_with_missing_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_batch_put_with_successful_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_expected_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_expected_write_after_write_transaction_with_shadowed_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_missing_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_extra_deletion create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_missing_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_returning_wrong_value create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_with_spurious_deletion create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_and_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_incorrectly_deleting_keys_outside_span_boundary create mode 100644 
pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_returning_keys_outside_span_boundary create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_with_missing_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_with_write_timestamp_disagreement create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_before_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_before_write_returning_wrong_value create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_put_with_expected_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_put_with_missing_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_and_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_and_delete_returning_tombstone create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_returning_wrong_value create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write_and_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write_returning_wrong_value create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_in_between_write_and_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_read_in_between_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_delete_with_write_correctly_missing create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_delete_with_write_incorrectly_present create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_put_with_write_correctly_missing create mode 100644 
pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_put_with_write_incorrectly_present create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_write_returning_extra_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_write_returning_missing_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes_returning_results_in_wrong_order create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes_returning_results_outside_scan_boundary create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_extra_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_missing_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_wrong_value create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_and_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_and_delete_returning_missing_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_returning_results_in_wrong_order create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_returning_results_outside_scan_boundary create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_before_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_before_write_returning_wrong_value create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_scan_in_between_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_tranactional_scan_after_write_and_delete_returning_extra_key create mode 100644 
pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_delete_with_write_on_another_key_after_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_deleterange_followed_by_put_after_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_deleterange_followed_by_put_after_writes_with_write_timestamp_disagreement create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_shadowed_by_deleterange_after_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_shadowed_by_deleterange_after_writes_with_write_timestamp_disagreement create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_with_correct_commit_time create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_with_incorrect_commit_time create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_scan_followed_by_delete_outside_time_range create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_scan_followed_by_delete_within_time_range create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_first_write_missing create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_second_write_missing create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_the_correct_writes create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_write_timestamp_disagreement create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_first_write_missing create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_second_write_missing create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_the_correct_writes create mode 100644 
pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_write_timestamp_disagreement create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_batch_delete_with_write_correctly_missing create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_batch_put_with_write_correctly_missing create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_delete_with_write_correctly_missing create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_delete_with_write_incorrectly_present create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_put_with_write_correctly_missing create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_put_with_write_incorrectly_present create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/rangedel_with_range_split create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/read_before_rangedel create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel_after_put create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel_before_put create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_after_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_after_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_before_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_before_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_scan_after_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_scan_before_write create mode 100644 
pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_read_before_and_after_delete create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_read_before_and_after_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_scan_before_and_after_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_read_and_write_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_read_and_write_with_non-empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_after_writes_and_deletes_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_after_writes_and_deletes_with_non-empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_and_deletes_after_write_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_and_deletes_after_write_with_non-empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_one_missing_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_one_missing_with_non-empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_with_non-empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_scan_and_write_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_scan_and_write_with_non-empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_after_delete_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_after_delete_with_non-empty_time_overlap create mode 100644 
pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_one_missing_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_one_missing_with_non-empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_with_empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_with_non-empty_time_overlap create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_overlapping_rangedels create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactional_deletes_with_out_of_order_commit_times create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_deletes_of_the_same_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_deletes_of_the_same_key_with_extra_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_put_delete_ops_of_the_same_key_with_incorrect_read create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_put_delete_ops_of_the_same_key_with_reads create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_extra_write create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_reads create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_scans create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_delete_put_of_the_same_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_put_delete_of_the_same_key create mode 100644 pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_put_delete_of_the_same_key_with_extra_write create mode 
100644 pkg/roachprod/prometheus/testdata/multipleScrapeNodes.txt create mode 100644 pkg/roachprod/prometheus/testdata/usingMakeCommands.txt create mode 100644 pkg/util/buildutil/testingint/BUILD.bazel create mode 100644 pkg/util/buildutil/testingint/testing_int64.go create mode 100644 pkg/util/buildutil/testingint/testing_int64_test.go diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel index 7a7eb7a44287..c175cab4e54b 100644 --- a/pkg/BUILD.bazel +++ b/pkg/BUILD.bazel @@ -542,6 +542,7 @@ ALL_TESTS = [ "//pkg/util/admission:admission_test", "//pkg/util/binfetcher:binfetcher_test", "//pkg/util/bitarray:bitarray_test", + "//pkg/util/buildutil/testingint:testingint_test", "//pkg/util/buildutil:buildutil_test", "//pkg/util/bulk:bulk_test", "//pkg/util/cache:cache_test", @@ -1114,6 +1115,7 @@ GO_TARGETS = [ "//pkg/kv/kvclient/rangefeed:rangefeed_test", "//pkg/kv/kvclient/rangestats:rangestats", "//pkg/kv/kvclient:kvclient", + "//pkg/kv/kvnemesis/kvnemesisutil:kvnemesisutil", "//pkg/kv/kvnemesis:kvnemesis", "//pkg/kv/kvnemesis:kvnemesis_test", "//pkg/kv/kvprober:kvprober", @@ -1927,6 +1929,8 @@ GO_TARGETS = [ "//pkg/util/bitarray:bitarray", "//pkg/util/bitarray:bitarray_test", "//pkg/util/bufalloc:bufalloc", + "//pkg/util/buildutil/testingint:testingint", + "//pkg/util/buildutil/testingint:testingint_test", "//pkg/util/buildutil:buildutil", "//pkg/util/buildutil:buildutil_test", "//pkg/util/bulk:bulk", @@ -2461,6 +2465,7 @@ GET_X_DATA_TARGETS = [ "//pkg/kv/kvclient/rangefeed/rangefeedcache:get_x_data", "//pkg/kv/kvclient/rangestats:get_x_data", "//pkg/kv/kvnemesis:get_x_data", + "//pkg/kv/kvnemesis/kvnemesisutil:get_x_data", "//pkg/kv/kvprober:get_x_data", "//pkg/kv/kvserver:get_x_data", "//pkg/kv/kvserver/abortspan:get_x_data", @@ -2964,6 +2969,7 @@ GET_X_DATA_TARGETS = [ "//pkg/util/bitarray:get_x_data", "//pkg/util/bufalloc:get_x_data", "//pkg/util/buildutil:get_x_data", + "//pkg/util/buildutil/testingint:get_x_data", "//pkg/util/bulk:get_x_data", 
"//pkg/util/cache:get_x_data", "//pkg/util/caller:get_x_data", diff --git a/pkg/kv/batch.go b/pkg/kv/batch.go index 890e4472146c..dc9d73ca77ec 100644 --- a/pkg/kv/batch.go +++ b/pkg/kv/batch.go @@ -78,6 +78,11 @@ func (b *Batch) ApproximateMutationBytes() int { return b.approxMutationReqBytes } +// Requests exposes the requests stashed in the batch thus far. +func (b *Batch) Requests() []roachpb.RequestUnion { + return b.reqs +} + // RawResponse returns the BatchResponse which was the result of a successful // execution of the batch, and nil otherwise. func (b *Batch) RawResponse() *roachpb.BatchResponse { diff --git a/pkg/kv/kvclient/kvcoord/dist_sender.go b/pkg/kv/kvclient/kvcoord/dist_sender.go index d8c1bdc5f9c0..2e5d6df18513 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender.go @@ -345,6 +345,8 @@ type DistSender struct { // LatencyFunc is used to estimate the latency to other nodes. latencyFunc LatencyFunc + onRangeSpanningNonTxnalBatch func(ba *roachpb.BatchRequest) *roachpb.Error + // locality is the description of the topography of the server on which the // DistSender is running. It is used to estimate the latency to other nodes // in the absence of a latency function. @@ -498,6 +500,11 @@ func NewDistSender(cfg DistSenderConfig) *DistSender { } else { ds.latencyFunc = ds.rpcContext.RemoteClocks.Latency } + + if cfg.TestingKnobs.OnRangeSpanningNonTxnalBatch != nil { + ds.onRangeSpanningNonTxnalBatch = cfg.TestingKnobs.OnRangeSpanningNonTxnalBatch + } + return ds } @@ -1243,8 +1250,23 @@ func (ds *DistSender) divideAndSendBatchToRanges( // If there's no transaction and ba spans ranges, possibly re-run as part of // a transaction for consistency. The case where we don't need to re-run is // if the read consistency is not required. 
- if ba.Txn == nil && ba.IsTransactional() && ba.ReadConsistency == roachpb.CONSISTENT { - return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}) + // + // NB: this check isn't quite right. If we mixed a DeleteRangeUsingTombstone + // with a Put, for example, we'd restart with a txn, but + // DeleteRangeUsingTombstone does not support txns. Could we instead determine + // the read/write timestamp here? But then the write might not be possible at + // that timestamp, and we need to start retrying the batch as a kind of + // starvable txn (currently the contract is that batches can't return retry + // errors). + if ba.Txn == nil { + if ba.IsTransactional() && ba.ReadConsistency == roachpb.CONSISTENT { + return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}) + } + if fn := ds.onRangeSpanningNonTxnalBatch; fn != nil { + if pErr := fn(ba); pErr != nil { + return nil, pErr + } + } } // If the batch contains a non-parallel commit EndTxn and spans ranges then // we want the caller to come again with the EndTxn in a separate diff --git a/pkg/kv/kvclient/kvcoord/testing_knobs.go b/pkg/kv/kvclient/kvcoord/testing_knobs.go index 9674aa6df967..1da19d7a75ed 100644 --- a/pkg/kv/kvclient/kvcoord/testing_knobs.go +++ b/pkg/kv/kvclient/kvcoord/testing_knobs.go @@ -10,7 +10,10 @@ package kvcoord -import "github.com/cockroachdb/cockroach/pkg/base" +import ( + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/roachpb" +) // ClientTestingKnobs contains testing options that dictate the behavior // of the key-value client. @@ -52,6 +55,12 @@ type ClientTestingKnobs struct { // CommitWaitFilter allows tests to instrument the beginning of a transaction // commit wait sleep. CommitWaitFilter func() + + // OnRangeSpanningNonTxnalBatch is invoked whenever DistSender attempts to split + // a non-transactional batch across a range boundary. The method may inject an + // error which, if non-nil, becomes the result of the batch. 
Otherwise, execution + // continues. + OnRangeSpanningNonTxnalBatch func(ba *roachpb.BatchRequest) *roachpb.Error } var _ base.ModuleTestingKnobs = &ClientTestingKnobs{} diff --git a/pkg/kv/kvclient/kvcoord/transport.go b/pkg/kv/kvclient/kvcoord/transport.go index ad074b811629..92f9a0aa6754 100644 --- a/pkg/kv/kvclient/kvcoord/transport.go +++ b/pkg/kv/kvclient/kvcoord/transport.go @@ -207,15 +207,21 @@ func (gt *grpcTransport) sendBatch( gt.opts.metrics.LocalSentCount.Inc(1) } reply, err := iface.Batch(ctx, ba) + // If we queried a remote node, perform extra validation and // import trace spans. if reply != nil && !rpc.IsLocal(iface) { - for i := range reply.Responses { - if err := reply.Responses[i].GetInner().Verify(ba.Requests[i].GetInner()); err != nil { - log.Errorf(ctx, "%v", err) + if err == nil { + for i := range reply.Responses { + if err := reply.Responses[i].GetInner().Verify(ba.Requests[i].GetInner()); err != nil { + log.Errorf(ctx, "%v", err) + return nil, err + } } } - // Import the remotely collected spans, if any. + + // Import the remotely collected spans, if any. Do this on error too, + // to get traces in that case as well (or to at least have a chance). 
if len(reply.CollectedSpans) != 0 { span := tracing.SpanFromContext(ctx) if span == nil { diff --git a/pkg/kv/kvnemesis/BUILD.bazel b/pkg/kv/kvnemesis/BUILD.bazel index a094482ca25e..cd29930a354f 100644 --- a/pkg/kv/kvnemesis/BUILD.bazel +++ b/pkg/kv/kvnemesis/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "generator.go", "kvnemesis.go", "operations.go", + "seq_tracker.go", "validator.go", "watcher.go", ], @@ -24,11 +25,13 @@ go_library( "//pkg/keys", "//pkg/kv", "//pkg/kv/kvclient/kvcoord", + "//pkg/kv/kvnemesis/kvnemesisutil", "//pkg/kv/kvserver", "//pkg/kv/kvserver/liveness", "//pkg/roachpb", "//pkg/sql/catalog/bootstrap", "//pkg/storage", + "//pkg/storage/enginepb", "//pkg/util/bufalloc", "//pkg/util/ctxgroup", "//pkg/util/encoding", @@ -41,11 +44,11 @@ go_library( "//pkg/util/timeutil", "//pkg/util/tracing", "//pkg/util/tracing/tracingpb", - "//pkg/util/uuid", "@com_github_cockroachdb_cockroach_go_v2//crdb", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_pebble//:pebble", "@com_github_cockroachdb_pebble//vfs", + "@com_github_petermattis_goid//:goid", "@org_golang_google_protobuf//proto", ], ) @@ -63,21 +66,28 @@ go_test( "validator_test.go", ], args = ["-test.timeout=55s"], + data = glob(["testdata/**"]), embed = [":kvnemesis"], deps = [ "//pkg/base", "//pkg/config/zonepb", "//pkg/kv", + "//pkg/kv/kvclient/kvcoord", + "//pkg/kv/kvnemesis/kvnemesisutil", "//pkg/kv/kvserver", "//pkg/roachpb", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/server", "//pkg/storage", + "//pkg/storage/enginepb", + "//pkg/testutils", + "//pkg/testutils/echotest", "//pkg/testutils/serverutils", "//pkg/testutils/skip", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", + "//pkg/util/buildutil", "//pkg/util/envutil", "//pkg/util/hlc", "//pkg/util/leaktest", diff --git a/pkg/kv/kvnemesis/applier.go b/pkg/kv/kvnemesis/applier.go index a6aae4df7642..4cd135cbde0d 100644 --- a/pkg/kv/kvnemesis/applier.go +++ b/pkg/kv/kvnemesis/applier.go @@ 
-16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -83,8 +84,9 @@ func applyOp(ctx context.Context, env *Env, db *kv.DB, op *Operation) { *ScanOperation, *BatchOperation, *DeleteOperation, - *DeleteRangeOperation: - applyClientOp(ctx, db, op, false /* inTxn */) + *DeleteRangeOperation, + *DeleteRangeUsingTombstoneOperation: + applyClientOp(ctx, db, op, false) case *SplitOperation: err := db.AdminSplit(ctx, o.Key, hlc.MaxTimestamp, roachpb.AdminSplitRequest_INGESTION) o.Result = resultInit(ctx, err) @@ -94,7 +96,6 @@ func applyOp(ctx context.Context, env *Env, db *kv.DB, op *Operation) { case *ChangeReplicasOperation: desc := getRangeDesc(ctx, o.Key, db) _, err := db.AdminChangeReplicas(ctx, o.Key, desc, o.Changes) - // TODO(dan): Save returned desc? o.Result = resultInit(ctx, err) case *TransferLeaseOperation: err := db.AdminTransferLease(ctx, o.Key, o.Target) @@ -120,7 +121,7 @@ func applyOp(ctx context.Context, env *Env, db *kv.DB, op *Operation) { for i := range o.Ops { op := &o.Ops[i] op.Result().Reset() // in case we're a retry - applyClientOp(ctx, txn, op, true /* inTxn */) + applyClientOp(ctx, txn, op, true) // The KV api disallows use of a txn after an operation on it errors. if r := op.Result(); r.Type == ResultType_Error { return errors.DecodeError(ctx, *r.Err) @@ -128,7 +129,7 @@ func applyOp(ctx context.Context, env *Env, db *kv.DB, op *Operation) { } if o.CommitInBatch != nil { b := txn.NewBatch() - applyBatchOp(ctx, b, txn.CommitInBatch, o.CommitInBatch, true) + applyBatchOp(ctx, b, txn.CommitInBatch, o.CommitInBatch) // The KV api disallows use of a txn after an operation on it errors. 
if r := o.CommitInBatch.Result; r.Type == ResultType_Error { return errors.DecodeError(ctx, *r.Err) @@ -221,7 +222,8 @@ func applyClientOp(ctx context.Context, db clientI, op *Operation, inTxn bool) { } case *PutOperation: _, ts, err := dbRunWithResultAndTimestamp(ctx, db, func(b *kv.Batch) { - b.Put(o.Key, o.Value) + b.Put(o.Key, o.Value()) + setLastReqSeq(b, o.Seq) }) o.Result = resultInit(ctx, err) if err != nil { @@ -250,13 +252,14 @@ func applyClientOp(ctx context.Context, db clientI, op *Operation, inTxn bool) { o.Result.Values = make([]KeyValue, len(kvs)) for i, kv := range kvs { o.Result.Values[i] = KeyValue{ - Key: []byte(kv.Key), + Key: kv.Key, Value: kv.Value.RawBytes, } } case *DeleteOperation: res, ts, err := dbRunWithResultAndTimestamp(ctx, db, func(b *kv.Batch) { b.Del(o.Key) + setLastReqSeq(b, o.Seq) }) o.Result = resultInit(ctx, err) if err != nil { @@ -270,11 +273,9 @@ func applyClientOp(ctx context.Context, db clientI, op *Operation, inTxn bool) { o.Result.Keys[i] = deletedKey } case *DeleteRangeOperation: - if !inTxn { - panic(errors.AssertionFailedf(`non-transactional DelRange operations currently unsupported`)) - } res, ts, err := dbRunWithResultAndTimestamp(ctx, db, func(b *kv.Batch) { b.DelRange(o.Key, o.EndKey, true /* returnKeys */) + setLastReqSeq(b, o.Seq) }) o.Result = resultInit(ctx, err) if err != nil { @@ -287,20 +288,34 @@ func applyClientOp(ctx context.Context, db clientI, op *Operation, inTxn bool) { for i, deletedKey := range deletedKeys { o.Result.Keys[i] = deletedKey } + case *DeleteRangeUsingTombstoneOperation: + _, ts, err := dbRunWithResultAndTimestamp(ctx, db, func(b *kv.Batch) { + b.DelRangeUsingTombstone(o.Key, o.EndKey) + setLastReqSeq(b, o.Seq) + }) + o.Result = resultInit(ctx, err) + if err != nil { + return + } + o.Result.OptionalTimestamp = ts case *BatchOperation: b := &kv.Batch{} - applyBatchOp(ctx, b, db.Run, o, inTxn) + applyBatchOp(ctx, b, db.Run, o) default: panic(errors.AssertionFailedf(`unknown batch 
operation type: %T %v`, o, o)) } } +func setLastReqSeq(b *kv.Batch, seq kvnemesisutil.Seq) { + sl := b.Requests() + req := sl[len(sl)-1].GetInner() + h := req.Header() + h.KVNemesisSeq.Set(int64(seq)) + req.SetHeader(h) +} + func applyBatchOp( - ctx context.Context, - b *kv.Batch, - run func(context.Context, *kv.Batch) error, - o *BatchOperation, - inTxn bool, + ctx context.Context, b *kv.Batch, run func(context.Context, *kv.Batch) error, o *BatchOperation, ) { for i := range o.Ops { switch subO := o.Ops[i].GetValue().(type) { @@ -311,7 +326,8 @@ func applyBatchOp( b.Get(subO.Key) } case *PutOperation: - b.Put(subO.Key, subO.Value) + b.Put(subO.Key, subO.Value()) + setLastReqSeq(b, subO.Seq) case *ScanOperation: if subO.Reverse && subO.ForUpdate { b.ReverseScanForUpdate(subO.Key, subO.EndKey) @@ -324,11 +340,13 @@ func applyBatchOp( } case *DeleteOperation: b.Del(subO.Key) + setLastReqSeq(b, subO.Seq) case *DeleteRangeOperation: - if !inTxn { - panic(errors.AssertionFailedf(`non-transactional batch DelRange operations currently unsupported`)) - } b.DelRange(subO.Key, subO.EndKey, true /* returnKeys */) + setLastReqSeq(b, subO.Seq) + case *DeleteRangeUsingTombstoneOperation: + b.DelRangeUsingTombstone(subO.Key, subO.EndKey) + setLastReqSeq(b, subO.Seq) default: panic(errors.AssertionFailedf(`unknown batch operation type: %T %v`, subO, subO)) } @@ -384,6 +402,8 @@ func applyBatchOp( subO.Result.Keys[j] = key } } + case *DeleteRangeUsingTombstoneOperation: + subO.Result = resultInit(ctx, err) default: panic(errors.AssertionFailedf(`unknown batch operation type: %T %v`, subO, subO)) } diff --git a/pkg/kv/kvnemesis/applier_test.go b/pkg/kv/kvnemesis/applier_test.go index 6b68eb12ff10..227d0e2fe413 100644 --- a/pkg/kv/kvnemesis/applier_test.go +++ b/pkg/kv/kvnemesis/applier_test.go @@ -13,17 +13,17 @@ package kvnemesis import ( "context" gosql "database/sql" - "fmt" "regexp" "strings" "testing" "github.com/cockroachdb/cockroach/pkg/base" 
"github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/echotest" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" ) @@ -37,20 +37,28 @@ func TestApplier(t *testing.T) { defer tc.Stopper().Stop(ctx) db := tc.Server(0).DB() sqlDB := tc.ServerConn(0) - env := &Env{sqlDBs: []*gosql.DB{sqlDB}} + env := &Env{SQLDBs: []*gosql.DB{sqlDB}} + + type testCase struct { + name string + a string // actual output + } a := MakeApplier(env, db, db) - check := func(t *testing.T, s Step, expected string) { + + var tests []testCase + addPass := func(t *testing.T, name string, s Step) { t.Helper() _ /* trace */, err := a.Apply(ctx, &s) require.NoError(t, err) actual := s.String() - // Trim out the txn stuff. It has things like timestamps in it that are not - // stable from run to run. - actual = regexp.MustCompile(` // nil txnpb:\(.*\)`).ReplaceAllString(actual, ` // nil txnpb:(...)`) - assert.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(actual)) + // Trim out the txn to avoid nondeterminism. + actual = regexp.MustCompile(` txnpb:\(.*\)`).ReplaceAllLiteralString(actual, ` txnpb:`) + // Replace timestamps. 
+ actual = regexp.MustCompile(`[0-9]+\.[0-9]+,[0-9]+`).ReplaceAllLiteralString(actual, ``) + tests = append(tests, testCase{name: name, a: actual}) } - checkErr := func(t *testing.T, s Step, expected string) { + addErr := func(t *testing.T, name string, s Step) { t.Helper() cancelledCtx, cancel := context.WithCancel(context.Background()) cancel() @@ -61,151 +69,75 @@ func TestApplier(t *testing.T) { // The wrapped string around the context canceled error depends on where // the context cancellation was noticed. actual = regexp.MustCompile(` aborted .*: context canceled`).ReplaceAllString(actual, ` context canceled`) - assert.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(actual)) - } - - checkPanics := func(t *testing.T, s Step, expectedPanic string) { - t.Helper() - _ /* trace */, err := a.Apply(ctx, &s) - require.EqualError(t, err, fmt.Sprintf("panic applying step %s: %v", s, expectedPanic)) + tests = append(tests, testCase{name: name, a: actual}) } // Basic operations - check(t, step(get(`a`)), `db0.Get(ctx, "a") // (nil, nil)`) - check(t, step(scan(`a`, `c`)), `db1.Scan(ctx, "a", "c", 0) // ([], nil)`) - - check(t, step(put(`a`, `1`)), `db0.Put(ctx, "a", 1) // nil`) - check(t, step(getForUpdate(`a`)), `db1.GetForUpdate(ctx, "a") // ("1", nil)`) - check(t, step(scanForUpdate(`a`, `c`)), `db0.ScanForUpdate(ctx, "a", "c", 0) // (["a":"1"], nil)`) - - check(t, step(put(`b`, `2`)), `db1.Put(ctx, "b", 2) // nil`) - check(t, step(get(`b`)), `db0.Get(ctx, "b") // ("2", nil)`) - check(t, step(scan(`a`, `c`)), `db1.Scan(ctx, "a", "c", 0) // (["a":"1", "b":"2"], nil)`) + addPass(t, "get", step(get(`a`))) + addPass(t, "scan", step(scan(`a`, `c`))) - check(t, step(reverseScan(`a`, `c`)), `db0.ReverseScan(ctx, "a", "c", 0) // (["b":"2", "a":"1"], nil)`) - check(t, step(reverseScanForUpdate(`a`, `b`)), `db1.ReverseScanForUpdate(ctx, "a", "b", 0) // (["a":"1"], nil)`) + addPass(t, "put", step(put(`a`, 1))) + addPass(t, "get-for-update", step(getForUpdate(`a`))) + 
addPass(t, "scan-for-update", step(scanForUpdate(`a`, `c`))) - check(t, step(del(`b`)), `db0.Del(ctx, "b")`) - check(t, step(get(`b`)), `db1.Get(ctx, "b") // (nil, nil)`) + addPass(t, `batch`, step(batch(put(`a`, 21), delRange(`b`, `c`, 22)))) - check(t, step(put(`c`, `3`)), `db0.Put(ctx, "c", 3) // nil`) - check(t, step(put(`d`, `4`)), `db1.Put(ctx, "d", 4) // nil`) + addPass(t, "rscan", step(reverseScan(`a`, `c`))) + addPass(t, "rscan-for-update", step(reverseScanForUpdate(`a`, `b`))) - check(t, step(del(`c`)), `db0.Del(ctx, "c")`) - check(t, step(scan(`a`, `e`)), `db1.Scan(ctx, "a", "e", 0) // (["a":"1", "d":"4"], nil)`) + addPass(t, "del", step(del(`b`, 1))) + addPass(t, "delrange", step(delRange(`a`, `c`, 6))) - check(t, step(put(`c`, `5`)), `db0.Put(ctx, "c", 5) // nil`) - check(t, step(closureTxn(ClosureTxnType_Commit, delRange(`b`, `d`))), ` -db1.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - txn.DelRange(ctx, "b", "d", true) // (["c"], nil) - return nil -}) // nil txnpb:(...) 
- `) + addPass(t, "txn-delrange", step(closureTxn(ClosureTxnType_Commit, delRange(`b`, `d`, 1)))) - checkErr(t, step(get(`a`)), `db0.Get(ctx, "a") // (nil, context canceled)`) - checkErr(t, step(put(`a`, `1`)), `db1.Put(ctx, "a", 1) // context canceled`) + addErr(t, "get-err", step(get(`a`))) + addErr(t, "put-err", step(put(`a`, 1))) - checkErr(t, step(scanForUpdate(`a`, `c`)), `db0.ScanForUpdate(ctx, "a", "c", 0) // (nil, context canceled)`) - checkErr(t, step(reverseScan(`a`, `c`)), `db1.ReverseScan(ctx, "a", "c", 0) // (nil, context canceled)`) + addErr(t, "scan-for-update-err", step(scanForUpdate(`a`, `c`))) + addErr(t, "rscan-err", step(reverseScan(`a`, `c`))) + addErr(t, "rscan-for-update-err", step(reverseScanForUpdate(`a`, `c`))) + addErr(t, "del-err", step(del(`b`, 1))) + addErr(t, "delrange-err", step(delRange(`b`, `c`, 12))) - checkErr(t, step(reverseScanForUpdate(`a`, `c`)), `db0.ReverseScanForUpdate(ctx, "a", "c", 0) // (nil, context canceled)`) - checkErr(t, step(del(`b`)), `db1.Del(ctx, "b") // context canceled`) - - checkErr(t, step(closureTxn(ClosureTxnType_Commit, delRange(`b`, `d`))), ` -db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - txn.DelRange(ctx, "b", "d", true) - return nil -}) // context canceled - `) - - checkPanics(t, step(delRange(`b`, `d`)), `non-transactional DelRange operations currently unsupported`) - checkPanics(t, step(batch(delRange(`b`, `d`))), `non-transactional batch DelRange operations currently unsupported`) + addErr(t, `txn-err`, step(closureTxn(ClosureTxnType_Commit, delRange(`b`, `d`, 1)))) // Batch - check(t, step(batch(put(`b`, `2`), get(`a`), del(`b`), del(`c`), scan(`a`, `c`), reverseScanForUpdate(`a`, `e`))), ` -{ - b := &Batch{} - b.Put(ctx, "b", 2) // nil - b.Get(ctx, "a") // ("1", nil) - b.Del(ctx, "b") // nil - b.Del(ctx, "c") // nil - b.Scan(ctx, "a", "c") // (["a":"1"], nil) - b.ReverseScanForUpdate(ctx, "a", "e") // (["d":"4", "a":"1"], nil) - db1.Run(ctx, b) // nil -} -`) - checkErr(t, 
step(batch(put(`b`, `2`), getForUpdate(`a`), scanForUpdate(`a`, `c`), reverseScan(`a`, `c`))), ` -{ - b := &Batch{} - b.Put(ctx, "b", 2) // context canceled - b.GetForUpdate(ctx, "a") // (nil, context canceled) - b.ScanForUpdate(ctx, "a", "c") // (nil, context canceled) - b.ReverseScan(ctx, "a", "c") // (nil, context canceled) - db0.Run(ctx, b) // context canceled -} -`) + addPass(t, `batch-mixed`, step(batch(put(`b`, 2), get(`a`), del(`b`, 1), del(`c`, 1), scan(`a`, `c`), reverseScanForUpdate(`a`, `e`)))) + addErr(t, `batch-mixed-err`, step(batch(put(`b`, 2), getForUpdate(`a`), scanForUpdate(`a`, `c`), reverseScan(`a`, `c`)))) // Txn commit - check(t, step(closureTxn(ClosureTxnType_Commit, put(`e`, `5`), batch(put(`f`, `6`), delRange(`c`, `e`)))), ` -db1.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - txn.Put(ctx, "e", 5) // nil - { - b := &Batch{} - b.Put(ctx, "f", 6) // nil - b.DelRange(ctx, "c", "e", true) // (["d"], nil) - txn.Run(ctx, b) // nil - } - return nil -}) // nil txnpb:(...) - `) - + addPass(t, `txn-commit-mixed`, step(closureTxn(ClosureTxnType_Commit, put(`e`, 5), batch(put(`f`, 6), delRange(`c`, `e`, 1))))) // Txn commit in batch - check(t, step(closureTxnCommitInBatch(opSlice(get(`a`), put(`f`, `6`)), put(`e`, `5`))), ` -db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - txn.Put(ctx, "e", 5) // nil - b := &Batch{} - b.Get(ctx, "a") // ("1", nil) - b.Put(ctx, "f", 6) // nil - txn.CommitInBatch(ctx, b) // nil - return nil -}) // nil txnpb:(...) 
- `) + addPass(t, `txn-commit-batch`, step(closureTxnCommitInBatch(opSlice(get(`a`), put(`f`, 6)), put(`e`, 5)))) // Txn rollback - check(t, step(closureTxn(ClosureTxnType_Rollback, put(`e`, `5`))), ` -db1.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - txn.Put(ctx, "e", 5) // nil - return errors.New("rollback") -}) // rollback - `) + addPass(t, `txn-rollback`, step(closureTxn(ClosureTxnType_Rollback, put(`e`, 5)))) // Txn error - checkErr(t, step(closureTxn(ClosureTxnType_Rollback, put(`e`, `5`))), ` -db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - txn.Put(ctx, "e", 5) - return errors.New("rollback") -}) // context canceled - `) + addErr(t, `txn-error`, step(closureTxn(ClosureTxnType_Rollback, put(`e`, 5)))) // Splits and merges - check(t, step(split(`foo`)), `db1.AdminSplit(ctx, "foo") // nil`) - check(t, step(merge(`foo`)), `db0.AdminMerge(ctx, "foo") // nil`) - checkErr(t, step(split(`foo`)), - `db1.AdminSplit(ctx, "foo") // context canceled`) - checkErr(t, step(merge(`foo`)), - `db0.AdminMerge(ctx, "foo") // context canceled`) + addPass(t, `split`, step(split(`foo`))) + addPass(t, `merge`, step(merge(`foo`))) + addErr(t, `split-again`, step(split(`foo`))) + addErr(t, `merge-again`, step(merge(`foo`))) // Lease transfers - check(t, step(transferLease(`foo`, 1)), - `db1.TransferLeaseOperation(ctx, "foo", 1) // nil`) - checkErr(t, step(transferLease(`foo`, 1)), - `db0.TransferLeaseOperation(ctx, "foo", 1) // context canceled`) + addPass(t, `transfer`, step(transferLease(`foo`, 1))) + addErr(t, `transfer-again`, step(transferLease(`foo`, 1))) // Zone config changes - check(t, step(changeZone(ChangeZoneType_ToggleGlobalReads)), - `env.UpdateZoneConfig(ctx, ToggleGlobalReads) // nil`) - checkErr(t, step(changeZone(ChangeZoneType_ToggleGlobalReads)), - `env.UpdateZoneConfig(ctx, ToggleGlobalReads) // context canceled`) + addPass(t, `zcfg`, step(changeZone(ChangeZoneType_ToggleGlobalReads))) + addErr(t, `zcfg-again`, 
step(changeZone(ChangeZoneType_ToggleGlobalReads))) + + w := echotest.Walk(t, testutils.TestDataPath(t, t.Name())) + defer w.Check(t) + for _, test := range tests { + t.Run(test.name, w.Do(t, test.name, func(t *testing.T, path string) { + echotest.Require(t, strings.TrimLeft(test.a, "\n"), path) + t.Log(test.a) + })) + } } func TestUpdateZoneConfig(t *testing.T) { diff --git a/pkg/kv/kvnemesis/doc.go b/pkg/kv/kvnemesis/doc.go index 6ed0e4fdbc42..8740a5fe1491 100644 --- a/pkg/kv/kvnemesis/doc.go +++ b/pkg/kv/kvnemesis/doc.go @@ -8,34 +8,42 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -// Package kvnemesis exercises the KV api with random traffic and then validates -// that the observed behaviors are consistent with our guarantees. +// Package kvnemesis exercises the KV API with random concurrent traffic (as +// well as splits, merges, etc) and then validates that the observed behaviors +// are serializable. // -// A set of Operations are generated which represent usage of the public KV api. -// These include both "workload" operations like Gets and Puts as well as -// "admin" operations like rebalances. These Operations can be handed to an -// Applier, which runs them against the KV api and records the results. +// It does so in polynomial time based on the techniques used by [Elle] (see in +// particular section 4.2.3), using the after-the-fact MVCC history as a record +// of truth. It ensures that all write operations embed a unique identifier that +// is stored in MVCC history, and can thus identify which of its operations' +// mutations are reflected in the database ("recoverability" in Elle parlance). // -// Operations do allow for concurrency (this testing is much less interesting -// otherwise), which means that the state of the KV map is not recoverable from -// _only_ the input. TODO(dan): We can use RangeFeed to recover the exact KV -// history. 
This plus some Kyle magic can be used to check our transactional -// guarantees. +// A run of kvnemesis proceeds as follows. // -// TODO -// - CPut/InitPut/Increment -// - ClearRange/RevertRange -// - AdminRelocateRange -// - AdminUnsplit -// - AdminScatter -// - CheckConsistency -// - ExportRequest -// - AddSSTable -// - Root and leaf transactions -// - GCRequest -// - Protected timestamps -// - Transactions being abandoned by their coordinator -// - Continuing txns after CPut and WriteIntent errors (generally continuing -// after errors is not allowed, but it is allowed after ConditionFailedError and -// WriteIntentError as a special case) +// First, the generator creates a random slice of operations, each with a unique +// sequence number. These are distributed across a number of concurrent worker +// threads and are executed against the database. Some of these operations may +// succeed, some may fail, and for some of them an ambiguous result may be +// encountered. A rangefeed consumes the entire MVCC history. +// +// Second, the activity thus generated is validated. An unambiguously failed +// operation is verified for not having left any writes in the MVCC history[^1]. +// For an ambiguously failed operation, we check whether any writes materialized +// and if so, treat it as committed, otherwise as failed. A committed operation +// is translated into an atomic sequence of read (`observedRead` and +// `observedScan`) and write (`observedWrite`) operations (based on its +// result[^2]). Each write operation checks whether a write with a matching +// sequence number is present on the affected key and if so, marks itself as +// "materialized", i.e. fills in the timestamp field from the MVCC write. For +// read/scan operation, we compute the range of timestamps at which the +// read/write was valid[^3]. Within an atomic unit, the timestamps thus obtained +// must be compatible with each other if the history is serializable. 
Also, all +// MVCC writes must be reflected by exactly one observedWrite. +// +// [Elle]: https://arxiv.org/pdf/2003.10554.pdf +// [^1]: this happens indirectly: at the end of validation, any unclaimed writes +// fail validation. +// [^2]: the absence of a result can cause issues, for instance for a DeleteRange that +// was batched with a transaction commit, we don't know which keys were touched. +// [^3]: txns always read their own write, so this requires more work than may be obvious. package kvnemesis diff --git a/pkg/kv/kvnemesis/engine.go b/pkg/kv/kvnemesis/engine.go index 257acba4b4e3..aa1ad4b45e83 100644 --- a/pkg/kv/kvnemesis/engine.go +++ b/pkg/kv/kvnemesis/engine.go @@ -34,6 +34,7 @@ type Engine struct { // MakeEngine returns a new Engine. func MakeEngine() (*Engine, error) { opts := storage.DefaultPebbleOptions() + opts.FormatMajorVersion = 8 // for range key deletions opts.FS = vfs.NewMem() kvs, err := pebble.Open(`kvnemesis`, opts) if err != nil { @@ -52,12 +53,32 @@ func (e *Engine) Close() { // Get returns the value for this key with the highest timestamp <= ts. If no // such value exists, the returned value's RawBytes is nil. func (e *Engine) Get(key roachpb.Key, ts hlc.Timestamp) roachpb.Value { - iter := e.kvs.NewIter(nil) + opts := pebble.IterOptions{ + KeyTypes: pebble.IterKeyTypePointsAndRanges, + // Make MVCC range deletions actually appear to delete points in + // this low-level iterator, so we don't have to implement it manually + // a second time. + RangeKeyMasking: pebble.RangeKeyMasking{ + Suffix: storage.EncodeMVCCTimestampSuffix(ts), + }, + } + iter := e.kvs.NewIter(&opts) defer func() { _ = iter.Close() }() iter.SeekGE(storage.EncodeMVCCKey(storage.MVCCKey{Key: key, Timestamp: ts})) + for iter.Valid() { + hasPoint, _ := iter.HasPointAndRange() + if !hasPoint { + iter.Next() + } else { + break + } + } if !iter.Valid() { return roachpb.Value{} } + + // We're on the first point the iter is seeing. 
+ // This use of iter.Key() is safe because it comes entirely before the // deferred iter.Close. mvccKey, err := storage.DecodeMVCCKey(iter.Key()) @@ -89,33 +110,50 @@ func (e *Engine) Put(key storage.MVCCKey, value []byte) { } } -// Delete writes a tombstone value for a given key/timestamp. This is -// equivalent to a Put with an empty value. -func (e *Engine) Delete(key storage.MVCCKey) { - if err := e.kvs.Set(storage.EncodeMVCCKey(key), nil, nil); err != nil { +func (e *Engine) DeleteRange(from, to roachpb.Key, ts hlc.Timestamp, val []byte) { + suffix := storage.EncodeMVCCTimestampSuffix(ts) + if err := e.kvs.RangeKeySet(from, to, suffix, val, nil); err != nil { panic(err) } } // Iterate calls the given closure with every KV in the Engine, in ascending // order. -func (e *Engine) Iterate(fn func(key storage.MVCCKey, value []byte, err error)) { - iter := e.kvs.NewIter(nil) +func (e *Engine) Iterate( + fn func(key, endKey roachpb.Key, ts hlc.Timestamp, value []byte, err error), +) { + iter := e.kvs.NewIter(&pebble.IterOptions{KeyTypes: storage.IterKeyTypePointsAndRanges}) defer func() { _ = iter.Close() }() for iter.First(); iter.Valid(); iter.Next() { - if err := iter.Error(); err != nil { - fn(storage.MVCCKey{}, nil, err) - continue - } + hasPoint, _ := iter.HasPointAndRange() var keyCopy, valCopy []byte e.b, keyCopy = e.b.Copy(iter.Key(), 0 /* extraCap */) e.b, valCopy = e.b.Copy(iter.Value(), 0 /* extraCap */) - key, err := storage.DecodeMVCCKey(keyCopy) - if err != nil { - fn(storage.MVCCKey{}, nil, err) - continue + if hasPoint { + key, err := storage.DecodeMVCCKey(keyCopy) + if err != nil { + fn(nil, nil, hlc.Timestamp{}, nil, err) + } else { + fn(key.Key, nil, key.Timestamp, valCopy, nil) + } + } + if iter.RangeKeyChanged() { + key, endKey := iter.RangeBounds() + e.b, key = e.b.Copy(key, 0 /* extraCap */) + e.b, endKey = e.b.Copy(endKey, 0 /* extraCap */) + for _, rk := range iter.RangeKeys() { + ts, err := storage.DecodeMVCCTimestampSuffix(rk.Suffix) + if 
err != nil { + fn(nil, nil, hlc.Timestamp{}, nil, err) + continue + } + fn(key, endKey, ts, rk.Value, nil) + } } - fn(key, valCopy, nil) + } + + if err := iter.Error(); err != nil { + fn(nil, nil, hlc.Timestamp{}, nil, err) } } @@ -123,7 +161,7 @@ func (e *Engine) Iterate(fn func(key storage.MVCCKey, value []byte, err error)) // debugging. func (e *Engine) DebugPrint(indent string) string { var buf strings.Builder - e.Iterate(func(key storage.MVCCKey, value []byte, err error) { + e.Iterate(func(key, endKey roachpb.Key, ts hlc.Timestamp, value []byte, err error) { if buf.Len() > 0 { buf.WriteString("\n") } @@ -133,9 +171,14 @@ func (e *Engine) DebugPrint(indent string) string { v, err := storage.DecodeMVCCValue(value) if err != nil { fmt.Fprintf(&buf, "(err:%s)", err) - } else { + return + } + if len(endKey) == 0 { fmt.Fprintf(&buf, "%s%s %s -> %s", - indent, key.Key, key.Timestamp, v.Value.PrettyPrint()) + indent, key, ts, v.Value.PrettyPrint()) + } else { + fmt.Fprintf(&buf, "%s%s-%s %s -> %s", + indent, key, endKey, ts, v.Value.PrettyPrint()) } } }) diff --git a/pkg/kv/kvnemesis/engine_test.go b/pkg/kv/kvnemesis/engine_test.go index 8b73b089e1b1..44fb2c383b76 100644 --- a/pkg/kv/kvnemesis/engine_test.go +++ b/pkg/kv/kvnemesis/engine_test.go @@ -11,11 +11,12 @@ package kvnemesis import ( - "strings" "testing" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/echotest" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -47,11 +48,12 @@ func TestEngine(t *testing.T) { e.Put(k(`a`, ts(1)), roachpb.MakeValueFromString(`a-1`).RawBytes) e.Put(k(`a`, ts(2)), roachpb.MakeValueFromString(`a-2`).RawBytes) e.Put(k(`b`, ts(2)), roachpb.MakeValueFromString(`b-2`).RawBytes) - 
e.Delete(k(`b`, ts(3))) - e.Delete(k(`c`, ts(4))) + e.Put(k(`b`, ts(3)), nil) + e.Put(k(`c`, ts(4)), nil) e.Put(k(`d`, ts(4)), roachpb.MakeValueFromString(`d-4`).RawBytes) e.Put(k(`e`, ts(4)), roachpb.MakeValueFromString(`e-4`).RawBytes) - e.Delete(k(`d`, ts(5))) + e.Put(k(`d`, ts(5)), nil) + e.DeleteRange(roachpb.Key("f"), roachpb.Key("g"), ts(7), nil) assert.Equal(t, v(`a-2`, ts(2)), e.Get(roachpb.Key(`a`), ts(3))) assert.Equal(t, v(`a-2`, ts(2)), e.Get(roachpb.Key(`a`), ts(2))) assert.Equal(t, v(`a-1`, ts(1)), e.Get(roachpb.Key(`a`), ts(2).Prev())) @@ -65,14 +67,6 @@ func TestEngine(t *testing.T) { assert.Equal(t, missing, e.Get(roachpb.Key(`d`), ts(5))) assert.Equal(t, v(`e-4`, ts(4)), e.Get(roachpb.Key(`e`), ts(5))) - assert.Equal(t, strings.TrimSpace(` -"a" 0.000000002,0 -> /BYTES/a-2 -"a" 0.000000001,0 -> /BYTES/a-1 -"b" 0.000000003,0 -> / -"b" 0.000000002,0 -> /BYTES/b-2 -"c" 0.000000004,0 -> / -"d" 0.000000005,0 -> / -"d" 0.000000004,0 -> /BYTES/d-4 -"e" 0.000000004,0 -> /BYTES/e-4 - `), e.DebugPrint("")) + _, _ = k, v + echotest.Require(t, e.DebugPrint(""), testutils.TestDataPath(t, t.Name(), "output.txt")) } diff --git a/pkg/kv/kvnemesis/env.go b/pkg/kv/kvnemesis/env.go index c87e65c48d4b..882e5c349a9e 100644 --- a/pkg/kv/kvnemesis/env.go +++ b/pkg/kv/kvnemesis/env.go @@ -23,16 +23,23 @@ import ( "github.com/cockroachdb/errors" ) +// Logger is the log sink used by kvnemesis. +type Logger interface { + Logf(string, ...interface{}) +} + // Env manipulates the environment (cluster settings, zone configurations) that // the Applier operates in. type Env struct { - sqlDBs []*gosql.DB + SQLDBs []*gosql.DB + Tracker *SeqTracker + L Logger } func (e *Env) anyNode() *gosql.DB { // NOTE: There is currently no need to round-robin through the sql gateways, // so we always just return the first DB. 
- return e.sqlDBs[0] + return e.SQLDBs[0] } // CheckConsistency runs a consistency check on all ranges in the given span, diff --git a/pkg/kv/kvnemesis/generator.go b/pkg/kv/kvnemesis/generator.go index 2289a37e1e72..95cb24bdf0eb 100644 --- a/pkg/kv/kvnemesis/generator.go +++ b/pkg/kv/kvnemesis/generator.go @@ -11,15 +11,17 @@ package kvnemesis import ( + "encoding/binary" + "encoding/hex" + "fmt" "math/rand" - "strconv" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/syncutil" - "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" ) @@ -104,6 +106,8 @@ type ClientOperationConfig struct { DeleteExisting int // DeleteRange is an operation that Deletes a key range that may contain values. DeleteRange int + // DeleteRange is an operation that invokes DeleteRangeUsingTombstone. + DeleteRangeUsingTombstone int } // BatchOperationConfig configures the relative probability of generating a @@ -170,19 +174,20 @@ type ChangeZoneConfig struct { // yet pass (for example, if the new operation finds a kv bug or edge case). 
func newAllOperationsConfig() GeneratorConfig { clientOpConfig := ClientOperationConfig{ - GetMissing: 1, - GetMissingForUpdate: 1, - GetExisting: 1, - GetExistingForUpdate: 1, - PutMissing: 1, - PutExisting: 1, - Scan: 1, - ScanForUpdate: 1, - ReverseScan: 1, - ReverseScanForUpdate: 1, - DeleteMissing: 1, - DeleteExisting: 1, - DeleteRange: 1, + GetMissing: 1, + GetMissingForUpdate: 1, + GetExisting: 1, + GetExistingForUpdate: 1, + PutMissing: 1, + PutExisting: 1, + Scan: 1, + ScanForUpdate: 1, + ReverseScan: 1, + ReverseScanForUpdate: 1, + DeleteMissing: 1, + DeleteExisting: 1, + DeleteRange: 1, + DeleteRangeUsingTombstone: 1, } batchOpConfig := BatchOperationConfig{ Batch: 4, @@ -227,9 +232,18 @@ func newAllOperationsConfig() GeneratorConfig { // operations/make some operations more likely. func NewDefaultConfig() GeneratorConfig { config := newAllOperationsConfig() - // TODO(sarkesian): Enable non-transactional DelRange once #69642 is fixed. - config.Ops.DB.DeleteRange = 0 - config.Ops.Batch.Ops.DeleteRange = 0 + // DeleteRangeUsingTombstone does not support transactions. + config.Ops.ClosureTxn.TxnClientOps.DeleteRangeUsingTombstone = 0 + config.Ops.ClosureTxn.TxnBatchOps.Ops.DeleteRangeUsingTombstone = 0 + config.Ops.ClosureTxn.CommitBatchOps.DeleteRangeUsingTombstone = 0 + // DeleteRangeUsingTombstone does in principle support batches, but + // in kvnemesis we don't let it span ranges non-atomically (as it + // is allowed to do in CRDB). So if we allow it in batches, it will + // likely doom most batches to fail, i.e. we're better off not adding + // it in the first place. We don't mix DeleteRangeUsingTombstone in + // CRDB, though we may use multiple in a single batch, so adding + // coverage for that would be useful. + config.Ops.Batch.Ops.DeleteRangeUsingTombstone = 0 // TODO(sarkesian): Enable DeleteRange in comingled batches once #71236 is fixed. 
config.Ops.ClosureTxn.CommitBatchOps.DeleteRange = 0 config.Ops.ClosureTxn.TxnBatchOps.Ops.DeleteRange = 0 @@ -241,10 +255,13 @@ func NewDefaultConfig() GeneratorConfig { // (see CrossRangeTxnWrapperSender) if they are. roachpb.SpanGroup can be used // to efficiently check this. // - // TODO(dan): Make this `config.Ops.Batch.Ops.PutExisting = 0` once #46081 is - // fixed. + // TODO(during review): Make this `config.Ops.Batch.Ops.PutExisting = 0` (and + // DeleteRange, etc, all ops that can overwrite existing keys basically), as + // #46081 has long been fixed. Then file an issue about generating + // non-self-overlapping operations for batches. config.Ops.Batch = BatchOperationConfig{} - // TODO(dan): Remove when #45586 is addressed. + // TODO(during review): Should be able to remove the two lines below, since + // #45586 has already been addressed. config.Ops.ClosureTxn.CommitBatchOps.GetExisting = 0 config.Ops.ClosureTxn.CommitBatchOps.GetMissing = 0 return config @@ -331,7 +348,7 @@ type generator struct { Config GeneratorConfig replicasFn GetReplicasFn - nextValue int + seqGen kvnemesisutil.Seq // keys is the set of every key that has been written to, including those // deleted or in rolled back transactions. 
@@ -390,6 +407,11 @@ func (g *generator) RandStep(rng *rand.Rand) Step { return step(g.selectOp(rng, allowed)) } +func (g *generator) nextSeq() kvnemesisutil.Seq { + g.seqGen++ + return g.seqGen +} + type opGenFunc func(*generator, *rand.Rand) Operation type opGen struct { @@ -433,6 +455,7 @@ func (g *generator) registerClientOps(allowed *[]opGen, c *ClientOperationConfig addOpGen(allowed, randReverseScan, c.ReverseScan) addOpGen(allowed, randReverseScanForUpdate, c.ReverseScanForUpdate) addOpGen(allowed, randDelRange, c.DeleteRange) + addOpGen(allowed, randDelRangeUsingTombstone, c.DeleteRangeUsingTombstone) } func (g *generator) registerBatchOps(allowed *[]opGen, c *BatchOperationConfig) { @@ -462,16 +485,16 @@ func randGetExistingForUpdate(g *generator, rng *rand.Rand) Operation { } func randPutMissing(g *generator, rng *rand.Rand) Operation { - value := g.getNextValue() + seq := g.nextSeq() key := randKey(rng) g.keys[key] = struct{}{} - return put(key, value) + return put(key, seq) } func randPutExisting(g *generator, rng *rand.Rand) Operation { - value := g.getNextValue() + seq := g.nextSeq() key := randMapKey(rng, g.keys) - return put(key, value) + return put(key, seq) } func randScan(g *generator, rng *rand.Rand) Operation { @@ -500,19 +523,78 @@ func randReverseScanForUpdate(g *generator, rng *rand.Rand) Operation { func randDelMissing(g *generator, rng *rand.Rand) Operation { key := randKey(rng) g.keys[key] = struct{}{} - return del(key) + seq := g.nextSeq() + return del(key, seq) } func randDelExisting(g *generator, rng *rand.Rand) Operation { key := randMapKey(rng, g.keys) - return del(key) + seq := g.nextSeq() + return del(key, seq) } func randDelRange(g *generator, rng *rand.Rand) Operation { // We don't write any new keys to `g.keys` on a DeleteRange operation, // because DelRange(..) only deletes existing keys. 
key, endKey := randSpan(rng) - return delRange(key, endKey) + seq := g.nextSeq() + return delRange(key, endKey, seq) +} + +func randDelRangeUsingTombstone(g *generator, rng *rand.Rand) Operation { + // Delete a span. We don't want MVCC rangedels to get split along range + // boundaries in kvnemesis (since that would violate atomicity, but is + // desired behavior in CRDB) so we try our best to not straddle splits. If + // we end up doing it anyway, an interceptor will cause the request to fail + // (but not kvnemesis). + var key, endKey string + n := rng.Intn(3) + switch { + case n == 0 && len(g.historicalSplits) > 0: + // Using historical splits gives us a bit of "everything coverage", including + // crossing range boundaries (which is supposed to error out anyway but still). + key, endKey = randMapKey(rng, g.historicalSplits), randMapKey(rng, g.historicalSplits) + case n == 1 && len(g.currentSplits) > 0: + key, endKey = randMapKey(rng, g.currentSplits), randMapKey(rng, g.currentSplits) + case n == 2 && len(g.keys) > 0: + // Delete a specific key (which may no longer be visible, but at least + // has nontrivial MVCC history). + key = randMapKey(rng, g.keys) + endKey = uint64ToKey(uint64FromKey(key) + 1) + default: + // Delete a random key. + key = randKey(rng) + endKey = uint64ToKey(uint64FromKey(key) + 1) + } + + if endKey < key { + endKey, key = key, endKey + } else if key == endKey { + // This span will always be contained in one range. + endKey = uint64ToKey(uint64FromKey(key) + 1) + } + + // Fudge the boundaries a bit, some of the time. Note + // that if it's a single-point span, this is a no-op. + switch rng.Intn(3) { + case 0: + // Move start key forward. + key = randKeyBetween(rng, key, endKey) + case 1: + // Shorten endKey to something in (key,endKey]. 
+ endKey = randKeyBetween(rng, + uint64ToKey(uint64FromKey(key)+1), + uint64ToKey(uint64FromKey(endKey)-1), + ) + default: // noop + } + + seq := g.nextSeq() + if key >= endKey { + s := fmt.Sprintf("%d %d", uint64FromKey(key), uint64FromKey(endKey)) + panic(s) + } + return delRangeUsingTombstone(key, endKey, seq) } func randSplitNew(g *generator, rng *rand.Rand) Operation { @@ -595,7 +677,6 @@ func makeRandBatch(c *ClientOperationConfig) opGenFunc { return func(g *generator, rng *rand.Rand) Operation { var allowed []opGen g.registerClientOps(&allowed, c) - numOps := rng.Intn(4) ops := make([]Operation, numOps) for i := range ops { @@ -640,19 +721,29 @@ func makeClosureTxn( } } -func (g *generator) getNextValue() string { - value := `v-` + strconv.Itoa(g.nextValue) - g.nextValue++ - return value -} - -func randKey(rng *rand.Rand) string { - u, err := uuid.NewGenWithReader(rng).NewV4() +func uint64FromKey(k string) uint64 { + k = k[len(GeneratorDataSpan().Key):] + _, s, err := encoding.DecodeUnsafeStringAscendingDeepCopy([]byte(k), nil) + if err != nil { + panic(err) + } + sl, err := hex.DecodeString(s) if err != nil { panic(err) } + return binary.BigEndian.Uint64(sl) +} + +func randKey(rng *rand.Rand) string { + return uint64ToKey(rng.Uint64()) +} + +func uint64ToKey(n uint64) string { + var sl [8]byte + binary.BigEndian.PutUint64(sl[:8], n) + s := hex.EncodeToString(sl[:8]) key := GeneratorDataSpan().Key - key = encoding.EncodeStringAscending(key, u.Short()) + key = encoding.EncodeStringAscending(key, s) return string(key) } @@ -667,6 +758,20 @@ func randMapKey(rng *rand.Rand, m map[string]struct{}) string { return keys[rng.Intn(len(keys))] } +// Returns a key that falls into `[k,ek)`. 
+func randKeyBetween(rng *rand.Rand, k, ek string) string { + a, b := uint64FromKey(k), uint64FromKey(ek) + if b <= a { + b = a + 1 // we will return `k` + } + defer func() { + if r := recover(); r != nil { + panic(fmt.Sprintf("a=%d b=%d b-a=%d: %v", a, b, int64(b-a), r)) + } + }() + return uint64ToKey(a + (rng.Uint64() % (b - a))) +} + func randSpan(rng *rand.Rand) (string, string) { key, endKey := randKey(rng), randKey(rng) if endKey < key { @@ -682,7 +787,9 @@ func step(op Operation) Step { } func batch(ops ...Operation) Operation { - return Operation{Batch: &BatchOperation{Ops: ops}} + return Operation{Batch: &BatchOperation{ + Ops: ops, + }} } func opSlice(ops ...Operation) []Operation { @@ -709,8 +816,8 @@ func getForUpdate(key string) Operation { return Operation{Get: &GetOperation{Key: []byte(key), ForUpdate: true}} } -func put(key, value string) Operation { - return Operation{Put: &PutOperation{Key: []byte(key), Value: []byte(value)}} +func put(key string, seq kvnemesisutil.Seq) Operation { + return Operation{Put: &PutOperation{Key: []byte(key), Seq: seq}} } func scan(key, endKey string) Operation { @@ -729,12 +836,19 @@ func reverseScanForUpdate(key, endKey string) Operation { return Operation{Scan: &ScanOperation{Key: []byte(key), EndKey: []byte(endKey), Reverse: true, ForUpdate: true}} } -func del(key string) Operation { - return Operation{Delete: &DeleteOperation{Key: []byte(key)}} +func del(key string, seq kvnemesisutil.Seq) Operation { + return Operation{Delete: &DeleteOperation{ + Key: []byte(key), + Seq: seq, + }} +} + +func delRange(key, endKey string, seq kvnemesisutil.Seq) Operation { + return Operation{DeleteRange: &DeleteRangeOperation{Key: []byte(key), EndKey: []byte(endKey), Seq: seq}} } -func delRange(key, endKey string) Operation { - return Operation{DeleteRange: &DeleteRangeOperation{Key: []byte(key), EndKey: []byte(endKey)}} +func delRangeUsingTombstone(key, endKey string, seq kvnemesisutil.Seq) Operation { + return 
Operation{DeleteRangeUsingTombstone: &DeleteRangeUsingTombstoneOperation{Key: []byte(key), EndKey: []byte(endKey), Seq: seq}} } func split(key string) Operation { diff --git a/pkg/kv/kvnemesis/generator_test.go b/pkg/kv/kvnemesis/generator_test.go index de66d2899f50..909807f58b55 100644 --- a/pkg/kv/kvnemesis/generator_test.go +++ b/pkg/kv/kvnemesis/generator_test.go @@ -12,6 +12,7 @@ package kvnemesis import ( "context" + "math/rand" "reflect" "testing" @@ -136,9 +137,13 @@ func TestRandStep(t *testing.T) { } case *DeleteRangeOperation: client.DeleteRange++ + case *DeleteRangeUsingTombstoneOperation: + client.DeleteRangeUsingTombstone++ case *BatchOperation: batch.Batch++ countClientOps(&batch.Ops, nil, o.Ops...) + default: + t.Fatalf("%T", o) } } } @@ -152,7 +157,8 @@ func TestRandStep(t *testing.T) { *ScanOperation, *BatchOperation, *DeleteOperation, - *DeleteRangeOperation: + *DeleteRangeOperation, + *DeleteRangeUsingTombstoneOperation: countClientOps(&counts.DB, &counts.Batch, step.Op) case *ClosureTxnOperation: countClientOps(&counts.ClosureTxn.TxnClientOps, &counts.ClosureTxn.TxnBatchOps, o.Ops...) 
@@ -201,6 +207,8 @@ func TestRandStep(t *testing.T) { case ChangeZoneType_ToggleGlobalReads: counts.ChangeZone.ToggleGlobalReads++ } + default: + t.Fatalf("%T", o) } updateKeys(step.Op) @@ -211,3 +219,14 @@ func TestRandStep(t *testing.T) { } } } + +func TestRandKeyDecode(t *testing.T) { + defer leaktest.AfterTest(t)() + + for i := 0; i < 10; i++ { + rng := rand.New(rand.NewSource(int64(i))) + k := randKey(rng) + n := uint64FromKey(k) + require.Equal(t, k, uint64ToKey(n)) + } +} diff --git a/pkg/kv/kvnemesis/kvnemesis.go b/pkg/kv/kvnemesis/kvnemesis.go index 5ad20efdefae..06b2e6a91bab 100644 --- a/pkg/kv/kvnemesis/kvnemesis.go +++ b/pkg/kv/kvnemesis/kvnemesis.go @@ -22,6 +22,20 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" ) +type loggerKey struct{} + +func l(ctx context.Context, debug bool, format string, args ...interface{}) { + if logger := ctx.Value(loggerKey{}); logger != nil && !debug { + if t, ok := logger.(interface { + Helper() + }); ok { + t.Helper() + } + logger.(Logger).Logf(format, args...) + } + log.InfofDepth(ctx, 2, format, args...) +} + // RunNemesis generates and applies a series of Operations to exercise the KV // api. It returns a slice of the logical failures encountered. 
func RunNemesis( @@ -29,10 +43,13 @@ func RunNemesis( rng *rand.Rand, env *Env, config GeneratorConfig, + concurrency int, numSteps int, dbs ...*kv.DB, ) ([]error, error) { - const concurrency = 5 + if env.L != nil { + ctx = context.WithValue(ctx, loggerKey{}, env.L) + } if numSteps <= 0 { return nil, fmt.Errorf("numSteps must be >0, got %v", numSteps) } @@ -68,14 +85,14 @@ func RunNemesis( if err != nil { buf.Reset() step.format(&buf, formatCtx{indent: ` ` + workerName + ` ERR `}) - log.Infof(ctx, "error: %+v\n\n%s", err, buf.String()) + l(ctx, false, "error: %+v\n\n%s", err, buf.String()) return err } buf.Reset() fmt.Fprintf(&buf, "\n before: %s", step.Before) step.format(&buf, formatCtx{indent: ` ` + workerName + ` OP `}) fmt.Fprintf(&buf, "\n after: %s", step.After) - log.Infof(ctx, "%v", buf.String()) + l(ctx, true, "%v", buf.String()) stepsByWorker[workerIdx] = append(stepsByWorker[workerIdx], step) } return nil @@ -96,15 +113,16 @@ func RunNemesis( } kvs := w.Finish() defer kvs.Close() - failures := Validate(allSteps, kvs) + + failures := Validate(allSteps, kvs, env.Tracker) // Run consistency checks across the data span, primarily to check the // accuracy of evaluated MVCC stats. failures = append(failures, env.CheckConsistency(ctx, dataSpan)...) 
if len(failures) > 0 { - log.Infof(ctx, "reproduction steps:\n%s", printRepro(stepsByWorker)) - log.Infof(ctx, "kvs (recorded from rangefeed):\n%s", kvs.DebugPrint(" ")) + l(ctx, false, "reproduction steps:\n%s", printRepro(stepsByWorker)) + l(ctx, false, "kvs (recorded from rangefeed):\n%s", kvs.DebugPrint(" ")) scanKVs, err := dbs[0].Scan(ctx, dataSpan.Key, dataSpan.EndKey, -1) if err != nil { @@ -114,7 +132,7 @@ func RunNemesis( for _, kv := range scanKVs { fmt.Fprintf(&kvsBuf, " %s %s -> %s\n", kv.Key, kv.Value.Timestamp, kv.Value.PrettyPrint()) } - log.Infof(ctx, "kvs (scan of latest values according to crdb):\n%s", kvsBuf.String()) + l(ctx, false, "kvs (scan of latest values according to crdb):\n%s", kvsBuf.String()) } } diff --git a/pkg/kv/kvnemesis/kvnemesis_test.go b/pkg/kv/kvnemesis/kvnemesis_test.go index a23c9498251b..705a8ed0f319 100644 --- a/pkg/kv/kvnemesis/kvnemesis_test.go +++ b/pkg/kv/kvnemesis/kvnemesis_test.go @@ -13,16 +13,23 @@ package kvnemesis import ( "context" gosql "database/sql" + "math/rand" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" + "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/envutil" + "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" 
"github.com/cockroachdb/cockroach/pkg/util/randutil" @@ -35,23 +42,86 @@ func init() { numSteps = envutil.EnvOrDefaultInt("COCKROACH_KVNEMESIS_STEPS", 50) } -func TestKVNemesisSingleNode(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - skip.UnderRace(t) +func testClusterArgs(tr *SeqTracker) base.TestClusterArgs { + storeKnobs := &kvserver.StoreTestingKnobs{ + // Drop the clock MaxOffset to reduce commit-wait time for + // transactions that write to global_read ranges. + MaxOffset: 10 * time.Millisecond, + // Make sure we know the seq for each of our writes when they come out of + // the rangefeed. We do this via an interceptor to avoid having to change + // RangeFeed's APIs. + RangefeedValueHeaderFilter: func(key, endKey roachpb.Key, ts hlc.Timestamp, vh enginepb.MVCCValueHeader) { + if seq := kvnemesisutil.Seq(vh.KVNemesisSeq.Get()); seq > 0 { + tr.Add(key, endKey, ts, seq) + } + }, + } - ctx := context.Background() - tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ + return base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ Knobs: base.TestingKnobs{ - Store: &kvserver.StoreTestingKnobs{ - // Drop the clock MaxOffset to reduce commit-wait time for - // transactions that write to global_read ranges. - MaxOffset: 10 * time.Millisecond, + Store: storeKnobs, + KVClient: &kvcoord.ClientTestingKnobs{ + // Don't let DistSender split DeleteRangeUsingTombstone across range boundaries. + // This does happen in real CRDB, but leads to separate atomic subunits, which + // would add complexity to kvnemesis that isn't worth it. Instead, the operation + // generator for the most part tries to avoid range-spanning requests, and the + // ones that do end up happening get a hard error. 
+ OnRangeSpanningNonTxnalBatch: func(ba *roachpb.BatchRequest) *roachpb.Error { + for _, req := range ba.Requests { + if req.GetInner().Method() != roachpb.DeleteRange { + continue + } + if req.GetDeleteRange().UseRangeTombstone == true { + return roachpb.NewErrorf("DeleteRangeUsingTombstone can not straddle range boundary") + } + } + return nil + }, }, }, }, - }) + } +} + +func randWithSeed( + t interface { + Logf(string, ...interface{}) + Helper() + }, seedOrZero int64, +) *rand.Rand { + t.Helper() + var rng *rand.Rand + if seedOrZero > 0 { + rng = rand.New(rand.NewSource(seedOrZero)) + } else { + rng, seedOrZero = randutil.NewTestRand() + } + t.Logf("seed: %d", seedOrZero) + return rng +} + +func TestKVNemesisSingleNode(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + skip.UnderRace(t) + + if !buildutil.CrdbTestBuild { + // `roachpb.RequestHeader` and `MVCCValueHeader` have a KVNemesisSeq field + // that is zero-sized outside of test builds. We could revisit that should + // a need arise to run kvnemesis against production binaries. + skip.IgnoreLint(t, "kvnemesis must be run with the crdb_test build tag") + } + + ctx := context.Background() + + // Can set a seed here for determinism. This works best when the seed was + // obtained with concurrency=1. 
+ const concurrency = 5 + rng := randWithSeed(t, 0) + + tr := &SeqTracker{} + tc := testcluster.StartTestCluster(t, 1, testClusterArgs(tr)) defer tc.Stopper().Stop(ctx) db := tc.Server(0).DB() sqlDB := tc.ServerConn(0) @@ -59,9 +129,12 @@ func TestKVNemesisSingleNode(t *testing.T) { config := NewDefaultConfig() config.NumNodes, config.NumReplicas = 1, 1 - rng, _ := randutil.NewTestRand() - env := &Env{sqlDBs: []*gosql.DB{sqlDB}} - failures, err := RunNemesis(ctx, rng, env, config, numSteps, db) + + env := &Env{SQLDBs: []*gosql.DB{sqlDB}, Tracker: tr, L: t} + // NB: when a failure is observed, it can be helpful to try to reproduce it + // with concurrency 1, as then determinism is much more likely once a suitable + // seed has been identified. + failures, err := RunNemesis(ctx, rng, env, config, concurrency, numSteps, db) require.NoError(t, err, `%+v`, err) for _, failure := range failures { @@ -74,21 +147,23 @@ func TestKVNemesisMultiNode(t *testing.T) { defer log.Scope(t).Close(t) skip.UnderRace(t) + if !buildutil.CrdbTestBuild { + // `roachpb.RequestHeader` and `MVCCValueHeader` have a KVNemesisSeq field + // that is zero-sized outside test builds. We could revisit that should + // a need arise to run kvnemesis against production binaries. + skip.IgnoreLint(t, "kvnemesis must be run with the crdb_test build tag") + } + + // Can set a seed here for determinism. This works best when the seed was + // obtained with concurrency=1. + const concurrency = 5 + rng := randWithSeed(t, 0) + // 4 nodes so we have somewhere to move 3x replicated ranges to. const numNodes = 4 ctx := context.Background() - tc := testcluster.StartTestCluster(t, numNodes, base.TestClusterArgs{ - ReplicationMode: base.ReplicationManual, - ServerArgs: base.TestServerArgs{ - Knobs: base.TestingKnobs{ - Store: &kvserver.StoreTestingKnobs{ - // Drop the clock MaxOffset to reduce commit-wait time for - // transactions that write to global_read ranges. 
- MaxOffset: 10 * time.Millisecond, - }, - }, - }, - }) + tr := &SeqTracker{} + tc := testcluster.StartTestCluster(t, numNodes, testClusterArgs(tr)) defer tc.Stopper().Stop(ctx) dbs, sqlDBs := make([]*kv.DB, numNodes), make([]*gosql.DB, numNodes) for i := 0; i < numNodes; i++ { @@ -102,9 +177,8 @@ func TestKVNemesisMultiNode(t *testing.T) { config := NewDefaultConfig() config.NumNodes, config.NumReplicas = numNodes, 3 - rng, _ := randutil.NewTestRand() - env := &Env{sqlDBs: sqlDBs} - failures, err := RunNemesis(ctx, rng, env, config, numSteps, dbs...) + env := &Env{SQLDBs: sqlDBs, Tracker: tr, L: t} + failures, err := RunNemesis(ctx, rng, env, config, concurrency, numSteps, dbs...) require.NoError(t, err, `%+v`, err) for _, failure := range failures { diff --git a/pkg/kv/kvnemesis/kvnemesisutil/BUILD.bazel b/pkg/kv/kvnemesis/kvnemesisutil/BUILD.bazel new file mode 100644 index 000000000000..6ccb50fe7c47 --- /dev/null +++ b/pkg/kv/kvnemesis/kvnemesisutil/BUILD.bazel @@ -0,0 +1,14 @@ +load("//build/bazelutil/unused_checker:unused.bzl", "get_x_data") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "kvnemesisutil", + srcs = [ + "context.go", + "seq.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil", + visibility = ["//visibility:public"], +) + +get_x_data(name = "get_x_data") diff --git a/pkg/kv/kvnemesis/kvnemesisutil/context.go b/pkg/kv/kvnemesis/kvnemesisutil/context.go new file mode 100644 index 000000000000..70083fb27a65 --- /dev/null +++ b/pkg/kv/kvnemesis/kvnemesisutil/context.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+// + +package kvnemesisutil + +import "context" + +type seqKey struct{} + +// WithSeq wraps the Context with a Seq. +func WithSeq(ctx context.Context, seq Seq) context.Context { + return context.WithValue(ctx, seqKey{}, seq) +} + +// FromContext extracts a Seq from the Context if there is one. +func FromContext(ctx context.Context) (Seq, bool) { + v, ok := ctx.Value(seqKey{}).(Seq) + return v, ok +} diff --git a/pkg/kv/kvnemesis/kvnemesisutil/seq.go b/pkg/kv/kvnemesis/kvnemesisutil/seq.go new file mode 100644 index 000000000000..175e344966b5 --- /dev/null +++ b/pkg/kv/kvnemesis/kvnemesisutil/seq.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package kvnemesisutil + +import "fmt" + +// Seq is a unique identifier used to associate MVCC versions with the kvnemesis +// operation that wrote them. 
+type Seq int32 + +func (d Seq) String() string { + return fmt.Sprintf("s%d", uint32(d)) +} diff --git a/pkg/kv/kvnemesis/operations.go b/pkg/kv/kvnemesis/operations.go index 6d8ad552614f..fe5747d8bd5b 100644 --- a/pkg/kv/kvnemesis/operations.go +++ b/pkg/kv/kvnemesis/operations.go @@ -13,6 +13,7 @@ package kvnemesis import ( "context" "fmt" + "strconv" "strings" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -33,6 +34,8 @@ func (op Operation) Result() *Result { return &o.Result case *DeleteRangeOperation: return &o.Result + case *DeleteRangeUsingTombstoneOperation: + return &o.Result case *SplitOperation: return &o.Result case *MergeOperation: @@ -112,6 +115,8 @@ func (op Operation) format(w *strings.Builder, fctx formatCtx) { o.format(w, fctx) case *DeleteRangeOperation: o.format(w, fctx) + case *DeleteRangeUsingTombstoneOperation: + o.format(w, fctx) case *SplitOperation: o.format(w, fctx) case *MergeOperation: @@ -182,24 +187,20 @@ func (op GetOperation) format(w *strings.Builder, fctx formatCtx) { methodName = `GetForUpdate` } fmt.Fprintf(w, `%s.%s(ctx, %s)`, fctx.receiver, methodName, roachpb.Key(op.Key)) - switch op.Result.Type { - case ResultType_Error: - err := errors.DecodeError(context.TODO(), *op.Result.Err) - fmt.Fprintf(w, ` // (nil, %s)`, err.Error()) - case ResultType_Value: - v := `nil` - if len(op.Result.Value) > 0 { - v = `"` + mustGetStringValue(op.Result.Value) + `"` - } - fmt.Fprintf(w, ` // (%s, nil)`, v) - } + op.Result.format(w) } func (op PutOperation) format(w *strings.Builder, fctx formatCtx) { - fmt.Fprintf(w, `%s.Put(ctx, %s, %s)`, fctx.receiver, roachpb.Key(op.Key), op.Value) + fmt.Fprintf(w, `%s.Put(ctx, %s, %s)`, fctx.receiver, roachpb.Key(op.Key), op.Value()) op.Result.format(w) } +// Value returns the value written by this put. This is a function of the +// sequence number. 
+func (op PutOperation) Value() string { + return `v` + strconv.Itoa(int(op.Seq)) +} + func (op ScanOperation) format(w *strings.Builder, fctx formatCtx) { methodName := `Scan` if op.ForUpdate { @@ -214,49 +215,22 @@ func (op ScanOperation) format(w *strings.Builder, fctx formatCtx) { maxRowsArg = `` } fmt.Fprintf(w, `%s.%s(ctx, %s, %s%s)`, fctx.receiver, methodName, roachpb.Key(op.Key), roachpb.Key(op.EndKey), maxRowsArg) - switch op.Result.Type { - case ResultType_Error: - err := errors.DecodeError(context.TODO(), *op.Result.Err) - fmt.Fprintf(w, ` // (nil, %s)`, err.Error()) - case ResultType_Values: - var kvs strings.Builder - for i, kv := range op.Result.Values { - if i > 0 { - kvs.WriteString(`, `) - } - kvs.WriteByte('"') - kvs.WriteString(string(kv.Key)) - kvs.WriteString(`":"`) - kvs.WriteString(mustGetStringValue(kv.Value)) - kvs.WriteByte('"') - } - fmt.Fprintf(w, ` // ([%s], nil)`, kvs.String()) - } + op.Result.format(w) } func (op DeleteOperation) format(w *strings.Builder, fctx formatCtx) { - fmt.Fprintf(w, `%s.Del(ctx, %s)`, fctx.receiver, roachpb.Key(op.Key)) + fmt.Fprintf(w, `%s.Del(ctx, %s /* @%s */)`, fctx.receiver, roachpb.Key(op.Key), op.Seq) op.Result.format(w) } func (op DeleteRangeOperation) format(w *strings.Builder, fctx formatCtx) { - fmt.Fprintf(w, `%s.DelRange(ctx, %s, %s, true)`, fctx.receiver, roachpb.Key(op.Key), roachpb.Key(op.EndKey)) - switch op.Result.Type { - case ResultType_Error: - err := errors.DecodeError(context.TODO(), *op.Result.Err) - fmt.Fprintf(w, ` // (nil, %s)`, err.Error()) - case ResultType_Keys: - var keysW strings.Builder - for i, key := range op.Result.Keys { - if i > 0 { - keysW.WriteString(`, `) - } - keysW.WriteByte('"') - keysW.WriteString(string(key)) - keysW.WriteString(`"`) - } - fmt.Fprintf(w, ` // ([%s], nil)`, keysW.String()) - } + fmt.Fprintf(w, `%s.DelRange(ctx, %s, %s, true /* @%s */)`, fctx.receiver, roachpb.Key(op.Key), roachpb.Key(op.EndKey), op.Seq) + op.Result.format(w) +} + +func (op 
DeleteRangeUsingTombstoneOperation) format(w *strings.Builder, fctx formatCtx) { + fmt.Fprintf(w, `%s.DelRangeUsingTombstone(ctx, %s, %s /* @%s */)`, fctx.receiver, roachpb.Key(op.Key), roachpb.Key(op.EndKey), op.Seq) + op.Result.format(w) } func (op SplitOperation) format(w *strings.Builder, fctx formatCtx) { @@ -292,11 +266,44 @@ func (op ChangeZoneOperation) format(w *strings.Builder, fctx formatCtx) { } func (r Result) format(w *strings.Builder) { + if r.Type == ResultType_Unknown { + return + } + fmt.Fprintf(w, ` //`) + if r.OptionalTimestamp.IsSet() { + fmt.Fprintf(w, ` @%s`, r.OptionalTimestamp) + } + + var sl []string + errString := "" switch r.Type { case ResultType_NoError: - fmt.Fprintf(w, ` // nil`) case ResultType_Error: err := errors.DecodeError(context.TODO(), *r.Err) - fmt.Fprintf(w, ` // %s`, err.Error()) + errString = fmt.Sprint(err) + case ResultType_Keys: + for _, k := range r.Keys { + sl = append(sl, string(k)) + } + case ResultType_Value: + sl = append(sl, mustGetStringValue(r.Value)) + case ResultType_Values: + for _, kv := range r.Values { + sl = append(sl, fmt.Sprintf(`%s:%s`, kv.Key, mustGetStringValue(kv.Value))) + } + default: + panic("unhandled ResultType") } + + w.WriteString(" ") + + sl = append(sl, errString) + if len(sl) > 1 { + w.WriteString("(") + } + w.WriteString(strings.Join(sl, ", ")) + if len(sl) > 1 { + w.WriteString(")") + } + } diff --git a/pkg/kv/kvnemesis/operations.proto b/pkg/kv/kvnemesis/operations.proto index e5ac38c0f913..1fece84fd961 100644 --- a/pkg/kv/kvnemesis/operations.proto +++ b/pkg/kv/kvnemesis/operations.proto @@ -54,19 +54,28 @@ message ScanOperation { message PutOperation { bytes key = 1; - bytes value = 2; + uint32 Seq = 2 [(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil.Seq"]; Result result = 3 [(gogoproto.nullable) = false]; } message DeleteOperation { bytes key = 1; - Result result = 2 [(gogoproto.nullable) = false]; + uint32 Seq = 2 
[(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil.Seq"]; + Result result = 3 [(gogoproto.nullable) = false]; } message DeleteRangeOperation { bytes key = 1; bytes end_key = 2; - Result result = 3 [(gogoproto.nullable) = false]; + uint32 Seq = 3 [(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil.Seq"]; + Result result = 4 [(gogoproto.nullable) = false]; +} + +message DeleteRangeUsingTombstoneOperation { + bytes key = 1; + bytes end_key = 2; + uint32 Seq = 3 [(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil.Seq"]; + Result result = 4 [(gogoproto.nullable) = false]; } message SplitOperation { @@ -117,11 +126,12 @@ message Operation { ScanOperation scan = 9; DeleteOperation delete = 10; DeleteRangeOperation delete_range = 11; - SplitOperation split = 12; - MergeOperation merge = 13; - ChangeReplicasOperation change_replicas = 14; - TransferLeaseOperation transfer_lease = 15; - ChangeZoneOperation change_zone = 16; + DeleteRangeUsingTombstoneOperation delete_range_using_tombstone = 12; + SplitOperation split = 13; + MergeOperation merge = 14; + ChangeReplicasOperation change_replicas = 15; + TransferLeaseOperation transfer_lease = 16; + ChangeZoneOperation change_zone = 17; } enum ResultType { diff --git a/pkg/kv/kvnemesis/operations_test.go b/pkg/kv/kvnemesis/operations_test.go index 92fd862e90ea..edf7c26e539a 100644 --- a/pkg/kv/kvnemesis/operations_test.go +++ b/pkg/kv/kvnemesis/operations_test.go @@ -11,12 +11,14 @@ package kvnemesis import ( + "fmt" "strings" "testing" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/echotest" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/stretchr/testify/assert" ) func TestOperationsFormat(t *testing.T) { @@ -24,48 +26,29 @@ func 
TestOperationsFormat(t *testing.T) { defer log.Scope(t).Close(t) tests := []struct { - step Step - expected string + step Step }{ - {step: step(get(`a`)), expected: `db0.Get(ctx, "a")`}, - {step: step(del(`a`)), expected: `db0.Del(ctx, "a")`}, - {step: step(batch(get(`b`), reverseScanForUpdate(`c`, `e`), get(`f`))), expected: ` - { - b := &Batch{} - b.Get(ctx, "b") - b.ReverseScanForUpdate(ctx, "c", "e") - b.Get(ctx, "f") - db0.Run(ctx, b) - } - `}, + {step: step(get(`a`))}, + {step: step(del(`a`, 1))}, + {step: step(batch(get(`b`), reverseScanForUpdate(`c`, `e`), get(`f`)))}, { step: step( closureTxn(ClosureTxnType_Commit, - batch(get(`g`), get(`h`), del(`i`)), - delRange(`j`, `k`), - put(`k`, `l`), + batch(get(`g`), get(`h`), del(`i`, 1)), + delRange(`j`, `k`, 2), + put(`k`, 3), )), - expected: ` - db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - { - b := &Batch{} - b.Get(ctx, "g") - b.Get(ctx, "h") - b.Del(ctx, "i") - txn.Run(ctx, b) - } - txn.DelRange(ctx, "j", "k", true) - txn.Put(ctx, "k", l) - return nil - }) - `, }, } - for _, test := range tests { - expected := strings.TrimSpace(test.expected) - var actual strings.Builder - test.step.format(&actual, formatCtx{indent: "\t\t\t"}) - assert.Equal(t, expected, strings.TrimSpace(actual.String())) + w := echotest.Walk(t, testutils.TestDataPath(t, t.Name())) + defer w.Check(t) + for i, test := range tests { + name := fmt.Sprint(i) + t.Run(name, w.Do(t, name, func(t *testing.T, path string) { + var actual strings.Builder + test.step.format(&actual, formatCtx{indent: "···"}) + echotest.Require(t, strings.TrimLeft(actual.String(), "\n"), path) + })) } } diff --git a/pkg/kv/kvnemesis/seq_tracker.go b/pkg/kv/kvnemesis/seq_tracker.go new file mode 100644 index 000000000000..93a583afedce --- /dev/null +++ b/pkg/kv/kvnemesis/seq_tracker.go @@ -0,0 +1,88 @@ +// Copyright 2022 The Cockroach Authors. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. +// + +package kvnemesis + +import ( + "fmt" + "sort" + "strings" + + "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" +) + +// SeqTracker is a container that helps kvnemesis map MVCC versions to +// operations as identified by their Seq. +// +// SeqTracker is threadsafe. +type SeqTracker struct { + syncutil.Mutex + seen map[keyTS]kvnemesisutil.Seq +} + +type keyTS struct { + key, endKey string + ts hlc.Timestamp +} + +func (tr *SeqTracker) String() string { + tr.Lock() + defer tr.Unlock() + + var sl []keyTS + for k := range tr.seen { + sl = append(sl, k) + } + sort.Slice(sl, func(i, j int) bool { + return fmt.Sprintf("%v", sl[i]) < fmt.Sprintf("%v", sl[j]) + }) + + var buf strings.Builder + for _, el := range sl { + fmt.Fprintf(&buf, "%s %s -> %s\n", roachpb.Span{Key: roachpb.Key(el.key), EndKey: roachpb.Key(el.endKey)}, el.ts, tr.seen[el]) + } + return buf.String() +} + +// Add associates key@ts with the provided Seq. +func (tr *SeqTracker) Add(key, endKey roachpb.Key, ts hlc.Timestamp, seq kvnemesisutil.Seq) { + tr.Lock() + defer tr.Unlock() + + if tr.seen == nil { + tr.seen = map[keyTS]kvnemesisutil.Seq{} + } + + tr.seen[keyTS{key: string(key), endKey: string(endKey), ts: ts}] = seq +} + +// Lookup checks whether the version key@ts is associated with a Seq. 
+func (tr *SeqTracker) Lookup(key, endKey roachpb.Key, ts hlc.Timestamp) (kvnemesisutil.Seq, bool) { + tr.Lock() + defer tr.Unlock() + // Rangedels can be split, but the tracker will always see the pre-split + // value (since it's reported by the operation's BatchRequest). So this + // method checks whether the input span is contained in any span seen + // by the tracker. + for kts := range tr.seen { + if kts.ts != ts { + continue + } + cur := roachpb.Span{Key: roachpb.Key(kts.key), EndKey: roachpb.Key(kts.endKey)} + if cur.Contains(roachpb.Span{Key: key, EndKey: endKey}) { + return tr.seen[kts], true + } + } + return 0, false +} diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/batch b/pkg/kv/kvnemesis/testdata/TestApplier/batch new file mode 100644 index 000000000000..d6228afebba4 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/batch @@ -0,0 +1,8 @@ +echo +---- +{ + b := &Batch{} + b.Put(ctx, "a", v21) // + b.DelRange(ctx, "b", "c", true /* @s22 */) // + db1.Run(ctx, b) // @ +} diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/batch-mixed b/pkg/kv/kvnemesis/testdata/TestApplier/batch-mixed new file mode 100644 index 000000000000..8e079b23a899 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/batch-mixed @@ -0,0 +1,12 @@ +echo +---- +{ + b := &Batch{} + b.Put(ctx, "b", v2) // + b.Get(ctx, "a") // (, ) + b.Del(ctx, "b" /* @s1 */) // + b.Del(ctx, "c" /* @s1 */) // + b.Scan(ctx, "a", "c") // + b.ReverseScanForUpdate(ctx, "a", "e") // + db1.Run(ctx, b) // @ +} diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/batch-mixed-err b/pkg/kv/kvnemesis/testdata/TestApplier/batch-mixed-err new file mode 100644 index 000000000000..a3972366c938 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/batch-mixed-err @@ -0,0 +1,10 @@ +echo +---- +{ + b := &Batch{} + b.Put(ctx, "b", v2) // context canceled + b.GetForUpdate(ctx, "a") // context canceled + b.ScanForUpdate(ctx, "a", "c") // context canceled + b.ReverseScan(ctx, "a", "c") // context canceled + 
db0.Run(ctx, b) // context canceled +} diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/del b/pkg/kv/kvnemesis/testdata/TestApplier/del new file mode 100644 index 000000000000..7e969ae30919 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/del @@ -0,0 +1,3 @@ +echo +---- +db0.Del(ctx, "b" /* @s1 */) // @ diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/del-err b/pkg/kv/kvnemesis/testdata/TestApplier/del-err new file mode 100644 index 000000000000..7edf879a3bdf --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/del-err @@ -0,0 +1,3 @@ +echo +---- +db0.Del(ctx, "b" /* @s1 */) // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/delrange b/pkg/kv/kvnemesis/testdata/TestApplier/delrange new file mode 100644 index 000000000000..fec4e947790e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/delrange @@ -0,0 +1,3 @@ +echo +---- +db1.DelRange(ctx, "a", "c", true /* @s6 */) // @ (a, ) diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/delrange-err b/pkg/kv/kvnemesis/testdata/TestApplier/delrange-err new file mode 100644 index 000000000000..b3ecc57deba9 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/delrange-err @@ -0,0 +1,3 @@ +echo +---- +db1.DelRange(ctx, "b", "c", true /* @s12 */) // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/get b/pkg/kv/kvnemesis/testdata/TestApplier/get new file mode 100644 index 000000000000..42a660e4209e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/get @@ -0,0 +1,3 @@ +echo +---- +db0.Get(ctx, "a") // @ (, ) diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/get-err b/pkg/kv/kvnemesis/testdata/TestApplier/get-err new file mode 100644 index 000000000000..64a22b935dac --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/get-err @@ -0,0 +1,3 @@ +echo +---- +db1.Get(ctx, "a") // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/get-for-update b/pkg/kv/kvnemesis/testdata/TestApplier/get-for-update new file mode 100644 index 
000000000000..01bd03475fbe --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/get-for-update @@ -0,0 +1,3 @@ +echo +---- +db1.GetForUpdate(ctx, "a") // @ (v1, ) diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/merge b/pkg/kv/kvnemesis/testdata/TestApplier/merge new file mode 100644 index 000000000000..d24bef8be7da --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/merge @@ -0,0 +1,3 @@ +echo +---- +db0.AdminMerge(ctx, "foo") // diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/merge-again b/pkg/kv/kvnemesis/testdata/TestApplier/merge-again new file mode 100644 index 000000000000..41f26ee8dcc8 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/merge-again @@ -0,0 +1,3 @@ +echo +---- +db0.AdminMerge(ctx, "foo") // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/put b/pkg/kv/kvnemesis/testdata/TestApplier/put new file mode 100644 index 000000000000..13724bf42eca --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/put @@ -0,0 +1,3 @@ +echo +---- +db0.Put(ctx, "a", v1) // @ diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/put-err b/pkg/kv/kvnemesis/testdata/TestApplier/put-err new file mode 100644 index 000000000000..f1748fd3d0ec --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/put-err @@ -0,0 +1,3 @@ +echo +---- +db0.Put(ctx, "a", v1) // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/rscan b/pkg/kv/kvnemesis/testdata/TestApplier/rscan new file mode 100644 index 000000000000..322cf1556ed0 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/rscan @@ -0,0 +1,3 @@ +echo +---- +db0.ReverseScan(ctx, "a", "c", 0) // @ (a:v21, ) diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/rscan-err b/pkg/kv/kvnemesis/testdata/TestApplier/rscan-err new file mode 100644 index 000000000000..9a7c360fac1b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/rscan-err @@ -0,0 +1,3 @@ +echo +---- +db0.ReverseScan(ctx, "a", "c", 0) // context canceled diff --git 
a/pkg/kv/kvnemesis/testdata/TestApplier/rscan-for-update b/pkg/kv/kvnemesis/testdata/TestApplier/rscan-for-update new file mode 100644 index 000000000000..da8068079ddf --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/rscan-for-update @@ -0,0 +1,3 @@ +echo +---- +db1.ReverseScanForUpdate(ctx, "a", "b", 0) // @ (a:v21, ) diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/rscan-for-update-err b/pkg/kv/kvnemesis/testdata/TestApplier/rscan-for-update-err new file mode 100644 index 000000000000..4150f4096f3b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/rscan-for-update-err @@ -0,0 +1,3 @@ +echo +---- +db1.ReverseScanForUpdate(ctx, "a", "c", 0) // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/scan b/pkg/kv/kvnemesis/testdata/TestApplier/scan new file mode 100644 index 000000000000..8be1acfd0499 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/scan @@ -0,0 +1,3 @@ +echo +---- +db1.Scan(ctx, "a", "c", 0) // @ diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/scan-for-update b/pkg/kv/kvnemesis/testdata/TestApplier/scan-for-update new file mode 100644 index 000000000000..d6decc65fe2e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/scan-for-update @@ -0,0 +1,3 @@ +echo +---- +db0.ScanForUpdate(ctx, "a", "c", 0) // @ (a:v1, ) diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/scan-for-update-err b/pkg/kv/kvnemesis/testdata/TestApplier/scan-for-update-err new file mode 100644 index 000000000000..911886d671d4 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/scan-for-update-err @@ -0,0 +1,3 @@ +echo +---- +db1.ScanForUpdate(ctx, "a", "c", 0) // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/split b/pkg/kv/kvnemesis/testdata/TestApplier/split new file mode 100644 index 000000000000..ff90d3071844 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/split @@ -0,0 +1,3 @@ +echo +---- +db1.AdminSplit(ctx, "foo") // diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/split-again 
b/pkg/kv/kvnemesis/testdata/TestApplier/split-again new file mode 100644 index 000000000000..0e13bcf6bf8e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/split-again @@ -0,0 +1,3 @@ +echo +---- +db1.AdminSplit(ctx, "foo") // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/transfer b/pkg/kv/kvnemesis/testdata/TestApplier/transfer new file mode 100644 index 000000000000..7a00575623d3 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/transfer @@ -0,0 +1,3 @@ +echo +---- +db1.TransferLeaseOperation(ctx, "foo", 1) // diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/transfer-again b/pkg/kv/kvnemesis/testdata/TestApplier/transfer-again new file mode 100644 index 000000000000..8fee60e0ba89 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/transfer-again @@ -0,0 +1,3 @@ +echo +---- +db0.TransferLeaseOperation(ctx, "foo", 1) // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/txn-commit-batch b/pkg/kv/kvnemesis/testdata/TestApplier/txn-commit-batch new file mode 100644 index 000000000000..31453925956c --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/txn-commit-batch @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "e", v5) // @ + b := &Batch{} + b.Get(ctx, "a") // (, ) + b.Put(ctx, "f", v6) // + txn.CommitInBatch(ctx, b) // @ + return nil +}) // @ txnpb: diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/txn-commit-mixed b/pkg/kv/kvnemesis/testdata/TestApplier/txn-commit-mixed new file mode 100644 index 000000000000..d7419c32ffc3 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/txn-commit-mixed @@ -0,0 +1,12 @@ +echo +---- +db1.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "e", v5) // @ + { + b := &Batch{} + b.Put(ctx, "f", v6) // + b.DelRange(ctx, "c", "e", true /* @s1 */) // + txn.Run(ctx, b) // @ + } + return nil +}) // @ txnpb: diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/txn-delrange 
b/pkg/kv/kvnemesis/testdata/TestApplier/txn-delrange new file mode 100644 index 000000000000..1cda6addb520 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/txn-delrange @@ -0,0 +1,6 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "b", "d", true /* @s1 */) // @ + return nil +}) // @ txnpb: diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/txn-err b/pkg/kv/kvnemesis/testdata/TestApplier/txn-err new file mode 100644 index 000000000000..11344af13632 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/txn-err @@ -0,0 +1,6 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "b", "d", true /* @s1 */) + return nil +}) // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/txn-error b/pkg/kv/kvnemesis/testdata/TestApplier/txn-error new file mode 100644 index 000000000000..5163e96bf7c5 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/txn-error @@ -0,0 +1,6 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "e", v5) + return errors.New("rollback") +}) // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/txn-rollback b/pkg/kv/kvnemesis/testdata/TestApplier/txn-rollback new file mode 100644 index 000000000000..bb013d0ea858 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/txn-rollback @@ -0,0 +1,6 @@ +echo +---- +db1.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "e", v5) // @ + return errors.New("rollback") +}) // rollback diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/zcfg b/pkg/kv/kvnemesis/testdata/TestApplier/zcfg new file mode 100644 index 000000000000..a06cdd0d5bce --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/zcfg @@ -0,0 +1,3 @@ +echo +---- +env.UpdateZoneConfig(ctx, ToggleGlobalReads) // diff --git a/pkg/kv/kvnemesis/testdata/TestApplier/zcfg-again b/pkg/kv/kvnemesis/testdata/TestApplier/zcfg-again new file mode 100644 index 
000000000000..4e3dfb6c3023 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestApplier/zcfg-again @@ -0,0 +1,3 @@ +echo +---- +env.UpdateZoneConfig(ctx, ToggleGlobalReads) // context canceled diff --git a/pkg/kv/kvnemesis/testdata/TestEngine/output.txt b/pkg/kv/kvnemesis/testdata/TestEngine/output.txt new file mode 100644 index 000000000000..1c50f8610efa --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestEngine/output.txt @@ -0,0 +1,11 @@ +echo +---- +"a" 0.000000002,0 -> /BYTES/a-2 +"a" 0.000000001,0 -> /BYTES/a-1 +"b" 0.000000003,0 -> / +"b" 0.000000002,0 -> /BYTES/b-2 +"c" 0.000000004,0 -> / +"d" 0.000000005,0 -> / +"d" 0.000000004,0 -> /BYTES/d-4 +"e" 0.000000004,0 -> /BYTES/e-4 +"f"-"g" 0.000000007,0 -> / diff --git a/pkg/kv/kvnemesis/testdata/TestOperationsFormat/0 b/pkg/kv/kvnemesis/testdata/TestOperationsFormat/0 new file mode 100644 index 000000000000..44973c745b19 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestOperationsFormat/0 @@ -0,0 +1,3 @@ +echo +---- +···db0.Get(ctx, "a") diff --git a/pkg/kv/kvnemesis/testdata/TestOperationsFormat/1 b/pkg/kv/kvnemesis/testdata/TestOperationsFormat/1 new file mode 100644 index 000000000000..8d8b723a6df2 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestOperationsFormat/1 @@ -0,0 +1,3 @@ +echo +---- +···db0.Del(ctx, "a" /* @s1 */) diff --git a/pkg/kv/kvnemesis/testdata/TestOperationsFormat/2 b/pkg/kv/kvnemesis/testdata/TestOperationsFormat/2 new file mode 100644 index 000000000000..7413299ed413 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestOperationsFormat/2 @@ -0,0 +1,9 @@ +echo +---- +···{ +··· b := &Batch{} +··· b.Get(ctx, "b") +··· b.ReverseScanForUpdate(ctx, "c", "e") +··· b.Get(ctx, "f") +··· db0.Run(ctx, b) +···} diff --git a/pkg/kv/kvnemesis/testdata/TestOperationsFormat/3 b/pkg/kv/kvnemesis/testdata/TestOperationsFormat/3 new file mode 100644 index 000000000000..c2874f263947 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestOperationsFormat/3 @@ -0,0 +1,14 @@ +echo +---- +···db0.Txn(ctx, func(ctx 
context.Context, txn *kv.Txn) error { +··· { +··· b := &Batch{} +··· b.Get(ctx, "g") +··· b.Get(ctx, "h") +··· b.Del(ctx, "i" /* @s1 */) +··· txn.Run(ctx, b) +··· } +··· txn.DelRange(ctx, "j", "k", true /* @s2 */) +··· txn.Put(ctx, "k", v3) +··· return nil +···}) diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_del-del_transaction_committed b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_del-del_transaction_committed new file mode 100644 index 000000000000..4abd96687198 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_del-del_transaction_committed @@ -0,0 +1,8 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + txn.Del(ctx, "a" /* @s2 */) // + return nil +}) // result is ambiguous: boom +"a"/0.000000001,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_del-del_transaction_committed_but_wrong_seq b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_del-del_transaction_committed_but_wrong_seq new file mode 100644 index 000000000000..045ed683fb44 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_del-del_transaction_committed_but_wrong_seq @@ -0,0 +1,9 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + txn.Del(ctx, "a" /* @s2 */) // + return nil +}) // result is ambiguous: boom +"a"/0.000000001,0 @ s1 +committed txn overwritten key had write: [d]"a":0.000000001,0->@s1 [d]"a":missing->@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_committed b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_committed new file mode 100644 index 000000000000..6fc3115e4705 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_committed @@ -0,0 +1,9 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Del(ctx, "b" /* @s2 */) // + return nil +}) // result 
is ambiguous: boom +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000001,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_committed_but_has_validation_error b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_committed_but_has_validation_error new file mode 100644 index 000000000000..d274e4420525 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_committed_but_has_validation_error @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Del(ctx, "b" /* @s2 */) // + return nil +}) // result is ambiguous: boom +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 +ambiguous txn non-atomic timestamps: [w]"a":0.000000001,0->v1@s1 [d]"b":0.000000002,0->@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_did_not_commit b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_did_not_commit new file mode 100644 index 000000000000..bf6ace7b4a62 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_did_not_commit @@ -0,0 +1,7 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Del(ctx, "b" /* @s2 */) // + return nil +}) // result is ambiguous: boom diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_committed b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_committed new file mode 100644 index 000000000000..9cd5a2bd61ef --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_committed @@ -0,0 +1,9 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Put(ctx, "b", v2) // + return nil +}) // result is ambiguous: boom +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000001,0 @ s2 v2 diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_committed_but_has_validation_error b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_committed_but_has_validation_error new file mode 100644 index 000000000000..ee3414c470fb --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_committed_but_has_validation_error @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Put(ctx, "b", v2) // + return nil +}) // result is ambiguous: boom +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +ambiguous txn non-atomic timestamps: [w]"a":0.000000001,0->v1@s1 [w]"b":0.000000002,0->v2@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_did_not_commit b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_did_not_commit new file mode 100644 index 000000000000..a0ccc7fed500 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-put_transaction_did_not_commit @@ -0,0 +1,7 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Put(ctx, "b", v2) // + return nil +}) // result is ambiguous: boom diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes new file mode 100644 index 000000000000..76465943108a --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes @@ -0,0 +1,13 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +{ + b := &Batch{} + b.Get(ctx, "a") // (v1, ) + b.Get(ctx, "b") // (v2, ) + b.Get(ctx, "c") // (, ) + db0.Run(ctx, b) // @0.000000003,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes 
b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes new file mode 100644 index 000000000000..26dd1f65913c --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes @@ -0,0 +1,17 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Del(ctx, "a" /* @s3 */) // @0.000000003,0 +db0.Del(ctx, "b" /* @s4 */) // @0.000000004,0 +{ + b := &Batch{} + b.Get(ctx, "a") // (v1, ) + b.Get(ctx, "b") // (v2, ) + b.Get(ctx, "c") // (, ) + db0.Run(ctx, b) // @0.000000003,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"a"/0.000000003,0 @ s3 +"b"/0.000000004,0 @ s4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_returning_tombstones b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_returning_tombstones new file mode 100644 index 000000000000..882769e4e584 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_returning_tombstones @@ -0,0 +1,19 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Del(ctx, "a" /* @s3 */) // @0.000000003,0 +db0.Del(ctx, "b" /* @s3 */) // @0.000000004,0 +{ + b := &Batch{} + b.Get(ctx, "a") // (, ) + b.Get(ctx, "b") // (, ) + b.Get(ctx, "c") // (, ) + db0.Run(ctx, b) // @0.000000005,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"a"/0.000000003,0 @ s3 +"b"/0.000000004,0 @ s4 +committed delete missing write at seq s3: [d]"b":missing->@s3 +unclaimed writes: [d]"b":0.000000004,0->@s4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_returning_wrong_values b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_returning_wrong_values new file mode 100644 index 000000000000..40eaf2e49f4a --- /dev/null +++ 
b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_returning_wrong_values @@ -0,0 +1,18 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Del(ctx, "a" /* @s3 */) // @0.000000003,0 +db0.Del(ctx, "b" /* @s4 */) // @0.000000004,0 +{ + b := &Batch{} + b.Get(ctx, "a") // (, ) + b.Get(ctx, "b") // (v1, ) + b.Get(ctx, "c") // (v2, ) + db0.Run(ctx, b) // @0.000000005,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"a"/0.000000003,0 @ s3 +"b"/0.000000004,0 @ s4 +committed batch non-atomic timestamps: [r]"a":[, 0.000000001,0),[0.000000003,0, )-> [r]"b":[0,0, 0,0)->v1 [r]"c":[0,0, 0,0)->v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_with_valid_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_with_valid_time_overlap new file mode 100644 index 000000000000..bd252eabff51 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_and_deletes_with_valid_time_overlap @@ -0,0 +1,17 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Del(ctx, "a" /* @s3 */) // @0.000000003,0 +db0.Del(ctx, "b" /* @s4 */) // @0.000000004,0 +{ + b := &Batch{} + b.Get(ctx, "a") // (, ) + b.Get(ctx, "b") // (v2, ) + b.Get(ctx, "c") // (, ) + db0.Run(ctx, b) // @0.000000003,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"a"/0.000000003,0 @ s3 +"b"/0.000000004,0 @ s4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_returning_wrong_values b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_returning_wrong_values new file mode 100644 index 000000000000..6a919a673ff5 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_returning_wrong_values @@ -0,0 +1,14 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +{ + b 
:= &Batch{} + b.Get(ctx, "a") // (, ) + b.Get(ctx, "b") // (v1, ) + b.Get(ctx, "c") // (v2, ) + db0.Run(ctx, b) // @0.000000003,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +committed batch non-atomic timestamps: [r]"a":[, 0.000000001,0)-> [r]"b":[0,0, 0,0)->v1 [r]"c":[0,0, 0,0)->v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_with_empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_with_empty_time_overlap new file mode 100644 index 000000000000..5ed5d77070aa --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_reads_after_writes_with_empty_time_overlap @@ -0,0 +1,14 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +{ + b := &Batch{} + b.Get(ctx, "a") // (, ) + b.Get(ctx, "b") // (v2, ) + b.Get(ctx, "c") // (, ) + db0.Run(ctx, b) // @0.000000003,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +committed batch non-atomic timestamps: [r]"a":[, 0.000000001,0)-> [r]"b":[0.000000002,0, )->v2 [r]"c":[, )-> diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes new file mode 100644 index 000000000000..85f9a196663c --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes @@ -0,0 +1,13 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +{ + b := &Batch{} + b.Scan(ctx, "a", "c") // (a:v1, b:v2, ) + b.Scan(ctx, "b", "d") // (b:v2, ) + b.Scan(ctx, "c", "e") // + db0.Run(ctx, b) // @0.000000003,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes_returning_wrong_values b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes_returning_wrong_values new file mode 100644 index 000000000000..4676cadff8dc --- /dev/null +++ 
b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes_returning_wrong_values @@ -0,0 +1,14 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +{ + b := &Batch{} + b.Scan(ctx, "a", "c") // + b.Scan(ctx, "b", "d") // (b:v1, ) + b.Scan(ctx, "c", "e") // (c:v2, ) + db0.Run(ctx, b) // @0.000000003,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +committed batch non-atomic timestamps: [s]{a-c}:{gap:[, 0.000000001,0)}->[] [s]{b-d}:{0:[0,0, 0,0), gap:[, )}->["b":v1] [s]{c-e}:{0:[0,0, 0,0), gap:[, )}->["c":v2] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes_with_non-empty_time_overlap new file mode 100644 index 000000000000..b7cba3943156 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_scans_after_writes_with_non-empty_time_overlap @@ -0,0 +1,14 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +{ + b := &Batch{} + b.Scan(ctx, "a", "c") // (b:v1, ) + b.Scan(ctx, "b", "d") // (b:v1, ) + b.Scan(ctx, "c", "e") // + db0.Run(ctx, b) // @0.000000003,0 +} +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +committed batch non-atomic timestamps: [s]{a-c}:{0:[0,0, 0,0), gap:[, 0.000000001,0)}->["b":v1] [s]{b-d}:{0:[0,0, 0,0), gap:[, )}->["b":v1] [s]{c-e}:{gap:[, )}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_touching_rangedels b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_touching_rangedels new file mode 100644 index 000000000000..3bbe96250401 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_touching_rangedels @@ -0,0 +1,10 @@ +echo +---- +{ + b := &Batch{} + b.DelRangeUsingTombstone(ctx, "a", "b" /* @s1 */) + b.DelRangeUsingTombstone(ctx, "b", "d" /* @s2 */) + db0.Run(ctx, b) // @0.000000001,0 +} +{a-b}/0.000000001,0 @ s1 +{b-d}/0.000000001,0 @ s2 diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_two_overlapping_rangedels b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_two_overlapping_rangedels new file mode 100644 index 000000000000..853f29d7eaa4 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_of_two_overlapping_rangedels @@ -0,0 +1,10 @@ +echo +---- +{ + b := &Batch{} + b.DelRangeUsingTombstone(ctx, "a", "c" /* @s1 */) + b.DelRangeUsingTombstone(ctx, "b", "d" /* @s2 */) + db0.Run(ctx, b) // @0.000000001,0 +} +{a-b}/0.000000001,0 @ s1 +{b-d}/0.000000001,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/batch_with_two_deletes_of_same_key b/pkg/kv/kvnemesis/testdata/TestValidate/batch_with_two_deletes_of_same_key new file mode 100644 index 000000000000..bf3d16589ba7 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/batch_with_two_deletes_of_same_key @@ -0,0 +1,9 @@ +echo +---- +{ + b := &Batch{} + b.Del(ctx, "a" /* @s1 */) // + b.Del(ctx, "a" /* @s2 */) // + db0.Run(ctx, b) // @0.000000001,0 +} +"a"/0.000000001,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/no_ops_and_no_kvs b/pkg/kv/kvnemesis/testdata/TestValidate/no_ops_and_no_kvs new file mode 100644 index 000000000000..ad07fd2183e0 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/no_ops_and_no_kvs @@ -0,0 +1,2 @@ +echo +---- diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/no_ops_with_unexpected_delete b/pkg/kv/kvnemesis/testdata/TestValidate/no_ops_with_unexpected_delete new file mode 100644 index 000000000000..0a83e1e35a97 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/no_ops_with_unexpected_delete @@ -0,0 +1,4 @@ +echo +---- +"a"/0.000000001,0 @ s1 +unclaimed writes: [d]"a":0.000000001,0->@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/no_ops_with_unexpected_write b/pkg/kv/kvnemesis/testdata/TestValidate/no_ops_with_unexpected_write new file mode 100644 index 000000000000..000d17a77ffa --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/no_ops_with_unexpected_write 
@@ -0,0 +1,4 @@ +echo +---- +"a"/0.000000001,0 @ s1 v1 +unclaimed writes: [w]"a":0.000000001,0->v1@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_failed_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_failed_write new file mode 100644 index 000000000000..e69870fcc14d --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_failed_write @@ -0,0 +1,3 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) // result is ambiguous: boom diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_failed_write_before_a_later_committed_delete b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_failed_write_before_a_later_committed_delete new file mode 100644 index 000000000000..d5b7c85d444a --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_failed_write_before_a_later_committed_delete @@ -0,0 +1,5 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) // result is ambiguous: boom +db0.Del(ctx, "a" /* @s2 */) // @0.000000002,0 +"a"/0.000000002,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_successful_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_successful_write new file mode 100644 index 000000000000..22aa36a8548d --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_delete_with_successful_write @@ -0,0 +1,4 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) // result is ambiguous: boom +"a"/0.000000001,0 @ s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_put_with_failed_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_put_with_failed_write new file mode 100644 index 000000000000..965fcf4fa3a9 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_put_with_failed_write @@ -0,0 +1,3 @@ +echo +---- +db0.Put(ctx, "a", v1) // result is ambiguous: boom diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_put_with_successful_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_put_with_successful_write new file mode 100644 index 000000000000..52b96f7bf589 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_ambiguous_put_with_successful_write @@ -0,0 +1,4 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 result is ambiguous: boom +"a"/0.000000001,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_delete_with_missing_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_delete_with_missing_write new file mode 100644 index 000000000000..0a679a69a334 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_delete_with_missing_write @@ -0,0 +1,8 @@ +echo +---- +{ + b := &Batch{} + b.Del(ctx, "a" /* @s1 */) // + db0.Run(ctx, b) // @0.000000001,0 +} +committed batch missing write at seq s1: [d]"a":missing->@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_delete_with_successful_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_delete_with_successful_write new file mode 100644 index 000000000000..f3de3d9e55ba --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_delete_with_successful_write @@ -0,0 +1,8 @@ +echo +---- +{ + b := &Batch{} + b.Del(ctx, "a" /* @s1 */) + db0.Run(ctx, b) // @0.000000001,0 +} +"a"/0.000000001,0 @ s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_put_with_missing_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_put_with_missing_write new file mode 100644 index 000000000000..dee0bc5bd6b6 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_put_with_missing_write @@ -0,0 +1,8 @@ +echo +---- +{ + b := &Batch{} + b.Put(ctx, "a", v1) // + db0.Run(ctx, b) // @0.000000001,0 +} +committed batch missing write at seq s1: [w]"a":missing->v1@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_put_with_successful_write 
b/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_put_with_successful_write new file mode 100644 index 000000000000..865045276ebb --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_batch_put_with_successful_write @@ -0,0 +1,8 @@ +echo +---- +{ + b := &Batch{} + b.Put(ctx, "a", v1) // + db0.Run(ctx, b) // @0.000000001,0 +} +"a"/0.000000001,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_expected_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_expected_write new file mode 100644 index 000000000000..056209a52128 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_expected_write @@ -0,0 +1,4 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) // @0.000000001,0 +"a"/0.000000001,0 @ s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_expected_write_after_write_transaction_with_shadowed_delete b/pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_expected_write_after_write_transaction_with_shadowed_delete new file mode 100644 index 000000000000..40dd2ded5609 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_expected_write_after_write_transaction_with_shadowed_delete @@ -0,0 +1,15 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v3) // + txn.Del(ctx, "a" /* @s4 */) // + txn.Put(ctx, "a", v5) // + return nil +}) // @0.000000003,0 +db0.Del(ctx, "a" /* @s6 */) // @0.000000004,0 +"a"/0.000000001,0 @ s1 +"a"/0.000000002,0 @ s2 v2 +"a"/0.000000003,0 @ s5 v5 +"a"/0.000000004,0 @ s6 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_missing_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_missing_write new file mode 100644 index 000000000000..88d795c24a34 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_delete_with_missing_write @@ -0,0 +1,4 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) 
// @0.000000001,0 +committed delete missing write at seq s1: [d]"a":missing->@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write new file mode 100644 index 000000000000..889b6820869b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write @@ -0,0 +1,9 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s2 */) // (a, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_extra_deletion b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_extra_deletion new file mode 100644 index 000000000000..35f34a922f1d --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_extra_deletion @@ -0,0 +1,11 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000002,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s2 */) // @0.000000002,0 (a, b, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +mismatched write timestamp 0.000000001,0 and exec timestamp 0.000000002,0: [w]"a":0.000000001,0->v1@s1 +committed txn missing write at seq s2: [dr.d]"a":0.000000002,0->@s2 [dr.d]"b":missing->@s2 [dr.s]{a-c}:{gap:[, 0.000000001,0),[0.000000002,0, )}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_missing_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_missing_write new file mode 100644 index 000000000000..ebab1a99dce0 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_missing_write @@ -0,0 +1,9 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + 
txn.DelRange(ctx, "a", "c", true /* @s2 */) // @0.000000002,0 (a, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +committed txn missing write at seq s2: [dr.d]"a":missing->@s2 [dr.s]{a-c}:{gap:[, 0.000000001,0)}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_returning_wrong_value b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_returning_wrong_value new file mode 100644 index 000000000000..3bd8a50d609e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_returning_wrong_value @@ -0,0 +1,10 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s2 */) // @0.000000002,0 + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +unclaimed writes: [d]"a":0.000000002,0->@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_with_spurious_deletion b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_with_spurious_deletion new file mode 100644 index 000000000000..39caac7e893f --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_write_with_spurious_deletion @@ -0,0 +1,10 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s2 */) // @0.000000002,0 (a, b, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +"b"/0.000000002,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes new file mode 100644 index 000000000000..0c6327896e59 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes @@ -0,0 +1,15 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 
+db0.Put(ctx, "c", v3) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s4 */) // (a, b, ) + return nil +}) // @0.000000004,0 +db0.Scan(ctx, "a", "d", 0) // @0.000000004,0 (c:v3, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"c"/0.000000003,0 @ s3 v3 +"a"/0.000000004,0 @ s4 +"b"/0.000000004,0 @ s4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_and_delete b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_and_delete new file mode 100644 index 000000000000..481446aba853 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_and_delete @@ -0,0 +1,18 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Del(ctx, "a" /* @s3 */) // @0.000000004,0 +db0.Put(ctx, "a", v4) // @0.000000005,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s5 */) // (a, b, ) + return nil +}) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"a"/0.000000003,0 @ s5 +"b"/0.000000003,0 @ s5 +"a"/0.000000004,0 @ s3 +"b"/0.000000005,0 @ s4 v4 +committed put missing write at seq s4: [w]"a":missing->v4@s4 +unclaimed writes: [w]"b":0.000000005,0->v4@s4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_incorrectly_deleting_keys_outside_span_boundary b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_incorrectly_deleting_keys_outside_span_boundary new file mode 100644 index 000000000000..ad46fca31dff --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_incorrectly_deleting_keys_outside_span_boundary @@ -0,0 +1,12 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "d", v2) // @0.000000002,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s3 */) // (a, d, ) + return nil 
+}) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ s3 +"d"/0.000000002,0 @ s2 v2 +"d"/0.000000003,0 @ s3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_returning_keys_outside_span_boundary b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_returning_keys_outside_span_boundary new file mode 100644 index 000000000000..632cf9559a8e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_returning_keys_outside_span_boundary @@ -0,0 +1,12 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "d", v2) // @0.000000002,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s3 */) // (a, d, ) + return nil +}) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ s3 +"d"/0.000000002,0 @ s2 v2 +committed txn missing write at seq s3: [dr.d]"a":0.000000003,0->@s3 [dr.d]"d":missing->@s3 [dr.s]{a-c}:{gap:[, 0.000000001,0),[0.000000003,0, )}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_with_missing_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_with_missing_write new file mode 100644 index 000000000000..e9e2831fed1b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_with_missing_write @@ -0,0 +1,16 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Put(ctx, "c", v3) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s4 */) // (a, b, ) + return nil +}) // @0.000000004,0 +db0.Scan(ctx, "a", "d", 0) // @0.000000005,0 (c:v3, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"c"/0.000000003,0 @ s3 v3 +"a"/0.000000004,0 @ s4 +committed txn missing write at seq s4: [dr.d]"a":0.000000004,0->@s4 [dr.d]"b":missing->@s4 [dr.s]{a-c}:{gap:[, 0.000000001,0),[0.000000004,0, )}->[] +committed 
scan non-atomic timestamps: [s]{a-d}:{0:[0.000000003,0, ), gap:[, 0.000000001,0)}->["c":v3] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_with_write_timestamp_disagreement b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_with_write_timestamp_disagreement new file mode 100644 index 000000000000..c4f50aa9b65b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_after_writes_with_write_timestamp_disagreement @@ -0,0 +1,16 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Put(ctx, "c", v3) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s4 */) // (a, b, c, ) + return nil +}) // @0.000000004,0 +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"c"/0.000000003,0 @ s3 v3 +"a"/0.000000003,0 @ s4 +"b"/0.000000004,0 @ s4 +"c"/0.000000004,0 @ s4 +committed txn non-atomic timestamps: [dr.d]"a":0.000000003,0->@s4 [dr.d]"b":0.000000004,0->@s4 [dr.d]"c":0.000000004,0->@s4 [dr.s]{a-c}:{gap:[, 0.000000001,0),[0.000000004,0, )}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_before_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_before_write new file mode 100644 index 000000000000..efff6f38baa5 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_before_write @@ -0,0 +1,5 @@ +echo +---- +db0.DelRange(ctx, "a", "c", true /* @s1 */) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +"a"/0.000000002,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_before_write_returning_wrong_value b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_before_write_returning_wrong_value new file mode 100644 index 000000000000..fd233922e394 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_deleterange_before_write_returning_wrong_value @@ -0,0 +1,6 @@ +echo +---- +db0.DelRange(ctx, "a", "c", 
true /* @s1 */) // @0.000000001,0 (a, ) +db0.Put(ctx, "a", v2) // @0.000000002,0 +"a"/0.000000002,0 @ s2 v2 +committed deleteRange missing write at seq s1: [dr.d]"a":missing->@s1 [dr.s]{a-c}:{gap:[, 0.000000002,0)}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_put_with_expected_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_put_with_expected_write new file mode 100644 index 000000000000..15544ecd10bb --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_put_with_expected_write @@ -0,0 +1,4 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_put_with_missing_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_put_with_missing_write new file mode 100644 index 000000000000..623e26c7e3cf --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_put_with_missing_write @@ -0,0 +1,4 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +committed put missing write at seq s1: [w]"a":missing->v1@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write new file mode 100644 index 000000000000..684ebe68a72c --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write @@ -0,0 +1,5 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Get(ctx, "a") // @0.000000002,0 (v1, ) +"a"/0.000000001,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_and_delete b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_and_delete new file mode 100644 index 000000000000..92c23f8f5bf5 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_and_delete @@ -0,0 +1,7 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Del(ctx, "a" /* @s2 */) // @0.000000002,0 +db0.Get(ctx, "a") // @0.000000001,0 (v1, ) +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_and_delete_returning_tombstone b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_and_delete_returning_tombstone new file mode 100644 index 000000000000..9cc2f7a027d9 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_and_delete_returning_tombstone @@ -0,0 +1,7 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Del(ctx, "a" /* @s2 */) // @0.000000002,0 +db0.Get(ctx, "a") // @0.000000003,0 (, ) +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_returning_wrong_value b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_returning_wrong_value new file mode 100644 index 000000000000..e95e40cf29cb --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_after_write_returning_wrong_value @@ -0,0 +1,6 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Get(ctx, "a") // @0.000000002,0 (v2, ) +"a"/0.000000001,0 @ s1 v1 +committed get non-atomic timestamps: [r]"a":[0,0, 0,0)->v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_delete b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_delete new file mode 100644 index 000000000000..fef830e462c4 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_delete @@ -0,0 +1,5 @@ +echo +---- +db0.Get(ctx, "a") // @0.000000001,0 (, ) +db0.Del(ctx, "a" /* @s1 */) // @0.000000002,0 +"a"/0.000000002,0 @ s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write new file mode 100644 index 000000000000..19b59e0388c7 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write @@ -0,0 +1,5 @@ +echo +---- +db0.Get(ctx, "a") // @0.000000001,0 (, ) +db0.Put(ctx, "a", v1) // @0.000000002,0 +"a"/0.000000002,0 @ s1 v1 diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write_and_delete b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write_and_delete new file mode 100644 index 000000000000..71102eab0684 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write_and_delete @@ -0,0 +1,7 @@ +echo +---- +db0.Get(ctx, "a") // @0.000000001,0 (, ) +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Del(ctx, "a" /* @s2 */) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write_returning_wrong_value b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write_returning_wrong_value new file mode 100644 index 000000000000..af5448685b1f --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_before_write_returning_wrong_value @@ -0,0 +1,5 @@ +echo +---- +db0.Get(ctx, "a") // @0.000000001,0 (v1, ) +db0.Put(ctx, "a", v1) // @0.000000002,0 +"a"/0.000000002,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_in_between_write_and_delete b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_in_between_write_and_delete new file mode 100644 index 000000000000..c2f16fc420b7 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_in_between_write_and_delete @@ -0,0 +1,7 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Get(ctx, "a") // @0.000000002,0 (v1, ) +db0.Del(ctx, "a" /* @s2 */) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_read_in_between_writes b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_in_between_writes new file mode 100644 index 000000000000..326ab6c0e221 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_read_in_between_writes @@ -0,0 +1,7 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Get(ctx, "a") // @0.000000002,0 (v1, ) +db0.Put(ctx, "a", v2) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ 
s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_delete_with_write_correctly_missing b/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_delete_with_write_correctly_missing new file mode 100644 index 000000000000..18764fbc99bc --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_delete_with_write_correctly_missing @@ -0,0 +1,3 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) // TransactionRetryWithProtoRefreshError: diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_delete_with_write_incorrectly_present b/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_delete_with_write_incorrectly_present new file mode 100644 index 000000000000..47511685c396 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_delete_with_write_incorrectly_present @@ -0,0 +1,5 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) // TransactionRetryWithProtoRefreshError: +"a"/0.000000001,0 @ s1 +uncommitted delete had writes: [d]"a":0.000000001,0->@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_put_with_write_correctly_missing b/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_put_with_write_correctly_missing new file mode 100644 index 000000000000..a2c07b473661 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_put_with_write_correctly_missing @@ -0,0 +1,3 @@ +echo +---- +db0.Put(ctx, "a", v1) // TransactionRetryWithProtoRefreshError: diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_put_with_write_incorrectly_present b/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_put_with_write_incorrectly_present new file mode 100644 index 000000000000..832bc6db5d64 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_retryable_put_with_write_incorrectly_present @@ -0,0 +1,5 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 TransactionRetryWithProtoRefreshError: +"a"/0.000000001,0 @ s1 v1 +uncommitted put had writes: [w]"a":0.000000001,0->v1@s1 diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_write_returning_extra_key b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_write_returning_extra_key new file mode 100644 index 000000000000..727d06cab73d --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_write_returning_extra_key @@ -0,0 +1,8 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.ReverseScan(ctx, "a", "c", 0) // @0.000000003,0 (b:v2, a2:v3, a:v1, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +committed reverse scan non-atomic timestamps: [rs]{a-c}:{0:[0.000000002,0, ), 1:[0,0, 0,0), 2:[0.000000001,0, ), gap:[, )}->["b":v2, "a2":v3, "a":v1] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_write_returning_missing_key b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_write_returning_missing_key new file mode 100644 index 000000000000..685331738eee --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_write_returning_missing_key @@ -0,0 +1,8 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.ReverseScan(ctx, "a", "c", 0) // @0.000000003,0 (b:v2, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +committed reverse scan non-atomic timestamps: [rs]{a-c}:{0:[0.000000002,0, ), gap:[, 0.000000001,0)}->["b":v2] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes new file mode 100644 index 000000000000..53f5697d1718 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes @@ -0,0 +1,7 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.ReverseScan(ctx, "a", "c", 0) // @0.000000003,0 (b:v2, a:v1, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes_returning_results_in_wrong_order b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes_returning_results_in_wrong_order new file mode 100644 index 000000000000..c74626994e7e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes_returning_results_in_wrong_order @@ -0,0 +1,8 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.ReverseScan(ctx, "a", "c", 0) // @0.000000003,0 (a:v1, b:v2, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +scan result not ordered correctly: [rs]{a-c}:{0:[0.000000001,0, ), 1:[0.000000002,0, ), gap:[, )}->["a":v1, "b":v2] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes_returning_results_outside_scan_boundary b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes_returning_results_outside_scan_boundary new file mode 100644 index 000000000000..e78c2eca5db7 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_reverse_scan_after_writes_returning_results_outside_scan_boundary @@ -0,0 +1,10 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Put(ctx, "c", v3) // @0.000000003,0 +db0.ReverseScan(ctx, "a", "c", 0) // @0.000000004,0 (c:v3, b:v2, a:v1, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"c"/0.000000003,0 @ s3 v3 +key "c" outside scan bounds: [rs]{a-c}:{0:[0.000000003,0, ), 1:[0.000000002,0, ), 2:[0.000000001,0, ), gap:[, )}->["c":v3, "b":v2, "a":v1] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write new file mode 100644 index 000000000000..adc2403102ec --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write @@ -0,0 +1,5 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000002,0 (a:v1, ) 
+"a"/0.000000001,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_extra_key b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_extra_key new file mode 100644 index 000000000000..458a1e44596b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_extra_key @@ -0,0 +1,8 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000003,0 (a:v1, a2:v3, b:v2, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +committed scan non-atomic timestamps: [s]{a-c}:{0:[0.000000001,0, ), 1:[0,0, 0,0), 2:[0.000000002,0, ), gap:[, )}->["a":v1, "a2":v3, "b":v2] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_missing_key b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_missing_key new file mode 100644 index 000000000000..fae702db92e4 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_missing_key @@ -0,0 +1,8 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000003,0 (b:v2, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +committed scan non-atomic timestamps: [s]{a-c}:{0:[0.000000002,0, ), gap:[, 0.000000001,0)}->["b":v2] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_wrong_value b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_wrong_value new file mode 100644 index 000000000000..75011d078cd5 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_write_returning_wrong_value @@ -0,0 +1,6 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000002,0 (a:v2, ) +"a"/0.000000001,0 @ s1 v1 +committed scan non-atomic timestamps: [s]{a-c}:{0:[0,0, 0,0), gap:[, )}->["a":v2] diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes new file mode 100644 index 000000000000..21bb51808322 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes @@ -0,0 +1,7 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000003,0 (a:v1, b:v2, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_and_delete b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_and_delete new file mode 100644 index 000000000000..26ef7fa753c1 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_and_delete @@ -0,0 +1,11 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Del(ctx, "a" /* @s3 */) // @0.000000003,0 +db0.Put(ctx, "a", v4) // @0.000000004,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000005,0 (b:v2, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"a"/0.000000003,0 @ s3 +"a"/0.000000004,0 @ s4 v4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_and_delete_returning_missing_key b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_and_delete_returning_missing_key new file mode 100644 index 000000000000..60ecf54a9625 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_and_delete_returning_missing_key @@ -0,0 +1,20 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Put(ctx, "b", v2) // + return nil +}) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (b:v2, ) + txn.Del(ctx, "a" /* @s3 */) // + return nil +}) // @0.000000002,0 +db0.Put(ctx, "a", v4) // @0.000000003,0 +db0.Del(ctx, "a" /* @s5 */) // @0.000000004,0 +"a"/0.000000001,0 @ s1 v1 
+"b"/0.000000001,0 @ s2 v2 +"a"/0.000000002,0 @ s3 +"a"/0.000000003,0 @ s4 v4 +"a"/0.000000004,0 @ s5 +committed txn non-atomic timestamps: [s]{a-c}:{0:[0.000000001,0, ), gap:[, 0.000000001,0),[0.000000004,0, )}->["b":v2] [d]"a":0.000000002,0->@s3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_returning_results_in_wrong_order b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_returning_results_in_wrong_order new file mode 100644 index 000000000000..aa7026a77235 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_returning_results_in_wrong_order @@ -0,0 +1,8 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000003,0 (b:v2, a:v1, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +scan result not ordered correctly: [s]{a-c}:{0:[0.000000002,0, ), 1:[0.000000001,0, ), gap:[, )}->["b":v2, "a":v1] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_returning_results_outside_scan_boundary b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_returning_results_outside_scan_boundary new file mode 100644 index 000000000000..ae27d26fd5c3 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_after_writes_returning_results_outside_scan_boundary @@ -0,0 +1,10 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Put(ctx, "c", v3) // @0.000000003,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000004,0 (a:v1, b:v2, c:v3, ) +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"c"/0.000000003,0 @ s3 v3 +key "c" outside scan bounds: [s]{a-c}:{0:[0.000000001,0, ), 1:[0.000000002,0, ), 2:[0.000000003,0, ), gap:[, )}->["a":v1, "b":v2, "c":v3] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_before_write b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_before_write new file mode 100644 index 000000000000..4af885926b39 --- /dev/null +++ 
b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_before_write @@ -0,0 +1,5 @@ +echo +---- +db0.Scan(ctx, "a", "c", 0) // @0.000000001,0 +db0.Put(ctx, "a", v1) // @0.000000002,0 +"a"/0.000000002,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_before_write_returning_wrong_value b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_before_write_returning_wrong_value new file mode 100644 index 000000000000..f9ec5ca7b8e7 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_before_write_returning_wrong_value @@ -0,0 +1,6 @@ +echo +---- +db0.Scan(ctx, "a", "c", 0) // @0.000000001,0 (a:v2, ) +db0.Put(ctx, "a", v1) // @0.000000002,0 +"a"/0.000000002,0 @ s1 v1 +committed scan non-atomic timestamps: [s]{a-c}:{0:[0,0, 0,0), gap:[, )}->["a":v2] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_in_between_writes b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_in_between_writes new file mode 100644 index 000000000000..ecb51c06c159 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_scan_in_between_writes @@ -0,0 +1,7 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000002,0 (a:v1, ) +db0.Put(ctx, "a", v2) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_tranactional_scan_after_write_and_delete_returning_extra_key b/pkg/kv/kvnemesis/testdata/TestValidate/one_tranactional_scan_after_write_and_delete_returning_extra_key new file mode 100644 index 000000000000..ed0b7807b8c8 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_tranactional_scan_after_write_and_delete_returning_extra_key @@ -0,0 +1,13 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "b", v2) // + txn.Del(ctx, "a" /* @s3 */) // + return nil +}) // @0.000000002,0 +db0.Scan(ctx, "a", "c", 0) // @0.000000003,0 (a:v1, b:v2, ) +"a"/0.000000001,0 @ s1 v1 
+"a"/0.000000002,0 @ s3 +"b"/0.000000002,0 @ s2 v2 +committed scan non-atomic timestamps: [s]{a-c}:{0:[0.000000001,0, 0.000000002,0), 1:[0.000000002,0, ), gap:[, )}->["a":v1, "b":v2] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_delete_with_write_on_another_key_after_delete b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_delete_with_write_on_another_key_after_delete new file mode 100644 index 000000000000..ac57d9e17fb5 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_delete_with_write_on_another_key_after_delete @@ -0,0 +1,11 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "b", v2) // + txn.Del(ctx, "a" /* @s3 */) // + return nil +}) // @0.000000002,0 +"a"/0.000000002,0 @ s3 +"a"/0.000000003,0 @ s1 +"b"/0.000000002,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_deleterange_followed_by_put_after_writes b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_deleterange_followed_by_put_after_writes new file mode 100644 index 000000000000..dc8d389fd254 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_deleterange_followed_by_put_after_writes @@ -0,0 +1,11 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s2 */) // (a, ) + txn.Put(ctx, "b", v3) // + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +"b"/0.000000002,0 @ s3 v3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_deleterange_followed_by_put_after_writes_with_write_timestamp_disagreement b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_deleterange_followed_by_put_after_writes_with_write_timestamp_disagreement new file mode 100644 index 000000000000..023cc467130b --- /dev/null +++ 
b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_deleterange_followed_by_put_after_writes_with_write_timestamp_disagreement @@ -0,0 +1,12 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.DelRange(ctx, "a", "c", true /* @s2 */) // (a, ) + txn.Put(ctx, "b", v3) // + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +"b"/0.000000003,0 @ s3 v3 +committed txn non-atomic timestamps: [dr.d]"a":0.000000002,0->@s2 [dr.s]{a-c}:{gap:[, 0.000000001,0),[0.000000003,0, )}->[] [w]"b":0.000000003,0->v3@s3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_shadowed_by_deleterange_after_writes b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_shadowed_by_deleterange_after_writes new file mode 100644 index 000000000000..b60425fa639f --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_shadowed_by_deleterange_after_writes @@ -0,0 +1,11 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "b", v2) // + txn.DelRange(ctx, "a", "c", true /* @s3 */) // (a, b, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s3 +"b"/0.000000002,0 @ s3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_shadowed_by_deleterange_after_writes_with_write_timestamp_disagreement b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_shadowed_by_deleterange_after_writes_with_write_timestamp_disagreement new file mode 100644 index 000000000000..22196101c2c9 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_shadowed_by_deleterange_after_writes_with_write_timestamp_disagreement @@ -0,0 +1,12 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "b", v2) // + txn.DelRange(ctx, "a", "c", true 
/* @s3 */) // (a, b, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s3 +"b"/0.000000003,0 @ s3 +committed txn non-atomic timestamps: [w]"b":missing->v2@s2 [dr.d]"a":0.000000002,0->@s3 [dr.d]"b":0.000000003,0->@s3 [dr.s]{a-c}:{gap:[, 0.000000001,0),[0.000000003,0, )}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_with_correct_commit_time b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_with_correct_commit_time new file mode 100644 index 000000000000..5df4beccec2a --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_with_correct_commit_time @@ -0,0 +1,7 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_with_incorrect_commit_time b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_with_incorrect_commit_time new file mode 100644 index 000000000000..49e1afbd8a04 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_with_incorrect_commit_time @@ -0,0 +1,8 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + return nil +}) // @0.000000001,0 +"a"/0.000000002,0 @ s1 v1 +mismatched write timestamp 0.000000002,0 and exec timestamp 0.000000001,0: [w]"a":0.000000002,0->v1@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_scan_followed_by_delete_outside_time_range b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_scan_followed_by_delete_outside_time_range new file mode 100644 index 000000000000..bf376bf738c1 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_scan_followed_by_delete_outside_time_range @@ -0,0 +1,13 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + 
txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Del(ctx, "a" /* @s2 */) // + return nil +}) // @0.000000004,0 +db0.Put(ctx, "b", v3) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000004,0 @ s2 +"b"/0.000000003,0 @ s3 v3 +committed txn non-atomic timestamps: [s]{a-c}:{0:[0.000000001,0, ), gap:[, 0.000000003,0)}->["a":v1] [d]"a":0.000000004,0->@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_scan_followed_by_delete_within_time_range b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_scan_followed_by_delete_within_time_range new file mode 100644 index 000000000000..4d23be35aac2 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_scan_followed_by_delete_within_time_range @@ -0,0 +1,12 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Del(ctx, "a" /* @s2 */) // + return nil +}) // @0.000000002,0 +db0.Put(ctx, "b", v3) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +"b"/0.000000003,0 @ s3 v3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_first_write_missing b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_first_write_missing new file mode 100644 index 000000000000..3cd05f60eb40 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_first_write_missing @@ -0,0 +1,9 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + txn.Del(ctx, "b" /* @s2 */) // + return nil +}) // @0.000000001,0 +"b"/0.000000001,0 @ s2 +committed txn missing write at seq s1: [d]"a":missing->@s1 [d]"b":0.000000001,0->@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_second_write_missing 
b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_second_write_missing new file mode 100644 index 000000000000..9b2d766da4af --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_second_write_missing @@ -0,0 +1,9 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + txn.Del(ctx, "b" /* @s2 */) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 +committed txn missing write at seq s2: [d]"a":0.000000001,0->@s1 [d]"b":missing->@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_the_correct_writes b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_the_correct_writes new file mode 100644 index 000000000000..a37286ba8452 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_the_correct_writes @@ -0,0 +1,7 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_write_timestamp_disagreement b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_write_timestamp_disagreement new file mode 100644 index 000000000000..b8e7248aeac9 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_delete_with_write_timestamp_disagreement @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + txn.Del(ctx, "b" /* @s2 */) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 +"b"/0.000000002,0 @ s2 +committed txn non-atomic timestamps: [d]"a":0.000000001,0->@s1 [d]"b":0.000000002,0->@s2 diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_first_write_missing b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_first_write_missing new file mode 100644 index 000000000000..30e7b7bb7bd3 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_first_write_missing @@ -0,0 +1,9 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Put(ctx, "b", v2) // + return nil +}) // @0.000000001,0 +"b"/0.000000001,0 @ s2 v2 +committed txn missing write at seq s1: [w]"a":missing->v1@s1 [w]"b":0.000000001,0->v2@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_second_write_missing b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_second_write_missing new file mode 100644 index 000000000000..7f0c54666111 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_second_write_missing @@ -0,0 +1,9 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Put(ctx, "b", v2) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +committed txn missing write at seq s2: [w]"a":0.000000001,0->v1@s1 [w]"b":missing->v2@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_the_correct_writes b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_the_correct_writes new file mode 100644 index 000000000000..5df4beccec2a --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_the_correct_writes @@ -0,0 +1,7 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_write_timestamp_disagreement b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_write_timestamp_disagreement new file mode 100644 index 000000000000..f61140cfb745 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_committed_put_with_write_timestamp_disagreement @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Put(ctx, "b", v2) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +committed txn non-atomic timestamps: [w]"a":0.000000001,0->v1@s1 [w]"b":0.000000002,0->v2@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_batch_delete_with_write_correctly_missing b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_batch_delete_with_write_correctly_missing new file mode 100644 index 000000000000..1bdaaebd109d --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_batch_delete_with_write_correctly_missing @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + { + b := &Batch{} + b.Del(ctx, "a" /* @s1 */) // + txn.Run(ctx, b) // + } + return errors.New("rollback") +}) // rollback diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_batch_put_with_write_correctly_missing b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_batch_put_with_write_correctly_missing new file mode 100644 index 000000000000..26492c6bfedf --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_batch_put_with_write_correctly_missing @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + { + b := &Batch{} + b.Put(ctx, "a", v1) // + txn.Run(ctx, b) // + } + return errors.New("rollback") +}) // rollback diff 
--git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_delete_with_write_correctly_missing b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_delete_with_write_correctly_missing new file mode 100644 index 000000000000..7842a20e383b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_delete_with_write_correctly_missing @@ -0,0 +1,6 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + return errors.New("rollback") +}) // rollback diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_delete_with_write_incorrectly_present b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_delete_with_write_incorrectly_present new file mode 100644 index 000000000000..0c1559827646 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_delete_with_write_incorrectly_present @@ -0,0 +1,8 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + return errors.New("rollback") +}) // rollback +"a"/0.000000001,0 @ s1 +uncommitted txn had writes: [d]"a":0.000000001,0->@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_put_with_write_correctly_missing b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_put_with_write_correctly_missing new file mode 100644 index 000000000000..9b8120a53674 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_put_with_write_correctly_missing @@ -0,0 +1,6 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + return errors.New("rollback") +}) // rollback diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_put_with_write_incorrectly_present 
b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_put_with_write_incorrectly_present new file mode 100644 index 000000000000..e687a842afee --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/one_transactionally_rolled_back_put_with_write_incorrectly_present @@ -0,0 +1,8 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + return errors.New("rollback") +}) // rollback +"a"/0.000000001,0 @ s1 v1 +uncommitted txn had writes: [w]"a":0.000000001,0->v1@s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/rangedel_with_range_split b/pkg/kv/kvnemesis/testdata/TestValidate/rangedel_with_range_split new file mode 100644 index 000000000000..9da45bcde41e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/rangedel_with_range_split @@ -0,0 +1,6 @@ +echo +---- +db0.DelRangeUsingTombstone(ctx, "a", "c" /* @s1 */) // @0.000000002,0 +{a-b}/0.000000002,0 @ s1 +{b-c}/0.000000001,0 @ s1 +committed deleteRangeUsingTombstone non-atomic timestamps: [d]["a","b"):0.000000002,0->@s1 [d]["b","c"):0.000000001,0->@s1 [s]{a-c}:{gap:[, )}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/read_before_rangedel b/pkg/kv/kvnemesis/testdata/TestValidate/read_before_rangedel new file mode 100644 index 000000000000..0d3dda979904 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/read_before_rangedel @@ -0,0 +1,7 @@ +echo +---- +db0.Put(ctx, "b", v1) // @0.000000001,0 +db0.Get(ctx, "b") // @0.000000002,0 (v1, ) +db0.DelRangeUsingTombstone(ctx, "a", "c" /* @s3 */) // @0.000000003,0 +"b"/0.000000001,0 @ s1 v1 +{a-c}/0.000000003,0 @ s3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel b/pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel new file mode 100644 index 000000000000..48201e55d792 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel @@ -0,0 +1,4 @@ +echo +---- +db0.DelRangeUsingTombstone(ctx, "a", "b" /* @s1 */) // @0.000000001,0 
+{a-b}/0.000000001,0 @ s1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel_after_put b/pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel_after_put new file mode 100644 index 000000000000..5333693ed431 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel_after_put @@ -0,0 +1,6 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.DelRangeUsingTombstone(ctx, "a", "b" /* @s2 */) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +{a-b}/0.000000002,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel_before_put b/pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel_before_put new file mode 100644 index 000000000000..345315fed390 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/single_mvcc_rangedel_before_put @@ -0,0 +1,6 @@ +echo +---- +db0.DelRangeUsingTombstone(ctx, "a", "b" /* @s1 */) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +{a-b}/0.000000001,0 @ s1 +"a"/0.000000002,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_after_delete b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_after_delete new file mode 100644 index 000000000000..adfebd37b6f6 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_after_delete @@ -0,0 +1,12 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Del(ctx, "a" /* @s2 */) // + txn.Get(ctx, "a") // (v1, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +committed txn non-atomic timestamps: [r]"a":[0.000000001,0, )->v1 [d]"a":0.000000002,0->@s2 [r]"a":[0.000000001,0, 0.000000002,0)->v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_after_write b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_after_write new file mode 
100644 index 000000000000..c89ee5212759 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_after_write @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (, ) + txn.Put(ctx, "a", v1) // + txn.Get(ctx, "a") // (, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +committed txn non-atomic timestamps: [r]"a":[, )-> [w]"a":0.000000001,0->v1@s1 [r]"a":[, 0.000000001,0)-> diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_before_delete b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_before_delete new file mode 100644 index 000000000000..8e6756d63edb --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_before_delete @@ -0,0 +1,12 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (, ) + txn.Del(ctx, "a" /* @s2 */) // + txn.Get(ctx, "a") // (, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +committed txn non-atomic timestamps: [r]"a":[, 0.000000001,0)-> [d]"a":0.000000002,0->@s2 [r]"a":[, 0.000000001,0),[0.000000002,0, )-> diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_before_write b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_before_write new file mode 100644 index 000000000000..3d61feef2f2d --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_read_before_write @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Put(ctx, "a", v1) // + txn.Get(ctx, "a") // (v1, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +committed txn non-atomic timestamps: [r]"a":[0,0, 0,0)->v1 [w]"a":0.000000001,0->v1@s1 [r]"a":[0.000000001,0, )->v1 diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_scan_after_write b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_scan_after_write new file mode 100644 index 000000000000..c86a6ccdf3fc --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_scan_after_write @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // + txn.Put(ctx, "a", v1) // + txn.Scan(ctx, "a", "c", 0) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +committed txn non-atomic timestamps: [s]{a-c}:{gap:[, )}->[] [w]"a":0.000000001,0->v1@s1 [s]{a-c}:{gap:[, 0.000000001,0)}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_scan_before_write b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_scan_before_write new file mode 100644 index 000000000000..cef421455515 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_incorrect_scan_before_write @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Put(ctx, "a", v1) // + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +committed txn non-atomic timestamps: [s]{a-c}:{0:[0,0, 0,0), gap:[, )}->["a":v1] [w]"a":0.000000001,0->v1@s1 [s]{a-c}:{0:[0.000000001,0, ), gap:[, )}->["a":v1] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_read_before_and_after_delete b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_read_before_and_after_delete new file mode 100644 index 000000000000..35e76583098a --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_read_before_and_after_delete @@ -0,0 +1,11 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Del(ctx, "a" /* @s2 */) // + 
txn.Get(ctx, "a") // (, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_read_before_and_after_write b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_read_before_and_after_write new file mode 100644 index 000000000000..1ba74c270630 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_read_before_and_after_write @@ -0,0 +1,9 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (, ) + txn.Put(ctx, "a", v1) // + txn.Get(ctx, "a") // (v1, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_scan_before_and_after_write b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_scan_before_and_after_write new file mode 100644 index 000000000000..4a5b5061a582 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transaction_with_scan_before_and_after_write @@ -0,0 +1,9 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // + txn.Put(ctx, "a", v1) // + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_read_and_write_with_empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_read_and_write_with_empty_time_overlap new file mode 100644 index 000000000000..4741d5be6c4a --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_read_and_write_with_empty_time_overlap @@ -0,0 +1,13 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Put(ctx, "b", v3) // + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 
+committed txn non-atomic timestamps: [r]"a":[0.000000001,0, 0.000000002,0)->v1 [w]"b":0.000000002,0->v3@s3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_read_and_write_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_read_and_write_with_non-empty_time_overlap new file mode 100644 index 000000000000..f6614fa8a506 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_read_and_write_with_non-empty_time_overlap @@ -0,0 +1,12 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Put(ctx, "b", v3) // + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_after_writes_and_deletes_with_empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_after_writes_and_deletes_with_empty_time_overlap new file mode 100644 index 000000000000..da7d1e65c4f7 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_after_writes_and_deletes_with_empty_time_overlap @@ -0,0 +1,20 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s3 */) // + txn.Del(ctx, "b" /* @s4 */) // + return nil +}) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (, ) + txn.Get(ctx, "b") // (v2, ) + txn.Get(ctx, "c") // (, ) + return nil +}) // @0.000000004,0 +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"a"/0.000000003,0 @ s3 +"b"/0.000000003,0 @ s4 +committed txn non-atomic timestamps: [r]"a":[, 0.000000001,0),[0.000000003,0, )-> [r]"b":[0.000000002,0, 0.000000003,0)->v2 [r]"c":[, )-> diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_after_writes_and_deletes_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_after_writes_and_deletes_with_non-empty_time_overlap new file mode 100644 index 000000000000..5c16a96af24f --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_after_writes_and_deletes_with_non-empty_time_overlap @@ -0,0 +1,16 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "b", v2) // @0.000000002,0 +db0.Del(ctx, "a" /* @s3 */) // @0.000000003,0 +db0.Del(ctx, "b" /* @s4 */) // @0.000000004,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (, ) + txn.Get(ctx, "b") // (v2, ) + txn.Get(ctx, "c") // (, ) + return nil +}) // @0.000000004,0 +"a"/0.000000001,0 @ s1 v1 +"b"/0.000000002,0 @ s2 v2 +"a"/0.000000003,0 @ s3 +"b"/0.000000004,0 @ s4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_and_deletes_after_write_with_empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_and_deletes_after_write_with_empty_time_overlap new file mode 100644 index 000000000000..476ca18cca94 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_and_deletes_after_write_with_empty_time_overlap @@ -0,0 +1,16 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (, ) + txn.Del(ctx, "a" /* @s2 */) // + txn.Get(ctx, "a") // (, ) + return nil +}) // @0.000000002,0 +db0.Put(ctx, "a", v3) // @0.000000003,0 +db0.Del(ctx, "a" /* @s4 */) // @0.000000004,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +"a"/0.000000003,0 @ s3 v3 +"a"/0.000000004,0 @ s4 +committed txn non-atomic timestamps: [r]"a":[, 0.000000001,0),[0.000000004,0, )-> [d]"a":0.000000002,0->@s2 [r]"a":[, 0.000000001,0),[0.000000004,0, ),[0.000000002,0, 0.000000003,0)-> diff --git 
a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_and_deletes_after_write_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_and_deletes_after_write_with_non-empty_time_overlap new file mode 100644 index 000000000000..2b2430a74252 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_and_deletes_after_write_with_non-empty_time_overlap @@ -0,0 +1,15 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Del(ctx, "a" /* @s2 */) // + txn.Get(ctx, "a") // (, ) + return nil +}) // @0.000000002,0 +db0.Put(ctx, "a", v3) // @0.000000003,0 +db0.Del(ctx, "a" /* @s4 */) // @0.000000004,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +"a"/0.000000003,0 @ s3 v3 +"a"/0.000000004,0 @ s4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_one_missing_with_empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_one_missing_with_empty_time_overlap new file mode 100644 index 000000000000..4cab0f0a9f82 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_one_missing_with_empty_time_overlap @@ -0,0 +1,14 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Put(ctx, "b", v3) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Get(ctx, "b") // (, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +"b"/0.000000001,0 @ s3 v3 +committed txn non-atomic timestamps: [r]"a":[0.000000001,0, 0.000000002,0)->v1 [r]"b":[, 0.000000001,0)-> diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_one_missing_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_one_missing_with_non-empty_time_overlap new file mode 100644 index 000000000000..a91e358b9bd8 --- 
/dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_one_missing_with_non-empty_time_overlap @@ -0,0 +1,13 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Put(ctx, "b", v3) // @0.000000002,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Get(ctx, "b") // (, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_with_empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_with_empty_time_overlap new file mode 100644 index 000000000000..026a619b7321 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_with_empty_time_overlap @@ -0,0 +1,16 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Put(ctx, "b", v3) // @0.000000002,0 +db0.Put(ctx, "b", v4) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Get(ctx, "b") // (v3, ) + return nil +}) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 +"b"/0.000000003,0 @ s4 v4 +committed txn non-atomic timestamps: [r]"a":[0.000000001,0, 0.000000002,0)->v1 [r]"b":[0.000000002,0, 0.000000003,0)->v3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_with_non-empty_time_overlap new file mode 100644 index 000000000000..5142fee7f59e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_reads_with_non-empty_time_overlap @@ -0,0 +1,15 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000003,0 +db0.Put(ctx, "b", v3) // @0.000000002,0 +db0.Put(ctx, "b", v4) // @0.000000003,0 +db0.Txn(ctx, func(ctx 
context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (v1, ) + txn.Get(ctx, "b") // (v3, ) + return nil +}) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 +"b"/0.000000003,0 @ s4 v4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scan_and_write_with_empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scan_and_write_with_empty_time_overlap new file mode 100644 index 000000000000..e8d102625f87 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scan_and_write_with_empty_time_overlap @@ -0,0 +1,13 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Put(ctx, "b", v3) // + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 +committed txn non-atomic timestamps: [s]{a-c}:{0:[0.000000001,0, 0.000000002,0), gap:[, )}->["a":v1] [w]"b":0.000000002,0->v3@s3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scan_and_write_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scan_and_write_with_non-empty_time_overlap new file mode 100644 index 000000000000..631ee8e81608 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scan_and_write_with_non-empty_time_overlap @@ -0,0 +1,12 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Put(ctx, "b", v3) // + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_after_delete_with_empty_time_overlap 
b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_after_delete_with_empty_time_overlap new file mode 100644 index 000000000000..3e0c4e9d467b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_after_delete_with_empty_time_overlap @@ -0,0 +1,16 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Put(ctx, "b", v3) // @0.000000001,0 +db0.Del(ctx, "b" /* @s4 */) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Scan(ctx, "b", "d", 0) // + return nil +}) // @0.000000003,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +"b"/0.000000001,0 @ s3 v3 +"b"/0.000000003,0 @ s4 +committed txn non-atomic timestamps: [s]{a-c}:{0:[0.000000001,0, 0.000000002,0), gap:[, 0.000000001,0),[0.000000003,0, )}->["a":v1] [s]{b-d}:{gap:[, 0.000000001,0),[0.000000003,0, )}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_after_delete_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_after_delete_with_non-empty_time_overlap new file mode 100644 index 000000000000..9a80c2d659d1 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_after_delete_with_non-empty_time_overlap @@ -0,0 +1,17 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000003,0 +db0.Put(ctx, "b", v3) // @0.000000001,0 +db0.Del(ctx, "b" /* @s4 */) // @0.000000002,0 +db0.Put(ctx, "b", v5) // @0.000000004,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Scan(ctx, "b", "d", 0) // + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ s2 v2 +"b"/0.000000001,0 @ s3 v3 +"b"/0.000000002,0 @ s4 +"b"/0.000000004,0 @ s5 v5 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_one_missing_with_empty_time_overlap 
b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_one_missing_with_empty_time_overlap new file mode 100644 index 000000000000..523a71e7ee8a --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_one_missing_with_empty_time_overlap @@ -0,0 +1,14 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Put(ctx, "b", v3) // @0.000000001,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Scan(ctx, "b", "d", 0) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +"b"/0.000000001,0 @ s3 v3 +committed txn non-atomic timestamps: [s]{a-c}:{0:[0.000000001,0, 0.000000002,0), gap:[, 0.000000001,0)}->["a":v1] [s]{b-d}:{gap:[, 0.000000001,0)}->[] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_one_missing_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_one_missing_with_non-empty_time_overlap new file mode 100644 index 000000000000..d34b81770a2d --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_one_missing_with_non-empty_time_overlap @@ -0,0 +1,13 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Put(ctx, "b", v3) // @0.000000002,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Scan(ctx, "b", "d", 0) // + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_with_empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_with_empty_time_overlap new file mode 100644 index 000000000000..4ed7fdaea817 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_with_empty_time_overlap @@ -0,0 +1,16 @@ +echo +---- +db0.Put(ctx, "a", v1) 
// @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000002,0 +db0.Put(ctx, "b", v3) // @0.000000002,0 +db0.Put(ctx, "b", v4) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, b:v3, ) + txn.Scan(ctx, "b", "d", 0) // (b:v3, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 +"b"/0.000000003,0 @ s4 v4 +committed txn non-atomic timestamps: [s]{a-c}:{0:[0.000000001,0, 0.000000002,0), 1:[0.000000002,0, 0.000000003,0), gap:[, )}->["a":v1, "b":v3] [s]{b-d}:{0:[0.000000002,0, 0.000000003,0), gap:[, )}->["b":v3] diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_with_non-empty_time_overlap b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_with_non-empty_time_overlap new file mode 100644 index 000000000000..c6f36b2063b6 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/transactional_scans_with_non-empty_time_overlap @@ -0,0 +1,15 @@ +echo +---- +db0.Put(ctx, "a", v1) // @0.000000001,0 +db0.Put(ctx, "a", v2) // @0.000000003,0 +db0.Put(ctx, "b", v3) // @0.000000002,0 +db0.Put(ctx, "b", v4) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // (a:v1, b:v3, ) + txn.Scan(ctx, "b", "d", 0) // (b:v3, ) + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000003,0 @ s2 v2 +"b"/0.000000002,0 @ s3 v3 +"b"/0.000000003,0 @ s4 v4 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_overlapping_rangedels b/pkg/kv/kvnemesis/testdata/TestValidate/two_overlapping_rangedels new file mode 100644 index 000000000000..c9bc192c4a49 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_overlapping_rangedels @@ -0,0 +1,8 @@ +echo +---- +db0.DelRangeUsingTombstone(ctx, "a", "c" /* @s1 */) // @0.000000001,0 +db0.DelRangeUsingTombstone(ctx, "b", "d" /* @s2 */) // @0.000000002,0 +{a-b}/0.000000001,0 @ s1 +{b-c}/0.000000001,0 @ s1 
+{b-c}/0.000000002,0 @ s2 +{c-d}/0.000000002,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactional_deletes_with_out_of_order_commit_times b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactional_deletes_with_out_of_order_commit_times new file mode 100644 index 000000000000..7cb3c937994d --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactional_deletes_with_out_of_order_commit_times @@ -0,0 +1,13 @@ +echo +---- +db0.Del(ctx, "a" /* @s1 */) // @0.000000002,0 +db0.Del(ctx, "b" /* @s2 */) // @0.000000003,0 +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s3 */) // + txn.Del(ctx, "b" /* @s4 */) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s3 +"a"/0.000000002,0 @ s1 +"b"/0.000000001,0 @ s4 +"b"/0.000000003,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_deletes_of_the_same_key b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_deletes_of_the_same_key new file mode 100644 index 000000000000..b18eb11228a0 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_deletes_of_the_same_key @@ -0,0 +1,8 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + txn.Del(ctx, "a" /* @s2 */) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_deletes_of_the_same_key_with_extra_write b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_deletes_of_the_same_key_with_extra_write new file mode 100644 index 000000000000..c8952a3489ef --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_deletes_of_the_same_key_with_extra_write @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + txn.Del(ctx, "a" /* @s2 */) // + return nil +}) // 
@0.000000001,0 +"a"/0.000000001,0 @ s1 +"a"/0.000000002,0 @ s2 +committed txn overwritten key had write: [d]"a":0.000000001,0->@s1 [d]"a":0.000000002,0->@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_put_delete_ops_of_the_same_key_with_incorrect_read b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_put_delete_ops_of_the_same_key_with_incorrect_read new file mode 100644 index 000000000000..b10c18dddee6 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_put_delete_ops_of_the_same_key_with_incorrect_read @@ -0,0 +1,12 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (, ) + txn.Put(ctx, "a", v1) // + txn.Get(ctx, "a") // (v1, ) + txn.Del(ctx, "a" /* @s2 */) // + txn.Get(ctx, "a") // (v1, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s2 +committed txn non-atomic timestamps: [r]"a":[, )-> [w]"a":missing->v1@s1 [r]"a":[0.000000001,0, )->v1 [d]"a":0.000000001,0->@s2 [r]"a":[0,0, 0,0)->v1 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_put_delete_ops_of_the_same_key_with_reads b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_put_delete_ops_of_the_same_key_with_reads new file mode 100644 index 000000000000..61b7fbbb540b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_put_delete_ops_of_the_same_key_with_reads @@ -0,0 +1,11 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (, ) + txn.Put(ctx, "a", v1) // + txn.Get(ctx, "a") // (v1, ) + txn.Del(ctx, "a" /* @s2 */) // + txn.Get(ctx, "a") // (, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key new file mode 100644 index 
000000000000..083544d53199 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key @@ -0,0 +1,8 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Put(ctx, "a", v2) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_extra_write b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_extra_write new file mode 100644 index 000000000000..aac081a68f6b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_extra_write @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Put(ctx, "a", v2) // + return nil +}) // @0.000000002,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 v2 +committed txn overwritten key had write: [w]"a":0.000000001,0->v1@s1 [w]"a":0.000000002,0->v2@s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_reads b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_reads new file mode 100644 index 000000000000..93d1a9e8a01d --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_reads @@ -0,0 +1,11 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Get(ctx, "a") // (, ) + txn.Put(ctx, "a", v1) // + txn.Get(ctx, "a") // (v1, ) + txn.Put(ctx, "a", v2) // + txn.Get(ctx, "a") // (v2, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_scans b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_scans new file mode 
100644 index 000000000000..4ff1662ce49b --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_puts_of_the_same_key_with_scans @@ -0,0 +1,14 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Scan(ctx, "a", "c", 0) // + txn.Put(ctx, "a", v1) // + txn.Scan(ctx, "a", "c", 0) // (a:v1, ) + txn.Put(ctx, "a", v2) // + txn.Scan(ctx, "a", "c", 0) // (a:v2, ) + txn.Put(ctx, "b", v3) // + txn.Scan(ctx, "a", "c", 0) // (a:v2, b:v3, ) + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s2 v2 +"b"/0.000000001,0 @ s3 v3 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_delete_put_of_the_same_key b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_delete_put_of_the_same_key new file mode 100644 index 000000000000..e24d42f5275e --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_delete_put_of_the_same_key @@ -0,0 +1,8 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Del(ctx, "a" /* @s1 */) // + txn.Put(ctx, "a", v2) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s2 v2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_put_delete_of_the_same_key b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_put_delete_of_the_same_key new file mode 100644 index 000000000000..8f24d4064309 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_put_delete_of_the_same_key @@ -0,0 +1,8 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Del(ctx, "a" /* @s2 */) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s2 diff --git a/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_put_delete_of_the_same_key_with_extra_write 
b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_put_delete_of_the_same_key_with_extra_write new file mode 100644 index 000000000000..3f659d58e009 --- /dev/null +++ b/pkg/kv/kvnemesis/testdata/TestValidate/two_transactionally_committed_writes_put_delete_of_the_same_key_with_extra_write @@ -0,0 +1,10 @@ +echo +---- +db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + txn.Put(ctx, "a", v1) // + txn.Del(ctx, "a" /* @s2 */) // + return nil +}) // @0.000000001,0 +"a"/0.000000001,0 @ s1 v1 +"a"/0.000000002,0 @ s2 +committed txn overwritten key had write: [w]"a":0.000000001,0->v1@s1 [d]"a":0.000000002,0->@s2 diff --git a/pkg/kv/kvnemesis/validator.go b/pkg/kv/kvnemesis/validator.go index 622962a88f9e..f7dc1b6f893e 100644 --- a/pkg/kv/kvnemesis/validator.go +++ b/pkg/kv/kvnemesis/validator.go @@ -13,10 +13,12 @@ package kvnemesis import ( "context" "fmt" + "reflect" "regexp" "sort" "strings" + "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -46,8 +48,8 @@ import ( // // Splits and merges are not verified for anything other than that they did not // return an error. -func Validate(steps []Step, kvs *Engine) []error { - v, err := makeValidator(kvs) +func Validate(steps []Step, kvs *Engine, dt *SeqTracker) []error { + v, err := makeValidator(kvs, dt) if err != nil { return []error{err} } @@ -63,39 +65,34 @@ func Validate(steps []Step, kvs *Engine) []error { // by `After` timestamp is sufficient to get us the necessary ordering. This // is because txns cannot be used concurrently, so none of the (Begin,After) // timespans for a given transaction can overlap. 
- // - // TODO(tbg): if, as we should, we order all operations by the timestamp at - // which they executed (at least for MVCC-aware operations), then we would - // sort here by that timestamp. sort.Slice(steps, func(i, j int) bool { return steps[i].After.Less(steps[j].After) }) for _, s := range steps { - v.processOp(notBuffering, s.Op) + v.processOp(s.Op) } var extraKVs []observedOp - for _, kv := range v.kvByValue { - kv := &observedWrite{ - Key: kv.Key.Key, - Value: roachpb.Value{RawBytes: kv.Value}, - Timestamp: kv.Key.Timestamp, - Materialized: true, - } - extraKVs = append(extraKVs, kv) - } - for key, tombstones := range v.tombstonesForKey { - numExtraWrites := len(tombstones) - v.committedDeletesForKey[key] - for i := 0; i < numExtraWrites; i++ { + for seq, svs := range v.kvBySeq { + for _, sv := range svs { + mvccV, err := storage.DecodeMVCCValue(sv.Value) + if err != nil { + v.failures = append(v.failures, err) + continue + } kv := &observedWrite{ - Key: []byte(key), - Value: roachpb.Value{}, - // NB: As it's unclear which are "extra", timestamp is left missing. - Materialized: true, + Key: sv.Key, + EndKey: sv.EndKey, + Value: mvccV.Value, + Timestamp: sv.Timestamp, + Seq: seq, } extraKVs = append(extraKVs, kv) } } + + // These are writes that we saw in MVCC, but they weren't matched up to any + // operation kvnemesis ran. if len(extraKVs) > 0 { - err := errors.Errorf(`extra writes: %s`, printObserved(extraKVs...)) + err := errors.Errorf(`unclaimed writes: %s`, printObserved(extraKVs...)) v.failures = append(v.failures, err) } @@ -209,12 +206,13 @@ type observedOp interface { observedMarker() } +// An observedWrite is an effect of an operation. type observedWrite struct { - Key roachpb.Key - Value roachpb.Value - // Timestamp will only be filled if Materialized is true. + Key, EndKey roachpb.Key + Value roachpb.Value + Seq kvnemesisutil.Seq + // A write is materialized if it has a timestamp. 
Timestamp hlc.Timestamp - Materialized bool IsDeleteRange bool } @@ -249,94 +247,110 @@ type validator struct { // checkAtomic, which then calls processOp (which might recurse owing to the // existence of txn closures, batches, etc). curObservations []observedOp + buffering bufferingType // NB: The Generator carefully ensures that each value written is unique // globally over a run, so there's a 1:1 relationship between a value that was // written and the operation that wrote it. - kvByValue map[string]storage.MVCCKeyValue - - // Unfortunately, with tombstones there is no 1:1 relationship between the nil - // value and the delete operation that wrote it, so we must store all tombstones - // for a given key. When validating committed delete operations, we validate - // that there is a tombstone with a timestamp that would be valid, similar - // to how reads are evaluated. At the end of validation, we also validate - // that we have seen a correct number of materialized delete operations - // given the number of tombstones for each key; thus, we can see if we have - // any "missing" or "extra" writes at the end. - // Each key has a map of all the tombstone timestamps, stored with a boolean - // flag indicating if it has been matched to a transactional delete or not. 
- tombstonesForKey map[string]map[hlc.Timestamp]bool - committedDeletesForKey map[string]int + // kvsByKeyAndSeq map[keySeq]storage.MVCCKeyValue // TODO remove + kvBySeq map[kvnemesisutil.Seq][]tsSpanVal failures []error } -func makeValidator(kvs *Engine) (*validator, error) { - kvByValue := make(map[string]storage.MVCCKeyValue) - tombstonesForKey := make(map[string]map[hlc.Timestamp]bool) +type keySeq struct { + key, endKey string + seq kvnemesisutil.Seq +} + +type tsSpanVal struct { + roachpb.Span + hlc.Timestamp + Value []byte +} + +func makeValidator(kvs *Engine, tr *SeqTracker) (*validator, error) { + kvBySeq := make(map[kvnemesisutil.Seq][]tsSpanVal) var err error - kvs.Iterate(func(key storage.MVCCKey, value []byte, iterErr error) { + kvs.Iterate(func(key, endKey roachpb.Key, ts hlc.Timestamp, value []byte, iterErr error) { + if err != nil { + return + } if iterErr != nil { err = errors.CombineErrors(err, iterErr) return } - v, decodeErr := storage.DecodeMVCCValue(value) - if err != nil { - err = errors.CombineErrors(err, decodeErr) + seq, ok := tr.Lookup(key, endKey, ts) + if !ok { + err = errors.AssertionFailedf("no seqno found for [%s,%s) @ %s, tracker is %v", key, endKey, ts, tr) return } - if v.Value.GetTag() != roachpb.ValueType_UNKNOWN { - valueStr := mustGetStringValue(value) - if existing, ok := kvByValue[valueStr]; ok { - // TODO(dan): This may be too strict. Some operations (db.Run on a - // Batch) seem to be double-committing. See #46374. - panic(errors.AssertionFailedf( - `invariant violation: value %s was written by two operations %s and %s`, - valueStr, existing.Key, key)) - } - // NB: The Generator carefully ensures that each value written is unique - // globally over a run, so there's a 1:1 relationship between a value that - // was written and the operation that wrote it. 
- kvByValue[valueStr] = storage.MVCCKeyValue{Key: key, Value: value} - } else if !v.Value.IsPresent() { - rawKey := string(key.Key) - if _, ok := tombstonesForKey[rawKey]; !ok { - tombstonesForKey[rawKey] = make(map[hlc.Timestamp]bool) - } - tombstonesForKey[rawKey][key.Timestamp] = false + v := tsSpanVal{ + Span: roachpb.Span{Key: key, EndKey: endKey}, + Timestamp: ts, + Value: value, } + kvBySeq[seq] = append(kvBySeq[seq], v) }) if err != nil { return nil, err } return &validator{ - kvs: kvs, - kvByValue: kvByValue, - tombstonesForKey: tombstonesForKey, - committedDeletesForKey: make(map[string]int), + kvs: kvs, + kvBySeq: kvBySeq, }, nil } -// getDeleteForKey looks up a stored tombstone for a given key (if it -// exists) from tombstonesForKey, returning the tombstone (i.e. MVCCKey) along -// with a `true` boolean value if found, or the empty key and `false` if not. -func (v *validator) getDeleteForKey(key string, optOpTS hlc.Timestamp) (storage.MVCCKey, bool) { - if optOpTS.IsEmpty() { - panic(errors.AssertionFailedf(`transaction required to look up delete for key: %v`, key)) +func (v *validator) tryConsumeWrite(key roachpb.Key, seq kvnemesisutil.Seq) (tsSpanVal, bool) { + svs, ok := v.tryConsumeRangedWrite(seq, key, nil) + if !ok { + return tsSpanVal{}, false + } + if len(svs) != 1 { + panic(fmt.Sprintf("expected exactly one element: %+v", svs)) } + return svs[0], true +} - if used, ok := v.tombstonesForKey[key][optOpTS]; !used && ok { - v.tombstonesForKey[key][optOpTS] = true - return storage.MVCCKey{Key: []byte(key), Timestamp: optOpTS}, true +func (v *validator) tryConsumeRangedWrite( + seq kvnemesisutil.Seq, key, endKey roachpb.Key, +) ([]tsSpanVal, bool) { + svs, ok := v.kvBySeq[seq] + if !ok || len(svs) == 0 { + return nil, false + } + opSpan := roachpb.Span{Key: key, EndKey: endKey} + + var consumed []tsSpanVal + var remaining []tsSpanVal + for i := range svs { + cur := svs[i] + if !opSpan.Contains(cur.Span) { + // Operation must have written this write 
but doesn't want to consume it + // right now, so skip it. For example, DeleteRange decomposes into point + // deletes and will look these deletes up here one by one. If an operation + // truly wrote outside of its span, this will cause a failure in + // validation. + remaining = append(remaining, cur) + continue + } + consumed = append(consumed, cur) } - return storage.MVCCKey{}, false + if len(remaining) == 0 { + delete(v.kvBySeq, seq) + } else { + v.kvBySeq[seq] = remaining + } + return consumed, len(consumed) > 0 } +type bufferingType byte + const ( - notBuffering = false - isBuffering = true + bufferingSingle bufferingType = iota + bufferingBatchOrTxn ) // processOp turns the result of an operation into its observations (which are @@ -346,121 +360,214 @@ const ( // itself processOp, with the operation to handle being the batch or txn). // Whenever it is `false`, processOp invokes the validator's checkAtomic method // for the operation. -func (v *validator) processOp(buffering bool, op Operation) { +func (v *validator) processOp(op Operation) { // We don't need an execution timestamp when buffering (the caller will need // an execution timestamp for the combined operation, though). Additionally, // some operations supported by kvnemesis aren't MVCC-aware (splits, etc) and // thus also don't need an execution timestamp. - execTimestampStrictlyOptional := buffering + // + // TODO(during review): check that this still works when setting this back to + // "buffering". Had disabled this during development. + execTimestampStrictlyOptional := true + // TODO(during review): lots of operations here are missing error checks. Erik + // filed an issue for this so close that when done. 
switch t := op.GetValue().(type) { case *GetOperation: v.failIfError(op, t.Result) - if !buffering { - v.checkAtomic(`get`, t.Result, op) - } else { - read := &observedRead{ - Key: t.Key, - Value: roachpb.Value{RawBytes: t.Result.Value}, - } - v.curObservations = append(v.curObservations, read) + read := &observedRead{ + Key: t.Key, + Value: roachpb.Value{RawBytes: t.Result.Value}, + } + v.curObservations = append(v.curObservations, read) + + if v.buffering == bufferingSingle { + v.checkAtomic(`get`, t.Result) } case *PutOperation: - if !buffering { - v.checkAtomic(`put`, t.Result, op) - } else { - // Accumulate all the writes for this transaction. - kv, ok := v.kvByValue[string(t.Value)] - delete(v.kvByValue, string(t.Value)) + // Accumulate all the writes for this transaction. + write := &observedWrite{ + Key: t.Key, + Seq: t.Seq, + Value: roachpb.MakeValueFromString(t.Value()), + } + if sv, ok := v.tryConsumeWrite(t.Key, t.Seq); ok { + write.Timestamp = sv.Timestamp + } + v.curObservations = append(v.curObservations, write) + + if v.buffering == bufferingSingle { + v.checkAtomic(`put`, t.Result) + } + case *DeleteOperation: + sv, _ := v.tryConsumeWrite(t.Key, t.Seq) + write := &observedWrite{ + Key: t.Key, + Seq: t.Seq, + Timestamp: sv.Timestamp, + } + v.curObservations = append(v.curObservations, write) + + if v.buffering == bufferingSingle { + v.checkAtomic(`delete`, t.Result) + } + case *DeleteRangeOperation: + // We express DeleteRange as point deletions on all of the keys it claimed + // to have deleted and (atomically post-ceding the deletions) a scan that + // sees an empty span. If DeleteRange places a tombstone it didn't report, + // validation will fail with an unclaimed write. If it fails to delete a + // key, the scan will not validate. If it reports that it deleted a key + // that didn't have a non-nil value (i.e. didn't get a new tombstone), + // then validation will fail with a missing write. 
If it reports & places + // a tombstone that wasn't necessary (i.e. a combination of the above), + // validation will succeed. This is arguably incorrect; we had code in + // the past that handled this at the expense of additional complexity[^1]. + // See the `one deleterange after write with spurious deletion` test case + // in TestValidate. + // + // [^1]: https://github.com/cockroachdb/cockroach/pull/68003/files#diff-804b6fefcb2b7ae68fab388e6dcbaf7dbc3937a266b14b79c330b703ea9d0d95R382-R388 + deleteOps := make([]observedOp, len(t.Result.Keys)) + for i, key := range t.Result.Keys { + sv, _ := v.tryConsumeWrite(key, t.Seq) write := &observedWrite{ - Key: t.Key, - Value: roachpb.MakeValueFromBytes(t.Value), - Materialized: ok, + Key: key, + Seq: t.Seq, + Value: roachpb.Value{}, + IsDeleteRange: true, // only for String(), no semantics attached + Timestamp: sv.Timestamp, } - if write.Materialized { - write.Timestamp = kv.Key.Timestamp + deleteOps[i] = write + } + v.curObservations = append(v.curObservations, deleteOps...) + // The span ought to be empty right after the DeleteRange. + v.curObservations = append(v.curObservations, &observedScan{ + Span: roachpb.Span{ + Key: t.Key, + EndKey: t.EndKey, + }, + IsDeleteRange: true, // just for printing + KVs: nil, + }) + + if v.buffering == bufferingSingle { + v.checkAtomic(`deleteRange`, t.Result) + } + case *DeleteRangeUsingTombstoneOperation: + // NB: MVCC range deletions aren't allowed in transactions (and can't be + // overwritten in the same non-txn'al batch), so we currently will only + // ever see one write to consume. With transactions (or self-overlapping + // batches) we could get the following: + // + // txn.DelRangeUsingTombstone(a, c) + // txn.Put(b, v) + // txn.Commit + // + // The resulting atomic unit would emit two MVCC range deletions. [a,b) + // and [b\x00, c). 
+ // + // The code here handles this, and it is unit tested, so that if and when + // we do support rangedels in transactions, kvnemesis will be ready. + // + // However, DeleteRangeUsingTombstone is a ranged non-txnal request type + // that will be split in DistSender, and so it is *not* atomic[^1]. An + // earlier attempt at letting `kvnemesis` handle this fact by treating each + // individual written piece that we see as an atomic unit led to too much + // complexity (in particular, we have to validate/tolerate partial + // executions). Instead, we *disable* DistSender's splitting of + // DeleteRangeUsingTombstone when run with kvnemesis, and attempt to create + // only operations for it that respect the likely range splits. + // + // In theory this code here supports any kind of atomic batched or + // transactional MVCC range deletions, assuming the KV API started to + // support them as well. + // + // [^1]: https://github.com/cockroachdb/cockroach/issues/46081 + svs, _ := v.tryConsumeRangedWrite(t.Seq, t.Key, t.EndKey) + var unobserved roachpb.SpanGroup + unobserved.Add(roachpb.Span{Key: t.Key, EndKey: t.EndKey}) + for _, sv := range svs { + unobserved.Sub(sv.Span) + write := &observedWrite{ + Key: sv.Key, + EndKey: sv.EndKey, + Seq: t.Seq, + Timestamp: sv.Timestamp, } v.curObservations = append(v.curObservations, write) } - case *DeleteOperation: - if !buffering { - v.checkAtomic(`delete`, t.Result, op) - } else { - // NB: While Put operations can be identified as having materialized - // (or not) in the storage engine because the Generator guarantees each - // value to be unique (and thus, if a MVCC key/value pair exists in the - // storage engine with a value matching that of a write operation, it - // materialized), the same cannot be done for Delete operations, which - // all write the same tombstone value. 
Thus, Delete operations can only - // be identified as materialized by determining if the final write - // operation for a key in a given transaction was a Delete, and - // validating that a potential tombstone for that key was stored. - // This validation must be done at the end of the transaction; - // specifically, in the function `checkAtomicCommitted(..)` where it looks - // up a corresponding tombstone with `getDeleteForKey(..)`. + // Add unmaterialized versions of the write for any gaps. If !atomicAcrossSplits, + // the batch might've partially succeeded (and so there might be gaps), but in + // this case we ought to have received an error. + for _, sp := range unobserved.Slice() { write := &observedWrite{ - Key: t.Key, - Value: roachpb.Value{}, + Key: sp.Key, + EndKey: sp.EndKey, + Seq: t.Seq, } v.curObservations = append(v.curObservations, write) } - case *DeleteRangeOperation: - if !buffering { - v.checkAtomic(`deleteRange`, t.Result, op) - } else { - // For the purposes of validation, DelRange operations decompose into - // a specialized scan for keys with non-nil values, followed by - // writes for each key, with a span to validate that the keys we are - // deleting are within the proper bounds. See above comment for how - // the individual deletion tombstones for each key are validated. - scan := &observedScan{ - Span: roachpb.Span{ - Key: t.Key, - EndKey: t.EndKey, - }, - IsDeleteRange: true, - KVs: make([]roachpb.KeyValue, len(t.Result.Keys)), - } - deleteOps := make([]observedOp, len(t.Result.Keys)) - for i, key := range t.Result.Keys { - scan.KVs[i] = roachpb.KeyValue{ - Key: key, - Value: roachpb.Value{}, - } - write := &observedWrite{ - Key: key, - Value: roachpb.Value{}, - IsDeleteRange: true, - } - deleteOps[i] = write - } - v.curObservations = append(v.curObservations, scan) - v.curObservations = append(v.curObservations, deleteOps...) 
+ + // The span ought to be empty right after the DeleteRange, even if parts of + // the DeleteRange didn't materialize due to a shadowing operation. + v.curObservations = append(v.curObservations, &observedScan{ + Span: roachpb.Span{ + Key: t.Key, + EndKey: t.EndKey, + }, + }) + + if v.buffering == bufferingSingle { + v.checkAtomic(`deleteRangeUsingTombstone`, t.Result) } case *ScanOperation: v.failIfError(op, t.Result) - if !buffering { + scan := &observedScan{ + Span: roachpb.Span{ + Key: t.Key, + EndKey: t.EndKey, + }, + KVs: make([]roachpb.KeyValue, len(t.Result.Values)), + Reverse: t.Reverse, + } + for i, kv := range t.Result.Values { + scan.KVs[i] = roachpb.KeyValue{ + Key: kv.Key, + Value: roachpb.Value{RawBytes: kv.Value}, + } + } + v.curObservations = append(v.curObservations, scan) + + if v.buffering == bufferingSingle { atomicScanType := `scan` if t.Reverse { atomicScanType = `reverse scan` } - v.checkAtomic(atomicScanType, t.Result, op) - } else { - scan := &observedScan{ - Span: roachpb.Span{ - Key: t.Key, - EndKey: t.EndKey, - }, - KVs: make([]roachpb.KeyValue, len(t.Result.Values)), - Reverse: t.Reverse, - } - for i, kv := range t.Result.Values { - scan.KVs[i] = roachpb.KeyValue{ - Key: kv.Key, - Value: roachpb.Value{RawBytes: kv.Value}, - } - } - v.curObservations = append(v.curObservations, scan) + v.checkAtomic(atomicScanType, t.Result) + } + case *BatchOperation: + if resultIsRetryable(t.Result) { + break + } + v.failIfError(op, t.Result) + // Only call checkAtomic if we're in bufferingSingle here. We could have + // been a batch inside a txn. + wasBuffering := v.buffering + v.buffering = bufferingBatchOrTxn + for _, op := range t.Ops { + v.processOp(op) + } + if wasBuffering == bufferingSingle { + v.checkAtomic(`batch`, t.Result) + } + case *ClosureTxnOperation: + ops := t.Ops + if t.CommitInBatch != nil { + ops = append(ops, t.CommitInBatch.Ops...) 
+ } + v.buffering = bufferingBatchOrTxn + for _, op := range ops { + v.processOp(op) } + v.checkAtomic(`txn`, t.Result) case *SplitOperation: execTimestampStrictlyOptional = true v.failIfError(op, t.Result) @@ -550,28 +657,12 @@ func (v *validator) processOp(buffering bool, op Operation) { case *ChangeZoneOperation: execTimestampStrictlyOptional = true v.failIfError(op, t.Result) - case *BatchOperation: - if !resultIsRetryable(t.Result) { - v.failIfError(op, t.Result) - if !buffering { - v.checkAtomic(`batch`, t.Result, t.Ops...) - } else { - for _, op := range t.Ops { - v.processOp(buffering, op) - } - } - } - case *ClosureTxnOperation: - ops := t.Ops - if t.CommitInBatch != nil { - ops = append(ops, t.CommitInBatch.Ops...) - } - v.checkAtomic(`txn`, t.Result, ops...) default: panic(errors.AssertionFailedf(`unknown operation type: %T %v`, t, t)) } - if !execTimestampStrictlyOptional && !buffering && op.Result().Type != ResultType_Error && op.Result().OptionalTimestamp.IsEmpty() { + // TODO(during review): this condition probably isn't exactly right. + if !execTimestampStrictlyOptional && v.buffering == bufferingSingle && op.Result().Type != ResultType_Error && op.Result().OptionalTimestamp.IsEmpty() { v.failures = append(v.failures, errors.Errorf("execution timestamp missing for %s", op)) } } @@ -579,26 +670,30 @@ func (v *validator) processOp(buffering bool, op Operation) { // checkAtomic verifies a set of operations that should be atomic by trying to find // a timestamp at which the observed reads and writes of the operations (as executed // in the order in which they appear in the arguments) match the MVCC history. -func (v *validator) checkAtomic(atomicType string, result Result, ops ...Operation) { - for _, op := range ops { - // NB: we're not really necessarily in a txn, but passing true here means that - // we have an atomic unit, which is also the case if we are called here by a - // non-transactional Put, for example. 
- v.processOp(isBuffering, op) - } +func (v *validator) checkAtomic(atomicType string, result Result) { observations := v.curObservations v.curObservations = nil + v.buffering = bufferingSingle + // Only known-uncommitted results may come without a timestamp. Whenever we + // actually tried to commit, there is a timestamp. if result.Type != ResultType_Error { // The timestamp is not optional in this case. Note however that at the time // of writing, checkAtomicCommitted doesn't capitalize on this unconditional // presence yet, and most unit tests don't specify it for reads. - if result.OptionalTimestamp.IsEmpty() { + if !result.OptionalTimestamp.IsSet() { err := errors.AssertionFailedf("operation has no execution timestamp: %s", result) v.failures = append(v.failures, err) } v.checkAtomicCommitted(`committed `+atomicType, observations, result.OptionalTimestamp) } else if resultIsAmbiguous(result) { + // TODO(during review): we're sloppy with this in TestValidate, fix it up and then + // arm this check again. + const hack = true + if !hack && result.OptionalTimestamp.IsSet() { + err := errors.AssertionFailedf("OptionalTimestamp set for ambiguous result: %s", result) + v.failures = append(v.failures, err) + } v.checkAtomicAmbiguous(`ambiguous `+atomicType, observations) } else { v.checkAtomicUncommitted(`uncommitted `+atomicType, observations) @@ -613,7 +708,7 @@ func (v *validator) checkAtomic(atomicType string, result Result, ops ...Operati // succeeded in a "normal" way. However, for ambiguous results, it is not always // present. This limitation could be lifted, see checkAtomicAmbiguous. 
func (v *validator) checkAtomicCommitted( - atomicType string, txnObservations []observedOp, optOpsTimestamp hlc.Timestamp, + atomicType string, txnObservations []observedOp, execTimestamp hlc.Timestamp, ) { // The following works by verifying that there is at least one time at which // it was valid to see all the reads and writes that we saw in this @@ -685,84 +780,144 @@ func (v *validator) checkAtomicCommitted( batch := v.kvs.kvs.NewIndexedBatch() defer func() { _ = batch.Close() }() - // If the same key is written multiple times in a transaction, only the last - // one makes it to kv. - lastWriteIdxByKey := make(map[string]int, len(txnObservations)) + var failure string + // writeTS is populated with the timestamp of the materialized observed writes + // (if there are any). We'll use it below to maintain the "view" of prefixes + // of atomic unit. + var writeTS hlc.Timestamp + // First, hide all of our writes from the view. Remember the index of the last + // ('most recent') write to each key so that we can check below whether any + // shadowed writes erroneously materialized. Recall that writes can be ranged + // (mvcc range deletions), but these writes cannot be transactional. At the + // time of writing, we also don't do non-transactional batches (see + // DefaultConfig) which means in effect we'll only ever see ranged operations + // alone in an atomic unit. This code still handles these cases, and they are + // unit tested. + lastWritesByIdx := map[int]struct{}{} + var lastWrites roachpb.SpanGroup for idx := len(txnObservations) - 1; idx >= 0; idx-- { observation := txnObservations[idx] switch o := observation.(type) { case *observedWrite: - if _, ok := lastWriteIdxByKey[string(o.Key)]; !ok { - lastWriteIdxByKey[string(o.Key)] = idx - - // Mark which deletes are materialized and match them with a stored - // tombstone, since this cannot be done before the end of the txn. 
- // This is because materialized deletes do not write unique values, - // but must be the final write in a txn for that key. - if o.isDelete() { - key := string(o.Key) - v.committedDeletesForKey[key]++ - if optOpsTimestamp.IsEmpty() { - // Special case: our operation doesn't know at which timestamp - // it wrote and so we're unable to match it to a particular tombstone - // and can only check the cardinality - if there was a tombstone left, - // we assume it's ours. - // - // We leave the Timestamp field empty as a result (if there are - // multiple tombstones left, how do we know which one is ours?) and - // everyone else needs to be able to handle this special case. - // - // TODO(tbg): see checkAtomicAmbiguous about letting ambiguously - // committed operations learn their commit timestamp. - o.Materialized = v.committedDeletesForKey[key] <= len(v.tombstonesForKey[key]) - } else if storedDelete, ok := v.getDeleteForKey(key, optOpsTimestamp); ok { - o.Materialized = true - o.Timestamp = storedDelete.Timestamp - } + sp := roachpb.Span{Key: o.Key, EndKey: o.EndKey} + // Check if the last writes set already covers the current write. + // + // Writes are fragmented in the sense that they are either fully the + // last write or not, since all (materialized) writes happened at the + // same MVCC timestamp, at least in the absence of bugs. + // + // For example, a Put A that gets shadowed by an MVCC rangedel B that + // then gets overlaid by a Put C and then intersected by another + // rangedel D should give an "incremental history" (as we construct it + // further down below) + // [-----D----) + // C + // [----------B------------) + // A + // + // and lastWrites will be + // + // [-----B-----)C[--B--)[-----D----) + // + // In particular, when we constructed the observedWrite for our rangedels, + // we construct them for the actual spans from the rangefeed, not the span + // of the operation. 
+ var lastWrite bool + { + var g roachpb.SpanGroup + g.Add(lastWrites.Slice()...) + lastWrite = !g.Sub(sp) // if subtracting did nothing, it's a most recent write + if !lastWrite { + // Otherwise, add it back in, which should restore the old set. If it + // didn't, there was partial overlap, which shouldn't be possible. + g.Add(sp) } + if then, now := lastWrites.Slice(), g.Slice(); !reflect.DeepEqual(then, now) { + v.failures = append(v.failures, + errors.AssertionFailedf("%s has write %q partially overlapping %+v; subtracting and re-adding gave %+v", atomicType, sp, then, now)) + return + } + } + + if lastWrite { + lastWritesByIdx[idx] = struct{}{} + lastWrites.Add(sp) + } + + if o.Timestamp.IsEmpty() { + // This write didn't materialize (say a superseded write in + // a txn), so it's not present here. + continue } - if !o.Timestamp.IsEmpty() { + + // NB: we allow writeTS to change here, since that will be caught by + // validation below anyway, and then we can produce better errors since + // read timestamps will be filled in. + if writeTS.IsEmpty() { + writeTS = o.Timestamp + } + + if len(o.EndKey) == 0 { // point write mvccKey := storage.MVCCKey{Key: o.Key, Timestamp: o.Timestamp} if err := batch.Delete(storage.EncodeMVCCKey(mvccKey), nil); err != nil { panic(err) } + } else { // ranged write + suffix := storage.EncodeMVCCTimestampSuffix(o.Timestamp) + if err := batch.RangeKeyUnset(o.Key, o.EndKey, suffix, nil); err != nil { + panic(err) + } } } } - // Check if any key that was written twice in the txn had the overwritten - // writes materialize in kv. Also fill in all the read timestamps first so - // they show up in the failure message. - var failure string + // Iterate through the observations, building up the snapshot visible at each + // point in the atomic unit and filling in the valid read times (validating + // them later, in a separate loop, for better errors). We also check that only + // the most recent writes materialized (i.e. showed up in MVCC). 
Check if any + // key that was written twice in the txn had the overwritten writes + // materialize in kv. for idx, observation := range txnObservations { if failure != `` { break } switch o := observation.(type) { case *observedWrite: - var mvccKey storage.MVCCKey - if lastWriteIdx := lastWriteIdxByKey[string(o.Key)]; idx == lastWriteIdx { - // The last write of a given key in the txn wins and should have made it - // to kv. - mvccKey = storage.MVCCKey{Key: o.Key, Timestamp: o.Timestamp} - } else { - if o.Materialized { - failure = `committed txn overwritten key had write` + // Only the most recent write between overlapping mutations makes it into MVCC. + // writeTS was populated above as the unique timestamp at which the writes became + // visible. We know the operation had writes (we're looking at one now) and so + // this operation has either materialized or is covered by a later one that did, + // and so we must have a timestamp here. We defer the failure to the next for + // loop, as we will have filled in the read timestamps at that time. + if writeTS.IsEmpty() { + continue + } + + _, isLastWrite := lastWritesByIdx[idx] + + if !isLastWrite && o.Timestamp.IsSet() { + failure = `committed txn overwritten key had write` + break + } + + // Make this write visible (at writeTS, regardless of whether it's the + // last write or not, since that's the snapshot at which our operation + // wrote). Helpfully, pebble will deal with all the partitioning of range + // tombstones that might be necessary under the hood. For example, if we + // currently have [a,z) and now are adding a Put(b), we'll automatically + // have [a,b); b; [b,z) after. + if len(o.EndKey) == 0 { + if err := batch.Set(storage.EncodeMVCCKey(storage.MVCCKey{Key: o.Key, Timestamp: writeTS}), o.Value.RawBytes, nil); err != nil { + panic(err) } - // This write was never materialized in KV because the key got - // overwritten later in the txn. 
But reads in the txn could have seen - // it, so we put in the batch being maintained for validReadTimes using - // the timestamp of the write for this key that eventually "won". - mvccKey = storage.MVCCKey{ - Key: o.Key, - Timestamp: txnObservations[lastWriteIdx].(*observedWrite).Timestamp, + } else { + suffix := storage.EncodeMVCCTimestampSuffix(writeTS) + if err := batch.RangeKeySet(o.Key, o.EndKey, suffix, o.Value.RawBytes, nil); err != nil { + panic(err) } } - if err := batch.Set(storage.EncodeMVCCKey(mvccKey), o.Value.RawBytes, nil); err != nil { - panic(err) - } case *observedRead: - o.ValidTimes = validReadTimes(batch, o.Key, o.Value.RawBytes, false) + o.ValidTimes = validReadTimes(batch, o.Key, o.Value.RawBytes) case *observedScan: // All kvs should be within scan boundary. for _, kv := range o.KVs { @@ -783,7 +938,7 @@ func (v *validator) checkAtomicCommitted( if !sort.IsSorted(orderedKVs) { failure = `scan result not ordered correctly` } - o.Valid = validScanTime(batch, o.Span, o.KVs, o.IsDeleteRange) + o.Valid = validScanTime(batch, o.Span, o.KVs) default: panic(errors.AssertionFailedf(`unknown observedOp: %T %s`, observation, observation)) } @@ -797,21 +952,14 @@ func (v *validator) checkAtomicCommitted( var opValid disjointTimeSpans switch o := observation.(type) { case *observedWrite: - isLastWriteForKey := idx == lastWriteIdxByKey[string(o.Key)] - if !isLastWriteForKey { + _, isLastWrite := lastWritesByIdx[idx] + if !isLastWrite { continue } - if !o.Materialized { - failure = atomicType + ` missing write` + if o.Timestamp.IsEmpty() { + failure = atomicType + ` missing write at seq ` + o.Seq.String() continue } - - if o.isDelete() && len(txnObservations) == 1 { - // For delete operations outside of transactions, it is not possible to - // identify the precise tombstone, so we skip timestamp validation. 
- continue - } - opValid = disjointTimeSpans{{Start: o.Timestamp, End: o.Timestamp.Next()}} case *observedRead: opValid = o.ValidTimes @@ -829,16 +977,8 @@ func (v *validator) checkAtomicCommitted( // Finally, validate that the write timestamp of the transaction matches the // write timestamp of each write within that transaction. - for _, observation := range txnObservations { - if failure != `` { - break - } - switch o := observation.(type) { - case *observedWrite: - if optOpsTimestamp.IsSet() && o.Materialized && optOpsTimestamp != o.Timestamp { - failure = fmt.Sprintf(`mismatched write timestamp %s`, optOpsTimestamp) - } - } + if failure == `` && writeTS.IsSet() && execTimestamp.IsSet() && writeTS != execTimestamp { + failure = fmt.Sprintf(`mismatched write timestamp %s and exec timestamp %s`, writeTS, execTimestamp) } if failure != `` { @@ -848,58 +988,36 @@ func (v *validator) checkAtomicCommitted( } func (v *validator) checkAtomicAmbiguous(atomicType string, txnObservations []observedOp) { - var somethingCommitted bool - deletedKeysInTxn := make(map[string]int) - var hadWrite bool - var maybeExecTS hlc.Timestamp + // If the atomic unit hasn't observed any writes (i.e. it's a read-only/admin + // op) or any part of it has materialized, treat it as committed. + // + // TODO(tbg): even when there's no materialized write, we could treat the + // prefix of pure reads as a committed operation. This is probably most + // relevant for aborted txns, which must have still seen a consistent snapshot + // before they realized they were aborted, and which had bugs in the past. 
+ var execTimestamp hlc.Timestamp + var isRW bool for _, observation := range txnObservations { - switch o := observation.(type) { - case *observedWrite: - hadWrite = true - if o.Materialized { - somethingCommitted = true - maybeExecTS.Forward(o.Timestamp) // use Forward() just in case o.Timestamp is zero - break - } - if o.isDelete() && len(v.tombstonesForKey[string(o.Key)]) > v.committedDeletesForKey[string(o.Key)] { - deletedKeysInTxn[string(o.Key)]++ - break - } + o, ok := observation.(*observedWrite) + if !ok { + continue + } + isRW = true + if o.Timestamp.IsSet() { + execTimestamp = o.Timestamp + break } } - if len(deletedKeysInTxn) > 0 { - // TODO(sarkesian): Since we can't rely on the transaction write timestamp - // in an ambiguous transaction, and therefore cannot identify the tombstone - // resulting from a delete operation, it is impossible to validate if the - // transaction was actually atomic. For now, we have chosen to fail loudly, - // though if we are able to validate properly, this should be removed. - // - // TODO(tbg): this might be addressable. For an ambiguous transaction we - // should still be able to salvage the timestamp at which the transaction - // would have committed if it did, because kvnemesis always has a local - // TxnCoordSender which always knows the one possible commit timestamp - // and so it's simply a matter of making sure this information is - // guaranteed to flow back with the AmbiguousResultError. - err := errors.Errorf( - `unable to validate delete operations in ambiguous transactions: %s`, - printObserved(txnObservations...), - ) - v.failures = append(v.failures, err) - - for key := range deletedKeysInTxn { - // NB: We don't know for sure if these delete committed, but we know we - // still have tombstones for the keys. 
If we are incorrect in assuming it - // committed, it will affect delete counting in subsequent transactions; - // note that when dealing with ambiguous deletes that fail to commit, - // later deletes may show "committed delete missing write" errors. - v.committedDeletesForKey[key]++ - } - } else if !hadWrite || somethingCommitted { - v.checkAtomicCommitted(atomicType, txnObservations, maybeExecTS) + if !isRW || execTimestamp.IsSet() { + v.checkAtomicCommitted(atomicType, txnObservations, execTimestamp) } else { // This is a writing transaction but not a single one of its writes // showed up in KV, so verify that it is uncommitted. + // + // NB: if there's ever a way for a writing transaction to not leave + // a trace on the rangefeed (DeleteRange comes to mind) then it's + // fine to treat that transaction as uncommitted as well. v.checkAtomicUncommitted(atomicType, txnObservations) } } @@ -912,7 +1030,7 @@ func (v *validator) checkAtomicUncommitted(atomicType string, txnObservations [] } switch o := observed.(type) { case *observedWrite: - if o.Materialized { + if o.Timestamp.IsSet() { failure = atomicType + ` had writes` } // NB: While we don't check deletes here, as we cannot uniquely identify @@ -967,7 +1085,9 @@ func resultIsRetryable(r Result) bool { } func resultIsAmbiguous(r Result) bool { - return errors.HasInterface(errorFromResult(r), (*roachpb.ClientVisibleAmbiguousError)(nil)) + resErr := errorFromResult(r) + hasClientVisibleAE := errors.HasInterface(resErr, (*roachpb.ClientVisibleAmbiguousError)(nil)) + return hasClientVisibleAE } // TODO(dan): Checking errors using string containment is fragile at best and a @@ -998,45 +1118,81 @@ func mustGetStringValue(value []byte) string { return string(b) } -func validReadTimes( - b *pebble.Batch, key roachpb.Key, value []byte, anyValueAccepted bool, -) disjointTimeSpans { - var validTimes disjointTimeSpans - end := hlc.MaxTimestamp - - iter := b.NewIter(nil) +func validReadTimes(b *pebble.Batch, key 
roachpb.Key, value []byte) disjointTimeSpans { + var hist []storage.MVCCValue + iter := b.NewIter(&pebble.IterOptions{ + KeyTypes: storage.IterKeyTypePointsAndRanges, + }) defer func() { _ = iter.Close() }() iter.SeekGE(storage.EncodeMVCCKey(storage.MVCCKey{Key: key})) + for ; iter.Valid(); iter.Next() { mvccKey, err := storage.DecodeMVCCKey(iter.Key()) if err != nil { panic(err) } + + hasPoint, hasRange := iter.HasPointAndRange() + if hasRange && iter.RangeKeyChanged() { + k, ek := iter.RangeBounds() + sp := roachpb.Span{Key: k, EndKey: ek} + if !sp.ContainsKey(key) { + // If we see a range key that doesn't even contain the key, + // we've moved off the key (recall that we seeked to the key + // initially). + break + } + // Range key contains the key. Emit a point deletion on the key + // at the tombstone's timestamp for each active range key. + for _, rk := range iter.RangeKeys() { + ts, err := storage.DecodeMVCCTimestampSuffix(rk.Suffix) + if err != nil { + panic(err) + } + hist = append(hist, storage.MVCCValue{Value: roachpb.Value{Timestamp: ts}}) + } + } + + if !hasPoint { + continue + } + if !mvccKey.Key.Equal(key) { break } - if (anyValueAccepted && len(iter.Value()) > 0) || - (!anyValueAccepted && mustGetStringValue(iter.Value()) == mustGetStringValue(value)) { - validTimes = append(validTimes, timeSpan{Start: mvccKey.Timestamp, End: end}) + + // Handle a point key - put it into `hist`. + v, err := storage.DecodeMVCCValue(iter.Value()) + if err != nil { + panic(err) } - end = mvccKey.Timestamp + v.Value.Timestamp = mvccKey.Timestamp + hist = append(hist, v) } - if !anyValueAccepted && len(value) == 0 { + // The slice isn't sorted due to MVCC rangedels. Sort in descending order. 
+ sort.Slice(hist, func(i, j int) bool { + return hist[j].Value.Timestamp.Less(hist[i].Value.Timestamp) + }) + + sv := mustGetStringValue(value) + var validTimes disjointTimeSpans + end := hlc.MaxTimestamp + for i := range hist { + v := hist[i].Value + if mustGetStringValue(v.RawBytes) == sv { + validTimes = append(validTimes, timeSpan{Start: v.Timestamp, End: end}) + } + end = v.Timestamp + } + + if len(value) == 0 { validTimes = append(disjointTimeSpans{{Start: hlc.MinTimestamp, End: end}}, validTimes...) } - // NB: With the exception of deletes, the "only write each value once" - // property of the generator means that we have a 1:1 mapping between some - // `(key, non-nil-value)` observation and a time span in which it was valid. - // With deletes, there multiple disjoint spans for a `(key, nil-value)` - // observation (i.e. before the key existed, after it was deleted). - // This means that for each read, we must consider all possibly valid times. return validTimes } -func validScanTime( - b *pebble.Batch, span roachpb.Span, kvs []roachpb.KeyValue, isDeleteRange bool, -) multiKeyTimeSpan { +func validScanTime(b *pebble.Batch, span roachpb.Span, kvs []roachpb.KeyValue) multiKeyTimeSpan { valid := multiKeyTimeSpan{ Gaps: disjointTimeSpans{{Start: hlc.MinTimestamp, End: hlc.MaxTimestamp}}, } @@ -1046,14 +1202,16 @@ func validScanTime( // Since scan results don't include deleted keys, there should only ever // be 0 or 1 valid read time span for each `(key, specific-non-nil-value)` // returned, given that the values are guaranteed to be unique by the - // Generator. However, in the DeleteRange case where we are looking for - // `(key, any-non-nil-value)`, it is of course valid for there to be - // multiple disjoint time spans. - validTimes := validReadTimes(b, kv.Key, kv.Value.RawBytes, isDeleteRange) - if !isDeleteRange && len(validTimes) > 1 { + // Generator. 
+ // + // NB: we use value uniqueness here, but we could also use seqnos, so this + // is only a left-over of past times rather than an actual reliance on + // unique values. + validTimes := validReadTimes(b, kv.Key, kv.Value.RawBytes) + if len(validTimes) > 1 { panic(errors.AssertionFailedf( - `invalid number of read time spans for a (key,non-nil-value) pair in scan results: %s->%s`, - kv.Key, mustGetStringValue(kv.Value.RawBytes))) + `invalid number of read time spans for a (key,non-nil-value) pair in scan results: %s->%s: %v`, + kv.Key, mustGetStringValue(kv.Value.RawBytes), validTimes)) } if len(validTimes) == 0 { validTimes = append(validTimes, timeSpan{}) @@ -1073,6 +1231,12 @@ func validScanTime( defer func() { _ = iter.Close() }() iter.SeekGE(storage.EncodeMVCCKey(storage.MVCCKey{Key: span.Key})) for ; iter.Valid(); iter.Next() { + // TODO(during review): is this correct? Should be but have Erik check. + if hasPoint, _ := iter.HasPointAndRange(); !hasPoint { + // We're on an MVCC range deletion and don't have a point, so + // just nudge the iterator along. + continue + } mvccKey, err := storage.DecodeMVCCKey(iter.Key()) if err != nil { panic(err) @@ -1088,7 +1252,7 @@ func validScanTime( if _, ok := missingKeys[string(mvccKey.Key)]; !ok { // Key not in scan response. Only valid if scan was before key's time, or // at a time when the key was deleted. 
- missingKeys[string(mvccKey.Key)] = validReadTimes(b, mvccKey.Key, nil, false) + missingKeys[string(mvccKey.Key)] = validReadTimes(b, mvccKey.Key, nil) } } @@ -1110,21 +1274,26 @@ func printObserved(observedOps ...observedOp) string { opCode := "w" if o.isDelete() { if o.IsDeleteRange { - opCode = "dr.d" + if len(o.EndKey) == 0 { + opCode = "dr.d" + } else { + opCode = "rd" // mvcc range del + } } else { opCode = "d" } } ts := `missing` - if o.Materialized { - if o.isDelete() && o.Timestamp.IsEmpty() { - ts = `uncertain` - } else { - ts = o.Timestamp.String() - } + if o.Timestamp.IsSet() { + ts = o.Timestamp.String() + } + if len(o.EndKey) == 0 { + fmt.Fprintf(&buf, "[%s]%s:%s->%s@%s", + opCode, o.Key, ts, mustGetStringValue(o.Value.RawBytes), o.Seq) + } else { + fmt.Fprintf(&buf, "[%s][%s,%s):%s->%s@%s", + opCode, o.Key, o.EndKey, ts, mustGetStringValue(o.Value.RawBytes), o.Seq) } - fmt.Fprintf(&buf, "[%s]%s:%s->%s", - opCode, o.Key, ts, mustGetStringValue(o.Value.RawBytes)) case *observedRead: fmt.Fprintf(&buf, "[r]%s:", o.Key) validTimes := o.ValidTimes diff --git a/pkg/kv/kvnemesis/validator_test.go b/pkg/kv/kvnemesis/validator_test.go index 34583daf18a1..3a0cf4c7d2f1 100644 --- a/pkg/kv/kvnemesis/validator_test.go +++ b/pkg/kv/kvnemesis/validator_test.go @@ -12,16 +12,23 @@ package kvnemesis import ( "context" + "fmt" + "strings" "testing" + "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/storage/enginepb" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/echotest" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" 
"github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -45,18 +52,23 @@ func withResult(op Operation) Operation { return withResultErr(op, nil /* err */) } +func withAmbResult(op Operation) Operation { + err := roachpb.NewAmbiguousResultErrorf("boom") + op = withResultErr(op, err) + return op +} + func withResultErr(op Operation, err error) Operation { *op.Result() = resultInit(context.Background(), err) - // Most operations in tests use timestamp 1, so use that and any test cases - // that differ from that can use withTimestamp(). - if op.Result().OptionalTimestamp.IsEmpty() { - op.Result().OptionalTimestamp = hlc.Timestamp{WallTime: 1} - } return op } func withReadResult(op Operation, value string) Operation { - op = withResult(op) + return withReadResultTS(op, value, 0) +} + +func withReadResultTS(op Operation, value string, ts int) Operation { + op = withResultTS(op, ts) get := op.GetValue().(*GetOperation) get.Result.Type = ResultType_Value if value != `` { @@ -65,45 +77,107 @@ func withReadResult(op Operation, value string) Operation { return op } -func withScanResult(op Operation, kvs ...KeyValue) Operation { - op = withResult(op) +func withScanResultTS(op Operation, ts int, kvs ...KeyValue) Operation { + op = withTimestamp(withResult(op), ts) scan := op.GetValue().(*ScanOperation) scan.Result.Type = ResultType_Values scan.Result.Values = kvs return op } -func withDeleteRangeResult(op Operation, keys ...[]byte) Operation { - op = withResult(op) +func withDeleteRangeResult(op Operation, ts int, keys ...[]byte) Operation { + op = withTimestamp(withResult(op), ts) delRange := op.GetValue().(*DeleteRangeOperation) delRange.Result.Type = ResultType_Keys delRange.Result.Keys = keys return op } +type seqKV struct { + key, endKey roachpb.Key + val []byte // contains seq + 
ts hlc.Timestamp +} + +func (kv *seqKV) seq() kvnemesisutil.Seq { + mvccV, err := storage.DecodeMVCCValue(kv.val) + if err != nil { + panic(err) + } + return kvnemesisutil.Seq(mvccV.KVNemesisSeq.Get()) +} + func TestValidate(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - kv := func(key string, ts int, value string) storage.MVCCKeyValue { - return storage.MVCCKeyValue{ - Key: storage.MVCCKey{ - Key: []byte(key), - Timestamp: hlc.Timestamp{WallTime: int64(ts)}, - }, - Value: roachpb.MakeValueFromString(value).RawBytes, + if !buildutil.CrdbTestBuild { + // `roachpb.RequestHeader` and `MVCCValueHeader` have a KVNemesisSeq field + // that is zero-sized outside test builds. We could revisit that should + // a need arise to run kvnemesis against production binaries. + skip.IgnoreLint(t, "kvnemesis must be run with the crdb_test build tag") + } + + const ( + s1 = kvnemesisutil.Seq(1 + iota) + s2 + s3 + s4 + s5 + s6 + ) + + const ( + noTS = iota + t1 + t2 + t3 + t4 + t5 + ) + + vi := func(s kvnemesisutil.Seq) string { + return PutOperation{Seq: s}.Value() + } + var ( + v1 = vi(s1) + v2 = vi(s2) + v3 = vi(s3) + ) + + valWithSeq := func(seq kvnemesisutil.Seq, v roachpb.Value) []byte { + var vh enginepb.MVCCValueHeader + vh.KVNemesisSeq.Set(int64(seq)) + sl, err := storage.EncodeMVCCValue(storage.MVCCValue{ + MVCCValueHeader: vh, + Value: v, + }) + if err != nil { + panic(err) } + return sl } - tombstone := func(key string, ts int) storage.MVCCKeyValue { - return storage.MVCCKeyValue{ - Key: storage.MVCCKey{ - Key: []byte(key), - Timestamp: hlc.Timestamp{WallTime: int64(ts)}, - }, - Value: nil, + kv := func(key string, ts int, seq kvnemesisutil.Seq) seqKV { + return seqKV{ + key: roachpb.Key(key), + ts: hlc.Timestamp{WallTime: int64(ts)}, + val: valWithSeq(seq, roachpb.MakeValueFromString(PutOperation{Seq: seq}.Value())), } } - kvs := func(kvs ...storage.MVCCKeyValue) []storage.MVCCKeyValue { + tombstone := func(key string, ts int, seq 
kvnemesisutil.Seq) seqKV { + r := kv(key, ts, seq) + r.val = valWithSeq(seq, roachpb.Value{}) + return r + } + rd := func(key, endKey string, ts int, seq kvnemesisutil.Seq) seqKV { + return seqKV{ + key: roachpb.Key(key), + endKey: roachpb.Key(endKey), + ts: hlc.Timestamp{WallTime: int64(ts)}, + val: valWithSeq(seq, roachpb.Value{}), + } + } + kvs := func(kvs ...seqKV) []seqKV { return kvs } scanKV := func(key, value string) KeyValue { @@ -114,1792 +188,1661 @@ func TestValidate(t *testing.T) { } tests := []struct { - name string - steps []Step - kvs []storage.MVCCKeyValue - expected []string + name string + steps []Step + kvs []seqKV }{ { - name: "no ops and no kvs", - steps: nil, - kvs: nil, - expected: nil, + name: "no ops and no kvs", + steps: nil, + kvs: nil, }, { - name: "no ops with unexpected write", - steps: nil, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{`extra writes: [w]"a":0.000000001,0->v1`}, + name: "no ops with unexpected write", + steps: nil, + kvs: kvs(kv(`a`, t1, s1)), }, { - name: "no ops with unexpected delete", - steps: nil, - kvs: kvs(tombstone(`a`, 1)), - expected: []string{`extra writes: [d]"a":uncertain->`}, + name: "no ops with unexpected delete", + steps: nil, + kvs: kvs(tombstone(`a`, t1, s1)), }, { - name: "one put with expected write", - steps: []Step{step(withResult(put(`a`, `v1`)))}, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + name: "one put with expected write", + steps: []Step{step(withResultTS(put(`a`, s1), t1))}, + kvs: kvs(kv(`a`, t1, s1)), }, { - name: "one delete with expected write", - steps: []Step{step(withResult(del(`a`)))}, - kvs: kvs(tombstone(`a`, 1)), - expected: nil, + name: "one delete with expected write", + steps: []Step{step(withResultTS(del(`a`, s1), t1))}, + kvs: kvs(tombstone(`a`, t1, s1)), }, { - name: "one put with missing write", - steps: []Step{step(withResult(put(`a`, `v1`)))}, - kvs: nil, - expected: []string{`committed put missing write: [w]"a":missing->v1`}, + name: "one put with missing 
write", + steps: []Step{step(withResultTS(put(`a`, s1), t1))}, + kvs: nil, }, { - name: "one delete with missing write", - steps: []Step{step(withResult(del(`a`)))}, - kvs: nil, - expected: []string{`committed delete missing write: [d]"a":missing->`}, + name: "one delete with missing write", + steps: []Step{step(withResultTS(del(`a`, s1), t1))}, + kvs: nil, }, { - name: "one ambiguous put with successful write", - steps: []Step{step(withResultErr(put(`a`, `v1`), roachpb.NewAmbiguousResultError(errors.New("boom"))))}, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + name: "one ambiguous put with successful write", + steps: []Step{step(withTimestamp(withAmbResult(put(`a`, s1)), t1))}, + kvs: kvs(kv(`a`, t1, s1)), }, { - name: "one ambiguous delete with successful write", - steps: []Step{step(withResultErr(del(`a`), roachpb.NewAmbiguousResultError(errors.New("boom"))))}, - kvs: kvs(tombstone(`a`, 1)), - expected: []string{`unable to validate delete operations in ambiguous transactions: [d]"a":missing->`}, + name: "one ambiguous delete with successful write", + steps: []Step{step(withAmbResult(del(`a`, s1)))}, + kvs: kvs(tombstone(`a`, t1, s1)), }, + { - name: "one ambiguous put with failed write", - steps: []Step{step(withResultErr(put(`a`, `v1`), roachpb.NewAmbiguousResultError(errors.New("boom"))))}, - kvs: nil, - expected: nil, + name: "one ambiguous put with failed write", + steps: []Step{step(withAmbResult(put(`a`, s1)))}, + kvs: nil, }, { - name: "one ambiguous delete with failed write", - steps: []Step{step(withResultErr(del(`a`), roachpb.NewAmbiguousResultError(errors.New("boom"))))}, - kvs: nil, - expected: nil, + name: "one ambiguous delete with failed write", + steps: []Step{step(withAmbResult(del(`a`, s1)))}, + kvs: nil, }, { name: "one ambiguous delete with failed write before a later committed delete", steps: []Step{ - step(withResultErr(del(`a`), roachpb.NewAmbiguousResultError(errors.New("boom")))), - step(withResultTS(del(`a`), 2)), - }, - kvs: 
kvs(tombstone(`a`, 2)), - expected: []string{ - `unable to validate delete operations in ambiguous transactions: [d]"a":missing->`, + step(withAmbResult(del(`a`, s1))), + step(withResultTS(del(`a`, s2), t2)), }, + kvs: kvs(tombstone(`a`, t2, s2)), }, { - name: "one retryable put with write (correctly) missing", - steps: []Step{step(withResultErr(put(`a`, `v1`), retryableError))}, - kvs: nil, - expected: nil, + name: "one retryable put with write (correctly) missing", + steps: []Step{step(withResultErr(put(`a`, s1), retryableError))}, + kvs: nil, }, { - name: "one retryable delete with write (correctly) missing", - steps: []Step{step(withResultErr(del(`a`), retryableError))}, - kvs: nil, - expected: nil, + name: "one retryable delete with write (correctly) missing", + steps: []Step{step(withResultErr(del(`a`, s1), retryableError))}, + kvs: nil, }, { - name: "one retryable put with write (incorrectly) present", - steps: []Step{step(withResultErr(put(`a`, `v1`), retryableError))}, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{`uncommitted put had writes: [w]"a":0.000000001,0->v1`}, + name: "one retryable put with write (incorrectly) present", + steps: []Step{step(withTimestamp(withResultErr(put(`a`, s1), retryableError), t1))}, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "one retryable delete with write (incorrectly) present", - steps: []Step{step(withResultErr(del(`a`), retryableError))}, - kvs: kvs(tombstone(`a`, 1)), + steps: []Step{step(withResultErr(del(`a`, s1), retryableError))}, + kvs: kvs(tombstone(`a`, t1, s1)), // NB: Error messages are different because we can't match an uncommitted // delete op to a stored kv like above. 
- expected: []string{`extra writes: [d]"a":uncertain->`}, + }, { name: "one delete with expected write after write transaction with shadowed delete", steps: []Step{ - step(withResultTS(del(`a`), 1)), - step(withResultTS(put(`a`, `v1`), 2)), + step(withResultTS(del(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResultOK(put(`a`, `v2`)), - withResultOK(del(`a`)), - withResultOK(put(`a`, `v3`)), - ), 3)), - step(withResultTS(del(`a`), 4)), + withResultOK(put(`a`, s3)), + withResultOK(del(`a`, s4)), + withResultOK(put(`a`, s5)), + ), t3)), + step(withResultTS(del(`a`, s6), t4)), }, kvs: kvs( - tombstone(`a`, 1), - kv(`a`, 2, `v1`), - kv(`a`, 3, `v3`), - tombstone(`a`, 4)), - expected: nil, + tombstone(`a`, t1, s1), + kv(`a`, t2, s2), + kv(`a`, t3, s5), + tombstone(`a`, t4, s6)), }, { - name: "one batch put with successful write", - steps: []Step{step(withResult(batch(withResult(put(`a`, `v1`)))))}, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + name: "one batch put with successful write", + steps: []Step{step(withResultTS(batch(withResult(put(`a`, s1))), t1))}, + kvs: kvs(kv(`a`, t1, s1)), }, { - name: "one batch delete with successful write", - steps: []Step{step(withResult(batch(withResult(del(`a`)))))}, - kvs: kvs(tombstone(`a`, 1)), - expected: nil, + name: "one batch delete with successful write", + steps: []Step{step(withResultTS(batch(del(`a`, s1)), t1))}, + kvs: kvs(tombstone(`a`, t1, s1)), }, { - name: "one batch put with missing write", - steps: []Step{step(withResult(batch(withResult(put(`a`, `v1`)))))}, - kvs: nil, - expected: []string{`committed batch missing write: [w]"a":missing->v1`}, + name: "one batch put with missing write", + steps: []Step{step(withResultTS(batch(withResult(put(`a`, s1))), t1))}, + kvs: nil, }, { - name: "one batch delete with missing write", - steps: []Step{step(withResult(batch(withResult(del(`a`)))))}, - kvs: nil, - expected: []string{`committed batch missing 
write: [d]"a":missing->`}, + name: "one batch delete with missing write", + steps: []Step{step(withResultTS(batch(withResult(del(`a`, s1))), t1))}, + kvs: nil, }, { name: "one transactionally committed put with the correct writes", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + ), t1)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1)), }, + { name: "one transactionally committed delete with the correct writes", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(del(`a`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(del(`a`, s1)), + ), t1)), }, - kvs: kvs(tombstone(`a`, 1)), - expected: nil, + kvs: kvs(tombstone(`a`, t1, s1)), }, { name: "one transactionally committed put with first write missing", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(put(`b`, `v2`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(put(`b`, s2)), + ), t1)), }, - kvs: kvs(kv(`b`, 1, `v2`)), - expected: []string{`committed txn missing write: [w]"a":missing->v1 [w]"b":0.000000001,0->v2`}, + kvs: kvs(kv(`b`, t1, s2)), }, { name: "one transactionally committed delete with first write missing", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(del(`a`)), - withResult(del(`b`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(del(`a`, s1)), + withResult(del(`b`, s2)), + ), t1)), }, - kvs: kvs(tombstone(`b`, 1)), - expected: []string{`committed txn missing write: [d]"a":missing-> [d]"b":0.000000001,0->`}, + kvs: kvs(tombstone(`b`, t1, s2)), }, { name: "one transactionally committed put with second write missing", steps: []Step{ - 
step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(put(`b`, `v2`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(put(`b`, s2)), + ), t1)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{`committed txn missing write: [w]"a":0.000000001,0->v1 [w]"b":missing->v2`}, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "one transactionally committed delete with second write missing", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(del(`a`)), - withResult(del(`b`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(del(`a`, s1)), + withResult(del(`b`, s2)), + ), t1)), }, - kvs: kvs(tombstone(`a`, 1)), - expected: []string{`committed txn missing write: [d]"a":0.000000001,0-> [d]"b":missing->`}, + kvs: kvs(tombstone(`a`, t1, s1)), }, { name: "one transactionally committed put with write timestamp disagreement", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(put(`b`, `v2`)), - ), 1))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `committed txn non-atomic timestamps: [w]"a":0.000000001,0->v1 [w]"b":0.000000002,0->v2`, + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(put(`b`, s2)), + ), t1)), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "one transactionally committed delete with write timestamp disagreement", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(del(`a`)), - withResult(del(`b`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(del(`a`, s1)), + withResult(del(`b`, s2)), + ), t1)), }, - kvs: kvs(tombstone(`a`, 1), tombstone(`b`, 2)), + kvs: kvs(tombstone(`a`, t1, s1), tombstone(`b`, t2, s2)), // NB: Error messages are different because we 
can't match an uncommitted // delete op to a stored kv like above. - expected: []string{ - `committed txn missing write: [d]"a":0.000000001,0-> [d]"b":missing->`, - }, }, { name: "one transactionally rolled back put with write (correctly) missing", steps: []Step{ step(withResultErr(closureTxn(ClosureTxnType_Rollback, - withResult(put(`a`, `v1`)), + withResult(put(`a`, s1)), ), errors.New(`rollback`))), }, - kvs: nil, - expected: nil, + kvs: nil, }, { name: "one transactionally rolled back delete with write (correctly) missing", steps: []Step{ step(withResultErr(closureTxn(ClosureTxnType_Rollback, - withResult(del(`a`)), + withResult(del(`a`, s1)), ), errors.New(`rollback`))), }, - kvs: nil, - expected: nil, + kvs: nil, }, { name: "one transactionally rolled back put with write (incorrectly) present", steps: []Step{ step(withResultErr(closureTxn(ClosureTxnType_Rollback, - withResult(put(`a`, `v1`)), + withResult(put(`a`, s1)), ), errors.New(`rollback`))), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{`uncommitted txn had writes: [w]"a":0.000000001,0->v1`}, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "one transactionally rolled back delete with write (incorrectly) present", steps: []Step{ step(withResultErr(closureTxn(ClosureTxnType_Rollback, - withResult(del(`a`)), + withResult(del(`a`, s1)), ), errors.New(`rollback`))), }, - kvs: kvs(tombstone(`a`, 1)), - expected: []string{`extra writes: [d]"a":uncertain->`}, + kvs: kvs(tombstone(`a`, t1, s1)), }, { name: "one transactionally rolled back batch put with write (correctly) missing", steps: []Step{ step(withResultErr(closureTxn(ClosureTxnType_Rollback, withResult(batch( - withResult(put(`a`, `v1`)), + withResult(put(`a`, s1)), )), ), errors.New(`rollback`))), }, - kvs: nil, - expected: nil, + kvs: nil, }, { name: "one transactionally rolled back batch delete with write (correctly) missing", steps: []Step{ step(withResultErr(closureTxn(ClosureTxnType_Rollback, withResult(batch( - withResult(del(`a`)), + 
withResult(del(`a`, s1)), )), ), errors.New(`rollback`))), }, - kvs: nil, - expected: nil, + kvs: nil, }, { name: "two transactionally committed puts of the same key", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(put(`a`, `v2`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(put(`a`, s2)), + ), t1)), + }, + kvs: kvs(kv(`a`, t1, s2)), + }, + { + // NB: this can't happen in practice since KV would throw a WriteTooOldError. + // But transactionally this works, see below. + name: "batch with two deletes of same key", + steps: []Step{ + step(withResultTS(batch( + withResult(del(`a`, s1)), + withResult(del(`a`, s2)), + ), t1)), }, - kvs: kvs(kv(`a`, 1, `v2`)), - expected: nil, + kvs: kvs(tombstone("a", t1, s2)), }, { name: "two transactionally committed deletes of the same key", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(del(`a`)), - withResult(del(`a`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(del(`a`, s1)), + withResult(del(`a`, s2)), + ), t1)), }, - kvs: kvs(tombstone(`a`, 1)), - expected: nil, + kvs: kvs(tombstone(`a`, t1, s2)), }, { name: "two transactionally committed writes (put, delete) of the same key", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(del(`a`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(del(`a`, s2)), + ), t1)), }, - kvs: kvs(tombstone(`a`, 1)), - expected: nil, + kvs: kvs(tombstone(`a`, t1, s2)), }, { name: "two transactionally committed writes (delete, put) of the same key", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(del(`a`)), - withResult(put(`a`, `v2`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + 
withResult(del(`a`, s1)), + withResult(put(`a`, s2)), + ), t1)), }, - kvs: kvs(kv(`a`, 1, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s2)), }, { name: "two transactionally committed puts of the same key with extra write", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(put(`a`, `v2`)), - ), 2))), - }, - // HACK: These should be the same timestamp. See the TODO in - // watcher.processEvents. - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`)), - expected: []string{ - `committed txn overwritten key had write: [w]"a":0.000000001,0->v1 [w]"a":0.000000002,0->v2`, + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(put(`a`, s2)), + ), t2)), }, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2)), }, { name: "two transactionally committed deletes of the same key with extra write", steps: []Step{ - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(del(`a`)), - withResult(del(`a`)), - ), 1))), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withResult(del(`a`, s1)), + withResult(del(`a`, s2)), + ), t1)), }, - // HACK: These should be the same timestamp. See the TODO in - // watcher.processEvents. - kvs: kvs(tombstone(`a`, 1), tombstone(`a`, 2)), - expected: []string{`extra writes: [d]"a":uncertain->`}, + kvs: kvs(tombstone(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { name: "two transactionally committed writes (put, delete) of the same key with extra write", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResultOK(put(`a`, `v1`)), - withResultOK(del(`a`)), - ), 1)), - }, - // HACK: These should be the same timestamp. See the TODO in - // watcher.processEvents. - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: []string{ - // NB: the deletion is marked as "missing" because we are using timestamp 1 for the - // txn and the tombstone is at 2; so it isn't marked as materialized in the verifier. 
- `committed txn overwritten key had write: [w]"a":0.000000001,0->v1 [d]"a":missing->`, + withResultOK(put(`a`, s1)), + withResultOK(del(`a`, s2)), + ), t1)), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { - name: "ambiguous transaction committed", + name: "ambiguous put-put transaction committed", steps: []Step{ - step(withResultErr(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(put(`b`, `v2`)), - ), roachpb.NewAmbiguousResultError(errors.New("boom")))), + step(withAmbResult(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(put(`b`, s2)), + ))), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 1, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t1, s2)), }, { - name: "ambiguous transaction with delete committed", + name: "ambiguous put-del transaction committed", steps: []Step{ - step(withResultErr(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(del(`b`)), - ), roachpb.NewAmbiguousResultError(errors.New("boom")))), - }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`b`, 1)), - // TODO(sarkesian): If able to determine the tombstone resulting from a - // delete in an ambiguous txn, this should pass without error. - // For now we fail validation on all ambiguous transactions with deletes. - expected: []string{ - `unable to validate delete operations in ambiguous transactions: [w]"a":0.000000001,0->v1 [d]"b":missing->`, + step(withAmbResult(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(del(`b`, s2)), + ))), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`b`, t1, s2)), }, { - name: "ambiguous transaction did not commit", + // NB: this case is a tough nut to crack if we rely on timestamps since we + // don't have a single timestamp result here and no unique values. But we + // use sequence numbers so no problem! We learn the commit timestamp from + // them if any of the writes show up. 
+ name: "ambiguous del-del transaction committed", steps: []Step{ - step(withResultErr(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(put(`b`, `v2`)), - ), roachpb.NewAmbiguousResultError(errors.New("boom")))), + step(withAmbResult(closureTxn(ClosureTxnType_Commit, + withResult(del(`a`, s1)), + withResult(del(`a`, s2)), + ))), }, - kvs: nil, - expected: nil, + kvs: kvs(tombstone(`a`, t1, s2)), }, { - name: "ambiguous transaction with delete did not commit", + name: "ambiguous del-del transaction committed but wrong seq", steps: []Step{ - step(withResultErr(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(del(`b`)), - ), roachpb.NewAmbiguousResultError(errors.New("boom")))), + step(withAmbResult(closureTxn(ClosureTxnType_Commit, + withResult(del(`a`, s1)), + withResult(del(`a`, s2)), + ))), }, - kvs: nil, - expected: nil, + kvs: kvs(tombstone(`a`, t1, s1)), }, { - name: "ambiguous transaction committed but has validation error", + name: "ambiguous put-put transaction did not commit", steps: []Step{ - step(withResultErr(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(put(`b`, `v2`)), - ), roachpb.NewAmbiguousResultError(errors.New("boom")))), + step(withAmbResult(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(put(`b`, s2)), + ))), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `ambiguous txn non-atomic timestamps: [w]"a":0.000000001,0->v1 [w]"b":0.000000002,0->v2`, + kvs: nil, + }, + { + name: "ambiguous put-del transaction did not commit", + steps: []Step{ + step(withAmbResult(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(del(`b`, s2)), + ))), }, + kvs: nil, }, { - name: "ambiguous transaction with delete committed but has validation error", + name: "ambiguous put-put transaction committed but has validation error", steps: []Step{ - 
step(withResultErr(withTimestamp(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(del(`b`)), - ), 2), roachpb.NewAmbiguousResultError(errors.New("boom")))), + step(withAmbResult(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(put(`b`, s2)), + ))), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`b`, 2)), - // TODO(sarkesian): If able to determine the tombstone resulting from a - // delete in an ambiguous txn, we should get the following error: - // `ambiguous txn non-atomic timestamps: [w]"a":0.000000001,0->v1 [w]"b":0.000000002,0->v2` - // For now we fail validation on all ambiguous transactions with deletes. - expected: []string{ - `unable to validate delete operations in ambiguous transactions: [w]"a":0.000000001,0->v1 [d]"b":missing->`, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), + }, + { + name: "ambiguous put-del transaction committed but has validation error", + steps: []Step{ + step(withAmbResult(withTimestamp(closureTxn(ClosureTxnType_Commit, + withResult(put(`a`, s1)), + withResult(del(`b`, s2)), + ), t2))), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`b`, t2, s2)), }, { name: "one read before write", steps: []Step{ - step(withReadResult(get(`a`), ``)), - step(withResult(put(`a`, `v1`))), + step(withReadResultTS(get(`a`), ``, t1)), + step(withResultTS(put(`a`, s1), t2)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + kvs: kvs(kv(`a`, t2, s1)), }, { name: "one read before delete", steps: []Step{ - step(withReadResult(get(`a`), ``)), - step(withResult(del(`a`))), + step(withReadResultTS(get(`a`), ``, t1)), + step(withResultTS(del(`a`, s1), t2)), }, - kvs: kvs(tombstone(`a`, 1)), - expected: nil, + kvs: kvs(tombstone(`a`, t2, s1)), }, { name: "one read before write and delete", steps: []Step{ - step(withReadResult(get(`a`), ``)), - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(del(`a`), 2)), + step(withReadResultTS(get(`a`), ``, t1)), + step(withResultTS(put(`a`, s1), t1)), + 
step(withResultTS(del(`a`, s2), t2)), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { name: "one read before write returning wrong value", steps: []Step{ - step(withReadResult(get(`a`), `v2`)), - step(withResult(put(`a`, `v1`))), - }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed get non-atomic timestamps: [r]"a":[0,0, 0,0)->v2`, + step(withReadResultTS(get(`a`), v1, t1)), + step(withResultTS(put(`a`, s1), t2)), }, + kvs: kvs(kv(`a`, t2, s1)), }, { name: "one read after write", steps: []Step{ - step(withResult(put(`a`, `v1`))), - step(withReadResult(get(`a`), `v1`)), + step(withResultTS(put(`a`, s1), t1)), + step(withReadResultTS(get(`a`), v1, t2)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "one read after write and delete", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(withTimestamp(del(`a`), 2), 2)), - step(withResultTS(withReadResult(get(`a`), `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(withTimestamp(del(`a`, s2), t2), t2)), + step(withReadResultTS(get(`a`), v1, t1)), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { name: "one read after write and delete returning tombstone", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(del(`a`), 2)), - step(withReadResult(get(`a`), ``)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(del(`a`, s2), t2)), + step(withReadResultTS(get(`a`), ``, t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { name: "one read after write returning wrong value", steps: []Step{ - step(withResult(put(`a`, `v1`))), - step(withReadResult(get(`a`), `v2`)), - }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed get non-atomic timestamps: 
[r]"a":[0,0, 0,0)->v2`, + step(withResultTS(put(`a`, s1), t1)), + step(withReadResultTS(get(`a`), v2, t2)), }, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "one read in between writes", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withReadResult(get(`a`), `v1`)), - step(withResultTS(put(`a`, `v2`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withReadResultTS(get(`a`), v1, t2)), + step(withResultTS(put(`a`, s2), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t3, s2)), }, { name: "one read in between write and delete", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withReadResult(get(`a`), `v1`)), - step(withResultTS(del(`a`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withReadResultTS(get(`a`), v1, t2)), + step(withResultTS(del(`a`, s2), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t3, s2)), }, { name: "batch of reads after writes", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResult(batch( - withReadResult(get(`a`), `v1`), - withReadResult(get(`b`), `v2`), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(batch( + withReadResult(get(`a`), v1), + withReadResult(get(`b`), v2), withReadResult(get(`c`), ``), - ))), + ), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "batch of reads after writes and deletes", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(del(`a`), 3)), - step(withResultTS(del(`b`), 4)), - step(withResult(batch( - withReadResult(get(`a`), `v1`), - withReadResult(get(`b`), `v2`), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(del(`a`, s3), t3)), + 
step(withResultTS(del(`b`, s4), t4)), + step(withResultTS(batch( + withReadResult(get(`a`), v1), + withReadResult(get(`b`), v2), withReadResult(get(`c`), ``), - ))), + ), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), tombstone(`a`, 3), tombstone(`b`, 4)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), tombstone(`a`, t3, s3), tombstone(`b`, t4, s4)), }, { name: "batch of reads after writes and deletes returning tombstones", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(del(`a`), 3)), - step(withResultTS(del(`b`), 4)), - step(withResult(batch( + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(del(`a`, s3), t3)), + step(withResultTS(del(`b`, s3), t4)), + step(withResultTS(batch( withReadResult(get(`a`), ``), withReadResult(get(`b`), ``), withReadResult(get(`c`), ``), - ))), + ), t5)), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), tombstone(`a`, 3), tombstone(`b`, 4)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), tombstone(`a`, t3, s3), tombstone(`b`, t4, s4)), }, { name: "batch of reads after writes returning wrong values", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResult(batch( + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(batch( withReadResult(get(`a`), ``), - withReadResult(get(`b`), `v1`), - withReadResult(get(`c`), `v2`), - ))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `committed batch non-atomic timestamps: ` + - `[r]"a":[, 0.000000001,0)-> [r]"b":[0,0, 0,0)->v1 [r]"c":[0,0, 0,0)->v2`, + withReadResult(get(`b`), v1), + withReadResult(get(`c`), v2), + ), t3)), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "batch of reads after writes and deletes returning wrong values", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - 
step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(del(`a`), 3)), - step(withResultTS(del(`b`), 4)), - step(withResult(batch( + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(del(`a`, s3), t3)), + step(withResultTS(del(`b`, s4), t4)), + step(withResultTS(batch( withReadResult(get(`a`), ``), - withReadResult(get(`b`), `v1`), - withReadResult(get(`c`), `v2`), - ))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), tombstone(`a`, 3), tombstone(`b`, 4)), - expected: []string{ - `committed batch non-atomic timestamps: ` + - `[r]"a":[, 0.000000001,0),[0.000000003,0, )-> [r]"b":[0,0, 0,0)->v1 [r]"c":[0,0, 0,0)->v2`, + withReadResult(get(`b`), v1), + withReadResult(get(`c`), v2), + ), t5)), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), tombstone(`a`, t3, s3), tombstone(`b`, t4, s4)), }, { - name: "batch of reads after writes with non-empty time overlap", + name: "batch of reads after writes with empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResult(batch( + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(batch( withReadResult(get(`a`), ``), - withReadResult(get(`b`), `v2`), + withReadResult(get(`b`), v2), withReadResult(get(`c`), ``), - ))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `committed batch non-atomic timestamps: ` + - `[r]"a":[, 0.000000001,0)-> [r]"b":[0.000000002,0, )->v2 [r]"c":[, )->`, + ), t3)), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "batch of reads after writes and deletes with valid time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(del(`a`), 3)), - step(withResultTS(del(`b`), 4)), - step(withResult(batch( + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(del(`a`, s3), t3)), + 
step(withResultTS(del(`b`, s4), t4)), + step(withResultTS(batch( withReadResult(get(`a`), ``), - withReadResult(get(`b`), `v2`), + withReadResult(get(`b`), v2), withReadResult(get(`c`), ``), - ))), + ), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), tombstone(`a`, 3), tombstone(`b`, 4)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), tombstone(`a`, t3, s3), tombstone(`b`, t4, s4)), }, { name: "transactional reads with non-empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 3)), - step(withResultTS(put(`b`, `v3`), 2)), - step(withResultTS(put(`b`, `v4`), 3)), - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), - withReadResult(get(`b`), `v3`), - ), 3))), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t3)), + step(withResultTS(put(`b`, s3), t2)), + step(withResultTS(put(`b`, s4), t3)), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withReadResult(get(`a`), v1), + withReadResult(get(`b`), v3), + ), t3)), }, // Reading v1 is valid from 1-3 and v3 is valid from 2-3: overlap 2-3 - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 3, `v2`), kv(`b`, 2, `v3`), kv(`b`, 3, `v4`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t3, s2), kv(`b`, t2, s3), kv(`b`, t3, s4)), }, { name: "transactional reads after writes and deletes with non-empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(del(`a`), 3)), - step(withResultTS(del(`b`), 4)), - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(del(`a`, s3), t3)), + step(withResultTS(del(`b`, s4), t4)), + step(withResultTS(closureTxn(ClosureTxnType_Commit, withReadResult(get(`a`), ``), - withReadResult(get(`b`), `v2`), + withReadResult(get(`b`), v2), withReadResult(get(`c`), ``), - 
), 4))), + ), t4)), }, // Reading (a, ) is valid from min-1 or 3-max, and (b, v2) is valid from 2-4: overlap 3-4 - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), tombstone(`a`, 3), tombstone(`b`, 4)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), tombstone(`a`, t3, s3), tombstone(`b`, t4, s4)), }, { name: "transactional reads with empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 2)), - step(withResultTS(put(`b`, `v3`), 2)), - step(withResultTS(put(`b`, `v4`), 3)), - step(withResult(withTimestamp(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), - withReadResult(get(`b`), `v3`), - ), 3))), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), + step(withResultTS(put(`b`, s3), t2)), + step(withResultTS(put(`b`, s4), t3)), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withReadResult(get(`a`), v1), + withReadResult(get(`b`), v3), + ), t3)), }, // Reading v1 is valid from 1-2 and v3 is valid from 2-3: no overlap - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`), kv(`b`, 2, `v3`), kv(`b`, 3, `v4`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[r]"a":[0.000000001,0, 0.000000002,0)->v1 [r]"b":[0.000000002,0, 0.000000003,0)->v3`, - }, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2), kv(`b`, t2, s3), kv(`b`, t3, s4)), }, { name: "transactional reads after writes and deletes with empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResultOK(del(`a`)), - withResultOK(del(`b`)), - ), 3)), + withResultOK(del(`a`, s3)), + withResultOK(del(`b`, s4)), + ), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, withReadResult(get(`a`), ``), - withReadResult(get(`b`), `v2`), + withReadResult(get(`b`), v2), withReadResult(get(`c`), ``), 
- ), 4)), + ), t4)), }, // Reading (a, ) is valid from min-1 or 3-max, and (b, v2) is valid from 2-3: no overlap - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), tombstone(`a`, 3), tombstone(`b`, 3)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[r]"a":[, 0.000000001,0),[0.000000003,0, )-> [r]"b":[0.000000002,0, 0.000000003,0)->v2 [r]"c":[, )->`, - }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), tombstone(`a`, t3, s3), tombstone(`b`, t3, s4)), }, { name: "transactional reads and deletes after write with non-empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), - withResult(del(`a`)), + withReadResult(get(`a`), v1), + withResult(del(`a`, s2)), withReadResult(get(`a`), ``), - ), 2)), - step(withResultTS(put(`a`, `v2`), 3)), - step(withResultTS(del(`a`), 4)), + ), t2)), + step(withResultTS(put(`a`, s3), t3)), + step(withResultTS(del(`a`, s4), t4)), }, // Reading (a, v1) is valid from 1-2, reading (a, ) is valid from min-1, 2-3, or 4-max: overlap in txn view at 2 - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2), kv(`a`, 3, `v2`), tombstone(`a`, 4)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2), kv(`a`, t3, s3), tombstone(`a`, t4, s4)), }, { name: "transactional reads and deletes after write with empty time overlap", steps: []Step{ - step(withResult(put(`a`, `v1`))), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, withReadResult(get(`a`), ``), - withResult(del(`a`)), + withResult(del(`a`, s2)), withReadResult(get(`a`), ``), - ), 2)), - step(withResultTS(put(`a`, `v2`), 3)), - step(withResultTS(del(`a`), 4)), + ), t2)), + step(withResultTS(put(`a`, s3), t3)), + step(withResultTS(del(`a`, s4), t4)), }, // First read of (a, ) is valid from min-1 or 4-max, delete is valid at 2: no overlap - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2), 
kv(`a`, 3, `v2`), tombstone(`a`, 4)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[r]"a":[, 0.000000001,0),[0.000000004,0, )-> [d]"a":0.000000002,0-> [r]"a":[, 0.000000001,0),[0.000000004,0, ),[0.000000002,0, 0.000000003,0)->`, - }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2), kv(`a`, t3, s3), tombstone(`a`, t4, s4)), }, { name: "transactional reads one missing with non-empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 2)), - step(withResultTS(put(`b`, `v3`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), + step(withResultTS(put(`b`, s3), t2)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), + withReadResult(get(`a`), v1), withReadResult(get(`b`), ``), - ), 1)), + ), t1)), }, // Reading v1 is valid from 1-2 and v3 is valid from 0-2: overlap 1-2 - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`), kv(`b`, 2, `v3`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2), kv(`b`, t2, s3)), }, { name: "transactional reads one missing with empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 2)), - step(withResultTS(put(`b`, `v3`), 1)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), + step(withResultTS(put(`b`, s3), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), + withReadResult(get(`a`), v1), withReadResult(get(`b`), ``), - ), 1)), + ), t1)), }, // Reading v1 is valid from 1-2 and v3 is valid from 0-1: no overlap - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`), kv(`b`, 1, `v3`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[r]"a":[0.000000001,0, 0.000000002,0)->v1 [r]"b":[, 0.000000001,0)->`, - }, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2), kv(`b`, t1, s3)), }, { name: "transactional read and write with non-empty time overlap", steps: 
[]Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 3)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), - withResult(put(`b`, `v3`)), - ), 2)), + withReadResult(get(`a`), v1), + withResult(put(`b`, s3)), + ), t2)), }, // Reading v1 is valid from 1-3 and v3 is valid at 2: overlap @2 - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 3, `v2`), kv(`b`, 2, `v3`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t3, s2), kv(`b`, t2, s3)), }, { name: "transactional read and write with empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), - withResultOK(put(`b`, `v3`)), - ), 2)), + withReadResult(get(`a`), v1), + withResultOK(put(`b`, s3)), + ), t2)), }, // Reading v1 is valid from 1-2 and v3 is valid at 2: no overlap - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`), kv(`b`, 2, `v3`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[r]"a":[0.000000001,0, 0.000000002,0)->v1 [w]"b":0.000000002,0->v3`, - }, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2), kv(`b`, t2, s3)), }, { name: "transaction with read before and after write", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, withReadResult(get(`a`), ``), - withResult(put(`a`, `v1`)), - withReadResult(get(`a`), `v1`), - ), 1)), + withResult(put(`a`, s1)), + withReadResult(get(`a`), v1), + ), t1)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "transaction with read before and after delete", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), - 
withResult(del(`a`)), + withReadResult(get(`a`), v1), + withResult(del(`a`, s2)), withReadResult(get(`a`), ``), - ), 2)), + ), t2)), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { name: "transaction with incorrect read before write", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), - withResult(put(`a`, `v1`)), - withReadResult(get(`a`), `v1`), - ), 1)), - }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[r]"a":[0,0, 0,0)->v1 [w]"a":0.000000001,0->v1 [r]"a":[0.000000001,0, )->v1`, + withReadResult(get(`a`), v1), + withResult(put(`a`, s1)), + withReadResult(get(`a`), v1), + ), t1)), }, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "transaction with incorrect read before delete", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, withReadResult(get(`a`), ``), - withResult(del(`a`)), + withResult(del(`a`, s2)), withReadResult(get(`a`), ``), - ), 2)), - }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[r]"a":[, 0.000000001,0)-> [d]"a":0.000000002,0-> [r]"a":[, 0.000000001,0),[0.000000002,0, )->`, + ), t2)), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { name: "transaction with incorrect read after write", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, withReadResult(get(`a`), ``), - withResult(put(`a`, `v1`)), + withResult(put(`a`, s1)), withReadResult(get(`a`), ``), - ), 1)), - }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[r]"a":[, )-> [w]"a":0.000000001,0->v1 [r]"a":[, 0.000000001,0)->`, + ), t1)), }, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "transaction with incorrect read after delete", steps: []Step{ - step(withResultTS(put(`a`, 
`v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withReadResult(get(`a`), `v1`), - withResultOK(del(`a`)), - withReadResult(get(`a`), `v1`), - ), 2)), - }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[r]"a":[0.000000001,0, )->v1 [d]"a":0.000000002,0-> [r]"a":[0.000000001,0, 0.000000002,0)->v1`, + withReadResult(get(`a`), v1), + withResultOK(del(`a`, s2)), + withReadResult(get(`a`), v1), + ), t2)), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { name: "two transactionally committed puts of the same key with reads", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, withReadResult(get(`a`), ``), - withResult(put(`a`, `v1`)), - withReadResult(get(`a`), `v1`), - withResult(put(`a`, `v2`)), - withReadResult(get(`a`), `v2`), - ), 1)), + withResult(put(`a`, s1)), + withReadResult(get(`a`), v1), + withResult(put(`a`, s2)), + withReadResult(get(`a`), v2), + ), t1)), }, - kvs: kvs(kv(`a`, 1, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s2)), }, { name: "two transactionally committed put/delete ops of the same key with reads", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, withReadResult(get(`a`), ``), - withResult(put(`a`, `v1`)), - withReadResult(get(`a`), `v1`), - withResult(del(`a`)), + withResult(put(`a`, s1)), + withReadResult(get(`a`), v1), + withResult(del(`a`, s2)), withReadResult(get(`a`), ``), - ), 1)), + ), t1)), }, - kvs: kvs(tombstone(`a`, 1)), - expected: nil, + kvs: kvs(tombstone(`a`, t1, s2)), }, { name: "two transactionally committed put/delete ops of the same key with incorrect read", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, withReadResult(get(`a`), ``), - withResult(put(`a`, `v1`)), - withReadResult(get(`a`), `v1`), - withResult(del(`a`)), - withReadResult(get(`a`), `v1`), - ), 1)), - }, - kvs: kvs(tombstone(`a`, 1)), - expected: []string{ - 
`committed txn non-atomic timestamps: ` + - `[r]"a":[, )-> [w]"a":missing->v1 [r]"a":[0.000000001,0, )->v1 [d]"a":0.000000001,0-> [r]"a":[0,0, 0,0)->v1`, + withResult(put(`a`, s1)), + withReadResult(get(`a`), v1), + withResult(del(`a`, s2)), + withReadResult(get(`a`), v1), + ), t1)), }, + kvs: kvs(tombstone(`a`, t1, s2)), }, { name: "one transactional put with correct commit time", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - ), 1)), + withResult(put(`a`, s1)), + ), t1)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "one transactional put with incorrect commit time", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - ), 1)), - }, - kvs: kvs(kv(`a`, 2, `v1`)), - expected: []string{ - `mismatched write timestamp 0.000000001,0: [w]"a":0.000000002,0->v1`, + withResult(put(`a`, s1)), + ), t1)), }, + kvs: kvs(kv(`a`, t2, s1)), }, { name: "one transactional delete with write on another key after delete", steps: []Step{ // NB: this Delete comes first in operation order, but the write is delayed. - step(withResultTS(del(`a`), 3)), + step(withResultTS(del(`a`, s1), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResult(put(`b`, `v1`)), - withResult(del(`a`)), - ), 2)), + withResult(put(`b`, s2)), + withResult(del(`a`, s3)), + ), t2)), }, - kvs: kvs(tombstone(`a`, 2), tombstone(`a`, 3), kv(`b`, 2, `v1`)), - // This should fail validation if we match delete operations to tombstones by operation order, - // and should pass if we correctly use the transaction timestamp. While the first delete is - // an earlier operation, the transactional delete actually commits first. 
- expected: nil, + kvs: kvs(tombstone(`a`, t2, s3), tombstone(`a`, t3, s1), kv(`b`, t2, s2)), }, { name: "two transactional deletes with out of order commit times", steps: []Step{ - step(withResultTS(del(`a`), 2)), - step(withResultTS(del(`b`), 3)), + step(withResultTS(del(`a`, s1), t2)), + step(withResultTS(del(`b`, s2), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResult(del(`a`)), - withResult(del(`b`)), - ), 1)), + withResult(del(`a`, s3)), + withResult(del(`b`, s4)), + ), t1)), }, - kvs: kvs(tombstone(`a`, 1), tombstone(`a`, 2), tombstone(`b`, 1), tombstone(`b`, 3)), - // This should fail validation if we match delete operations to tombstones by operation order, - // and should pass if we correctly use the transaction timestamp. While the first two deletes are - // earlier operations, the transactional deletes actually commits first. - expected: nil, + kvs: kvs(tombstone(`a`, t1, s3), tombstone(`a`, t2, s1), tombstone(`b`, t1, s4), tombstone(`b`, t3, s2)), }, { name: "one transactional scan followed by delete within time range", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - withResult(del(`a`)), - ), 2)), - step(withResultTS(put(`b`, `v2`), 3)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + withResult(del(`a`, s2)), + ), t2)), + step(withResultTS(put(`b`, s3), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2), kv(`b`, 3, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2), kv(`b`, t3, s3)), }, { name: "one transactional scan followed by delete outside time range", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - withResult(del(`a`)), - ), 4)), - step(withResultTS(put(`b`, `v2`), 3)), - }, - 
kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 4), kv(`b`, 3, `v2`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[s]{a-c}:{0:[0.000000001,0, ), gap:[, 0.000000003,0)}->["a":v1] [d]"a":0.000000004,0->`, + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + withResult(del(`a`, s2)), + ), t4)), + step(withResultTS(put(`b`, s3), t3)), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t4, s2), kv(`b`, t3, s3)), }, { name: "one scan before write", steps: []Step{ - step(withScanResult(scan(`a`, `c`))), - step(withResult(put(`a`, `v1`))), + step(withScanResultTS(scan(`a`, `c`), t1)), + step(withResultTS(put(`a`, s1), t2)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + kvs: kvs(kv(`a`, t2, s1)), }, { name: "one scan before write returning wrong value", steps: []Step{ - step(withScanResult(scan(`a`, `c`), scanKV(`a`, `v2`))), - step(withResult(put(`a`, `v1`))), - }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed scan non-atomic timestamps: ` + - `[s]{a-c}:{0:[0,0, 0,0), gap:[, )}->["a":v2]`, + step(withScanResultTS(scan(`a`, `c`), t1, scanKV(`a`, v2))), + step(withResultTS(put(`a`, s1), t2)), }, + kvs: kvs(kv(`a`, t2, s1)), }, { name: "one scan after write", steps: []Step{ - step(withResult(put(`a`, `v1`))), - step(withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`))), + step(withResultTS(put(`a`, s1), t1)), + step(withScanResultTS(scan(`a`, `c`), t2, scanKV(`a`, v1))), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "one scan after write returning wrong value", steps: []Step{ - step(withResult(put(`a`, `v1`))), - step(withScanResult(scan(`a`, `c`), scanKV(`a`, `v2`))), - }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed scan non-atomic timestamps: ` + - `[s]{a-c}:{0:[0,0, 0,0), gap:[, )}->["a":v2]`, + step(withResultTS(put(`a`, s1), t1)), + step(withScanResultTS(scan(`a`, `c`), t2, scanKV(`a`, v2))), }, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "one scan after writes", 
steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`), scanKV(`b`, `v2`))), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withScanResultTS(scan(`a`, `c`), t3, scanKV(`a`, v1), scanKV(`b`, v2))), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "one reverse scan after writes", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withScanResult(reverseScan(`a`, `c`), scanKV(`b`, `v2`), scanKV(`a`, `v1`))), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withScanResultTS(reverseScan(`a`, `c`), t3, scanKV(`b`, v2), scanKV(`a`, v1))), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "one scan after writes and delete", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(del(`a`), 3)), - step(withResultTS(put(`a`, `v3`), 4)), - step(withScanResult(scan(`a`, `c`), scanKV(`b`, `v2`))), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(del(`a`, s3), t3)), + step(withResultTS(put(`a`, s4), t4)), + step(withScanResultTS(scan(`a`, `c`), t5, scanKV(`b`, v2))), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), tombstone(`a`, 3), kv(`a`, 4, `v3`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), tombstone(`a`, t3, s3), kv(`a`, t4, s4)), }, { name: "one scan after write returning extra key", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`), scanKV(`a2`, `v3`), scanKV(`b`, `v2`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `committed scan 
non-atomic timestamps: ` + - `[s]{a-c}:{0:[0.000000001,0, ), 1:[0,0, 0,0), 2:[0.000000002,0, ), gap:[, )}->["a":v1, "a2":v3, "b":v2]`, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withScanResultTS(scan(`a`, `c`), t3, scanKV(`a`, v1), scanKV(`a2`, v3), scanKV(`b`, v2))), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "one tranactional scan after write and delete returning extra key", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResult(put(`b`, `v2`)), - withResult(del(`a`)), - ), 2)), - step(withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`), scanKV(`b`, `v2`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2), kv(`b`, 2, `v2`)), - expected: []string{ - `committed scan non-atomic timestamps: ` + - `[s]{a-c}:{0:[0.000000001,0, 0.000000002,0), 1:[0.000000002,0, ), gap:[, )}->["a":v1, "b":v2]`, + withResult(put(`b`, s2)), + withResult(del(`a`, s3)), + ), t2)), + step(withScanResultTS(scan(`a`, `c`), t3, scanKV(`a`, v1), scanKV(`b`, v2))), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s3), kv(`b`, t2, s2)), }, { name: "one reverse scan after write returning extra key", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withScanResult(reverseScan(`a`, `c`), scanKV(`b`, `v2`), scanKV(`a2`, `v3`), scanKV(`a`, `v1`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `committed reverse scan non-atomic timestamps: ` + - `[rs]{a-c}:{0:[0.000000002,0, ), 1:[0,0, 0,0), 2:[0.000000001,0, ), gap:[, )}->["b":v2, "a2":v3, "a":v1]`, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withScanResultTS(reverseScan(`a`, `c`), t3, scanKV(`b`, v2), scanKV(`a2`, v3), scanKV(`a`, v1))), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "one scan after write returning missing key", steps: []Step{ - 
step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withScanResult(scan(`a`, `c`), scanKV(`b`, `v2`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `committed scan non-atomic timestamps: ` + - `[s]{a-c}:{0:[0.000000002,0, ), gap:[, 0.000000001,0)}->["b":v2]`, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withScanResultTS(scan(`a`, `c`), t3, scanKV(`b`, v2))), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "one scan after writes and delete returning missing key", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResult(put(`a`, `v1`)), - withResult(put(`b`, `v2`)), - ), 1)), + withResult(put(`a`, s1)), + withResult(put(`b`, s2)), + ), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`b`, `v2`)), - withResult(del(`a`)), - ), 2)), - step(withResultTS(put(`a`, `v3`), 3)), - step(withResultTS(del(`a`), 4)), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 1, `v2`), tombstone(`a`, 2), kv(`a`, 3, `v3`), tombstone(`a`, 4)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[s]{a-c}:{0:[0.000000001,0, ), gap:[, 0.000000001,0),[0.000000004,0, )}->["b":v2] [d]"a":0.000000002,0->`, + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`b`, v2)), + withResult(del(`a`, s3)), + ), t2)), + step(withResultTS(put(`a`, s4), t3)), + step(withResultTS(del(`a`, s5), t4)), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t1, s2), tombstone(`a`, t2, s3), kv(`a`, t3, s4), tombstone(`a`, t4, s5)), }, { name: "one reverse scan after write returning missing key", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withScanResult(reverseScan(`a`, `c`), scanKV(`b`, `v2`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `committed reverse scan non-atomic timestamps: ` + - `[rs]{a-c}:{0:[0.000000002,0, ), gap:[, 
0.000000001,0)}->["b":v2]`, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withScanResultTS(reverseScan(`a`, `c`), t3, scanKV(`b`, v2))), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "one scan after writes returning results in wrong order", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withScanResult(scan(`a`, `c`), scanKV(`b`, `v2`), scanKV(`a`, `v1`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `scan result not ordered correctly: ` + - `[s]{a-c}:{0:[0.000000002,0, ), 1:[0.000000001,0, ), gap:[, )}->["b":v2, "a":v1]`, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withScanResultTS(scan(`a`, `c`), t3, scanKV(`b`, v2), scanKV(`a`, v1))), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "one reverse scan after writes returning results in wrong order", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withScanResult(reverseScan(`a`, `c`), scanKV(`a`, `v1`), scanKV(`b`, `v2`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `scan result not ordered correctly: ` + - `[rs]{a-c}:{0:[0.000000001,0, ), 1:[0.000000002,0, ), gap:[, )}->["a":v1, "b":v2]`, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withScanResultTS(reverseScan(`a`, `c`), t3, scanKV(`a`, v1), scanKV(`b`, v2))), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "one scan after writes returning results outside scan boundary", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(put(`c`, `v3`), 3)), - step(withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`), scanKV(`b`, `v2`), scanKV(`c`, `v3`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), kv(`c`, 3, `v3`)), - expected: []string{ - `key "c" outside scan bounds: ` + 
- `[s]{a-c}:{0:[0.000000001,0, ), 1:[0.000000002,0, ), 2:[0.000000003,0, ), gap:[, )}->["a":v1, "b":v2, "c":v3]`, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(put(`c`, s3), t3)), + step(withScanResultTS(scan(`a`, `c`), t4, scanKV(`a`, v1), scanKV(`b`, v2), scanKV(`c`, v3))), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), kv(`c`, t3, s3)), }, { name: "one reverse scan after writes returning results outside scan boundary", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(put(`c`, `v3`), 3)), - step(withScanResult(reverseScan(`a`, `c`), scanKV(`c`, `v3`), scanKV(`b`, `v2`), scanKV(`a`, `v1`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), kv(`c`, 3, `v3`)), - expected: []string{ - `key "c" outside scan bounds: ` + - `[rs]{a-c}:{0:[0.000000003,0, ), 1:[0.000000002,0, ), 2:[0.000000001,0, ), gap:[, )}->["c":v3, "b":v2, "a":v1]`, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(put(`c`, s3), t3)), + step(withScanResultTS(reverseScan(`a`, `c`), t4, scanKV(`c`, v3), scanKV(`b`, v2), scanKV(`a`, v1))), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), kv(`c`, t3, s3)), }, { name: "one scan in between writes", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`))), - step(withResultTS(put(`a`, `v2`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withScanResultTS(scan(`a`, `c`), t2, scanKV(`a`, v1))), + step(withResultTS(put(`a`, s2), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t3, s2)), }, { name: "batch of scans after writes", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResult(batch( - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`), scanKV(`b`, `v2`)), - withScanResult(scan(`b`, `d`), scanKV(`b`, `v2`)), 
- withScanResult(scan(`c`, `e`)), - ))), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(batch( + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1), scanKV(`b`, v2)), + withScanResultTS(scan(`b`, `d`), noTS, scanKV(`b`, v2)), + withScanResultTS(scan(`c`, `e`), noTS), + ), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "batch of scans after writes returning wrong values", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResult(batch( - withScanResult(scan(`a`, `c`)), - withScanResult(scan(`b`, `d`), scanKV(`b`, `v1`)), - withScanResult(scan(`c`, `e`), scanKV(`c`, `v2`)), - ))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `committed batch non-atomic timestamps: ` + - `[s]{a-c}:{gap:[, 0.000000001,0)}->[] ` + - `[s]{b-d}:{0:[0,0, 0,0), gap:[, )}->["b":v1] ` + - `[s]{c-e}:{0:[0,0, 0,0), gap:[, )}->["c":v2]`, + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(batch( + withScanResultTS(scan(`a`, `c`), noTS), + withScanResultTS(scan(`b`, `d`), noTS, scanKV(`b`, v1)), + withScanResultTS(scan(`c`, `e`), noTS, scanKV(`c`, v2)), + ), t3)), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "batch of scans after writes with non-empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResult(batch( - withScanResult(scan(`a`, `c`), scanKV(`b`, `v1`)), - withScanResult(scan(`b`, `d`), scanKV(`b`, `v1`)), - withScanResult(scan(`c`, `e`)), - ))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`)), - expected: []string{ - `committed batch non-atomic timestamps: ` + - `[s]{a-c}:{0:[0,0, 0,0), gap:[, 0.000000001,0)}->["b":v1] ` + - `[s]{b-d}:{0:[0,0, 0,0), gap:[, )}->["b":v1] ` + - `[s]{c-e}:{gap:[, )}->[]`, + 
step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(batch( + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`b`, v1)), + withScanResultTS(scan(`b`, `d`), noTS, scanKV(`b`, v1)), + withScanResultTS(scan(`c`, `e`), noTS), + ), t3)), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2)), }, { name: "transactional scans with non-empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 3)), - step(withResultTS(put(`b`, `v3`), 2)), - step(withResultTS(put(`b`, `v4`), 3)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t3)), + step(withResultTS(put(`b`, s3), t2)), + step(withResultTS(put(`b`, s4), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`), scanKV(`b`, `v3`)), - withScanResult(scan(`b`, `d`), scanKV(`b`, `v3`)), - ), 2)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1), scanKV(`b`, v3)), + withScanResultTS(scan(`b`, `d`), noTS, scanKV(`b`, v3)), + ), t2)), }, // Reading v1 is valid from 1-3 and v3 is valid from 2-3: overlap 2-3 - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 3, `v2`), kv(`b`, 2, `v3`), kv(`b`, 3, `v4`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t3, s2), kv(`b`, t2, s3), kv(`b`, t3, s4)), }, { name: "transactional scans after delete with non-empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 3)), - step(withResultTS(put(`b`, `v3`), 1)), - step(withResultTS(del(`b`), 2)), - step(withResultTS(put(`b`, `v4`), 4)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t3)), + step(withResultTS(put(`b`, s3), t1)), + step(withResultTS(del(`b`, s4), t2)), + step(withResultTS(put(`b`, s5), t4)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - withScanResult(scan(`b`, `d`)), - ), 2)), + withScanResultTS(scan(`a`, `c`), noTS, 
scanKV(`a`, v1)), + withScanResultTS(scan(`b`, `d`), noTS), + ), t2)), }, // Reading v1 is valid from 1-3 and for `b` is valid -1 and 2-4: overlap 2-3 - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 3, `v2`), kv(`b`, 1, `v3`), tombstone(`b`, 2), kv(`b`, 4, `v4`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t3, s2), kv(`b`, t1, s3), tombstone(`b`, t2, s4), kv(`b`, t4, s5)), }, { name: "transactional scans with empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 2)), - step(withResultTS(put(`b`, `v3`), 2)), - step(withResultTS(put(`b`, `v4`), 3)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), + step(withResultTS(put(`b`, s3), t2)), + step(withResultTS(put(`b`, s4), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`), scanKV(`b`, `v3`)), - withScanResult(scan(`b`, `d`), scanKV(`b`, `v3`)), - ), 2)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1), scanKV(`b`, v3)), + withScanResultTS(scan(`b`, `d`), noTS, scanKV(`b`, v3)), + ), t2)), }, // Reading v1 is valid from 1-2 and v3 is valid from 2-3: no overlap - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`), kv(`b`, 2, `v3`), kv(`b`, 3, `v4`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[s]{a-c}:{0:[0.000000001,0, 0.000000002,0), 1:[0.000000002,0, 0.000000003,0), gap:[, )}->["a":v1, "b":v3] ` + - `[s]{b-d}:{0:[0.000000002,0, 0.000000003,0), gap:[, )}->["b":v3]`, - }, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2), kv(`b`, t2, s3), kv(`b`, t3, s4)), }, { name: "transactional scans after delete with empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 2)), - step(withResultTS(put(`b`, `v3`), 1)), - step(withResultTS(del(`b`), 3)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), + step(withResultTS(put(`b`, s3), t1)), + step(withResultTS(del(`b`, s4), t3)), 
step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - withScanResult(scan(`b`, `d`)), - ), 3)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + withScanResultTS(scan(`b`, `d`), noTS), + ), t3)), }, // Reading v1 is valid from 1-2 and for `b` is valid from -1, 3-: no overlap - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`), kv(`b`, 1, `v3`), tombstone(`b`, 3)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[s]{a-c}:{0:[0.000000001,0, 0.000000002,0), gap:[, 0.000000001,0),[0.000000003,0, )}->["a":v1] ` + - `[s]{b-d}:{gap:[, 0.000000001,0),[0.000000003,0, )}->[]`, - }, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2), kv(`b`, t1, s3), tombstone(`b`, t3, s4)), }, { name: "transactional scans one missing with non-empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 2)), - step(withResultTS(put(`b`, `v3`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), + step(withResultTS(put(`b`, s3), t2)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - withScanResult(scan(`b`, `d`)), - ), 2)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + withScanResultTS(scan(`b`, `d`), noTS), + ), t2)), }, // Reading v1 is valid from 1-2 and v3 is valid from 0-2: overlap 1-2 - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`), kv(`b`, 2, `v3`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2), kv(`b`, t2, s3)), }, { name: "transactional scans one missing with empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 2)), - step(withResultTS(put(`b`, `v3`), 1)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), + step(withResultTS(put(`b`, s3), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - 
withScanResult(scan(`b`, `d`)), - ), 1)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + withScanResultTS(scan(`b`, `d`), noTS), + ), t1)), }, // Reading v1 is valid from 1-2 and v3 is valid from 0-1: no overlap - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`), kv(`b`, 1, `v3`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[s]{a-c}:{0:[0.000000001,0, 0.000000002,0), gap:[, 0.000000001,0)}->["a":v1] ` + - `[s]{b-d}:{gap:[, 0.000000001,0)}->[]`, - }, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2), kv(`b`, t1, s3)), }, { name: "transactional scan and write with non-empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 3)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - withResult(put(`b`, `v3`)), - ), 2)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + withResult(put(`b`, s3)), + ), t2)), }, // Reading v1 is valid from 1-3 and v3 is valid at 2: overlap @2 - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 3, `v2`), kv(`b`, 2, `v3`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t3, s2), kv(`b`, t2, s3)), }, { name: "transactional scan and write with empty time overlap", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`a`, `v2`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - withResult(put(`b`, `v3`)), - ), 2)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + withResult(put(`b`, s3)), + ), t2)), }, // Reading v1 is valid from 1-2 and v3 is valid at 2: no overlap - kvs: kvs(kv(`a`, 1, `v1`), kv(`a`, 2, `v2`), kv(`b`, 2, `v3`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[s]{a-c}:{0:[0.000000001,0, 0.000000002,0), gap:[, 
)}->["a":v1] [w]"b":0.000000002,0->v3`, - }, + kvs: kvs(kv(`a`, t1, s1), kv(`a`, t2, s2), kv(`b`, t2, s3)), }, { name: "transaction with scan before and after write", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`)), - withResult(put(`a`, `v1`)), - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - ), 1)), + withScanResultTS(scan(`a`, `c`), noTS), + withResult(put(`a`, s1)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + ), t1)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "transaction with incorrect scan before write", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - withResult(put(`a`, `v1`)), - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - ), 1)), - }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[s]{a-c}:{0:[0,0, 0,0), gap:[, )}->["a":v1] ` + - `[w]"a":0.000000001,0->v1 ` + - `[s]{a-c}:{0:[0.000000001,0, ), gap:[, )}->["a":v1]`, + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + withResult(put(`a`, s1)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + ), t1)), }, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "transaction with incorrect scan after write", steps: []Step{ step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`)), - withResult(put(`a`, `v1`)), - withScanResult(scan(`a`, `c`)), - ), 1)), - }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[s]{a-c}:{gap:[, )}->[] [w]"a":0.000000001,0->v1 [s]{a-c}:{gap:[, 0.000000001,0)}->[]`, + withScanResultTS(scan(`a`, `c`), noTS), + withResult(put(`a`, s1)), + withScanResultTS(scan(`a`, `c`), noTS), + ), t1)), }, + kvs: kvs(kv(`a`, t1, s1)), }, { name: "two transactionally committed puts of the same key with scans", steps: []Step{ 
step(withResultTS(closureTxn(ClosureTxnType_Commit, - withScanResult(scan(`a`, `c`)), - withResult(put(`a`, `v1`)), - withScanResult(scan(`a`, `c`), scanKV(`a`, `v1`)), - withResult(put(`a`, `v2`)), - withScanResult(scan(`a`, `c`), scanKV(`a`, `v2`)), - withResult(put(`b`, `v3`)), - withScanResult(scan(`a`, `c`), scanKV(`a`, `v2`), scanKV(`b`, `v3`)), - ), 1)), + withScanResultTS(scan(`a`, `c`), noTS), + withResult(put(`a`, s1)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v1)), + withResult(put(`a`, s2)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v2)), + withResult(put(`b`, s3)), + withScanResultTS(scan(`a`, `c`), noTS, scanKV(`a`, v2), scanKV(`b`, v3)), + ), t1)), }, - kvs: kvs(kv(`a`, 1, `v2`), kv(`b`, 1, `v3`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s2), kv(`b`, t1, s3)), }, { name: "one deleterange before write", steps: []Step{ - step(withDeleteRangeResult(delRange(`a`, `c`))), - step(withResult(put(`a`, `v1`))), + step(withDeleteRangeResult(delRange(`a`, `c`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: nil, + kvs: kvs(kv(`a`, t2, s2)), }, { name: "one deleterange before write returning wrong value", steps: []Step{ - step(withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`))), - step(withResult(put(`a`, `v1`))), - }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed deleteRange missing write: ` + - `[dr.s]{a-c}:{0:[0.000000001,0, ), gap:[, )}->["a"] ` + - `[dr.d]"a":missing->`, + step(withDeleteRangeResult(delRange(`a`, `c`, s1), t1, roachpb.Key(`a`))), + step(withResultTS(put(`a`, s2), t2)), }, + kvs: kvs(kv(`a`, t2, s2)), }, { name: "one deleterange after write", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`)), - ), 2)), + withDeleteRangeResult(delRange(`a`, `c`, s2), noTS, roachpb.Key(`a`)), + ), t2)), }, 
- kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { name: "one deleterange after write returning wrong value", steps: []Step{ - step(withResult(put(`a`, `v1`))), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`)), - ), 2)), - }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2)), - expected: []string{ - `extra writes: [d]"a":uncertain->`, + withDeleteRangeResult(delRange(`a`, `c`, s2), t2), + ), t2)), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), }, { name: "one deleterange after write missing write", steps: []Step{ - step(withResult(put(`a`, `v1`))), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withDeleteRangeResult(delRange(`a`, `c`, s2), t2, roachpb.Key(`a`)), + ), t1)), + }, + kvs: kvs(kv(`a`, t1, s1)), + }, + { + name: "one deleterange after write extra deletion", + steps: []Step{ + step(withResultTS(put(`a`, s1), t2)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`)), - ), 1)), + withDeleteRangeResult(delRange(`a`, `c`, s2), t2, roachpb.Key(`a`), roachpb.Key(`b`)), + ), t2)), }, - kvs: kvs(kv(`a`, 1, `v1`)), - expected: []string{ - `committed txn missing write: ` + - `[dr.s]{a-c}:{0:[0.000000001,0, ), gap:[, )}->["a"] ` + - `[dr.d]"a":missing->`, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2)), + }, + { + name: "one deleterange after write with spurious deletion", + steps: []Step{ + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(closureTxn(ClosureTxnType_Commit, + withDeleteRangeResult(delRange(`a`, `c`, s2), t2, roachpb.Key(`a`), roachpb.Key(`b`)), + ), t2)), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2), tombstone(`b`, t2, s2)), }, { name: "one deleterange after writes", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, 
`v2`), 2)), - step(withResultTS(put(`c`, `v3`), 3)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(put(`c`, s3), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`), roachpb.Key(`b`)), - ), 4)), - step(withScanResult(scan(`a`, `d`), scanKV(`c`, `v3`))), + withDeleteRangeResult(delRange(`a`, `c`, s4), noTS, roachpb.Key(`a`), roachpb.Key(`b`)), + ), t4)), + step(withScanResultTS(scan(`a`, `d`), t4, scanKV(`c`, v3))), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), kv(`c`, 3, `v3`), tombstone(`a`, 4), tombstone(`b`, 4)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), kv(`c`, t3, s3), tombstone(`a`, t4, s4), tombstone(`b`, t4, s4)), }, { name: "one deleterange after writes with write timestamp disagreement", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(put(`c`, `v3`), 3)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(put(`c`, s3), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`), roachpb.Key(`b`)), - ), 4)), - step(withScanResult(scan(`a`, `d`), scanKV(`c`, `v3`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), kv(`c`, 3, `v3`), tombstone(`a`, 4), tombstone(`b`, 5)), - expected: []string{ - `committed txn missing write: ` + - `[dr.s]{a-c}:{0:[0.000000001,0, ), 1:[0.000000002,0, 0.000000005,0), gap:[, )}->["a", "b"] ` + - `[dr.d]"a":0.000000004,0-> [dr.d]"b":missing->`, + withDeleteRangeResult(delRange(`a`, `c`, s4), noTS, roachpb.Key(`a`), roachpb.Key(`b`), roachpb.Key(`c`)), + ), t4)), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), kv(`c`, t3, s3), tombstone(`a`, t3, s4), tombstone(`b`, t4, s4), tombstone(`c`, t4, s4)), }, { name: "one deleterange after writes with missing write", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 
1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(put(`c`, `v3`), 3)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(put(`c`, s3), t3)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`), roachpb.Key(`b`)), - ), 4)), - step(withScanResult(scan(`a`, `d`), scanKV(`c`, `v3`))), - }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), kv(`c`, 3, `v3`), tombstone(`a`, 4)), - expected: []string{ - `committed txn missing write: ` + - `[dr.s]{a-c}:{0:[0.000000001,0, ), 1:[0.000000002,0, ), gap:[, )}->["a", "b"] ` + - `[dr.d]"a":0.000000004,0-> [dr.d]"b":missing->`, - `committed scan non-atomic timestamps: [s]{a-d}:{0:[0.000000003,0, ), gap:[, 0.000000001,0)}->["c":v3]`, + withDeleteRangeResult(delRange(`a`, `c`, s4), noTS, roachpb.Key(`a`), roachpb.Key(`b`)), + ), t4)), + step(withScanResultTS(scan(`a`, `d`), t5, scanKV(`c`, v3))), }, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), kv(`c`, t3, s3), tombstone(`a`, t4, s4)), }, { name: "one deleterange after writes and delete", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`b`, `v2`), 2)), - step(withResultTS(del(`a`), 4)), - step(withResultTS(put(`a`, `v3`), 5)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`b`, s2), t2)), + step(withResultTS(del(`a`, s3), t4)), + step(withResultTS(put(`a`, s4), t5)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`), roachpb.Key(`b`)), - ), 3)), + withDeleteRangeResult(delRange(`a`, `c`, s5), noTS, roachpb.Key(`a`), roachpb.Key(`b`)), + ), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), kv(`b`, 2, `v2`), tombstone(`a`, 3), tombstone(`b`, 3), tombstone(`a`, 4), kv(`a`, 5, `v3`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), kv(`b`, t2, s2), tombstone(`a`, t3, s5), tombstone(`b`, t3, s5), tombstone(`a`, t4, s3), kv(`b`, t5, s4)), }, { name: "one transactional 
deleterange followed by put after writes", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`)), - withResult(put(`b`, `v2`)), - ), 2)), + withDeleteRangeResult(delRange(`a`, `c`, s2), noTS, roachpb.Key(`a`)), + withResult(put(`b`, s3)), + ), t2)), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2), kv(`b`, 2, `v2`)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2), kv(`b`, t2, s3)), }, { name: "one transactional deleterange followed by put after writes with write timestamp disagreement", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`)), - withResult(put(`b`, `v2`)), - ), 2)), - }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2), kv(`b`, 3, `v2`)), - expected: []string{ - `committed txn non-atomic timestamps: ` + - `[dr.s]{a-c}:{0:[0.000000001,0, ), gap:[, )}->["a"] ` + - `[dr.d]"a":0.000000002,0-> [w]"b":0.000000003,0->v2`, + withDeleteRangeResult(delRange(`a`, `c`, s2), noTS, roachpb.Key(`a`)), + withResult(put(`b`, s3)), + ), t2)), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s2), kv(`b`, t3, s3)), }, { name: "one transactional put shadowed by deleterange after writes", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResult(put(`b`, `v2`)), - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`), roachpb.Key(`b`)), - ), 2)), + withResult(put(`b`, s2)), + withDeleteRangeResult(delRange(`a`, `c`, s3), noTS, roachpb.Key(`a`), roachpb.Key(`b`)), + ), t2)), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2), tombstone(`b`, 2)), - expected: nil, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s3), tombstone(`b`, t2, s3)), }, { 
name: "one transactional put shadowed by deleterange after writes with write timestamp disagreement", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), + step(withResultTS(put(`a`, s1), t1)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withResult(put(`b`, `v2`)), - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`), roachpb.Key(`b`)), - ), 2)), - }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 2), tombstone(`b`, 3)), - expected: []string{ - `committed txn missing write: ` + - `[w]"b":missing->v2 ` + - `[dr.s]{a-c}:{0:[0.000000001,0, ), 1:[0,0, ), gap:[, )}->["a", "b"] ` + - `[dr.d]"a":0.000000002,0-> [dr.d]"b":missing->`, + withResult(put(`b`, s2)), + withDeleteRangeResult(delRange(`a`, `c`, s3), noTS, roachpb.Key(`a`), roachpb.Key(`b`)), + ), t2)), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t2, s3), tombstone(`b`, t3, s3)), }, { name: "one deleterange after writes returning keys outside span boundary", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`d`, `v2`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`d`, s2), t2)), step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`), roachpb.Key(`d`)), - ), 3)), - }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 3), kv(`d`, 2, `v2`)), - expected: []string{ - `key "d" outside delete range bounds: ` + - `[dr.s]{a-c}:{0:[0.000000001,0, ), 1:[0.000000002,0, ), gap:[, )}->["a", "d"] ` + - `[dr.d]"a":0.000000003,0-> [dr.d]"d":missing->`, + withDeleteRangeResult(delRange(`a`, `c`, s3), noTS, roachpb.Key(`a`), roachpb.Key(`d`)), + ), t3)), }, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t3, s3), kv(`d`, t2, s2)), }, { name: "one deleterange after writes incorrectly deleting keys outside span boundary", steps: []Step{ - step(withResultTS(put(`a`, `v1`), 1)), - step(withResultTS(put(`d`, `v2`), 2)), + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(put(`d`, s2), t2)), 
step(withResultTS(closureTxn(ClosureTxnType_Commit, - withDeleteRangeResult(delRange(`a`, `c`), roachpb.Key(`a`), roachpb.Key(`d`)), - ), 3)), + withDeleteRangeResult(delRange(`a`, `c`, s3), noTS, roachpb.Key(`a`), roachpb.Key(`d`)), + ), t3)), }, - kvs: kvs(kv(`a`, 1, `v1`), tombstone(`a`, 3), kv(`d`, 2, `v2`), tombstone(`d`, 3)), - expected: []string{ - `key "d" outside delete range bounds: ` + - `[dr.s]{a-c}:{0:[0.000000001,0, ), 1:[0.000000002,0, ), gap:[, )}->["a", "d"] ` + - `[dr.d]"a":0.000000003,0-> [dr.d]"d":0.000000003,0->`, + kvs: kvs(kv(`a`, t1, s1), tombstone(`a`, t3, s3), kv(`d`, t2, s2), tombstone(`d`, t3, s3)), + }, + { + name: "single mvcc rangedel", + steps: []Step{ + step(withResultTS(delRangeUsingTombstone(`a`, `b`, s1), t1)), }, + kvs: kvs(rd(`a`, `b`, t1, s1)), + }, + { + name: "single mvcc rangedel after put", + steps: []Step{ + step(withResultTS(put(`a`, s1), t1)), + step(withResultTS(delRangeUsingTombstone(`a`, `b`, s2), t2)), + }, + kvs: kvs(kv(`a`, t1, s1), rd(`a`, `b`, t2, s2)), + }, + { + name: "single mvcc rangedel before put", + steps: []Step{ + step(withResultTS(delRangeUsingTombstone(`a`, `b`, s1), t1)), + step(withResultTS(put(`a`, s2), t2)), + }, + kvs: kvs(rd(`a`, `b`, t1, s1), kv(`a`, t2, s2)), + }, + { + name: "two overlapping rangedels", + steps: []Step{ + step(withResultTS(delRangeUsingTombstone(`a`, `c`, s1), t1)), + step(withResultTS(delRangeUsingTombstone(`b`, `d`, s2), t2)), + }, + // Note: you see rangedel fragmentation in action here, which has to + // happen. Even if we decided to hand pebble overlapping rangedels, it + // would fragment them for us, and we'd get what you see below back when + // we read. 
+ kvs: kvs( + rd(`a`, `b`, t1, s1), + rd(`b`, `c`, t1, s1), + rd(`b`, `c`, t2, s2), + rd(`c`, `d`, t2, s2), + ), + }, + { + name: "batch of touching rangedels", + steps: []Step{step(withResultTS(batch( + delRangeUsingTombstone(`a`, `b`, s1), + delRangeUsingTombstone(`b`, `d`, s2), + ), t1)), + }, + // Note that the tombstones aren't merged. In fact, our use of sequence numbers + // embedded in MVCCValueHeader implies that pebble can never merge adjacent + // tombstones from the same batch/txn. + kvs: kvs( + rd(`a`, `b`, t1, s1), + rd(`b`, `d`, t1, s2), + ), + }, + { + // Note also that self-overlapping batches or rangedels in txns aren't + // allowed today, so this particular example exists in this unit test but + // not in real CRDB. But we can have "touching" rangedels today, see + // above. + name: "batch of two overlapping rangedels", + steps: []Step{step(withResultTS(batch( + delRangeUsingTombstone(`a`, `c`, s1), + delRangeUsingTombstone(`b`, `d`, s2), + ), t1)), + }, + // Note that the tombstones aren't merged. In fact, our use of sequence numbers + // embedded in MVCCValueHeader implies that pebble can never merge adjacent + // tombstones from the same batch/txn. + // Note also that self-overlapping batches or rangedels in txns aren't + // allowed today, so this particular example exists in this unit test but + // not in real CRDB. But we can have "touching" rangedels today. + kvs: kvs( + rd(`a`, `b`, t1, s1), + rd(`b`, `d`, t1, s2), + ), + }, + { + name: "read before rangedel", + steps: []Step{ + step(withResultTS(put(`b`, s1), t1)), + step(withReadResultTS(get(`b`), v1, t2)), + step(withResultTS(delRangeUsingTombstone(`a`, `c`, s3), t3)), + }, + kvs: kvs( + kv(`b`, t1, s1), + rd(`a`, `c`, t3, s3), + ), + }, + { + // MVCC range deletions are executed individually when the range is split, + // and if this happens kvnemesis will report a failure since the writes + // will in all likelihood have non-atomic timestamps. 
+ // In an actual run we avoid this by adding a test hook to DistSender to + // avoid splitting MVCC rangedels across ranges, instead failing with a + // hard error, and the generator attempts - imperfectly - to respect the + // split points. + name: "rangedel with range split", + steps: []Step{ + step(withResultTS(delRangeUsingTombstone(`a`, `c`, s1), t2)), + }, + kvs: kvs( + rd(`a`, `b`, t2, s1), + rd(`b`, `c`, t1, s1), + ), }, } + w := echotest.Walk(t, testutils.TestDataPath(t, t.Name())) + defer w.Check(t) for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + t.Run(test.name, w.Do(t, test.name, func(t *testing.T, path string) { e, err := MakeEngine() require.NoError(t, err) defer e.Close() + + var buf strings.Builder + for _, step := range test.steps { + fmt.Fprintln(&buf, strings.TrimSpace(step.String())) + } + + tr := &SeqTracker{} for _, kv := range test.kvs { - e.Put(kv.Key, kv.Value) + seq := kv.seq() + tr.Add(kv.key, kv.endKey, kv.ts, seq) + // NB: we go a little beyond what is truly necessary by embedding the + // sequence numbers (inside kv.val) unconditionally, as they would be in + // a real run. But we *do* need to embed them in `e.DeleteRange`, for + // otherwise pebble might start merging adjacent MVCC range dels (since + // they could have the same timestamp and empty value, where the seqno + // would really produce unique values). 
+ if len(kv.endKey) == 0 { + k := storage.MVCCKey{ + Key: kv.key, + Timestamp: kv.ts, + } + e.Put(k, kv.val) + fmt.Fprintln(&buf, k, "@", seq, mustGetStringValue(kv.val)) + } else { + k := storage.MVCCRangeKey{ + StartKey: kv.key, + EndKey: kv.endKey, + Timestamp: kv.ts, + } + e.DeleteRange(kv.key, kv.endKey, kv.ts, kv.val) + fmt.Fprintln(&buf, k, "@", seq, mustGetStringValue(kv.val)) + } } - var actual []string - if failures := Validate(test.steps, e); len(failures) > 0 { - actual = make([]string, len(failures)) + + if failures := Validate(test.steps, e, tr); len(failures) > 0 { for i := range failures { - actual[i] = failures[i].Error() + fmt.Fprintln(&buf, failures[i]) } } - assert.Equal(t, test.expected, actual) - }) + // TODO(during review): prefix all test names with a (padded) number so + // that it's easier to join up the test case and the file in a sorted + // view. + echotest.Require(t, buf.String(), path) + })) } } diff --git a/pkg/kv/kvnemesis/watcher.go b/pkg/kv/kvnemesis/watcher.go index 908bfa1394b6..98d9c113d2ce 100644 --- a/pkg/kv/kvnemesis/watcher.go +++ b/pkg/kv/kvnemesis/watcher.go @@ -12,13 +12,17 @@ package kvnemesis import ( "context" + "fmt" + "math/rand" "reflect" + "strings" "time" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -157,52 +161,18 @@ func (w *Watcher) processEvents(ctx context.Context, eventC chan kvcoord.RangeFe case *roachpb.RangeFeedError: return e.Error.GoError() case *roachpb.RangeFeedValue: - log.Infof(ctx, `rangefeed Put %s %s -> %s (prev %s)`, - e.Key, e.Value.Timestamp, e.Value.PrettyPrint(), 
e.PrevValue.PrettyPrint()) - w.mu.Lock() - // TODO(dan): If the exact key+ts is put into kvs more than once, the - // Engine will keep the last. This matches our txn semantics (if a key - // is written in a transaction more than once, only the last is kept) - // but it means that we'll won't catch it if we violate those semantics. - // Consider first doing a Get and somehow failing if this exact key+ts - // has previously been put with a different value. - if len(e.Value.RawBytes) == 0 { - w.mu.kvs.Delete(storage.MVCCKey{Key: e.Key, Timestamp: e.Value.Timestamp}) - } else { - w.mu.kvs.Put(storage.MVCCKey{Key: e.Key, Timestamp: e.Value.Timestamp}, e.Value.RawBytes) - } - prevTs := e.Value.Timestamp.Prev() - prevValue := w.mu.kvs.Get(e.Key, prevTs) - - // RangeFeed doesn't send the timestamps of the previous values back - // because changefeeds don't need them. It would likely be easy to - // implement, but would add unnecessary allocations in changefeeds, - // which don't need them. This means we'd want to make it an option in - // the request, which seems silly to do for only this test. - prevValue.Timestamp = hlc.Timestamp{} - // Additionally, ensure that deletion tombstones and missing keys are - // normalized as the nil slice, so that they can be matched properly - // between the RangeFeed and the Engine. 
- if len(e.PrevValue.RawBytes) == 0 { - e.PrevValue.RawBytes = nil - } - prevValueMismatch := !reflect.DeepEqual(prevValue, e.PrevValue) - var engineContents string - if prevValueMismatch { - engineContents = w.mu.kvs.DebugPrint(" ") + if err := w.handleValue(ctx, roachpb.Span{Key: e.Key}, e.Value, &e.PrevValue); err != nil { + return err } - w.mu.Unlock() - - if prevValueMismatch { - log.Infof(ctx, "rangefeed mismatch\n%s", engineContents) - panic(errors.Errorf( - `expected (%s, %s) previous value %s got: %s`, e.Key, prevTs, prevValue, e.PrevValue)) + case *roachpb.RangeFeedDeleteRange: + if err := w.handleValue(ctx, e.Span, roachpb.Value{Timestamp: e.Timestamp}, nil /* prevV */); err != nil { + return err } case *roachpb.RangeFeedCheckpoint: w.mu.Lock() frontierAdvanced, err := w.mu.frontier.Forward(e.Span, e.ResolvedTS) if err != nil { - panic(errors.Wrapf(err, "unexpected frontier error advancing to %s@%s", e.Span, e.ResolvedTS)) + return errors.Wrapf(err, "unexpected frontier error advancing to %s@%s", e.Span, e.ResolvedTS) } if frontierAdvanced { frontier := w.mu.frontier.Frontier() @@ -220,7 +190,87 @@ func (w *Watcher) processEvents(ctx context.Context, eventC chan kvcoord.RangeFe } } w.mu.Unlock() + default: + return errors.Errorf("unknown event: %T", e) } } } } + +func (w *Watcher) handleValue( + ctx context.Context, span roachpb.Span, v roachpb.Value, prevV *roachpb.Value, +) error { + w.mu.Lock() + defer w.mu.Unlock() + + var buf strings.Builder + fmt.Fprintf(&buf, `rangefeed %s %s -> %s`, span, v.Timestamp, v.PrettyPrint()) + if prevV != nil { + fmt.Fprintf(&buf, ` (prev %s)`, prevV.PrettyPrint()) + } + // TODO(dan): If the exact key+ts is put into kvs more than once, the + // Engine will keep the last. This matches our txn semantics (if a key + // is written in a transaction more than once, only the last is kept) + // but it means that we won't catch it if we violate those semantics. 
+ // Consider first doing a Get and somehow failing if this exact key+ts + // has previously been put with a different value. + if len(span.EndKey) > 0 { + // If we have two operations that are not atomic (i.e. aren't in a batch) + // and they produce touching tombstones at the same timestamp, then + // `.mu.kvs` will merge them but they wouldn't be merged in pebble, since + // their MVCCValueHeader will contain different seqnos (and thus the value + // isn't identical). To work around that, we put random stuff in here. This + // is never interpreted - the seqno is only pulled out via an interceptor at + // the rangefeed boundary, and handed to the tracker. This is merely our + // local copy. + // + // TODO(during review): plumb the seq through the rangefeed and then we can + // use the real thing here and avoid this problem in a more natural way. + var vh enginepb.MVCCValueHeader + vh.KVNemesisSeq.Set(int64(rand.Int31())) + mvccV := storage.MVCCValue{ + MVCCValueHeader: vh, + } + + sl, err := storage.EncodeMVCCValue(mvccV) + if err != nil { + return err + } + + w.mu.kvs.DeleteRange(span.Key, span.EndKey, v.Timestamp, sl) + return nil + } + + // Handle a point write. + w.mu.kvs.Put(storage.MVCCKey{Key: span.Key, Timestamp: v.Timestamp}, v.RawBytes) + prevTs := v.Timestamp.Prev() + getPrevV := w.mu.kvs.Get(span.Key, prevTs) + + // RangeFeed doesn't send the timestamps of the previous values back + // because changefeeds don't need them. It would likely be easy to + // implement, but would add unnecessary allocations in changefeeds, + // which don't need them. This means we'd want to make it an option in + // the request, which seems silly to do for only this test. + getPrevV.Timestamp = hlc.Timestamp{} + // Additionally, ensure that deletion tombstones and missing keys are + // normalized as the nil slice, so that they can be matched properly + // between the RangeFeed and the Engine. 
+ if len(prevV.RawBytes) == 0 { + prevV.RawBytes = nil + } + prevValueMismatch := !reflect.DeepEqual(prevV, &getPrevV) + var engineContents string + if prevValueMismatch { + engineContents = w.mu.kvs.DebugPrint(" ") + } + + if prevValueMismatch { + log.Infof(ctx, "rangefeed mismatch\n%s", engineContents) + s := mustGetStringValue(getPrevV.RawBytes) + fmt.Println(s) + return errors.Errorf( + `expected (%s, %s) has previous value %s in kvs, but rangefeed has: %s`, + span, prevTs, mustGetStringValue(getPrevV.RawBytes), mustGetStringValue(prevV.RawBytes)) + } + return nil +} diff --git a/pkg/kv/kvserver/BUILD.bazel b/pkg/kv/kvserver/BUILD.bazel index 9f129b8189ce..449912dd3ad2 100644 --- a/pkg/kv/kvserver/BUILD.bazel +++ b/pkg/kv/kvserver/BUILD.bazel @@ -113,6 +113,7 @@ go_library( "//pkg/kv", "//pkg/kv/kvbase", "//pkg/kv/kvclient/rangecache", + "//pkg/kv/kvnemesis/kvnemesisutil", "//pkg/kv/kvserver/abortspan", "//pkg/kv/kvserver/allocator", "//pkg/kv/kvserver/allocator/allocatorimpl", diff --git a/pkg/kv/kvserver/batcheval/cmd_scan.go b/pkg/kv/kvserver/batcheval/cmd_scan.go index ffac770c5232..d19b0039fda8 100644 --- a/pkg/kv/kvserver/batcheval/cmd_scan.go +++ b/pkg/kv/kvserver/batcheval/cmd_scan.go @@ -101,5 +101,6 @@ func Scan( } } res.Local.EncounteredIntents = scanRes.Intents + return res, nil } diff --git a/pkg/kv/kvserver/kvserverbase/base.go b/pkg/kv/kvserver/kvserverbase/base.go index 4e7a5caebbf7..4c26b7c627d5 100644 --- a/pkg/kv/kvserver/kvserverbase/base.go +++ b/pkg/kv/kvserver/kvserverbase/base.go @@ -63,7 +63,7 @@ type FilterArgs struct { // ProposalFilterArgs groups the arguments to ReplicaProposalFilter. 
type ProposalFilterArgs struct { Ctx context.Context - Cmd kvserverpb.RaftCommand + Cmd *kvserverpb.RaftCommand QuotaAlloc *quotapool.IntAlloc CmdID CmdIDKey Req roachpb.BatchRequest diff --git a/pkg/kv/kvserver/rangefeed/catchup_scan.go b/pkg/kv/kvserver/rangefeed/catchup_scan.go index 06a2fd974cec..1da71b839460 100644 --- a/pkg/kv/kvserver/rangefeed/catchup_scan.go +++ b/pkg/kv/kvserver/rangefeed/catchup_scan.go @@ -67,6 +67,7 @@ type CatchUpIterator struct { span roachpb.Span startTime hlc.Timestamp // exclusive pacer *kvadmission.Pacer + OnEmit func(key, endKey roachpb.Key, ts hlc.Timestamp, vh enginepb.MVCCValueHeader) } // NewCatchUpIterator returns a CatchUpIterator for the given Reader over the @@ -189,15 +190,23 @@ func (i *CatchUpIterator) CatchUpScan( var span roachpb.Span a, span.Key = a.Copy(rangeKeys.Bounds.Key, 0) a, span.EndKey = a.Copy(rangeKeys.Bounds.EndKey, 0) + ts := rangeKeys.Versions[j].Timestamp err := outputFn(&roachpb.RangeFeedEvent{ DeleteRange: &roachpb.RangeFeedDeleteRange{ Span: span, - Timestamp: rangeKeys.Versions[j].Timestamp, + Timestamp: ts, }, }) if err != nil { return err } + if i.OnEmit != nil { + v, err := storage.DecodeMVCCValue(rangeKeys.Versions[j].Value) + if err != nil { + return err + } + i.OnEmit(span.Key, span.EndKey, ts, v.MVCCValueHeader) + } } } // If there's no point key here (e.g. 
we found a bare range key above), then @@ -325,6 +334,9 @@ func (i *CatchUpIterator) CatchUpScan( }, }) reorderBuf = append(reorderBuf, event) + if i.OnEmit != nil { + i.OnEmit(key, nil, ts, mvccVal.MVCCValueHeader) + } } } diff --git a/pkg/kv/kvserver/replica_evaluate.go b/pkg/kv/kvserver/replica_evaluate.go index d3b555bc80ba..da170e5b2cbb 100644 --- a/pkg/kv/kvserver/replica_evaluate.go +++ b/pkg/kv/kvserver/replica_evaluate.go @@ -14,6 +14,7 @@ import ( "bytes" "context" + "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency" @@ -267,6 +268,10 @@ func evaluateBatch( reply := br.Responses[index].GetInner() + if seq := kvnemesisutil.Seq(args.Header().KVNemesisSeq.Get()); seq != 0 { + ctx = kvnemesisutil.WithSeq(ctx, seq) + } + // Note that `reply` is populated even when an error is returned: it // may carry a response transaction and in the case of WriteTooOldError // (which is sometimes deferred) it is fully populated. 
diff --git a/pkg/kv/kvserver/replica_raft.go b/pkg/kv/kvserver/replica_raft.go index ecbbbd1b93fa..4505e6c9bf40 100644 --- a/pkg/kv/kvserver/replica_raft.go +++ b/pkg/kv/kvserver/replica_raft.go @@ -274,7 +274,7 @@ func (r *Replica) evalAndPropose( if filter := r.store.TestingKnobs().TestingProposalFilter; filter != nil { filterArgs := kvserverbase.ProposalFilterArgs{ Ctx: ctx, - Cmd: *proposal.command, + Cmd: proposal.command, QuotaAlloc: proposal.quotaAlloc, CmdID: idKey, Req: *ba, diff --git a/pkg/kv/kvserver/replica_rangefeed.go b/pkg/kv/kvserver/replica_rangefeed.go index 9945ba0a548a..a0b11277ab9c 100644 --- a/pkg/kv/kvserver/replica_rangefeed.go +++ b/pkg/kv/kvserver/replica_rangefeed.go @@ -226,7 +226,11 @@ func (r *Replica) rangeFeedWithRangeID( // Assert that we still hold the raftMu when this is called to ensure // that the catchUpIter reads from the current snapshot. r.raftMu.AssertHeld() - return rangefeed.NewCatchUpIterator(r.Engine(), span, startTime, iterSemRelease, pacer) + i := rangefeed.NewCatchUpIterator(r.Engine(), span, startTime, iterSemRelease, pacer) + if f := r.store.TestingKnobs().RangefeedValueHeaderFilter; f != nil { + i.OnEmit = f + } + return i } } p := r.registerWithRangefeedRaftMuLocked( @@ -550,6 +554,58 @@ func (r *Replica) populatePrevValsInLogicalOpLogRaftMuLocked( } } +func loadValueHeaderForDeleteRange( + reader storage.Reader, t *enginepb.MVCCDeleteRangeOp, +) (enginepb.MVCCValueHeader, error) { + it := reader.NewMVCCIterator(storage.MVCCKeyIterKind, storage.IterOptions{ + LowerBound: t.StartKey, + UpperBound: t.EndKey, + KeyTypes: storage.IterKeyTypeRangesOnly, + }) + defer it.Close() + + it.SeekGE(storage.MVCCKey{Key: t.StartKey}) + var span roachpb.Span + var vh enginepb.MVCCValueHeader + for ; ; it.Next() { + ok, err := it.Valid() + if err != nil { + return enginepb.MVCCValueHeader{}, err + } + if !ok { + break + } + if !it.RangeKeyChanged() { + continue + } + rkv, ok := it.RangeKeys().FirstAtOrAbove(t.Timestamp) + if !ok 
|| rkv.Timestamp != t.Timestamp { + return enginepb.MVCCValueHeader{}, errors.AssertionFailedf("missing range key segment") + } + v, err := storage.DecodeMVCCValue(rkv.Value) + if err != nil { + return enginepb.MVCCValueHeader{}, err + } + if vh.IsEmpty() { + vh = v.MVCCValueHeader + // TODO check all equal + } + bounds := it.RangeBounds().Clone() + if len(span.EndKey) == 0 { + span = bounds + } else if !span.EndKey.Equal(bounds.Key) { + return enginepb.MVCCValueHeader{}, errors.AssertionFailedf("previous EndKey doesn't match up next StartKey: %s vs %s", span, bounds) + } else { + span.EndKey = bounds.EndKey + } + } + if o := (roachpb.Span{Key: t.StartKey, EndKey: t.EndKey}); !span.Equal(o) { + return enginepb.MVCCValueHeader{}, errors.AssertionFailedf("event span != iter span: %s != %s", span, o) + } + // Made it! + return vh, nil +} + // handleLogicalOpLogRaftMuLocked passes the logical op log to the active // rangefeed, if one is running. The method accepts a reader, which is used to // look up the values associated with key-value writes in the log before handing @@ -578,6 +634,8 @@ func (r *Replica) handleLogicalOpLogRaftMuLocked( return } + vhf := r.store.TestingKnobs().RangefeedValueHeaderFilter + // When reading straight from the Raft log, some logical ops will not be // fully populated. Read from the Reader to populate all fields. for _, op := range ops.Ops { @@ -592,10 +650,19 @@ func (r *Replica) handleLogicalOpLogRaftMuLocked( case *enginepb.MVCCWriteIntentOp, *enginepb.MVCCUpdateIntentOp, *enginepb.MVCCAbortIntentOp, - *enginepb.MVCCAbortTxnOp, - *enginepb.MVCCDeleteRangeOp: + *enginepb.MVCCAbortTxnOp: // Nothing to do. 
continue + case *enginepb.MVCCDeleteRangeOp: + if vhf == nil { + continue + } + vh, err := loadValueHeaderForDeleteRange(reader, t) + if err != nil { + panic(err) + } + vhf(t.StartKey, t.EndKey, t.Timestamp, vh) + continue default: panic(errors.AssertionFailedf("unknown logical op %T", t)) } @@ -615,7 +682,7 @@ func (r *Replica) handleLogicalOpLogRaftMuLocked( // Read the value directly from the Reader. This is performed in the // same raftMu critical section that the logical op's corresponding // WriteBatch is applied, so the value should exist. - val, _, err := storage.MVCCGet(ctx, reader, key, ts, storage.MVCCGetOptions{Tombstones: true}) + val, _, vh, err := storage.MVCCGetWithValueHeader(ctx, reader, key, ts, storage.MVCCGetOptions{Tombstones: true}) if val == nil && err == nil { err = errors.New("value missing in reader") } @@ -625,6 +692,10 @@ func (r *Replica) handleLogicalOpLogRaftMuLocked( )) return } + + if vhf != nil { + vhf(key, nil, ts, vh) + } *valPtr = val.RawBytes } diff --git a/pkg/kv/kvserver/testing_knobs.go b/pkg/kv/kvserver/testing_knobs.go index da7d2a8931dc..31fbced01414 100644 --- a/pkg/kv/kvserver/testing_knobs.go +++ b/pkg/kv/kvserver/testing_knobs.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/syncutil" ) @@ -403,6 +404,12 @@ type StoreTestingKnobs struct { // renewing expiration based leases. LeaseRenewalDurationOverride time.Duration + // RangefeedValueHeaderFilter, if set, is invoked before each value emitted on + // the rangefeed, be it in steady state or during the catch-up scan. + // + // TODO(before merge): plumb the seqno through the rangefeed. 
+ RangefeedValueHeaderFilter func(key, endKey roachpb.Key, ts hlc.Timestamp, vh enginepb.MVCCValueHeader) + // MakeSystemConfigSpanUnavailableToQueues makes the system config span // unavailable to queues that ask for it. MakeSystemConfigSpanUnavailableToQueues bool diff --git a/pkg/roachpb/BUILD.bazel b/pkg/roachpb/BUILD.bazel index 3f0b6ef41496..970e60898b44 100644 --- a/pkg/roachpb/BUILD.bazel +++ b/pkg/roachpb/BUILD.bazel @@ -40,6 +40,7 @@ go_library( "//pkg/storage/enginepb", "//pkg/util", "//pkg/util/bitarray", + "//pkg/util/buildutil", "//pkg/util/caller", "//pkg/util/duration", "//pkg/util/encoding", @@ -95,6 +96,7 @@ go_test( "//pkg/testutils/zerofields", "//pkg/util", "//pkg/util/bitarray", + "//pkg/util/buildutil", "//pkg/util/duration", "//pkg/util/encoding", "//pkg/util/hlc", diff --git a/pkg/roachpb/api.go b/pkg/roachpb/api.go index a8fe3074064e..a79e2f8db2c7 100644 --- a/pkg/roachpb/api.go +++ b/pkg/roachpb/api.go @@ -15,6 +15,7 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" + _ "github.com/cockroachdb/cockroach/pkg/util/buildutil" // see RequestHeader.KVNemesisSeq "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" "github.com/cockroachdb/cockroach/pkg/util/protoutil" diff --git a/pkg/roachpb/api.proto b/pkg/roachpb/api.proto index b1d504992ee7..71bfdfeea575 100644 --- a/pkg/roachpb/api.proto +++ b/pkg/roachpb/api.proto @@ -84,6 +84,12 @@ enum ResumeReason { // RequestHeader is supplied with every storage node request. message RequestHeader { reserved 1, 2; + message Empty{}; + Empty kvnemesis_seq = 6 [ + (gogoproto.customname) = "KVNemesisSeq", + (gogoproto.nullable) = false, + (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/util/buildutil.TestingInt64"]; + // The key for request. If the request operates on a range, this // represents the starting key for the range. 
bytes key = 3 [(gogoproto.casttype) = "Key"]; diff --git a/pkg/roachpb/api_test.go b/pkg/roachpb/api_test.go index 36ce4b0fd15b..8187877c28c5 100644 --- a/pkg/roachpb/api_test.go +++ b/pkg/roachpb/api_test.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/redact" @@ -406,3 +407,20 @@ func TestFlagCombinations(t *testing.T) { } } } + +func TestRequestHeaderRoundTrip(t *testing.T) { + var seq buildutil.TestingInt64 + seq.Set(123) + exp := seq.Get() + if buildutil.CrdbTestBuild { + require.EqualValues(t, 123, exp) + } + rh := RequestHeader{KVNemesisSeq: seq} + sl, err := rh.Marshal() + require.NoError(t, err) + + rh.Reset() + require.NoError(t, rh.Unmarshal(sl)) + + require.Equal(t, exp, rh.KVNemesisSeq.Get()) +} diff --git a/pkg/roachpb/span_group.go b/pkg/roachpb/span_group.go index 070a0bda9474..3aa3c9f0e26f 100644 --- a/pkg/roachpb/span_group.go +++ b/pkg/roachpb/span_group.go @@ -101,7 +101,11 @@ func (g *SpanGroup) Slice() []Span { if rg == nil { return nil } - ret := make([]Span, 0, rg.Len()) + n := rg.Len() + if n == 0 { + return nil + } + ret := make([]Span, 0, n) it := rg.Iterator() for { rng, next := it.Next() diff --git a/pkg/roachprod/prometheus/prometheus_test.go b/pkg/roachprod/prometheus/prometheus_test.go index fc496179509b..67d49e1dc6dc 100644 --- a/pkg/roachprod/prometheus/prometheus_test.go +++ b/pkg/roachprod/prometheus/prometheus_test.go @@ -34,13 +34,13 @@ var nodeIPMap = map[install.Node]string{ func TestMakeYAMLConfig(t *testing.T) { testCases := []struct { - name string + testfile string useWorkloadHelpers bool cluster install.Nodes workloadScrapeConfigs []ScrapeConfig }{ { - name: "multiple scrape nodes", + testfile: 
"multipleScrapeNodes.txt", useWorkloadHelpers: false, workloadScrapeConfigs: []ScrapeConfig{ { @@ -78,7 +78,7 @@ func TestMakeYAMLConfig(t *testing.T) { }, }, { - name: "using make commands", + testfile: "usingMakeCommands.txt", useWorkloadHelpers: true, cluster: install.Nodes{8, 9}, workloadScrapeConfigs: []ScrapeConfig{ @@ -106,10 +106,8 @@ func TestMakeYAMLConfig(t *testing.T) { }, } - w := echotest.Walk(t, testutils.TestDataPath(t)) - defer w.Check(t) for _, tc := range testCases { - t.Run(tc.name, w.Do(t, tc.name, func(t *testing.T, path string) { + t.Run(tc.testfile, func(t *testing.T) { var promCfg Config for i, workloadConfig := range tc.workloadScrapeConfigs { if tc.useWorkloadHelpers { @@ -131,7 +129,7 @@ func TestMakeYAMLConfig(t *testing.T) { } cfg, err := makeYAMLConfig(promCfg.ScrapeConfigs, nodeIPMap) require.NoError(t, err) - echotest.Require(t, cfg, path) - })) + echotest.Require(t, cfg, testutils.TestDataPath(t, tc.testfile)) + }) } } diff --git a/pkg/roachprod/prometheus/testdata/multipleScrapeNodes.txt b/pkg/roachprod/prometheus/testdata/multipleScrapeNodes.txt new file mode 100644 index 000000000000..85fa3430a5cf --- /dev/null +++ b/pkg/roachprod/prometheus/testdata/multipleScrapeNodes.txt @@ -0,0 +1,19 @@ +echo +---- +global: + scrape_interval: 10s + scrape_timeout: 5s +scrape_configs: +- job_name: workload0 + static_configs: + - targets: + - 127.0.0.1:2002 + - 127.0.0.3:2003 + - 127.0.0.4:2003 + - 127.0.0.5:2003 + metrics_path: /b +- job_name: workload1 + static_configs: + - targets: + - 127.0.0.6:2009 + metrics_path: /c diff --git a/pkg/roachprod/prometheus/testdata/usingMakeCommands.txt b/pkg/roachprod/prometheus/testdata/usingMakeCommands.txt new file mode 100644 index 000000000000..9e34cea68699 --- /dev/null +++ b/pkg/roachprod/prometheus/testdata/usingMakeCommands.txt @@ -0,0 +1,28 @@ +echo +---- +global: + scrape_interval: 10s + scrape_timeout: 5s +scrape_configs: +- job_name: workload0 + static_configs: + - targets: + - 127.0.0.3:2005 
+ - 127.0.0.4:2005 + - 127.0.0.5:2005 + - 127.0.0.6:2009 + metrics_path: / +- job_name: cockroach-n8 + static_configs: + - labels: + node: "8" + targets: + - 127.0.0.8:26258 + metrics_path: /_status/vars +- job_name: cockroach-n9 + static_configs: + - labels: + node: "9" + targets: + - 127.0.0.9:26258 + metrics_path: /_status/vars diff --git a/pkg/storage/BUILD.bazel b/pkg/storage/BUILD.bazel index 47bb6db5dc2a..c9e03383a0a4 100644 --- a/pkg/storage/BUILD.bazel +++ b/pkg/storage/BUILD.bazel @@ -50,6 +50,7 @@ go_library( "//pkg/cli/exit", "//pkg/clusterversion", "//pkg/keys", + "//pkg/kv/kvnemesis/kvnemesisutil", "//pkg/kv/kvserver/concurrency/lock", "//pkg/kv/kvserver/diskmap", "//pkg/kv/kvserver/uncertainty", diff --git a/pkg/storage/enginepb/BUILD.bazel b/pkg/storage/enginepb/BUILD.bazel index baf83f62d193..1304480fa877 100644 --- a/pkg/storage/enginepb/BUILD.bazel +++ b/pkg/storage/enginepb/BUILD.bazel @@ -46,6 +46,7 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/storage/enginepb", visibility = ["//visibility:public"], deps = [ + "//pkg/util/buildutil", "//pkg/util/hlc", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", diff --git a/pkg/storage/enginepb/mvcc3.go b/pkg/storage/enginepb/mvcc3.go index 4db670f1699b..13a10325678c 100644 --- a/pkg/storage/enginepb/mvcc3.go +++ b/pkg/storage/enginepb/mvcc3.go @@ -10,7 +10,10 @@ package enginepb -import "github.com/cockroachdb/errors" +import ( + _ "github.com/cockroachdb/cockroach/pkg/util/buildutil" // see MVCCValueHeader.KVNemesisSeq + "github.com/cockroachdb/errors" +) // SafeValue implements the redact.SafeValue interface. 
func (MVCCStatsDelta) SafeValue() {} @@ -58,5 +61,5 @@ func (h MVCCValueHeader) IsEmpty() bool { // NB: We don't use a struct comparison like h == MVCCValueHeader{} due to a // Go 1.19 performance regression, see: // https://github.com/cockroachdb/cockroach/issues/88818 - return h.LocalTimestamp.IsEmpty() + return h.LocalTimestamp.IsEmpty() && h.KVNemesisSeq.Get() == 0 } diff --git a/pkg/storage/enginepb/mvcc3.proto b/pkg/storage/enginepb/mvcc3.proto index df98c642236e..49a40067c7fc 100644 --- a/pkg/storage/enginepb/mvcc3.proto +++ b/pkg/storage/enginepb/mvcc3.proto @@ -147,6 +147,12 @@ message IgnoredSeqNumRange { message MVCCValueHeader { option (gogoproto.equal) = true; + message Empty{}; + Empty kvnemesis_seq = 2 [ + (gogoproto.customname) = "KVNemesisSeq", + (gogoproto.nullable) = false, + (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/util/buildutil.TestingInt64"]; + // The local clock timestamp records the value of the local HLC clock on the // leaseholder when the key was originally written. 
It is used to make claims // about the relative real time ordering of the key-value's writer and readers diff --git a/pkg/storage/enginepb/mvcc3_test.go b/pkg/storage/enginepb/mvcc3_test.go index 408c47d21323..96ce833f8070 100644 --- a/pkg/storage/enginepb/mvcc3_test.go +++ b/pkg/storage/enginepb/mvcc3_test.go @@ -23,6 +23,7 @@ func TestMVCCValueHeader_IsEmpty(t *testing.T) { allFieldsSet := MVCCValueHeader{ LocalTimestamp: hlc.ClockTimestamp{WallTime: 1, Logical: 1, Synthetic: true}, } + allFieldsSet.KVNemesisSeq.Set(123) require.NoError(t, zerofields.NoZeroField(allFieldsSet), "make sure you update the IsEmpty method") require.True(t, MVCCValueHeader{}.IsEmpty()) require.False(t, allFieldsSet.IsEmpty()) diff --git a/pkg/storage/mvcc.go b/pkg/storage/mvcc.go index ad62efe0768e..0e9142ea9224 100644 --- a/pkg/storage/mvcc.go +++ b/pkg/storage/mvcc.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/uncertainty" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -31,6 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/admission" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/envutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/iterutil" @@ -970,13 +972,23 @@ func newMVCCIterator( func MVCCGet( ctx context.Context, reader Reader, key roachpb.Key, timestamp hlc.Timestamp, opts MVCCGetOptions, ) (*roachpb.Value, *roachpb.Intent, error) { + // TODO(during review): should this be inlined for performance? 
How do I tell? + value, intent, _, err := MVCCGetWithValueHeader(ctx, reader, key, timestamp, opts) + return value, intent, err +} + +// MVCCGetWithValueHeader is like MVCCGet, but in addition returns the +// MVCCValueHeader for the value. +func MVCCGetWithValueHeader( + ctx context.Context, reader Reader, key roachpb.Key, timestamp hlc.Timestamp, opts MVCCGetOptions, +) (*roachpb.Value, *roachpb.Intent, enginepb.MVCCValueHeader, error) { iter := newMVCCIterator(reader, timestamp, false /* rangeKeyMasking */, IterOptions{ KeyTypes: IterKeyTypePointsAndRanges, Prefix: true, }) defer iter.Close() - value, intent, err := mvccGet(ctx, iter, key, timestamp, opts) - return value.ToPointer(), intent, err + value, intent, vh, err := mvccGetWithValueHeader(ctx, iter, key, timestamp, opts) + return value.ToPointer(), intent, vh, err } func mvccGet( @@ -986,17 +998,28 @@ func mvccGet( timestamp hlc.Timestamp, opts MVCCGetOptions, ) (value optionalValue, intent *roachpb.Intent, err error) { + value, intent, _, err = mvccGetWithValueHeader(ctx, iter, key, timestamp, opts) + return value, intent, err +} + +func mvccGetWithValueHeader( + ctx context.Context, + iter MVCCIterator, + key roachpb.Key, + timestamp hlc.Timestamp, + opts MVCCGetOptions, +) (value optionalValue, intent *roachpb.Intent, vh enginepb.MVCCValueHeader, err error) { if len(key) == 0 { - return optionalValue{}, nil, emptyKeyError() + return optionalValue{}, nil, enginepb.MVCCValueHeader{}, emptyKeyError() } if timestamp.WallTime < 0 { - return optionalValue{}, nil, errors.Errorf("cannot write to %q at timestamp %s", key, timestamp) + return optionalValue{}, nil, enginepb.MVCCValueHeader{}, errors.Errorf("cannot write to %q at timestamp %s", key, timestamp) } if util.RaceEnabled && !iter.IsPrefix() { - return optionalValue{}, nil, errors.AssertionFailedf("mvccGet called with non-prefix iterator") + return optionalValue{}, nil, enginepb.MVCCValueHeader{}, errors.AssertionFailedf("mvccGet called with non-prefix 
iterator") } if err := opts.validate(); err != nil { - return optionalValue{}, nil, err + return optionalValue{}, nil, enginepb.MVCCValueHeader{}, err } mvccScanner := pebbleMVCCScannerPool.Get().(*pebbleMVCCScanner) @@ -1026,36 +1049,38 @@ func mvccGet( recordIteratorStats(ctx, mvccScanner.parent) if mvccScanner.err != nil { - return optionalValue{}, nil, mvccScanner.err + return optionalValue{}, nil, enginepb.MVCCValueHeader{}, mvccScanner.err } intents, err := buildScanIntents(mvccScanner.intentsRepr()) if err != nil { - return optionalValue{}, nil, err + return optionalValue{}, nil, enginepb.MVCCValueHeader{}, err } if opts.errOnIntents() && len(intents) > 0 { - return optionalValue{}, nil, &roachpb.WriteIntentError{Intents: intents} + return optionalValue{}, nil, enginepb.MVCCValueHeader{}, &roachpb.WriteIntentError{Intents: intents} } if len(intents) > 1 { - return optionalValue{}, nil, errors.Errorf("expected 0 or 1 intents, got %d", len(intents)) + return optionalValue{}, nil, enginepb.MVCCValueHeader{}, errors.Errorf("expected 0 or 1 intents, got %d", len(intents)) } else if len(intents) == 1 { intent = &intents[0] } if len(mvccScanner.results.repr) == 0 { - return optionalValue{}, intent, nil + return optionalValue{}, intent, enginepb.MVCCValueHeader{}, nil } mvccKey, rawValue, _, err := MVCCScanDecodeKeyValue(mvccScanner.results.repr) if err != nil { - return optionalValue{}, nil, err + return optionalValue{}, nil, enginepb.MVCCValueHeader{}, err } value = makeOptionalValue(roachpb.Value{ RawBytes: rawValue, Timestamp: mvccKey.Timestamp, }) - return value, intent, nil + // TODO(during review): if MVCCValueHeader ever picks up a pointer, is it + // still safe to return it (since it's in curUnsafeValue)? 
+ return value, intent, mvccScanner.curUnsafeValue.MVCCValueHeader, nil } // MVCCGetAsTxn constructs a temporary transaction from the given transaction @@ -1976,6 +2001,13 @@ func mvccPutInternal( versionValue := MVCCValue{} versionValue.Value = value versionValue.LocalTimestamp = localTimestamp + + if buildutil.CrdbTestBuild { + if seq, seqOK := kvnemesisutil.FromContext(ctx); seqOK { + versionValue.KVNemesisSeq.Set(int64(seq)) + } + } + if !versionValue.LocalTimestampNeeded(versionKey.Timestamp) || !writer.ShouldWriteLocalTimestamps(ctx) { versionValue.LocalTimestamp = hlc.ClockTimestamp{} @@ -3226,6 +3258,11 @@ func MVCCDeleteRangeUsingTombstone( if !value.LocalTimestampNeeded(timestamp) || !rw.ShouldWriteLocalTimestamps(ctx) { value.LocalTimestamp = hlc.ClockTimestamp{} } + if buildutil.CrdbTestBuild { + if seq, ok := kvnemesisutil.FromContext(ctx); ok { + value.KVNemesisSeq.Set(int64(seq)) + } + } valueRaw, err := EncodeMVCCValue(value) if err != nil { return err @@ -3781,6 +3818,7 @@ func MVCCScan( UpperBound: endKey, }) defer iter.Close() + return mvccScanToKvs(ctx, iter, key, endKey, timestamp, opts) } diff --git a/pkg/storage/mvcc_test.go b/pkg/storage/mvcc_test.go index a148d1821fba..5793bf476ee3 100644 --- a/pkg/storage/mvcc_test.go +++ b/pkg/storage/mvcc_test.go @@ -278,7 +278,7 @@ func TestMVCCGetNoMoreOldVersion(t *testing.T) { } } -func TestMVCCGetAndDelete(t *testing.T) { +func TestMVCCGetWithValueHeader(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -286,32 +286,34 @@ func TestMVCCGetAndDelete(t *testing.T) { engine := NewDefaultInMemForTesting() defer engine.Close() - if err := MVCCPut(ctx, engine, nil, testKey1, hlc.Timestamp{WallTime: 1}, hlc.ClockTimestamp{}, value1, nil); err != nil { + if err := MVCCPut(ctx, engine, nil, testKey1, hlc.Timestamp{WallTime: 1, Logical: 1}, hlc.ClockTimestamp{WallTime: 1}, value1, nil); err != nil { t.Fatal(err) } - value, _, err := MVCCGet(ctx, engine, testKey1, 
hlc.Timestamp{WallTime: 2}, MVCCGetOptions{}) + value, _, vh, err := MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if value == nil { t.Fatal("the value should not be empty") } + require.Equal(t, hlc.ClockTimestamp{WallTime: 1}, vh.LocalTimestamp) - _, err = MVCCDelete(ctx, engine, nil, testKey1, hlc.Timestamp{WallTime: 3}, hlc.ClockTimestamp{}, nil) + _, err = MVCCDelete(ctx, engine, nil, testKey1, hlc.Timestamp{WallTime: 3}, hlc.ClockTimestamp{WallTime: 2, Logical: 1}, nil) if err != nil { t.Fatal(err) } // Read the latest version which should be deleted. - value, _, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{}) + value, _, vh, err = MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if value != nil { t.Fatal("the value should be empty") } + require.Zero(t, vh.LocalTimestamp) // Read the latest version with tombstone. - value, _, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, + value, _, vh, err = MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{Tombstones: true}) if err != nil { t.Fatal(err) @@ -319,9 +321,11 @@ func TestMVCCGetAndDelete(t *testing.T) { t.Fatalf("the value should be non-nil with empty RawBytes; got %+v", value) } + require.Equal(t, hlc.ClockTimestamp{WallTime: 2, Logical: 1}, vh.LocalTimestamp) + // Read the old version which should still exist. 
for _, logical := range []int32{0, math.MaxInt32} { - value, _, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2, Logical: logical}, + value, _, vh, err := MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2, Logical: logical}, MVCCGetOptions{}) if err != nil { t.Fatal(err) @@ -329,6 +333,7 @@ func TestMVCCGetAndDelete(t *testing.T) { if value == nil { t.Fatal("the value should not be empty") } + require.Equal(t, hlc.ClockTimestamp{WallTime: 1}, vh.LocalTimestamp) } } diff --git a/pkg/storage/mvcc_value.go b/pkg/storage/mvcc_value.go index 98ff415e9ef8..288cf5ed5bdf 100644 --- a/pkg/storage/mvcc_value.go +++ b/pkg/storage/mvcc_value.go @@ -253,14 +253,12 @@ func decodeExtendedMVCCValue(buf []byte) (MVCCValue, error) { if len(buf) < int(headerSize) { return MVCCValue{}, errMVCCValueMissingHeader } - var header enginepb.MVCCValueHeader + var v MVCCValue // NOTE: we don't use protoutil to avoid passing header through an interface, // which would cause a heap allocation and incur the cost of dynamic dispatch. - if err := header.Unmarshal(buf[extendedPreludeSize:headerSize]); err != nil { + if err := v.MVCCValueHeader.Unmarshal(buf[extendedPreludeSize:headerSize]); err != nil { return MVCCValue{}, errors.Wrapf(err, "unmarshaling MVCCValueHeader") } - var v MVCCValue - v.LocalTimestamp = header.LocalTimestamp v.Value.RawBytes = buf[headerSize:] return v, nil } diff --git a/pkg/storage/point_synthesizing_iter.go b/pkg/storage/point_synthesizing_iter.go index a993d624bcf7..1274c58c3186 100644 --- a/pkg/storage/point_synthesizing_iter.go +++ b/pkg/storage/point_synthesizing_iter.go @@ -667,7 +667,7 @@ func (i *PointSynthesizingIter) Key() MVCCKey { // UnsafeKey implements MVCCIterator. 
func (i *PointSynthesizingIter) UnsafeKey() MVCCKey { if i.atPoint { - return i.iterKey + return i.iterKey.Clone() // didn't fix it } if i.rangeKeysIdx >= i.rangeKeysEnd || i.rangeKeysIdx < 0 { return MVCCKey{} @@ -675,13 +675,13 @@ func (i *PointSynthesizingIter) UnsafeKey() MVCCKey { return MVCCKey{ Key: i.rangeKeysPos, Timestamp: i.rangeKeys[i.rangeKeysIdx].Timestamp, - } + }.Clone() // didn't fix it } // UnsafeRawKey implements MVCCIterator. func (i *PointSynthesizingIter) UnsafeRawKey() []byte { if i.atPoint { - return i.iter.UnsafeRawKey() + return append([]byte(nil), i.iter.UnsafeRawKey()...) // didn't fix it } i.rawKeyBuf = EncodeMVCCKeyToBuf(i.rawKeyBuf[:0], i.UnsafeKey()) return i.rawKeyBuf @@ -690,7 +690,7 @@ func (i *PointSynthesizingIter) UnsafeRawKey() []byte { // UnsafeRawMVCCKey implements MVCCIterator. func (i *PointSynthesizingIter) UnsafeRawMVCCKey() []byte { if i.atPoint { - return i.iter.UnsafeRawMVCCKey() + return append([]byte(nil), i.iter.UnsafeRawMVCCKey()...) // didn't fix it } i.rawKeyBuf = EncodeMVCCKeyToBuf(i.rawKeyBuf[:0], i.UnsafeKey()) return i.rawKeyBuf @@ -707,12 +707,12 @@ func (i *PointSynthesizingIter) Value() []byte { // UnsafeValue implements MVCCIterator. func (i *PointSynthesizingIter) UnsafeValue() []byte { if i.atPoint { - return i.iter.UnsafeValue() + return append([]byte(nil), i.iter.UnsafeValue()...) // didn't fix it } if i.rangeKeysIdx >= len(i.rangeKeys) || i.rangeKeysIdx < 0 { return nil } - return i.rangeKeys[i.rangeKeysIdx].Value + return append([]byte(nil), i.rangeKeys[i.rangeKeysIdx].Value...) // didn't fix it } // MVCCValueLenAndIsTombstone implements the MVCCIterator interface. 
diff --git a/pkg/testutils/lint/passes/fmtsafe/functions.go b/pkg/testutils/lint/passes/fmtsafe/functions.go index d5a2ffa82b73..8bfa1a16ea7c 100644 --- a/pkg/testutils/lint/passes/fmtsafe/functions.go +++ b/pkg/testutils/lint/passes/fmtsafe/functions.go @@ -154,6 +154,8 @@ var requireConstFmt = map[string]bool{ "(github.com/cockroachdb/cockroach/pkg/sql/logictest/logictestbase.stdlogger).Fatalf": true, "(github.com/cockroachdb/cockroach/pkg/sql/logictest/logictestbase.stdlogger).Logf": true, + "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis.l": true, + // Error things are populated in the init() message. } diff --git a/pkg/util/buildutil/BUILD.bazel b/pkg/util/buildutil/BUILD.bazel index 4b8cc18cf7c3..f0728e977920 100644 --- a/pkg/util/buildutil/BUILD.bazel +++ b/pkg/util/buildutil/BUILD.bazel @@ -13,6 +13,7 @@ go_library( }), importpath = "github.com/cockroachdb/cockroach/pkg/util/buildutil", visibility = ["//visibility:public"], + deps = ["//pkg/util/buildutil/testingint"], ) REMOVE_GO_BUILD_CONSTRAINTS = "cat $< | grep -v '//go:build' | grep -v '// +build' > $@" @@ -36,7 +37,10 @@ go_test( srcs = ["crdb_test_test.go"], args = ["-test.timeout=295s"], embed = [":buildutil"], # keep - deps = ["@com_github_stretchr_testify//require"], + deps = [ + "//pkg/build/bazel", + "@com_github_stretchr_testify//require", + ], ) get_x_data(name = "get_x_data") diff --git a/pkg/util/buildutil/crdb_test_off.go b/pkg/util/buildutil/crdb_test_off.go index bde569cf2984..8c07b4efbd90 100644 --- a/pkg/util/buildutil/crdb_test_off.go +++ b/pkg/util/buildutil/crdb_test_off.go @@ -19,3 +19,34 @@ package buildutil // metamorphic-style perturbations that will not affect test results but will // exercise different parts of the code. const CrdbTestBuild = false + +// TestingInt64 is an empty struct that can be used as a `gogoproto.casttype` in +// proto messages. It uses no space. 
When the crdb_test build tag is set, this +// type is instead represented by a RealTestingInt64. +type TestingInt64 struct{} + +// Unmarshal implements (part of) protoutil.Message. +func (m *TestingInt64) Unmarshal([]byte) error { return nil } + +// Marshal implements (part of) protoutil.Message. +func (m *TestingInt64) Marshal([]byte) error { return nil } + +// MarshalToSizedBuffer implements (part of) protoutil.Message. +func (m *TestingInt64) MarshalToSizedBuffer([]byte) (int, error) { return 0, nil } + +// Size implements (part of) protoutil.Message. +func (m *TestingInt64) Size() int { return 0 } + +// Equal implements `gogoproto.equal`. +func (m *TestingInt64) Equal(n interface{}) bool { + if n == nil { + return m == nil + } + return *m == *(n.(*TestingInt64)) +} + +// Set is a no-op. +func (m *TestingInt64) Set(int64) {} + +// Get returns zero. +func (m TestingInt64) Get() int64 { return 0 } diff --git a/pkg/util/buildutil/crdb_test_on.go b/pkg/util/buildutil/crdb_test_on.go index 29163bda9b18..560023ea944e 100644 --- a/pkg/util/buildutil/crdb_test_on.go +++ b/pkg/util/buildutil/crdb_test_on.go @@ -13,9 +13,13 @@ package buildutil +import "github.com/cockroachdb/cockroach/pkg/util/buildutil/testingint" + // CrdbTestBuild is a flag that is set to true if the binary was compiled // with the 'crdb_test' build tag (which is the case for all test targets). This // flag can be used to enable expensive checks, test randomizations, or other // metamorphic-style perturbations that will not affect test results but will // exercise different parts of the code. 
const CrdbTestBuild = true + +type TestingInt64 = testingint.RealTestingInt64 diff --git a/pkg/util/buildutil/crdb_test_test.go b/pkg/util/buildutil/crdb_test_test.go index 784d986b1004..727185b5ae19 100644 --- a/pkg/util/buildutil/crdb_test_test.go +++ b/pkg/util/buildutil/crdb_test_test.go @@ -13,11 +13,14 @@ package buildutil import ( "testing" + "github.com/cockroachdb/cockroach/pkg/build/bazel" "github.com/stretchr/testify/require" ) func TestCrdbTestOn(t *testing.T) { // Sanity-check: make sure CrdbTestBuild is set. This should be true for - // any test. - require.True(t, CrdbTestBuild) + // any test built with bazel. + if bazel.BuiltWithBazel() { + require.True(t, CrdbTestBuild) + } } diff --git a/pkg/util/buildutil/testingint/BUILD.bazel b/pkg/util/buildutil/testingint/BUILD.bazel new file mode 100644 index 000000000000..b5c5f4448f58 --- /dev/null +++ b/pkg/util/buildutil/testingint/BUILD.bazel @@ -0,0 +1,23 @@ +load("//build/bazelutil/unused_checker:unused.bzl", "get_x_data") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "testingint", + srcs = ["testing_int64.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/util/buildutil/testingint", + visibility = ["//visibility:public"], + deps = [ + "@com_github_gogo_protobuf//proto", + "@com_github_pkg_errors//:errors", + ], +) + +go_test( + name = "testingint_test", + srcs = ["testing_int64_test.go"], + args = ["-test.timeout=295s"], + embed = [":testingint"], + deps = ["@com_github_stretchr_testify//require"], +) + +get_x_data(name = "get_x_data") diff --git a/pkg/util/buildutil/testingint/testing_int64.go b/pkg/util/buildutil/testingint/testing_int64.go new file mode 100644 index 000000000000..53e0383e8475 --- /dev/null +++ b/pkg/util/buildutil/testingint/testing_int64.go @@ -0,0 +1,68 @@ +// Copyright 2022 The Cockroach Authors. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. +// + +package testingint + +import ( + gogoproto "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +// RealTestingInt64 is an int64 with methods that allow it to be used as a +// `gogoproto.casttype`, and which has a getter/setter. See +// `buildutil.TestingInt64`. +type RealTestingInt64 int64 + +// Unmarshal implements (a part of) protoutil.Message. +func (m *RealTestingInt64) Unmarshal(buf []byte) error { + x, n := gogoproto.DecodeVarint(buf) + if n == 0 { + return errors.Errorf("unable to unmarshal %x as varint", buf) + } + *m = RealTestingInt64(x) + return nil +} + +// Marshal implements (a part of) protoutil.Message. +func (m *RealTestingInt64) Marshal(buf []byte) error { + _ = append(buf[:0], gogoproto.EncodeVarint(uint64(*m))...) + return nil +} + +// MarshalToSizedBuffer implements (a part of) protoutil.Message. +func (m *RealTestingInt64) MarshalToSizedBuffer(buf []byte) (int, error) { + sl := gogoproto.EncodeVarint(uint64(*m)) + _ = append(buf[:len(buf)-len(sl)], sl...) + return len(sl), nil +} + +// Size implements (a part of) protoutil.Message. +func (m *RealTestingInt64) Size() int { + return len(gogoproto.EncodeVarint(uint64(*m))) +} + +// Equal implements (gogoproto.equal). +func (m *RealTestingInt64) Equal(n interface{}) bool { + if n == nil { + return m == nil + } + return *m == *(n.(*RealTestingInt64)) +} + +// Set updates the receiver. Not thread safe. +func (m *RealTestingInt64) Set(n int64) { + *m = RealTestingInt64(n) +} + +// Get reads the receiver. Not thread safe. 
+func (m RealTestingInt64) Get() int64 { + return int64(m) +} diff --git a/pkg/util/buildutil/testingint/testing_int64_test.go b/pkg/util/buildutil/testingint/testing_int64_test.go new file mode 100644 index 000000000000..193aa62226a8 --- /dev/null +++ b/pkg/util/buildutil/testingint/testing_int64_test.go @@ -0,0 +1,37 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. +// + +package testingint + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTestingInt64(t *testing.T) { + m := RealTestingInt64(123) + require.Equal(t, 1, m.Size()) + buf1 := make([]byte, m.Size()) + require.NoError(t, m.Marshal(buf1)) + buf2 := make([]byte, m.Size()) + n, err := m.MarshalToSizedBuffer(buf2) + require.NoError(t, err) + require.Equal(t, m.Size(), n) + + var r1 RealTestingInt64 + require.NoError(t, r1.Unmarshal(buf1)) + require.EqualValues(t, r1, 123) + + var r2 RealTestingInt64 + require.NoError(t, r2.Unmarshal(buf2)) + require.EqualValues(t, r2, 123) +}