diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html
index a7fc2552b0f6..63f8c48759e9 100644
--- a/docs/generated/settings/settings.html
+++ b/docs/generated/settings/settings.html
@@ -33,7 +33,7 @@
kv.bulk_io_write.max_rate | byte size | 8.0 EiB | the rate limit (bytes/sec) to use for writes to disk on behalf of bulk io ops |
kv.bulk_sst.sync_size | byte size | 2.0 MiB | threshold after which non-Rocks SST writes must fsync (0 disables) |
kv.closed_timestamp.close_fraction | float | 0.2 | fraction of closed timestamp target duration specifying how frequently the closed timestamp is advanced |
-kv.closed_timestamp.follower_reads_enabled | boolean | false | allow (all) replicas to serve consistent historical reads based on closed timestamp information |
+kv.closed_timestamp.follower_reads_enabled | boolean | true | allow (all) replicas to serve consistent historical reads based on closed timestamp information |
kv.closed_timestamp.target_duration | duration | 30s | if nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this duration |
kv.follower_read.target_multiple | float | 3 | if above 1, encourages the distsender to perform a read against the closest replica if a request is older than kv.closed_timestamp.target_duration * (1 + kv.closed_timestamp.close_fraction * this) less a clock uncertainty interval. This value also is used to create follower_timestamp(). (WARNING: may compromise cluster stability or correctness; do not edit without supervision) |
kv.import.batch_size | byte size | 32 MiB | the maximum size of the payload in an AddSSTable request (WARNING: may compromise cluster stability or correctness; do not edit without supervision) |
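Note: this change flips follower reads on by default. A minimal sketch, assuming a locally running cluster on the default port and the lib/pq driver (both assumptions, not part of this change), of how an operator could opt back out over the SQL wire protocol:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // CockroachDB speaks the Postgres wire protocol
)

func main() {
	// Placeholder connection string; adjust host, port, and user for your cluster.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Restore the previous default for clusters that do not want follower reads.
	if _, err := db.Exec(
		`SET CLUSTER SETTING kv.closed_timestamp.follower_reads_enabled = false`,
	); err != nil {
		log.Fatal(err)
	}

	var enabled string
	if err := db.QueryRow(
		`SHOW CLUSTER SETTING kv.closed_timestamp.follower_reads_enabled`,
	).Scan(&enabled); err != nil {
		log.Fatal(err)
	}
	fmt.Println("follower reads enabled:", enabled)
}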
diff --git a/pkg/storage/client_replica_test.go b/pkg/storage/client_replica_test.go
index e1cbf26d3b8f..d5c62600038c 100644
--- a/pkg/storage/client_replica_test.go
+++ b/pkg/storage/client_replica_test.go
@@ -1603,8 +1603,22 @@ func TestRangeInfo(t *testing.T) {
if pErr != nil {
t.Fatal(pErr)
}
- lhsLease, _ = lhsReplica1.GetLease()
- rhsLease, _ = rhsReplica1.GetLease()
+ // Retry reading the lease from replica1 until we observe the new leases
+ // triggered above. This protects against the case where the lease transfer
+ // has been applied on replica0 but not yet on replica1 at the time of the
+ // above query. In that scenario replica0 can serve a follower read whose
+ // RangeInfo contains the updated lease, while the call to GetLease below
+ // may still return the prior lease because the transfer has not yet been
+ // applied on replica1.
+ prevLStart, prevRStart := lhsLease.Start, rhsLease.Start
+ testutils.SucceedsSoon(t, func() error {
+ lhsLease, _ = lhsReplica1.GetLease()
+ rhsLease, _ = rhsReplica1.GetLease()
+ if prevLStart.Less(lhsLease.Start) && prevRStart.Less(rhsLease.Start) {
+ return nil
+ }
+ return fmt.Errorf("waiting for lease transfers to be applied on replica1")
+ })
expRangeInfos = []roachpb.RangeInfo{
{
Desc: *lhsReplica1.Desc(),
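For readers unfamiliar with the helper used above: testutils.SucceedsSoon retries a closure until it returns nil, fataling the test if it keeps failing past a deadline. A stripped-down, self-contained sketch of that retry idiom; the timeout, backoff, and toy condition here are illustrative assumptions, not the real helper's internals:

package main

import (
	"fmt"
	"time"
)

// succeedsSoon is a stand-in for testutils.SucceedsSoon: retry fn with a
// short sleep between attempts until it returns nil or the deadline passes.
// The real helper fatals the test instead of returning an error.
func succeedsSoon(timeout time.Duration, fn func() error) error {
	deadline := time.Now().Add(timeout)
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("condition never held: %v", err)
		}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	start := time.Now()
	// Toy condition standing in for "the lease transfer has been applied
	// on replica1": succeeds once ~50ms have elapsed.
	err := succeedsSoon(time.Second, func() error {
		if time.Since(start) < 50*time.Millisecond {
			return fmt.Errorf("waiting for lease transfer to apply")
		}
		return nil
	})
	fmt.Println("result:", err)
}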
diff --git a/pkg/storage/replica_follower_read.go b/pkg/storage/replica_follower_read.go
index 59bdcb747741..7ec03d95f927 100644
--- a/pkg/storage/replica_follower_read.go
+++ b/pkg/storage/replica_follower_read.go
@@ -32,7 +32,7 @@ import (
var FollowerReadsEnabled = settings.RegisterBoolSetting(
"kv.closed_timestamp.follower_reads_enabled",
"allow (all) replicas to serve consistent historical reads based on closed timestamp information",
- false,
+ true,
)

// canServeFollowerRead tests, when a range lease could not be
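For context on what this now-default setting gates: a read-only request whose timestamp trails the closed timestamp can be served by a nearby follower replica instead of the leaseholder. With the defaults shown above, that is roughly kv.closed_timestamp.target_duration * (1 + close_fraction * target_multiple) = 30s * (1 + 0.2 * 3) = 48s in the past. A hedged sketch of issuing such a historical read with AS OF SYSTEM TIME; the table name, interval, and connection string are placeholder assumptions:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// Placeholder connection string; adjust for your cluster.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A read at least ~48s in the past (under the default settings) is
	// eligible to be served by a follower rather than the leaseholder.
	var n int
	if err := db.QueryRow(
		`SELECT count(*) FROM t AS OF SYSTEM TIME '-1m'`,
	).Scan(&n); err != nil {
		log.Fatal(err)
	}
	fmt.Println("rows:", n)
}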