diff --git a/pkg/bench/rttanalysis/testdata/benchmark_expectations b/pkg/bench/rttanalysis/testdata/benchmark_expectations index d4c28fb817c1..8f6a1e46e4eb 100644 --- a/pkg/bench/rttanalysis/testdata/benchmark_expectations +++ b/pkg/bench/rttanalysis/testdata/benchmark_expectations @@ -53,9 +53,9 @@ exp,benchmark 11,GrantRole/grant_1_role 15,GrantRole/grant_2_roles 3,ORMQueries/activerecord_type_introspection_query -3,ORMQueries/django_table_introspection_1_table -3,ORMQueries/django_table_introspection_4_tables -3,ORMQueries/django_table_introspection_8_tables +6,ORMQueries/django_table_introspection_1_table +6,ORMQueries/django_table_introspection_4_tables +6,ORMQueries/django_table_introspection_8_tables 2,ORMQueries/has_column_privilege_using_attnum 2,ORMQueries/has_column_privilege_using_column_name 1,ORMQueries/has_schema_privilege_1 @@ -73,8 +73,8 @@ exp,benchmark 4,ORMQueries/information_schema._pg_index_position 3,ORMQueries/pg_attribute 3,ORMQueries/pg_class -9,ORMQueries/pg_is_other_temp_schema -17,ORMQueries/pg_is_other_temp_schema_multiple_times +7,ORMQueries/pg_is_other_temp_schema +7,ORMQueries/pg_is_other_temp_schema_multiple_times 4,ORMQueries/pg_my_temp_schema 4,ORMQueries/pg_my_temp_schema_multiple_times 4,ORMQueries/pg_namespace diff --git a/pkg/cmd/roachtest/tests/activerecord_blocklist.go b/pkg/cmd/roachtest/tests/activerecord_blocklist.go index e9607611ba47..d5561b27414d 100644 --- a/pkg/cmd/roachtest/tests/activerecord_blocklist.go +++ b/pkg/cmd/roachtest/tests/activerecord_blocklist.go @@ -71,6 +71,7 @@ var activeRecordIgnoreList = blocklist{ "LengthValidationTest#test_validates_size_of_association_using_within": "flaky - sometimes complains that a relation does not exist", "PostgresqlInfinityTest#test_where_clause_with_infinite_range_on_a_datetime_column": "flaky - sometimes complains that a relation does not exist", "PostgresqlIntervalTest#test_interval_type": "flaky", + "PostgresqlTimestampFixtureTest#test_bc_timestamp": "flaky - sometimes datetime format does not match", "PostgresqlTimestampWithAwareTypesTest#test_timestamp_with_zone_values_with_rails_time_zone_support_and_time_zone_set": "flaky - sometimes complains given Time instead of ActiveSupport::TimeWithZone", "PostgresqlTimestampWithTimeZoneTest#test_timestamp_with_zone_values_with_rails_time_zone_support_and_timestamptz_and_time_zone_set": "flaky - sometimes complains given Time instead of ActiveSupport::TimeWithZone", "RelationTest#test_finding_last_with_arel_order": "flaky - sometimes complains that a relation does not exist", diff --git a/pkg/kv/kvserver/allocator/storepool/BUILD.bazel b/pkg/kv/kvserver/allocator/storepool/BUILD.bazel index 5264758d4e4e..e44210e9d9d7 100644 --- a/pkg/kv/kvserver/allocator/storepool/BUILD.bazel +++ b/pkg/kv/kvserver/allocator/storepool/BUILD.bazel @@ -4,6 +4,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "storepool", srcs = [ + "override_store_pool.go", "store_pool.go", "test_helpers.go", ], @@ -35,7 +36,10 @@ go_library( go_test( name = "storepool_test", - srcs = ["store_pool_test.go"], + srcs = [ + "override_store_pool_test.go", + "store_pool_test.go", + ], args = ["-test.timeout=295s"], embed = [":storepool"], deps = [ diff --git a/pkg/kv/kvserver/allocator/storepool/override_store_pool.go b/pkg/kv/kvserver/allocator/storepool/override_store_pool.go new file mode 100644 index 000000000000..cdd3d2f2a07f --- /dev/null +++ b/pkg/kv/kvserver/allocator/storepool/override_store_pool.go @@ -0,0 +1,172 @@ +// Copyright 2022 The 
Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package storepool
+
+import (
+	"context"
+
+	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator"
+	"github.com/cockroachdb/cockroach/pkg/roachpb"
+	"github.com/cockroachdb/cockroach/pkg/util"
+	"github.com/cockroachdb/cockroach/pkg/util/hlc"
+)
+
+// OverrideStorePool is an implementation of AllocatorStorePool that allows
+// overriding a node's liveness status for the purposes of evaluation by the
+// allocator, while otherwise delegating to an actual StorePool for all its
+// logic, including management and lookup of store descriptors.
+//
+// The OverrideStorePool is meant to provide a read-only overlay to a
+// StorePool, and as such, read-only methods are dispatched to the underlying
+// StorePool using the configured NodeLivenessFunc override. Methods that
+// mutate the state of the StorePool such as UpdateLocalStoreAfterRebalance
+// are instead no-ops.
+//
+// NB: Despite the fact that StorePool.DetailsMu is held in write mode in
+// some of the dispatched functions, these do not mutate the state of the
+// underlying StorePool.
+type OverrideStorePool struct {
+	sp *StorePool
+
+	overrideNodeLivenessFn NodeLivenessFunc
+}
+
+var _ AllocatorStorePool = &OverrideStorePool{}
+
+// NewOverrideStorePool constructs an OverrideStorePool that delegates to the
+// given StorePool, substituting the provided NodeLivenessFunc when evaluating
+// node liveness.
+func NewOverrideStorePool(storePool *StorePool, nl NodeLivenessFunc) *OverrideStorePool {
+	return &OverrideStorePool{
+		sp:                     storePool,
+		overrideNodeLivenessFn: nl,
+	}
+}
+
+// String implements the AllocatorStorePool interface.
+func (o *OverrideStorePool) String() string {
+	return o.sp.statusString(o.overrideNodeLivenessFn)
+}
+
+// IsStoreReadyForRoutineReplicaTransfer implements the AllocatorStorePool interface.
+func (o *OverrideStorePool) IsStoreReadyForRoutineReplicaTransfer(
+	ctx context.Context, targetStoreID roachpb.StoreID,
+) bool {
+	return o.sp.isStoreReadyForRoutineReplicaTransferInternal(ctx, targetStoreID, o.overrideNodeLivenessFn)
+}
+
+// DecommissioningReplicas implements the AllocatorStorePool interface.
+func (o *OverrideStorePool) DecommissioningReplicas(
+	repls []roachpb.ReplicaDescriptor,
+) []roachpb.ReplicaDescriptor {
+	return o.sp.decommissioningReplicasWithLiveness(repls, o.overrideNodeLivenessFn)
+}
+
+// GetStoreList implements the AllocatorStorePool interface.
+func (o *OverrideStorePool) GetStoreList(
+	filter StoreFilter,
+) (StoreList, int, ThrottledStoreReasons) {
+	o.sp.DetailsMu.Lock()
+	defer o.sp.DetailsMu.Unlock()
+
+	var storeIDs roachpb.StoreIDSlice
+	for storeID := range o.sp.DetailsMu.StoreDetails {
+		storeIDs = append(storeIDs, storeID)
+	}
+	return o.sp.getStoreListFromIDsLocked(storeIDs, o.overrideNodeLivenessFn, filter)
+}
+
+// GetStoreListFromIDs implements the AllocatorStorePool interface.
+func (o *OverrideStorePool) GetStoreListFromIDs(
+	storeIDs roachpb.StoreIDSlice, filter StoreFilter,
+) (StoreList, int, ThrottledStoreReasons) {
+	o.sp.DetailsMu.Lock()
+	defer o.sp.DetailsMu.Unlock()
+	return o.sp.getStoreListFromIDsLocked(storeIDs, o.overrideNodeLivenessFn, filter)
+}
+
+// LiveAndDeadReplicas implements the AllocatorStorePool interface.
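A minimal usage sketch of the wrapper above, for context: a caller holding an existing *StorePool can overlay hypothetical liveness for a node and then ask read-only questions against the overlay. The sketch's package name, helper function, basePool, and replicas are placeholders; NewOverrideStorePool, NodeLivenessFunc, the NodeLivenessFn field, and DecommissioningReplicas come from this patch.

package storepooloverlay // illustrative only

import (
	"time"

	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/storepool"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// decommissioningUnderOverride reports which of the given replicas would be
// considered decommissioning if node 3 were decommissioning, without mutating
// the underlying pool. basePool and replicas are supplied by the caller.
func decommissioningUnderOverride(
	basePool *storepool.StorePool, replicas []roachpb.ReplicaDescriptor,
) []roachpb.ReplicaDescriptor {
	overrides := map[roachpb.NodeID]livenesspb.NodeLivenessStatus{
		3: livenesspb.NodeLivenessStatus_DECOMMISSIONING,
	}
	overlay := storepool.NewOverrideStorePool(basePool, func(
		nid roachpb.NodeID, now time.Time, timeUntilStoreDead time.Duration,
	) livenesspb.NodeLivenessStatus {
		if status, ok := overrides[nid]; ok {
			return status
		}
		// Fall back to the real liveness view for every other node.
		return basePool.NodeLivenessFn(nid, now, timeUntilStoreDead)
	})
	return overlay.DecommissioningReplicas(replicas)
}

LiveAndDeadReplicas below, like the remaining read-only methods, delegates in the same way with the overridden liveness function.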
+func (o *OverrideStorePool) LiveAndDeadReplicas( + repls []roachpb.ReplicaDescriptor, includeSuspectAndDrainingStores bool, +) (liveReplicas, deadReplicas []roachpb.ReplicaDescriptor) { + return o.sp.liveAndDeadReplicasWithLiveness(repls, o.overrideNodeLivenessFn, includeSuspectAndDrainingStores) +} + +// ClusterNodeCount implements the AllocatorStorePool interface. +func (o *OverrideStorePool) ClusterNodeCount() int { + return o.sp.ClusterNodeCount() +} + +// IsDeterministic implements the AllocatorStorePool interface. +func (o *OverrideStorePool) IsDeterministic() bool { + return o.sp.deterministic +} + +// Clock implements the AllocatorStorePool interface. +func (o *OverrideStorePool) Clock() *hlc.Clock { + return o.sp.clock +} + +// GetLocalitiesByNode implements the AllocatorStorePool interface. +func (o *OverrideStorePool) GetLocalitiesByNode( + replicas []roachpb.ReplicaDescriptor, +) map[roachpb.NodeID]roachpb.Locality { + return o.sp.GetLocalitiesByNode(replicas) +} + +// GetLocalitiesByStore implements the AllocatorStorePool interface. +func (o *OverrideStorePool) GetLocalitiesByStore( + replicas []roachpb.ReplicaDescriptor, +) map[roachpb.StoreID]roachpb.Locality { + return o.sp.GetLocalitiesByStore(replicas) +} + +// GetStores implements the AllocatorStorePool interface. +func (o *OverrideStorePool) GetStores() map[roachpb.StoreID]roachpb.StoreDescriptor { + return o.sp.GetStores() +} + +// GetStoreDescriptor implements the AllocatorStorePool interface. +func (o *OverrideStorePool) GetStoreDescriptor( + storeID roachpb.StoreID, +) (roachpb.StoreDescriptor, bool) { + return o.sp.GetStoreDescriptor(storeID) +} + +// GossipNodeIDAddress implements the AllocatorStorePool interface. +func (o *OverrideStorePool) GossipNodeIDAddress( + nodeID roachpb.NodeID, +) (*util.UnresolvedAddr, error) { + return o.sp.GossipNodeIDAddress(nodeID) +} + +// UpdateLocalStoreAfterRebalance implements the AllocatorStorePool interface. +// This override method is a no-op, as +// StorePool.UpdateLocalStoreAfterRebalance(..) is not a read-only method and +// mutates the state of the held store details. +func (o *OverrideStorePool) UpdateLocalStoreAfterRebalance( + _ roachpb.StoreID, _ allocator.RangeUsageInfo, _ roachpb.ReplicaChangeType, +) { +} + +// UpdateLocalStoresAfterLeaseTransfer implements the AllocatorStorePool interface. +// This override method is a no-op, as +// StorePool.UpdateLocalStoresAfterLeaseTransfer(..) is not a read-only method and +// mutates the state of the held store details. +func (o *OverrideStorePool) UpdateLocalStoresAfterLeaseTransfer( + _ roachpb.StoreID, _ roachpb.StoreID, _ float64, +) { +} + +// UpdateLocalStoreAfterRelocate implements the AllocatorStorePool interface. +// This override method is a no-op, as +// StorePool.UpdateLocalStoreAfterRelocate(..) is not a read-only method and +// mutates the state of the held store details. +func (o *OverrideStorePool) UpdateLocalStoreAfterRelocate( + _, _ []roachpb.ReplicationTarget, _, _ []roachpb.ReplicaDescriptor, _ roachpb.StoreID, _ float64, +) { +} diff --git a/pkg/kv/kvserver/allocator/storepool/override_store_pool_test.go b/pkg/kv/kvserver/allocator/storepool/override_store_pool_test.go new file mode 100644 index 000000000000..155fc1bade06 --- /dev/null +++ b/pkg/kv/kvserver/allocator/storepool/override_store_pool_test.go @@ -0,0 +1,373 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package storepool
+
+import (
+	"context"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
+	"github.com/cockroachdb/cockroach/pkg/roachpb"
+	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
+	"github.com/cockroachdb/cockroach/pkg/testutils/gossiputil"
+	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
+	"github.com/cockroachdb/cockroach/pkg/util/log"
+	"github.com/stretchr/testify/require"
+)
+
+// TestOverrideStorePoolStatusString tests the status string output of the
+// store pool implementation, including any liveness overrides.
+func TestOverrideStorePoolStatusString(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+	ctx := context.Background()
+	st := cluster.MakeTestingClusterSettings()
+	stopper, g, _, testStorePool, mnl := CreateTestStorePool(ctx, st,
+		TestTimeUntilStoreDead, false, /* deterministic */
+		func() int { return 10 }, /* nodeCount */
+		livenesspb.NodeLivenessStatus_DEAD)
+	defer stopper.Stop(ctx)
+	sg := gossiputil.NewStoreGossiper(g)
+
+	livenessOverrides := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus)
+	sp := NewOverrideStorePool(testStorePool, func(nid roachpb.NodeID, now time.Time, timeUntilStoreDead time.Duration) livenesspb.NodeLivenessStatus {
+		if overriddenLiveness, ok := livenessOverrides[nid]; ok {
+			return overriddenLiveness
+		}
+
+		return mnl.NodeLivenessFunc(nid, now, timeUntilStoreDead)
+	})
+
+	stores := []*roachpb.StoreDescriptor{
+		{
+			StoreID: 1,
+			Node:    roachpb.NodeDescriptor{NodeID: 1},
+		},
+		{
+			StoreID: 2,
+			Node:    roachpb.NodeDescriptor{NodeID: 2},
+		},
+		{
+			StoreID: 3,
+			Node:    roachpb.NodeDescriptor{NodeID: 3},
+		},
+		{
+			StoreID: 4,
+			Node:    roachpb.NodeDescriptor{NodeID: 4},
+		},
+		{
+			StoreID: 5,
+			Node:    roachpb.NodeDescriptor{NodeID: 5},
+		},
+	}
+
+	sg.GossipStores(stores, t)
+	for i := 1; i <= 5; i++ {
+		mnl.SetNodeStatus(roachpb.NodeID(i), livenesspb.NodeLivenessStatus_LIVE)
+	}
+
+	// Override node 2 as dead.
+	livenessOverrides[roachpb.NodeID(2)] = livenesspb.NodeLivenessStatus_DEAD
+
+	// Override node 3 as decommissioning.
+	livenessOverrides[roachpb.NodeID(3)] = livenesspb.NodeLivenessStatus_DECOMMISSIONING
+
+	// Mark node 5 as draining.
+	mnl.SetNodeStatus(5, livenesspb.NodeLivenessStatus_DRAINING)
+
+	require.Equal(t, "1: range-count=0 fraction-used=0.00\n"+
+		"2 (status=1): range-count=0 fraction-used=0.00\n"+
+		"3 (status=5): range-count=0 fraction-used=0.00\n"+
+		"4: range-count=0 fraction-used=0.00\n"+
+		"5 (status=7): range-count=0 fraction-used=0.00\n",
+		sp.String(),
+	)
+}
+
+// TestOverrideStorePoolDecommissioningReplicas validates the ability to use
+// both liveness and liveness overrides in determining the set of
+// decommissioning replicas.
+func TestOverrideStorePoolDecommissioningReplicas(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + stopper, g, _, testStorePool, mnl := CreateTestStorePool(ctx, st, + TestTimeUntilStoreDead, false, /* deterministic */ + func() int { return 10 }, /* nodeCount */ + livenesspb.NodeLivenessStatus_DEAD) + defer stopper.Stop(ctx) + sg := gossiputil.NewStoreGossiper(g) + + livenessOverrides := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus) + sp := NewOverrideStorePool(testStorePool, func(nid roachpb.NodeID, now time.Time, timeUntilStoreDead time.Duration) livenesspb.NodeLivenessStatus { + if overriddenLiveness, ok := livenessOverrides[nid]; ok { + return overriddenLiveness + } + + return mnl.NodeLivenessFunc(nid, now, timeUntilStoreDead) + }) + + stores := []*roachpb.StoreDescriptor{ + { + StoreID: 1, + Node: roachpb.NodeDescriptor{NodeID: 1}, + }, + { + StoreID: 2, + Node: roachpb.NodeDescriptor{NodeID: 2}, + }, + { + StoreID: 3, + Node: roachpb.NodeDescriptor{NodeID: 3}, + }, + { + StoreID: 4, + Node: roachpb.NodeDescriptor{NodeID: 4}, + }, + { + StoreID: 5, + Node: roachpb.NodeDescriptor{NodeID: 5}, + }, + } + + replicas := []roachpb.ReplicaDescriptor{ + { + NodeID: 1, + StoreID: 1, + ReplicaID: 1, + }, + { + NodeID: 2, + StoreID: 2, + ReplicaID: 2, + }, + { + NodeID: 3, + StoreID: 3, + ReplicaID: 4, + }, + { + NodeID: 4, + StoreID: 4, + ReplicaID: 4, + }, + { + NodeID: 5, + StoreID: 5, + ReplicaID: 5, + }, + } + + sg.GossipStores(stores, t) + for i := 1; i <= 5; i++ { + mnl.SetNodeStatus(roachpb.NodeID(i), livenesspb.NodeLivenessStatus_LIVE) + } + + liveReplicas, deadReplicas := sp.LiveAndDeadReplicas(replicas, false /* includeSuspectAndDrainingStores */) + require.Equalf(t, 5, len(liveReplicas), "expected five live replicas, found %d (%v)", len(liveReplicas), liveReplicas) + require.Emptyf(t, deadReplicas, "expected no dead replicas initially, found %d (%v)", len(deadReplicas), deadReplicas) + // Mark node 4 as decommissioning. + mnl.SetNodeStatus(4, livenesspb.NodeLivenessStatus_DECOMMISSIONING) + // Mark node 5 as dead. + mnl.SetNodeStatus(5, livenesspb.NodeLivenessStatus_DEAD) + + // Override node 3 as decommissioning. + livenessOverrides[roachpb.NodeID(3)] = livenesspb.NodeLivenessStatus_DECOMMISSIONING + + liveReplicas, deadReplicas = sp.LiveAndDeadReplicas(replicas, false /* includeSuspectAndDrainingStores */) + // Decommissioning replicas are considered live. + if a, e := liveReplicas, replicas[:4]; !reflect.DeepEqual(a, e) { + t.Fatalf("expected live replicas %+v; got %+v", e, a) + } + if a, e := deadReplicas, replicas[4:]; !reflect.DeepEqual(a, e) { + t.Fatalf("expected dead replicas %+v; got %+v", e, a) + } + + decommissioningReplicas := sp.DecommissioningReplicas(replicas) + if a, e := decommissioningReplicas, replicas[2:4]; !reflect.DeepEqual(a, e) { + t.Fatalf("expected decommissioning replicas %+v; got %+v", e, a) + } +} + +// TestOverrideStorePoolGetStoreList tests the functionality of the store list +// with and without liveness overrides. +func TestOverrideStorePoolGetStoreList(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + // We're going to manually mark stores dead in this test. 
+ stopper, g, _, testStorePool, mnl := CreateTestStorePool(ctx, st, + TestTimeUntilStoreDead, false, /* deterministic */ + func() int { return 10 }, /* nodeCount */ + livenesspb.NodeLivenessStatus_DEAD) + defer stopper.Stop(ctx) + sg := gossiputil.NewStoreGossiper(g) + + livenessOverrides := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus) + sp := NewOverrideStorePool(testStorePool, func(nid roachpb.NodeID, now time.Time, timeUntilStoreDead time.Duration) livenesspb.NodeLivenessStatus { + if overriddenLiveness, ok := livenessOverrides[nid]; ok { + return overriddenLiveness + } + + return mnl.NodeLivenessFunc(nid, now, timeUntilStoreDead) + }) + + constraints := []roachpb.ConstraintsConjunction{ + { + Constraints: []roachpb.Constraint{ + {Type: roachpb.Constraint_REQUIRED, Value: "ssd"}, + {Type: roachpb.Constraint_REQUIRED, Value: "dc"}, + }, + }, + } + required := []string{"ssd", "dc"} + // Nothing yet. + sl, _, _ := sp.GetStoreList(StoreFilterNone) + sl = sl.ExcludeInvalid(constraints) + require.Emptyf(t, sl.Stores, "expected no stores, instead %+v", sl.Stores) + + matchingStore := roachpb.StoreDescriptor{ + StoreID: 1, + Node: roachpb.NodeDescriptor{NodeID: 1}, + Attrs: roachpb.Attributes{Attrs: required}, + } + supersetStore := roachpb.StoreDescriptor{ + StoreID: 2, + Node: roachpb.NodeDescriptor{NodeID: 2}, + Attrs: roachpb.Attributes{Attrs: append(required, "db")}, + } + unmatchingStore := roachpb.StoreDescriptor{ + StoreID: 3, + Node: roachpb.NodeDescriptor{NodeID: 3}, + Attrs: roachpb.Attributes{Attrs: []string{"ssd", "otherdc"}}, + } + emptyStore := roachpb.StoreDescriptor{ + StoreID: 4, + Node: roachpb.NodeDescriptor{NodeID: 4}, + Attrs: roachpb.Attributes{}, + } + deadStore := roachpb.StoreDescriptor{ + StoreID: 5, + Node: roachpb.NodeDescriptor{NodeID: 5}, + Attrs: roachpb.Attributes{Attrs: required}, + } + decommissioningStore := roachpb.StoreDescriptor{ + StoreID: 6, + Node: roachpb.NodeDescriptor{NodeID: 6}, + Attrs: roachpb.Attributes{Attrs: required}, + } + absentStore := roachpb.StoreDescriptor{ + StoreID: 7, + Node: roachpb.NodeDescriptor{NodeID: 7}, + Attrs: roachpb.Attributes{Attrs: required}, + } + suspectedStore := roachpb.StoreDescriptor{ + StoreID: 8, + Node: roachpb.NodeDescriptor{NodeID: 8}, + Attrs: roachpb.Attributes{Attrs: required}, + } + + // Gossip and mark all alive initially. + sg.GossipStores([]*roachpb.StoreDescriptor{ + &matchingStore, + &supersetStore, + &unmatchingStore, + &emptyStore, + &deadStore, + &decommissioningStore, + &suspectedStore, + // absentStore is purposefully not gossiped. + }, t) + for i := 1; i <= 8; i++ { + mnl.SetNodeStatus(roachpb.NodeID(i), livenesspb.NodeLivenessStatus_LIVE) + } + + // Set deadStore as dead. + livenessOverrides[deadStore.Node.NodeID] = livenesspb.NodeLivenessStatus_DEAD + + // Set decommissioningStore as decommissioning. + livenessOverrides[decommissioningStore.Node.NodeID] = livenesspb.NodeLivenessStatus_DECOMMISSIONING + + // Set suspectedStore as suspected. + testStorePool.DetailsMu.Lock() + testStorePool.DetailsMu.StoreDetails[suspectedStore.StoreID].LastAvailable = testStorePool.clock.Now().GoTime() + testStorePool.DetailsMu.StoreDetails[suspectedStore.StoreID].LastUnavailable = testStorePool.clock.Now().GoTime() + testStorePool.DetailsMu.Unlock() + + // No filter or limited set of store IDs. 
+ require.NoError(t, verifyStoreList( + sp, + constraints, + nil, /* storeIDs */ + StoreFilterNone, + []int{ + int(matchingStore.StoreID), + int(supersetStore.StoreID), + int(suspectedStore.StoreID), + }, + /* expectedAliveStoreCount */ 5, + /* expectedThrottledStoreCount */ 1, + )) + + // Filter out suspected stores but don't limit the set of store IDs. + require.NoError(t, verifyStoreList( + sp, + constraints, + nil, /* storeIDs */ + StoreFilterSuspect, + []int{ + int(matchingStore.StoreID), + int(supersetStore.StoreID), + }, + /* expectedAliveStoreCount */ 5, + /* expectedThrottledStoreCount */ 1, + )) + + limitToStoreIDs := roachpb.StoreIDSlice{ + matchingStore.StoreID, + decommissioningStore.StoreID, + absentStore.StoreID, + suspectedStore.StoreID, + } + + // No filter but limited to limitToStoreIDs. + // Note that supersetStore is not included. + require.NoError(t, verifyStoreList( + sp, + constraints, + limitToStoreIDs, + StoreFilterNone, + []int{ + int(matchingStore.StoreID), + int(suspectedStore.StoreID), + }, + /* expectedAliveStoreCount */ 2, + /* expectedThrottledStoreCount */ 1, + )) + + // Filter out suspected stores and limit to limitToStoreIDs. + // Note that suspectedStore is not included. + require.NoError(t, verifyStoreList( + sp, + constraints, + limitToStoreIDs, + StoreFilterSuspect, + []int{ + int(matchingStore.StoreID), + }, + /* expectedAliveStoreCount */ 2, + /* expectedThrottledStoreCount */ 1, + )) +} diff --git a/pkg/kv/kvserver/allocator/storepool/store_pool.go b/pkg/kv/kvserver/allocator/storepool/store_pool.go index a3ef04e9c9e2..7ff75c231574 100644 --- a/pkg/kv/kvserver/allocator/storepool/store_pool.go +++ b/pkg/kv/kvserver/allocator/storepool/store_pool.go @@ -334,6 +334,8 @@ type localityWithString struct { // AllocatorStorePool provides an interface for use by the allocator to a list // of all known stores in the cluster and information on their health. type AllocatorStorePool interface { + fmt.Stringer + // ClusterNodeCount returns the number of nodes that are possible allocation // targets. // See comment on StorePool.ClusterNodeCount(). @@ -487,6 +489,10 @@ func NewStorePool( } func (sp *StorePool) String() string { + return sp.statusString(sp.NodeLivenessFn) +} + +func (sp *StorePool) statusString(nl NodeLivenessFunc) string { sp.DetailsMu.RLock() defer sp.DetailsMu.RUnlock() @@ -504,7 +510,7 @@ func (sp *StorePool) String() string { for _, id := range ids { detail := sp.DetailsMu.StoreDetails[id] fmt.Fprintf(&buf, "%d", id) - status := detail.status(now, timeUntilStoreDead, sp.NodeLivenessFn, timeAfterStoreSuspect) + status := detail.status(now, timeUntilStoreDead, nl, timeAfterStoreSuspect) if status != storeStatusAvailable { fmt.Fprintf(&buf, " (status=%d)", status) } @@ -706,6 +712,14 @@ func (sp *StorePool) GetStoreDescriptor(storeID roachpb.StoreID) (roachpb.StoreD // from the provided repls and returns them in a slice. func (sp *StorePool) DecommissioningReplicas( repls []roachpb.ReplicaDescriptor, +) (decommissioningReplicas []roachpb.ReplicaDescriptor) { + return sp.decommissioningReplicasWithLiveness(repls, sp.NodeLivenessFn) +} + +// decommissioningReplicasWithLiveness filters out replicas on decommissioning node/store +// from the provided repls and returns them in a slice, using the provided NodeLivenessFunc. 
+func (sp *StorePool) decommissioningReplicasWithLiveness( + repls []roachpb.ReplicaDescriptor, nl NodeLivenessFunc, ) (decommissioningReplicas []roachpb.ReplicaDescriptor) { sp.DetailsMu.Lock() defer sp.DetailsMu.Unlock() @@ -718,7 +732,7 @@ func (sp *StorePool) DecommissioningReplicas( for _, repl := range repls { detail := sp.GetStoreDetailLocked(repl.StoreID) - switch detail.status(now, timeUntilStoreDead, sp.NodeLivenessFn, timeAfterStoreSuspect) { + switch detail.status(now, timeUntilStoreDead, nl, timeAfterStoreSuspect) { case storeStatusDecommissioning: decommissioningReplicas = append(decommissioningReplicas, repl) } @@ -776,7 +790,7 @@ func (sp *StorePool) IsDead(storeID roachpb.StoreID) (bool, time.Duration, error // liveness or deadness at the moment) or an error if the store is not found in // the pool. func (sp *StorePool) IsUnknown(storeID roachpb.StoreID) (bool, error) { - status, err := sp.storeStatus(storeID) + status, err := sp.storeStatus(storeID, sp.NodeLivenessFn) if err != nil { return false, err } @@ -786,7 +800,7 @@ func (sp *StorePool) IsUnknown(storeID roachpb.StoreID) (bool, error) { // IsDraining returns true if the given store's status is `storeStatusDraining` // or an error if the store is not found in the pool. func (sp *StorePool) IsDraining(storeID roachpb.StoreID) (bool, error) { - status, err := sp.storeStatus(storeID) + status, err := sp.storeStatus(storeID, sp.NodeLivenessFn) if err != nil { return false, err } @@ -796,14 +810,16 @@ func (sp *StorePool) IsDraining(storeID roachpb.StoreID) (bool, error) { // IsLive returns true if the node is considered alive by the store pool or an error // if the store is not found in the pool. func (sp *StorePool) IsLive(storeID roachpb.StoreID) (bool, error) { - status, err := sp.storeStatus(storeID) + status, err := sp.storeStatus(storeID, sp.NodeLivenessFn) if err != nil { return false, err } return status == storeStatusAvailable, nil } -func (sp *StorePool) storeStatus(storeID roachpb.StoreID) (storeStatus, error) { +func (sp *StorePool) storeStatus( + storeID roachpb.StoreID, nl NodeLivenessFunc, +) (storeStatus, error) { sp.DetailsMu.Lock() defer sp.DetailsMu.Unlock() @@ -816,7 +832,7 @@ func (sp *StorePool) storeStatus(storeID roachpb.StoreID) (storeStatus, error) { now := sp.clock.Now().GoTime() timeUntilStoreDead := TimeUntilStoreDead.Get(&sp.st.SV) timeAfterStoreSuspect := TimeAfterStoreSuspect.Get(&sp.st.SV) - return sd.status(now, timeUntilStoreDead, sp.NodeLivenessFn, timeAfterStoreSuspect), nil + return sd.status(now, timeUntilStoreDead, nl, timeAfterStoreSuspect), nil } // LiveAndDeadReplicas divides the provided repls slice into two slices: the @@ -833,6 +849,16 @@ func (sp *StorePool) storeStatus(storeID roachpb.StoreID) (storeStatus, error) { // they are excluded from the returned slices. func (sp *StorePool) LiveAndDeadReplicas( repls []roachpb.ReplicaDescriptor, includeSuspectAndDrainingStores bool, +) (liveReplicas, deadReplicas []roachpb.ReplicaDescriptor) { + return sp.liveAndDeadReplicasWithLiveness(repls, sp.NodeLivenessFn, includeSuspectAndDrainingStores) +} + +// liveAndDeadReplicasWithLiveness divides the provided repls slice into two slices: the +// first for live replicas, and the second for dead replicas, using the +// provided NodeLivenessFunc. +// See comment on StorePool.LiveAndDeadReplicas(..). 
+func (sp *StorePool) liveAndDeadReplicasWithLiveness( + repls []roachpb.ReplicaDescriptor, nl NodeLivenessFunc, includeSuspectAndDrainingStores bool, ) (liveReplicas, deadReplicas []roachpb.ReplicaDescriptor) { sp.DetailsMu.Lock() defer sp.DetailsMu.Unlock() @@ -844,7 +870,7 @@ func (sp *StorePool) LiveAndDeadReplicas( for _, repl := range repls { detail := sp.GetStoreDetailLocked(repl.StoreID) // Mark replica as dead if store is dead. - status := detail.status(now, timeUntilStoreDead, sp.NodeLivenessFn, timeAfterStoreSuspect) + status := detail.status(now, timeUntilStoreDead, nl, timeAfterStoreSuspect) switch status { case storeStatusDead: deadReplicas = append(deadReplicas, repl) @@ -1033,7 +1059,7 @@ func (sp *StorePool) GetStoreList(filter StoreFilter) (StoreList, int, Throttled for storeID := range sp.DetailsMu.StoreDetails { storeIDs = append(storeIDs, storeID) } - return sp.getStoreListFromIDsLocked(storeIDs, filter) + return sp.getStoreListFromIDsLocked(storeIDs, sp.NodeLivenessFn, filter) } // GetStoreListFromIDs is the same function as GetStoreList but only returns stores @@ -1043,13 +1069,13 @@ func (sp *StorePool) GetStoreListFromIDs( ) (StoreList, int, ThrottledStoreReasons) { sp.DetailsMu.Lock() defer sp.DetailsMu.Unlock() - return sp.getStoreListFromIDsLocked(storeIDs, filter) + return sp.getStoreListFromIDsLocked(storeIDs, sp.NodeLivenessFn, filter) } // getStoreListFromIDsRLocked is the same function as GetStoreList but requires // that the detailsMU read lock is held. func (sp *StorePool) getStoreListFromIDsLocked( - storeIDs roachpb.StoreIDSlice, filter StoreFilter, + storeIDs roachpb.StoreIDSlice, nl NodeLivenessFunc, filter StoreFilter, ) (StoreList, int, ThrottledStoreReasons) { if sp.deterministic { sort.Sort(storeIDs) @@ -1071,7 +1097,7 @@ func (sp *StorePool) getStoreListFromIDsLocked( // Do nothing; this store is not in the StorePool. continue } - switch s := detail.status(now, timeUntilStoreDead, sp.NodeLivenessFn, timeAfterStoreSuspect); s { + switch s := detail.status(now, timeUntilStoreDead, nl, timeAfterStoreSuspect); s { case storeStatusThrottled: aliveStoreCount++ throttled = append(throttled, detail.throttledBecause) @@ -1214,13 +1240,13 @@ func (sp *StorePool) IsStoreReadyForRoutineReplicaTransfer( if sp.OverrideIsStoreReadyForRoutineReplicaTransferFn != nil { return sp.OverrideIsStoreReadyForRoutineReplicaTransferFn(ctx, targetStoreID) } - return sp.isStoreReadyForRoutineReplicaTransferInternal(ctx, targetStoreID) + return sp.isStoreReadyForRoutineReplicaTransferInternal(ctx, targetStoreID, sp.NodeLivenessFn) } func (sp *StorePool) isStoreReadyForRoutineReplicaTransferInternal( - ctx context.Context, targetStoreID roachpb.StoreID, + ctx context.Context, targetStoreID roachpb.StoreID, nl NodeLivenessFunc, ) bool { - status, err := sp.storeStatus(targetStoreID) + status, err := sp.storeStatus(targetStoreID, nl) if err != nil { return false } diff --git a/pkg/kv/kvserver/allocator/storepool/store_pool_test.go b/pkg/kv/kvserver/allocator/storepool/store_pool_test.go index bdea163c45d1..51c456d5390e 100644 --- a/pkg/kv/kvserver/allocator/storepool/store_pool_test.go +++ b/pkg/kv/kvserver/allocator/storepool/store_pool_test.go @@ -78,7 +78,7 @@ func TestStorePoolGossipUpdate(t *testing.T) { // verifyStoreList ensures that the returned list of stores is correct. 
func verifyStoreList( - sp *StorePool, + sp AllocatorStorePool, constraints []roachpb.ConstraintsConjunction, storeIDs roachpb.StoreIDSlice, // optional filter StoreFilter, diff --git a/pkg/sql/faketreeeval/BUILD.bazel b/pkg/sql/faketreeeval/BUILD.bazel index 81f115711eb7..9887eede5948 100644 --- a/pkg/sql/faketreeeval/BUILD.bazel +++ b/pkg/sql/faketreeeval/BUILD.bazel @@ -14,7 +14,6 @@ go_library( "//pkg/security/username", "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", - "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/pgwire/pgnotice", diff --git a/pkg/sql/faketreeeval/evalctx.go b/pkg/sql/faketreeeval/evalctx.go index d47e8d4ce800..6036a582b6d3 100644 --- a/pkg/sql/faketreeeval/evalctx.go +++ b/pkg/sql/faketreeeval/evalctx.go @@ -22,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" @@ -55,11 +54,6 @@ func (so *DummySequenceOperators) GetSerialSequenceNameFromColumn( return nil, errors.WithStack(errSequenceOperators) } -// ParseQualifiedTableName is part of the eval.DatabaseCatalog interface. -func (so *DummySequenceOperators) ParseQualifiedTableName(sql string) (*tree.TableName, error) { - return nil, errors.WithStack(errSequenceOperators) -} - // ResolveTableName is part of the eval.DatabaseCatalog interface. func (so *DummySequenceOperators) ResolveTableName( ctx context.Context, tn *tree.TableName, @@ -74,13 +68,6 @@ func (so *DummySequenceOperators) SchemaExists( return false, errors.WithStack(errSequenceOperators) } -// IsTableVisible is part of the eval.DatabaseCatalog interface. -func (so *DummySequenceOperators) IsTableVisible( - ctx context.Context, curDB string, searchPath sessiondata.SearchPath, tableID oid.Oid, -) (bool, bool, error) { - return false, false, errors.WithStack(errSequenceOperators) -} - // IsTypeVisible is part of the eval.DatabaseCatalog interface. func (so *DummySequenceOperators) IsTypeVisible( ctx context.Context, curDB string, searchPath sessiondata.SearchPath, typeID oid.Oid, @@ -373,23 +360,11 @@ func (ep *DummyEvalPlanner) ValidateAllMultiRegionZoneConfigsInCurrentDatabase( return errors.WithStack(errEvalPlanner) } -// ParseQualifiedTableName is part of the eval.DatabaseCatalog interface. -func (ep *DummyEvalPlanner) ParseQualifiedTableName(sql string) (*tree.TableName, error) { - return parser.ParseQualifiedTableName(sql) -} - // SchemaExists is part of the eval.DatabaseCatalog interface. func (ep *DummyEvalPlanner) SchemaExists(ctx context.Context, dbName, scName string) (bool, error) { return false, errors.WithStack(errEvalPlanner) } -// IsTableVisible is part of the eval.DatabaseCatalog interface. -func (ep *DummyEvalPlanner) IsTableVisible( - ctx context.Context, curDB string, searchPath sessiondata.SearchPath, tableID oid.Oid, -) (bool, bool, error) { - return false, false, errors.WithStack(errEvalPlanner) -} - // IsTypeVisible is part of the eval.DatabaseCatalog interface. 
func (ep *DummyEvalPlanner) IsTypeVisible( ctx context.Context, curDB string, searchPath sessiondata.SearchPath, typeID oid.Oid, diff --git a/pkg/sql/importer/import_table_creation.go b/pkg/sql/importer/import_table_creation.go index 5474b2c27ce0..54b63352d5c7 100644 --- a/pkg/sql/importer/import_table_creation.go +++ b/pkg/sql/importer/import_table_creation.go @@ -27,7 +27,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/faketreeeval" - "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -290,16 +289,6 @@ func (so *importSequenceOperators) GetSerialSequenceNameFromColumn( return nil, errors.WithStack(errSequenceOperators) } -// ParseQualifiedTableName implements the eval.DatabaseCatalog interface. -func (so *importSequenceOperators) ParseQualifiedTableName(sql string) (*tree.TableName, error) { - name, err := parser.ParseTableName(sql) - if err != nil { - return nil, err - } - tn := name.ToTableName() - return &tn, nil -} - // ResolveTableName implements the eval.DatabaseCatalog interface. func (so *importSequenceOperators) ResolveTableName( ctx context.Context, tn *tree.TableName, @@ -314,13 +303,6 @@ func (so *importSequenceOperators) SchemaExists( return false, errSequenceOperators } -// IsTableVisible is part of the eval.DatabaseCatalog interface. -func (so *importSequenceOperators) IsTableVisible( - ctx context.Context, curDB string, searchPath sessiondata.SearchPath, tableID oid.Oid, -) (bool, bool, error) { - return false, false, errors.WithStack(errSequenceOperators) -} - // IsTypeVisible is part of the eval.DatabaseCatalog interface. func (so *importSequenceOperators) IsTypeVisible( ctx context.Context, curDB string, searchPath sessiondata.SearchPath, typeID oid.Oid, diff --git a/pkg/sql/logictest/testdata/logic_test/as_of b/pkg/sql/logictest/testdata/logic_test/as_of index b9facf210f2e..f4c0273ec8c2 100644 --- a/pkg/sql/logictest/testdata/logic_test/as_of +++ b/pkg/sql/logictest/testdata/logic_test/as_of @@ -50,9 +50,12 @@ SELECT * FROM t AS OF SYSTEM TIME follower_read_timestamp('boom') statement error pq: AS OF SYSTEM TIME: only constant expressions, with_min_timestamp, with_max_staleness, or follower_read_timestamp are allowed SELECT * FROM t AS OF SYSTEM TIME now() -statement error cannot specify timestamp in the future +statement error pq: AS OF SYSTEM TIME: interval value '10s' too large, AS OF interval must be <= -1µs SELECT * FROM t AS OF SYSTEM TIME '10s' +statement error pq: AS OF SYSTEM TIME: interval value '00:00:00.000001' too large, AS OF interval must be <= -1µs +SELECT * FROM t AS OF SYSTEM TIME interval '1 microsecond' + # Verify that the TxnTimestamp used to generate now() and current_timestamp() is # set to the historical timestamp. 
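The two new AS OF SYSTEM TIME error cases above come from a sign check on the interval. A minimal sketch of that rule follows; the real enforcement lives in asof.DatumToHLC later in this patch, and the helper name checkAsOfInterval plus the standalone package are illustrative only.

package asofcheck // illustrative only

import (
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/duration"
	"github.com/cockroachdb/errors"
)

// checkAsOfInterval mirrors the rule the new test cases exercise: an AS OF
// SYSTEM TIME interval must be non-zero and must not point into the future,
// since a positive offset would name a timestamp ahead of the statement time.
func checkAsOfInterval(d duration.Duration) error {
	if (d == duration.Duration{}) {
		return errors.Errorf("interval value too small, absolute value must be >= %v", time.Microsecond)
	}
	if d.Compare(duration.Duration{}) > 0 {
		return errors.Errorf("interval value too large, AS OF interval must be <= -%v", time.Microsecond)
	}
	return nil
}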
diff --git a/pkg/sql/logictest/testdata/logic_test/pg_builtins b/pkg/sql/logictest/testdata/logic_test/pg_builtins index 3702fbc10dd7..0aca9e1cf64a 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_builtins +++ b/pkg/sql/logictest/testdata/logic_test/pg_builtins @@ -99,10 +99,12 @@ SET DATABASE = test; query TB rowsort SELECT c.relname, pg_table_is_visible(c.oid) FROM pg_class c -WHERE c.relname IN ('is_visible', 'not_visible') +WHERE c.relname IN ('is_visible', 'not_visible', 'is_visible_pkey', 'not_visible_pkey') ---- -is_visible true -not_visible false +is_visible true +is_visible_pkey true +not_visible false +not_visible_pkey false # Looking up a table in a different database should return NULL. query B diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog index 7c4cb01ebb9c..1cd374f70b6f 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_catalog +++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog @@ -312,7 +312,8 @@ pg_catalog.pg_namespace CREATE TABLE pg_catalog.pg_namespace ( oid OID NULL, nspname NAME NOT NULL, nspowner OID NULL, - nspacl STRING[] NULL + nspacl STRING[] NULL, + INDEX pg_namespace_oid_idx (oid ASC) STORING (nspname, nspowner, nspacl) ) query TTBTTTB colnames @@ -2593,7 +2594,7 @@ objoid classoid objsubid description 4294967098 4294967123 0 pg_largeobject_metadata was created for compatibility and is currently unimplemented 4294967096 4294967123 0 locks held by active processes (empty - feature does not exist) 4294967095 4294967123 0 available materialized views (empty - feature does not exist) -4294967094 4294967123 0 available namespaces (incomplete; namespaces and databases are congruent in CockroachDB) +4294967094 4294967123 0 available namespaces 4294967093 4294967123 0 opclass (empty - Operator classes not supported yet) 4294967092 4294967123 0 operators (incomplete) 4294967091 4294967123 0 pg_opfamily was created for compatibility and is currently unimplemented diff --git a/pkg/sql/logictest/testdata/logic_test/pgoidtype b/pkg/sql/logictest/testdata/logic_test/pgoidtype index 3e829cd015a2..5a9c3fb1b599 100644 --- a/pkg/sql/logictest/testdata/logic_test/pgoidtype +++ b/pkg/sql/logictest/testdata/logic_test/pgoidtype @@ -579,3 +579,114 @@ SELECT 1:::OID >= -2147483649:::INT8 query error OID out of range: -2147483649 SELECT -2147483649:::INT8 >= 1:::OID + +# Test that we can cast index names to a regclass, and vice-versa. + +statement ok +CREATE TABLE table_with_indexes (a INT PRIMARY key, b INT) + +query TT +SELECT relname, oid::regclass::string from pg_class where oid='table_with_indexes_pkey'::regclass +---- +table_with_indexes_pkey table_with_indexes_pkey + +statement ok +CREATE INDEX my_b_index ON table_with_indexes(b) + +query TT +SELECT relname, oid::regclass::string from pg_class where oid='my_b_index'::regclass +---- +my_b_index my_b_index + +# Test that we can cast table and index names from different schemas. 
+ +statement ok +CREATE SCHEMA other_schema + +statement ok +CREATE TABLE other_schema.table_with_indexes (a INT PRIMARY key, b INT) + +statement ok +CREATE INDEX my_b_index ON other_schema.table_with_indexes(b) + +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='other_schema.table_with_indexes'::regclass +---- +table_with_indexes table_with_indexes other_schema + +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='other_schema.table_with_indexes_pkey'::regclass +---- +table_with_indexes_pkey table_with_indexes_pkey other_schema + +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='other_schema.my_b_index'::regclass +---- +my_b_index my_b_index other_schema + +# Changing the search_path should influence what gets resolved while casting. + +statement ok +SET search_path = other_schema, public + +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='table_with_indexes'::regclass +---- +table_with_indexes table_with_indexes other_schema + +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='table_with_indexes_pkey'::regclass +---- +table_with_indexes_pkey table_with_indexes_pkey other_schema + +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='my_b_index'::regclass +---- +my_b_index my_b_index other_schema + +statement ok +SET search_path = public, other_schema + +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='table_with_indexes'::regclass +---- +table_with_indexes table_with_indexes public + +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='table_with_indexes_pkey'::regclass +---- +table_with_indexes_pkey table_with_indexes_pkey public + +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='my_b_index'::regclass +---- +my_b_index my_b_index public + +statement ok +RESET search_path + +# Check that we can cast table names from pg_catalog also. +query TTT +SELECT c.relname, c.oid::regclass::string, n.nspname from pg_class c +JOIN pg_namespace n ON c.relnamespace = n.oid +WHERE c.oid='pg_type'::regclass +---- +pg_type pg_type pg_catalog diff --git a/pkg/sql/logictest/testdata/logic_test/temp_table b/pkg/sql/logictest/testdata/logic_test/temp_table index b3258268493d..11b103d55dae 100644 --- a/pkg/sql/logictest/testdata/logic_test/temp_table +++ b/pkg/sql/logictest/testdata/logic_test/temp_table @@ -19,6 +19,15 @@ SELECT table_name, type FROM [SHOW TABLES] ---- tbl table +# Verify that temp schema is visible in pg_namespace index. 
+let $tempSchemaOID +SELECT oid FROM pg_namespace where nspname LIKE 'pg_temp%' + +query B +SELECT nspname LIKE 'pg_temp%' FROM pg_namespace where oid = $tempSchemaOID +---- +true + statement ok DROP TABLE tbl diff --git a/pkg/sql/opt/exec/execbuilder/testdata/virtual b/pkg/sql/opt/exec/execbuilder/testdata/virtual index 0f7a1ca488c7..95ca0ad4e86c 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/virtual +++ b/pkg/sql/opt/exec/execbuilder/testdata/virtual @@ -130,19 +130,15 @@ ORDER BY distribution: local vectorized: true · -• sort -│ order: +conname +• render │ -└── • render +└── • virtual table lookup join + │ table: pg_namespace@pg_namespace_oid_idx + │ equality: (connamespace) = (oid) + │ pred: nspname = 'public' │ - └── • hash join - │ equality: (oid) = (connamespace) - │ - ├── • filter - │ │ filter: nspname = 'public' - │ │ - │ └── • virtual table - │ table: pg_namespace@primary + └── • sort + │ order: +conname │ └── • virtual table lookup join │ table: pg_attribute@pg_attribute_attrelid_idx diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index 404cbfadb78d..796e4ed30487 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -21,6 +21,7 @@ import ( "time" "unicode" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" @@ -28,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catprivilege" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" @@ -2047,12 +2049,13 @@ https://www.postgresql.org/docs/9.6/view-pg-matviews.html`, }, } +var adminOID = makeOidHasher().UserOid(username.MakeSQLUsernameFromPreNormalizedString("admin")) + var pgCatalogNamespaceTable = virtualSchemaTable{ - comment: `available namespaces (incomplete; namespaces and databases are congruent in CockroachDB) + comment: `available namespaces https://www.postgresql.org/docs/9.5/catalog-pg-namespace.html`, schema: vtable.PGCatalogNamespace, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - h := makeOidHasher() return forEachDatabaseDesc(ctx, p, dbContext, true, /* requiresPrivileges */ func(db catalog.DatabaseDescriptor) error { return forEachSchema(ctx, p, db, func(sc catalog.SchemaDescriptor) error { @@ -2065,10 +2068,7 @@ https://www.postgresql.org/docs/9.5/catalog-pg-namespace.html`, } } else if sc.SchemaKind() == catalog.SchemaPublic { // admin is the owner of the public schema. - // - // TODO(ajwerner): The public schema effectively carries the privileges - // of the database so consider using the database's owner for public. 
- ownerOID = h.UserOid(username.MakeSQLUsernameFromPreNormalizedString("admin")) + ownerOID = adminOID } return addRow( schemaOid(sc.GetID()), // oid @@ -2079,6 +2079,64 @@ https://www.postgresql.org/docs/9.5/catalog-pg-namespace.html`, }) }) }, + indexes: []virtualIndex{ + { + populate: func(ctx context.Context, unwrappedConstraint tree.Datum, p *planner, db catalog.DatabaseDescriptor, + addRow func(...tree.Datum) error, + ) (bool, error) { + coid := tree.MustBeDOid(unwrappedConstraint) + ooid := coid.Oid + sc, ok, err := func() (_ catalog.SchemaDescriptor, found bool, _ error) { + // The system database still does not have a physical public schema. + if !db.HasPublicSchemaWithDescriptor() && ooid == keys.SystemPublicSchemaID { + return schemadesc.GetPublicSchema(), true, nil + } + if sc, ok := schemadesc.GetVirtualSchemaByID(descpb.ID(ooid)); ok { + return sc, true, nil + } + if sc, err := p.Descriptors().GetImmutableSchemaByID( + ctx, p.Txn(), descpb.ID(ooid), tree.SchemaLookupFlags{Required: true}, + ); err == nil { + return sc, true, nil + } else if !sqlerrors.IsUndefinedSchemaError(err) { + return nil, false, err + } + // Fallback to looking for temporary schemas. + schemaNames, err := getSchemaNames(ctx, p, db) + if err != nil { + return nil, false, err + } + if scName, ok := schemaNames[descpb.ID(ooid)]; ok { + return schemadesc.NewTemporarySchema(scName, descpb.ID(ooid), db.GetID()), true, nil + } + return nil, false, nil + }() + if !ok || err != nil { + return false, err + } + ownerOID := tree.DNull + if sc.SchemaKind() == catalog.SchemaUserDefined { + var err error + ownerOID, err = getOwnerOID(ctx, p, sc) + if err != nil { + return false, err + } + } else if sc.SchemaKind() == catalog.SchemaPublic { + // admin is the owner of the public schema. + ownerOID = adminOID + } + if err := addRow( + schemaOid(sc.GetID()), // oid + tree.NewDString(sc.GetName()), // nspname + ownerOID, // nspowner + tree.DNull, // nspacl + ); err != nil { + return false, err + } + return true, nil + }, + }, + }, } var ( diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index 7ca8f4180de8..604d087b33f9 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -589,15 +589,6 @@ func (p *planner) GetTypeFromValidSQLSyntax(sql string) (*types.T, error) { return tree.ResolveType(context.TODO(), ref, p.semaCtx.GetTypeResolver()) } -// ParseQualifiedTableName implements the eval.DatabaseCatalog interface. -// This exists to get around a circular dependency between sql/sem/tree and -// sql/parser. sql/parser depends on tree to make objects, so tree cannot import -// ParseQualifiedTableName even though some builtins need that function. -// TODO(jordan): remove this once builtins can be moved outside of sql/sem/tree. -func (p *planner) ParseQualifiedTableName(sql string) (*tree.TableName, error) { - return parser.ParseQualifiedTableName(sql) -} - // ResolveTableName implements the eval.DatabaseCatalog interface. func (p *planner) ResolveTableName(ctx context.Context, tn *tree.TableName) (tree.ID, error) { flags := tree.ObjectLookupFlagsWithRequiredTableKind(tree.ResolveAnyTableKind) diff --git a/pkg/sql/resolver.go b/pkg/sql/resolver.go index c6d141722050..1608bac5b152 100644 --- a/pkg/sql/resolver.go +++ b/pkg/sql/resolver.go @@ -138,56 +138,6 @@ func (p *planner) SchemaExists(ctx context.Context, dbName, scName string) (foun return found, err } -// IsTableVisible is part of the eval.DatabaseCatalog interface. 
-func (p *planner) IsTableVisible( - ctx context.Context, curDB string, searchPath sessiondata.SearchPath, tableID oid.Oid, -) (isVisible, exists bool, err error) { - dbDesc, err := p.Descriptors().GetImmutableDatabaseByName(ctx, p.Txn(), curDB, - tree.DatabaseLookupFlags{ - Required: true, - AvoidLeased: p.skipDescriptorCache, - }) - if err != nil { - return false, false, err - } - // It is critical that we set ParentID on the flags in order to ensure that - // we do not do a very expensive, and ultimately fruitless lookup for an - // OID which definitely does not exist. Only OIDs corresponding to relations - // in the current database are relevant for this function. If we have already - // fetched all the tables in the current database, then we can use that - // fact to avoid a KV lookup. The descs layer relies on our setting this - // field in the flags to avoid that lookup. - flags := p.ObjectLookupFlags(true /* required */, false /* requireMutable */) - flags.ParentID = dbDesc.GetID() - tableDesc, err := p.Descriptors().GetImmutableTableByID(ctx, p.Txn(), descpb.ID(tableID), flags) - if err != nil { - // If a "not found" error happened here, we return "not exists" rather than - // the error. - if errors.Is(err, catalog.ErrDescriptorNotFound) || - errors.Is(err, catalog.ErrDescriptorDropped) || - pgerror.GetPGCode(err) == pgcode.UndefinedTable || - pgerror.GetPGCode(err) == pgcode.UndefinedObject { - return false, false, nil //nolint:returnerrcheck - } - return false, false, err - } - schemaID := tableDesc.GetParentSchemaID() - schemaDesc, err := p.Descriptors().GetImmutableSchemaByID(ctx, p.Txn(), schemaID, - tree.SchemaLookupFlags{ - Required: true, - AvoidLeased: p.skipDescriptorCache}) - if err != nil { - return false, false, err - } - iter := searchPath.Iter() - for scName, ok := iter.Next(); ok; scName, ok = iter.Next() { - if schemaDesc.GetName() == scName { - return true, true, nil - } - } - return false, true, nil -} - // IsTypeVisible is part of the eval.DatabaseCatalog interface. func (p *planner) IsTypeVisible( ctx context.Context, curDB string, searchPath sessiondata.SearchPath, typeID oid.Oid, diff --git a/pkg/sql/sem/asof/as_of.go b/pkg/sql/sem/asof/as_of.go index 557139d9f99e..f51b38ed1160 100644 --- a/pkg/sql/sem/asof/as_of.go +++ b/pkg/sql/sem/asof/as_of.go @@ -199,16 +199,30 @@ func Eval( } stmtTimestamp := evalCtx.GetStmtTimestamp() - ret.Timestamp, err = DatumToHLC(evalCtx, stmtTimestamp, d) + ret.Timestamp, err = DatumToHLC(evalCtx, stmtTimestamp, d, AsOf) if err != nil { return eval.AsOfSystemTime{}, errors.Wrap(err, "AS OF SYSTEM TIME") } return ret, nil } +// DatumToHLCUsage specifies which statement DatumToHLC() is used for. +type DatumToHLCUsage int64 + +const ( + // AsOf is when the DatumToHLC() is used for an AS OF SYSTEM TIME statement. + // In this case, if the interval is not synthetic, its value has to be negative + // and last longer than a nanosecond. + AsOf DatumToHLCUsage = iota + // Split is when the DatumToHLC() is used for a SPLIT statement. + // In this case, if the interval is not synthetic, its value has to be positive + // and last longer than a nanosecond. + Split +) + // DatumToHLC performs the conversion from a Datum to an HLC timestamp. 
func DatumToHLC( - evalCtx *eval.Context, stmtTimestamp time.Time, d tree.Datum, + evalCtx *eval.Context, stmtTimestamp time.Time, d tree.Datum, usage DatumToHLCUsage, ) (hlc.Timestamp, error) { ts := hlc.Timestamp{} var convErr error @@ -237,6 +251,12 @@ func DatumToHLC( if iv, err := tree.ParseDInterval(evalCtx.GetIntervalStyle(), s); err == nil { if (iv.Duration == duration.Duration{}) { convErr = errors.Errorf("interval value %v too small, absolute value must be >= %v", d, time.Microsecond) + } else if (usage == AsOf && iv.Duration.Compare(duration.Duration{}) > 0 && !syn) { + convErr = errors.Errorf("interval value %v too large, AS OF interval must be <= -%v", d, time.Microsecond) + } else if (usage == Split && iv.Duration.Compare(duration.Duration{}) < 0) { + // Do we need to consider if the timestamp is synthetic (see + // hlc.Timestamp.Synthetic), as for AS OF stmt? + convErr = errors.Errorf("interval value %v too small, SPLIT AT interval must be >= %v", d, time.Microsecond) } ts.WallTime = duration.Add(stmtTimestamp, iv.Duration).UnixNano() ts.Synthetic = syn @@ -252,6 +272,11 @@ func DatumToHLC( case *tree.DDecimal: ts, convErr = hlc.DecimalToHLC(&d.Decimal) case *tree.DInterval: + if (usage == AsOf && d.Duration.Compare(duration.Duration{}) > 0) { + convErr = errors.Errorf("interval value %v too large, AS OF interval must be <= -%v", d, time.Microsecond) + } else if (usage == Split && d.Duration.Compare(duration.Duration{}) < 0) { + convErr = errors.Errorf("interval value %v too small, SPLIT interval must be >= %v", d, time.Microsecond) + } ts.WallTime = duration.Add(stmtTimestamp, d.Duration).UnixNano() default: convErr = errors.WithSafeDetails( diff --git a/pkg/sql/sem/builtins/pg_builtins.go b/pkg/sql/sem/builtins/pg_builtins.go index 96065d1077c1..3e0aad341e19 100644 --- a/pkg/sql/sem/builtins/pg_builtins.go +++ b/pkg/sql/sem/builtins/pg_builtins.go @@ -1217,16 +1217,26 @@ SELECT description ReturnType: tree.FixedReturnType(types.Bool), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { oidArg := tree.MustBeDOid(args[0]) - isVisible, exists, err := evalCtx.Planner.IsTableVisible( - ctx, evalCtx.SessionData().Database, evalCtx.SessionData().SearchPath, oidArg.Oid, + row, err := evalCtx.Planner.QueryRowEx( + ctx, "pg_table_is_visible", + sessiondata.NoSessionDataOverride, + "SELECT n.nspname from pg_class c INNER LOOKUP JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.oid=$1 LIMIT 1", + oidArg.Oid, ) if err != nil { return nil, err } - if !exists { + if row == nil { return tree.DNull, nil } - return tree.MakeDBool(tree.DBool(isVisible)), nil + foundSchemaName := string(tree.MustBeDString(row[0])) + iter := evalCtx.SessionData().SearchPath.Iter() + for scName, ok := iter.Next(); ok; scName, ok = iter.Next() { + if foundSchemaName == scName { + return tree.DBoolTrue, nil + } + } + return tree.DBoolFalse, nil }, Info: "Returns whether the table with the given OID belongs to one of the schemas on the search path.", Volatility: volatility.Stable, diff --git a/pkg/sql/sem/eval/BUILD.bazel b/pkg/sql/sem/eval/BUILD.bazel index a59c2cb558a4..27d151487323 100644 --- a/pkg/sql/sem/eval/BUILD.bazel +++ b/pkg/sql/sem/eval/BUILD.bazel @@ -67,6 +67,7 @@ go_library( "//pkg/sql/sem/tree/treewindow", "//pkg/sql/sessiondata", "//pkg/sql/sessiondatapb", + "//pkg/sql/sqlerrors", "//pkg/sql/sqlliveness", "//pkg/sql/sqltelemetry", "//pkg/sql/types", diff --git a/pkg/sql/sem/eval/deps.go b/pkg/sql/sem/eval/deps.go index c3b8208c69d1..cb3ae873830f 100644 
--- a/pkg/sql/sem/eval/deps.go +++ b/pkg/sql/sem/eval/deps.go @@ -54,11 +54,6 @@ const ( // and is to be used from Context. type DatabaseCatalog interface { - // ParseQualifiedTableName parses a SQL string of the form - // `[ database_name . ] [ schema_name . ] table_name`. - // NB: this is deprecated! Use parser.ParseQualifiedTableName when possible. - ParseQualifiedTableName(sql string) (*tree.TableName, error) - // ResolveTableName expands the given table name and // makes it point to a valid object. // If the database name is not given, it uses the search path to find it, and @@ -70,12 +65,6 @@ type DatabaseCatalog interface { // whether it exists. SchemaExists(ctx context.Context, dbName, scName string) (found bool, err error) - // IsTableVisible checks if the table with the given ID belongs to a schema - // on the given sessiondata.SearchPath. - IsTableVisible( - ctx context.Context, curDB string, searchPath sessiondata.SearchPath, tableID oid.Oid, - ) (isVisible bool, exists bool, err error) - // IsTypeVisible checks if the type with the given ID belongs to a schema // on the given sessiondata.SearchPath. IsTypeVisible( diff --git a/pkg/sql/sem/eval/parse_doid.go b/pkg/sql/sem/eval/parse_doid.go index 41691008d963..b7fd8f364f9b 100644 --- a/pkg/sql/sem/eval/parse_doid.go +++ b/pkg/sql/sem/eval/parse_doid.go @@ -12,6 +12,7 @@ package eval import ( "context" + "fmt" "regexp" "strings" @@ -20,6 +21,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" @@ -178,12 +181,19 @@ func ParseDOid(ctx context.Context, evalCtx *Context, s string, t *types.T) (*tr if err != nil { return nil, err } - id, err := evalCtx.Planner.ResolveTableName(ctx, &tn) + if id, err := evalCtx.Planner.ResolveTableName(ctx, &tn); err == nil { + // tree.ID is a uint32, so this type conversion is safe. + return tree.NewDOidWithTypeAndName(oid.Oid(id), t, tn.ObjectName.String()), nil + } else if pgerror.GetPGCode(err) != pgcode.UndefinedTable { + return nil, err + } + // If the above resulted in an UndefinedTable error, then we can try + // searching for an index with this name, + oidRes, err := indexNameToOID(ctx, evalCtx, tn) if err != nil { return nil, err } - // tree.ID is a uint32, so this type conversion is safe. - return tree.NewDOidWithTypeAndName(oid.Oid(id), t, tn.ObjectName.String()), nil + return tree.NewDOidWithTypeAndName(oidRes, t, tn.ObjectName.String()), nil default: d, _ /* errSafeToIgnore */, err := evalCtx.Planner.ResolveOIDFromString(ctx, t, tree.NewDString(s)) @@ -191,6 +201,55 @@ func ParseDOid(ctx context.Context, evalCtx *Context, s string, t *types.T) (*tr } } +// indexNameToOID finds the OID for the given index. If the name is qualified, +// then the index must belong to that schema. Otherwise, the schemas are +// searched in order of the search_path. 
+func indexNameToOID(ctx context.Context, evalCtx *Context, tn tree.TableName) (oid.Oid, error) { + query := `SELECT c.oid FROM %[1]spg_catalog.pg_class AS c + JOIN %[1]spg_catalog.pg_namespace AS n ON c.relnamespace = n.oid + WHERE c.relname = $1 + AND n.nspname = $2 + LIMIT 1` + args := []interface{}{tn.Object(), tn.Schema()} + if !tn.ExplicitSchema { + // If there is no explicit schema, then we need a different query that + // looks for the object name for each schema in the search_path. Choose + // the first match in the order of the search_path array. There is an + // unused $2 placeholder in the query so that the call to QueryRow can + // be consolidated. + query = `WITH + current_schemas AS ( + SELECT * FROM unnest(current_schemas(true)) WITH ORDINALITY AS scname + ) + SELECT c.oid + FROM %[1]spg_catalog.pg_class AS c + JOIN %[1]spg_catalog.pg_namespace AS n ON c.relnamespace = n.oid + JOIN current_schemas AS cs ON cs.scname = n.nspname + WHERE c.relname = $1 + ORDER BY cs.ordinality ASC + LIMIT 1` + args = []interface{}{tn.Object()} + } + catalogPrefix := "" + if tn.ExplicitCatalog { + catalogPrefix = tn.CatalogName.String() + "." + } + row, err := evalCtx.Planner.QueryRowEx( + ctx, + "regclass-cast", + sessiondata.NoSessionDataOverride, + fmt.Sprintf(query, catalogPrefix), + args..., + ) + if err != nil { + return 0, err + } + if row == nil { + return 0, sqlerrors.NewUndefinedRelationError(&tn) + } + return tree.MustBeDOid(row[0]).Oid, nil +} + // castStringToRegClassTableName normalizes a TableName from a string. func castStringToRegClassTableName(s string) (tree.TableName, error) { components, err := splitIdentifierList(s) diff --git a/pkg/sql/split.go b/pkg/sql/split.go index 87f584e73a6b..c7b057d976cd 100644 --- a/pkg/sql/split.go +++ b/pkg/sql/split.go @@ -129,7 +129,7 @@ func parseExpirationTime( return hlc.MaxTimestamp, nil } stmtTimestamp := evalCtx.GetStmtTimestamp() - ts, err := asof.DatumToHLC(evalCtx, stmtTimestamp, d) + ts, err := asof.DatumToHLC(evalCtx, stmtTimestamp, d, asof.Split) if err != nil { return ts, errors.Wrap(err, "SPLIT AT") } diff --git a/pkg/sql/split_test.go b/pkg/sql/split_test.go index 806a7abc9dbf..29a6f6316253 100644 --- a/pkg/sql/split_test.go +++ b/pkg/sql/split_test.go @@ -147,7 +147,7 @@ func TestSplitAt(t *testing.T) { }, { in: "ALTER TABLE d.i SPLIT AT VALUES (17) WITH EXPIRATION '-1 day'::interval", - error: "SPLIT AT: expiration time should be greater than or equal to current time", + error: "SPLIT AT: interval value '-1 days' too small, SPLIT interval must be >= 1µs", }, { in: "ALTER TABLE d.i SPLIT AT VALUES (17) WITH EXPIRATION '0.1us'", diff --git a/pkg/sql/vtable/pg_catalog.go b/pkg/sql/vtable/pg_catalog.go index 4f517d857aef..8d5d64d0fc14 100644 --- a/pkg/sql/vtable/pg_catalog.go +++ b/pkg/sql/vtable/pg_catalog.go @@ -508,7 +508,8 @@ CREATE TABLE pg_catalog.pg_namespace ( oid OID, nspname NAME NOT NULL, nspowner OID, - nspacl STRING[] + nspacl STRING[], + INDEX (oid) )` // PGCatalogOpclass describes the schema of the pg_catalog.pg_opclass table.
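A short sketch of how call sites pick the new DatumToHLCUsage argument introduced above. It assumes a populated *eval.Context; the two helper functions are illustrative and not part of the change (the real call sites are asof.Eval and parseExpirationTime in split.go).

package asofusage // illustrative only

import (
	"github.com/cockroachdb/cockroach/pkg/sql/sem/asof"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
)

// historicalTimestamp converts an AS OF SYSTEM TIME datum. With asof.AsOf, an
// interval datum must point into the past (for example '-10s').
func historicalTimestamp(evalCtx *eval.Context, d tree.Datum) (hlc.Timestamp, error) {
	return asof.DatumToHLC(evalCtx, evalCtx.GetStmtTimestamp(), d, asof.AsOf)
}

// splitExpiration converts a SPLIT AT ... WITH EXPIRATION datum. With
// asof.Split, an interval datum must point into the future (for example
// '1 day'), matching the updated expectation in split_test.go above.
func splitExpiration(evalCtx *eval.Context, d tree.Datum) (hlc.Timestamp, error) {
	return asof.DatumToHLC(evalCtx, evalCtx.GetStmtTimestamp(), d, asof.Split)
}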