diff --git a/docs/tech-notes/version_upgrades.md b/docs/tech-notes/version_upgrades.md index 55c3226028fa..9305fb60a2fc 100644 --- a/docs/tech-notes/version_upgrades.md +++ b/docs/tech-notes/version_upgrades.md @@ -44,7 +44,7 @@ the specific fields should not be considered too much. They do not relate directly to the executable version! Instead, each `cockroach` executable has a range of supported -cluster versions (in the code: `minBinaryVersion` ... `binaryVersion`). +cluster versions (in the code: `minSupportedVersion` ... `latestVersion`). If a `cockroach` command observes a cluster version earlier than its minimum supported version, or later than its maximum supported version, it terminates. diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index 7f1950881269..7ffa53601546 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -8454,7 +8454,7 @@ func TestRestoringAcrossVersions(t *testing.T) { // Bump the version down to outside our MinBinarySupportedVersion, and write // it back out. This makes it ineligible for restore because of our restore // version policy. - minSupportedVersion := tc.ApplicationLayer(0).ClusterSettings().Version.BinaryMinSupportedVersion() + minSupportedVersion := tc.ApplicationLayer(0).ClusterSettings().Version.MinSupportedVersion() minSupportedVersion.Major -= 1 setManifestClusterVersion(minSupportedVersion) @@ -8468,7 +8468,7 @@ func TestRestoringAcrossVersions(t *testing.T) { // Bump the version down to the min supported binary version, and write it // back out. This makes it eligible for restore because of our restore // version policy. - minBinaryVersion := tc.ApplicationLayer(0).ClusterSettings().Version.BinaryMinSupportedVersion() + minBinaryVersion := tc.ApplicationLayer(0).ClusterSettings().Version.MinSupportedVersion() setManifestClusterVersion(minBinaryVersion) sqlDB.Exec(t, `RESTORE DATABASE r1 FROM 'nodelocal://1/cross_version'`) sqlDB.Exec(t, `DROP DATABASE r1`) diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index b71583ee5c3d..c4ffe8170fda 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -1937,7 +1937,7 @@ func (r *restoreResumer) validateJobIsResumable(execConfig *sql.ExecutorConfig) // the CreationClusterVersion may still be equal to binaryVersion, // which means the cluster restore will proceed. creationClusterVersion := r.job.Payload().CreationClusterVersion - binaryVersion := execConfig.Settings.Version.BinaryVersion() + binaryVersion := execConfig.Settings.Version.LatestVersion() isClusterRestore := details.DescriptorCoverage == tree.AllDescriptors if isClusterRestore && creationClusterVersion.Less(binaryVersion) { return clusterRestoreDuringUpgradeErr(creationClusterVersion, binaryVersion) diff --git a/pkg/ccl/backupccl/restore_mid_schema_change_test.go b/pkg/ccl/backupccl/restore_mid_schema_change_test.go index 85b36f308434..8eb030c04e8a 100644 --- a/pkg/ccl/backupccl/restore_mid_schema_change_test.go +++ b/pkg/ccl/backupccl/restore_mid_schema_change_test.go @@ -231,10 +231,10 @@ func restoreMidSchemaChange( // option to ensure the restore is successful on development branches.
This // is because, while the backups were generated on release branches and have // versions such as 22.2 in their manifest, the development branch will have - // a BinaryMinSupportedVersion offset by the clusterversion.DevOffset + // a MinSupportedVersion offset by the clusterversion.DevOffset // described in `pkg/clusterversion/cockroach_versions.go`. This will mean // that the manifest version is always less than the - // BinaryMinSupportedVersion which will in turn fail the restore unless we + // MinSupportedVersion which will in turn fail the restore unless we // pass in the specified option to elide the compatability check. restoreQuery := "RESTORE defaultdb.* FROM LATEST IN $1 WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION" if isClusterRestore { diff --git a/pkg/ccl/backupccl/restore_old_sequences_test.go b/pkg/ccl/backupccl/restore_old_sequences_test.go index e1bd2a5c3bbc..342bd97e0058 100644 --- a/pkg/ccl/backupccl/restore_old_sequences_test.go +++ b/pkg/ccl/backupccl/restore_old_sequences_test.go @@ -83,10 +83,10 @@ func restoreOldSequencesTest(exportDir string, isSchemaOnly bool) func(t *testin // option to ensure the restore is successful on development branches. This // is because, while the backups were generated on release branches and have // versions such as 22.2 in their manifest, the development branch will have - // a BinaryMinSupportedVersion offset by the clusterversion.DevOffset + // a MinSupportedVersion offset by the clusterversion.DevOffset // described in `pkg/clusterversion/cockroach_versions.go`. This will mean // that the manifest version is always less than the - // BinaryMinSupportedVersion which will in turn fail the restore unless we + // MinSupportedVersion which will in turn fail the restore unless we // pass in the specified option to elide the compatability check. restoreQuery := `RESTORE test.* FROM LATEST IN $1 WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION` if isSchemaOnly { diff --git a/pkg/ccl/backupccl/restore_old_versions_test.go b/pkg/ccl/backupccl/restore_old_versions_test.go index 1fd4da582d59..16fe9cf002c6 100644 --- a/pkg/ccl/backupccl/restore_old_versions_test.go +++ b/pkg/ccl/backupccl/restore_old_versions_test.go @@ -162,10 +162,10 @@ func restoreOldVersionClusterTest(exportDir string) func(t *testing.T) { // option to ensure the restore is successful on development branches. This // is because, while the backups were generated on release branches and have // versions such as 22.2 in their manifest, the development branch will have - // a BinaryMinSupportedVersion offset by the clusterversion.DevOffset + // a MinSupportedVersion offset by the clusterversion.DevOffset // described in `pkg/clusterversion/cockroach_versions.go`. This will mean // that the manifest version is always less than the - // BinaryMinSupportedVersion which will in turn fail the restore unless we + // MinSupportedVersion which will in turn fail the restore unless we // pass in the specified option to elide the compatability check. sqlDB.Exec(t, `RESTORE FROM LATEST IN $1 WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION`, localFoo) @@ -332,10 +332,10 @@ func fullClusterRestoreSystemRoleMembersWithoutIDs(exportDir string) func(t *tes // option to ensure the restore is successful on development branches. 
This // is because, while the backups were generated on release branches and have // versions such as 22.2 in their manifest, the development branch will have - // a BinaryMinSupportedVersion offset by the clusterversion.DevOffset + // a MinSupportedVersion offset by the clusterversion.DevOffset // described in `pkg/clusterversion/cockroach_versions.go`. This will mean // that the manifest version is always less than the - // BinaryMinSupportedVersion which will in turn fail the restore unless we + // MinSupportedVersion which will in turn fail the restore unless we // pass in the specified option to elide the compatability check. sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo)) @@ -367,10 +367,10 @@ func fullClusterRestoreSystemPrivilegesWithoutIDs(exportDir string) func(t *test // option to ensure the restore is successful on development branches. This // is because, while the backups were generated on release branches and have // versions such as 22.2 in their manifest, the development branch will have - // a BinaryMinSupportedVersion offset by the clusterversion.DevOffset + // a MinSupportedVersion offset by the clusterversion.DevOffset // described in `pkg/clusterversion/cockroach_versions.go`. This will mean // that the manifest version is always less than the - // BinaryMinSupportedVersion which will in turn fail the restore unless we + // MinSupportedVersion which will in turn fail the restore unless we // pass in the specified option to elide the compatability check. sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo)) @@ -402,10 +402,10 @@ func fullClusterRestoreSystemDatabaseRoleSettingsWithoutIDs(exportDir string) fu // option to ensure the restore is successful on development branches. This // is because, while the backups were generated on release branches and have // versions such as 22.2 in their manifest, the development branch will have - // a BinaryMinSupportedVersion offset by the clusterversion.DevOffset + // a MinSupportedVersion offset by the clusterversion.DevOffset // described in `pkg/clusterversion/cockroach_versions.go`. This will mean // that the manifest version is always less than the - // BinaryMinSupportedVersion which will in turn fail the restore unless we + // MinSupportedVersion which will in turn fail the restore unless we // pass in the specified option to elide the compatability check. sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo)) @@ -437,10 +437,10 @@ func fullClusterRestoreSystemExternalConnectionsWithoutIDs(exportDir string) fun // option to ensure the restore is successful on development branches. This // is because, while the backups were generated on release branches and have // versions such as 22.2 in their manifest, the development branch will have - // a BinaryMinSupportedVersion offset by the clusterversion.DevOffset + // a MinSupportedVersion offset by the clusterversion.DevOffset // described in `pkg/clusterversion/cockroach_versions.go`. This will mean // that the manifest version is always less than the - // BinaryMinSupportedVersion which will in turn fail the restore unless we + // MinSupportedVersion which will in turn fail the restore unless we // pass in the specified option to elide the compatability check. 
sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo)) diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go index 1ffca9b23c65..38d4049f1b38 100644 --- a/pkg/ccl/backupccl/restore_planning.go +++ b/pkg/ccl/backupccl/restore_planning.go @@ -1646,7 +1646,7 @@ func checkBackupManifestVersionCompatability( // We support restoring a backup that was taken on a cluster with a cluster // version >= the earliest binary version that we can interoperate with. - minimumRestoreableVersion := version.BinaryMinSupportedVersion() + minimumRestoreableVersion := version.MinSupportedVersion() currentActiveVersion := version.ActiveVersion(ctx) for i := range mainBackupManifests { @@ -1869,7 +1869,7 @@ func doRestorePlan( // Validate that we aren't in the middle of an upgrade. To avoid unforseen // issues, we want to avoid full cluster restores if it is possible that an // upgrade is in progress. We also check this during Resume. - binaryVersion := p.ExecCfg().Settings.Version.BinaryVersion() + binaryVersion := p.ExecCfg().Settings.Version.LatestVersion() clusterVersion := p.ExecCfg().Settings.Version.ActiveVersion(ctx).Version if clusterVersion.Less(binaryVersion) { return clusterRestoreDuringUpgradeErr(clusterVersion, binaryVersion) diff --git a/pkg/ccl/kvccl/kvtenantccl/upgradeccl/tenant_upgrade_test.go b/pkg/ccl/kvccl/kvtenantccl/upgradeccl/tenant_upgrade_test.go index 41f1a587def9..8a60cce5e360 100644 --- a/pkg/ccl/kvccl/kvtenantccl/upgradeccl/tenant_upgrade_test.go +++ b/pkg/ccl/kvccl/kvtenantccl/upgradeccl/tenant_upgrade_test.go @@ -271,7 +271,7 @@ func TestTenantUpgrade(t *testing.T) { v1, false, // initializeVersion ) - // Initialize the version to the BinaryMinSupportedVersion. + // Initialize the version to the MinSupportedVersion. require.NoError(t, clusterversion.Initialize(ctx, clusterversion.TestingBinaryMinSupportedVersion, &settings.SV)) @@ -429,7 +429,7 @@ func TestTenantUpgradeFailure(t *testing.T) { v0, false, // initializeVersion ) - // Initialize the version to the BinaryMinSupportedVersion. + // Initialize the version to the MinSupportedVersion. ts := serverutils.StartServerOnly(t, base.TestServerArgs{ DefaultTestTenant: base.TestControlsTenantsExplicitly, Settings: settings, diff --git a/pkg/ccl/kvccl/kvtenantccl/upgradeinterlockccl/local_test_util_test.go b/pkg/ccl/kvccl/kvtenantccl/upgradeinterlockccl/local_test_util_test.go index 65fcafa4c1a5..a9518368846d 100644 --- a/pkg/ccl/kvccl/kvtenantccl/upgradeinterlockccl/local_test_util_test.go +++ b/pkg/ccl/kvccl/kvtenantccl/upgradeinterlockccl/local_test_util_test.go @@ -118,7 +118,7 @@ func runTest(t *testing.T, variant sharedtestutil.TestVariant, test sharedtestut slinstance.DefaultHeartBeat.Override(ctx, &s.SV, heartbeatOverride) } - // Initialize the version to the BinaryMinSupportedVersion so that + // Initialize the version to the MinSupportedVersion so that // we can perform upgrades. settings := cluster.MakeTestingClusterSettingsWithVersions(bv, msv, false /* initializeVersion */) disableBackgroundTasks(settings) diff --git a/pkg/clusterversion/clusterversion.go b/pkg/clusterversion/clusterversion.go index e460a04f17a2..10e1d07e2d1e 100644 --- a/pkg/clusterversion/clusterversion.go +++ b/pkg/clusterversion/clusterversion.go @@ -8,11 +8,11 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
-// Package clusterversion defines the interfaces to interact with cluster/binary -// versions in order accommodate backward incompatible behaviors. It handles the -// feature gates and so must maintain a fairly lightweight set of dependencies. -// The upgrade sub-package handles advancing a cluster from one version to -// a later one. +// Package clusterversion defines the interfaces to interact with cluster +// versions in order to accommodate backward incompatible behaviors. It handles +// the feature gates and so must maintain a fairly lightweight set of +// dependencies. The upgrade sub-package handles advancing a cluster from one +// version to a later one. // // Ideally, every code change in a database would be backward compatible, but // this is not always possible. Some features, fixes, or cleanups need to @@ -21,15 +21,16 @@ // disruption. It works as follows: // // - Each node in the cluster is running a binary that was released at some -// version ("binary version"). We allow for rolling upgrades, so two nodes in -// the cluster may be running different binary versions. All nodes in a given -// cluster must be within 1 major release of each other (i.e. to upgrade two -// major releases, the cluster must first be rolled onto X+1 and then to X+2). +// version which corresponds to a certain logical cluster version ("latest +// version"). We allow for rolling upgrades, so two nodes in the cluster may +// be running different binary versions. All nodes in a given cluster must +// be within 1 major release of each other (i.e. to upgrade two major +// releases, the cluster must first be rolled onto X+1 and then to X+2). // - Separate from the build versions of the binaries, the cluster itself has a -// logical "active cluster version", the version all the binaries are -// currently operating at. This is used for two related things: first as a -// promise from the user that they'll never downgrade any nodes in the cluster -// to a binary below some "minimum supported version", and second, to unlock +// logical "active cluster version", the version all nodes are currently +// operating at. This is used for two related things: first as a promise +// from the user that they'll never downgrade any nodes in the cluster to a +// binary below some "minimum supported version", and second, to unlock // features that are not backwards compatible (which is now safe given that // the old binary will never be used). // - Each binary can operate within a "range of supported versions". When a @@ -108,12 +109,12 @@ type Handle interface { // node has, and it will too, eventually. IsActive(context.Context, Key) bool - // BinaryVersion returns the build version of this binary. - BinaryVersion() roachpb.Version + // LatestVersion returns the latest cluster version understood by this binary. + LatestVersion() roachpb.Version - // BinaryMinSupportedVersion returns the earliest binary version that can + // MinSupportedVersion returns the earliest cluster version that can // interoperate with this binary. - BinaryMinSupportedVersion() roachpb.Version + MinSupportedVersion() roachpb.Version // SetActiveVersion lets the caller set the given cluster version as the // currently active one. When a new active version is set, all subsequent @@ -148,7 +149,7 @@ type Handle interface { // handleImpl is a concrete implementation of Handle. It mostly relegates to the // underlying cluster version setting, though provides a way for callers to -// override the binary and minimum supported versions (for tests usually). 
+// override the latest and minimum supported versions (for tests usually). type handleImpl struct { // setting is the version that this handle operates on. setting *clusterVersionSetting @@ -156,20 +157,19 @@ type handleImpl struct { // immutable cluster version setting. sv *settings.Values - // Each handler stores its own view of the binary and minimum supported - // version. Tests can use `MakeVersionHandleWithOverride` to specify - // versions other than the baked in ones, but by default - // (`MakeVersionHandle`) they are initialized with this binary's build - // and minimum supported versions. - binaryVersion roachpb.Version - binaryMinSupportedVersion roachpb.Version + // Each handler stores its own view of the latest and minimum supported + // version. Tests can use `MakeVersionHandleWithOverride` to specify versions + // other than the baked in ones, but by default (`MakeVersionHandle`) they are + // initialized with this binary's latest and minimum supported versions. + latestVersion roachpb.Version + minSupportedVersion roachpb.Version } var _ Handle = (*handleImpl)(nil) -// MakeVersionHandle returns a Handle that has its binary and minimum -// supported versions initialized to this binary's build and it's minimum -// supported versions respectively. +// MakeVersionHandle returns a Handle that has its latest and minimum supported +// versions initialized to this binary's build and its minimum supported +// versions respectively. func MakeVersionHandle(sv *settings.Values) Handle { return MakeVersionHandleWithOverride(sv, Latest.Version(), MinSupported.Version()) } @@ -180,21 +180,21 @@ func MakeVersionHandle(sv *settings.Values) Handle { // It's typically used in tests that want to override the default binary and // minimum supported versions. func MakeVersionHandleWithOverride( - sv *settings.Values, binaryVersion, binaryMinSupportedVersion roachpb.Version, + sv *settings.Values, latestVersion, minSupportedVersion roachpb.Version, ) Handle { - return newHandleImpl(version, sv, binaryVersion, binaryMinSupportedVersion) + return newHandleImpl(version, sv, latestVersion, minSupportedVersion) } func newHandleImpl( setting *clusterVersionSetting, sv *settings.Values, - binaryVersion, binaryMinSupportedVersion roachpb.Version, + latestVersion, minSupportedVersion roachpb.Version, ) Handle { return &handleImpl{ - setting: setting, - sv: sv, - binaryVersion: binaryVersion, - binaryMinSupportedVersion: binaryMinSupportedVersion, + setting: setting, + sv: sv, + latestVersion: latestVersion, + minSupportedVersion: minSupportedVersion, } } @@ -240,14 +240,14 @@ func (v *handleImpl) IsActive(ctx context.Context, key Key) bool { return v.setting.isActive(ctx, v.sv, key) } -// BinaryVersion implements the Handle interface. -func (v *handleImpl) BinaryVersion() roachpb.Version { - return v.binaryVersion +// LatestVersion is part of the Handle interface. +func (v *handleImpl) LatestVersion() roachpb.Version { + return v.latestVersion } -// BinaryMinSupportedVersion implements the Handle interface. -func (v *handleImpl) BinaryMinSupportedVersion() roachpb.Version { - return v.binaryMinSupportedVersion +// MinSupportedVersion is part of the Handle interface. 
+func (v *handleImpl) MinSupportedVersion() roachpb.Version { + return v.minSupportedVersion } // IsActiveVersion returns true if the features of the supplied version are diff --git a/pkg/clusterversion/setting.go b/pkg/clusterversion/setting.go index 55824cbcf5b2..9f7ee9439308 100644 --- a/pkg/clusterversion/setting.go +++ b/pkg/clusterversion/setting.go @@ -41,9 +41,9 @@ const KeyVersionSetting = "version" // the version setting without looking at what's been persisted: The setting // specifies the minimum binary version we have to expect to be in a mixed // cluster with. We can't assume it is this binary's -// binaryMinSupportedVersion as the cluster could've started up earlier and +// minSupportedVersion as the cluster could've started up earlier and // enabled features that are no longer compatible it; we can't assume it's our -// binaryVersion as that would enable features that may trip up older versions +// latestVersion as that would enable features that may trip up older versions // running in the same cluster. Hence, only once we get word of the "safe" // version to use can we allow moving parts that actually need to know what's // going on. @@ -87,8 +87,8 @@ func (cv *clusterVersionSetting) initialize( // initializes it once more. // // It's also used in production code during bootstrap, where the version - // is first initialized to BinaryMinSupportedVersion and then - // re-initialized to BootstrapVersion (=BinaryVersion). + // is first initialized to MinSupportedVersion and then + // re-initialized to BootstrapVersion (=LatestVersion). if version.Less(ver.Version) { return errors.AssertionFailedf("cannot initialize version to %s because already set to: %s", version, ver) @@ -225,17 +225,17 @@ func (cv *clusterVersionSetting) validateBinaryVersions( ver roachpb.Version, sv *settings.Values, ) error { vh := sv.Opaque().(Handle) - if vh.BinaryMinSupportedVersion() == (roachpb.Version{}) { - panic("BinaryMinSupportedVersion not set") + if vh.MinSupportedVersion() == (roachpb.Version{}) { + panic("MinSupportedVersion not set") } - if vh.BinaryVersion().Less(ver) { + if vh.LatestVersion().Less(ver) { // TODO(tschottdorf): also ask gossip about other nodes. return errors.Errorf("cannot upgrade to %s: node running %s", - ver, vh.BinaryVersion()) + ver, vh.LatestVersion()) } - if ver.Less(vh.BinaryMinSupportedVersion()) { + if ver.Less(vh.MinSupportedVersion()) { return errors.Errorf("node at %s cannot run %s (minimum version is %s)", - vh.BinaryVersion(), ver, vh.BinaryMinSupportedVersion()) + vh.LatestVersion(), ver, vh.MinSupportedVersion()) } return nil } diff --git a/pkg/kv/kvserver/kvstorage/init.go b/pkg/kv/kvserver/kvstorage/init.go index 8b747ef18ab3..c34fa9b8e74c 100644 --- a/pkg/kv/kvserver/kvstorage/init.go +++ b/pkg/kv/kvserver/kvstorage/init.go @@ -506,7 +506,7 @@ func LoadAndReconcileReplicas(ctx context.Context, eng storage.Engine) ([]Replic } // Migrate into RaftReplicaID. This migration can be removed once the - // BinaryMinSupportedVersion is >= 23.1, and we can assert that + // MinSupportedVersion is >= 23.1, and we can assert that // repl.ReplicaID != 0 always holds. 
if descReplicaID != 0 { diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index b04a24c22cf5..e33b9dc9898f 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -177,7 +177,7 @@ func (tc *testContext) Start(ctx context.Context, t testing.TB, stopper *stop.St func (tc *testContext) StartWithStoreConfig( ctx context.Context, t testing.TB, stopper *stop.Stopper, cfg StoreConfig, ) { - tc.StartWithStoreConfigAndVersion(ctx, t, stopper, cfg, cfg.Settings.Version.BinaryVersion()) + tc.StartWithStoreConfigAndVersion(ctx, t, stopper, cfg, cfg.Settings.Version.LatestVersion()) } // StartWithStoreConfigAndVersion is like StartWithStoreConfig but additionally diff --git a/pkg/rpc/context_test.go b/pkg/rpc/context_test.go index 0cd9f0ed1b60..a4f6f8d4d1f4 100644 --- a/pkg/rpc/context_test.go +++ b/pkg/rpc/context_test.go @@ -1409,7 +1409,7 @@ func grpcRunKeepaliveTestCase(testCtx context.Context, c grpcKeepaliveTestCase) // Perform an initial request-response round trip. log.Infof(ctx, "first ping") - request := PingRequest{ServerVersion: clientCtx.Settings.Version.BinaryVersion()} + request := PingRequest{ServerVersion: clientCtx.Settings.Version.LatestVersion()} if err := heartbeatClient.Send(&request); err != nil { return err } diff --git a/pkg/rpc/heartbeat.go b/pkg/rpc/heartbeat.go index ffe8b8d39b4a..85f3044019de 100644 --- a/pkg/rpc/heartbeat.go +++ b/pkg/rpc/heartbeat.go @@ -101,7 +101,7 @@ func checkVersion( minVersion := activeVersion.Version if tenantID, isTenant := roachpb.ClientTenantFromContext(ctx); isTenant && !roachpb.IsSystemTenantID(tenantID.ToUint64()) { - minVersion = version.BinaryMinSupportedVersion() + minVersion = version.MinSupportedVersion() } if peerVersion.Less(minVersion) { return errors.Errorf( @@ -164,7 +164,7 @@ func (hs *HeartbeatService) Ping(ctx context.Context, request *PingRequest) (*Pi response := PingResponse{ Pong: request.Ping, ServerTime: hs.clock.Now().UnixNano(), - ServerVersion: hs.version.BinaryVersion(), + ServerVersion: hs.version.LatestVersion(), ClusterName: hs.clusterName, DisableClusterNameVerification: hs.disableClusterNameVerification, } diff --git a/pkg/rpc/heartbeat_test.go b/pkg/rpc/heartbeat_test.go index f49a6fe79f0d..7aadc1394193 100644 --- a/pkg/rpc/heartbeat_test.go +++ b/pkg/rpc/heartbeat_test.go @@ -56,7 +56,7 @@ func TestHeartbeatReply(t *testing.T) { request := &PingRequest{ Ping: "testPing", - ServerVersion: st.Version.BinaryVersion(), + ServerVersion: st.Version.LatestVersion(), } response, err := heartbeat.Ping(context.Background(), request) if err != nil { @@ -140,7 +140,7 @@ func TestManualHeartbeat(t *testing.T) { request := &PingRequest{ Ping: "testManual", - ServerVersion: st.Version.BinaryVersion(), + ServerVersion: st.Version.LatestVersion(), } manualHeartbeat.ready <- nil ctx := context.Background() @@ -196,7 +196,7 @@ func TestClusterIDCompare(t *testing.T) { request := &PingRequest{ Ping: "testPing", ClusterID: &td.clientClusterID, - ServerVersion: st.Version.BinaryVersion(), + ServerVersion: st.Version.LatestVersion(), } _, err := heartbeat.Ping(context.Background(), request) if td.expectError && err == nil { @@ -241,7 +241,7 @@ func TestNodeIDCompare(t *testing.T) { request := &PingRequest{ Ping: "testPing", TargetNodeID: td.clientNodeID, - ServerVersion: st.Version.BinaryVersion(), + ServerVersion: st.Version.LatestVersion(), } _, err := heartbeat.Ping(context.Background(), request) if td.expectError && err == nil { @@ -274,7 +274,7 @@ func 
TestTenantVersionCheck(t *testing.T) { request := &PingRequest{ Ping: "testPing", - ServerVersion: st.Version.BinaryMinSupportedVersion(), + ServerVersion: st.Version.MinSupportedVersion(), } const failedRE = `version compatibility check failed on ping request:` + ` cluster requires at least version .*, but peer has version .*` diff --git a/pkg/rpc/peer.go b/pkg/rpc/peer.go index 5faf1c342e21..aa3052c30d9f 100644 --- a/pkg/rpc/peer.go +++ b/pkg/rpc/peer.go @@ -362,7 +362,7 @@ func runSingleHeartbeat( request := &PingRequest{ OriginAddr: opts.AdvertiseAddr, TargetNodeID: k.NodeID, - ServerVersion: opts.Settings.Version.BinaryVersion(), + ServerVersion: opts.Settings.Version.LatestVersion(), LocalityAddress: opts.LocalityAddresses, ClusterID: &clusterID, OriginNodeID: opts.NodeID.Get(), diff --git a/pkg/server/init.go b/pkg/server/init.go index 5f68b0cbd34e..ad35ca084d30 100644 --- a/pkg/server/init.go +++ b/pkg/server/init.go @@ -273,7 +273,7 @@ func (s *initServer) ServeAndWait( // Bootstrap() did its job. At this point, we know that the cluster // version will be the bootstrap version (aka the binary version[1]), // but the version setting does not know yet (it was initialized as - // BinaryMinSupportedVersion because the engines were all + // MinSupportedVersion because the engines were all // uninitialized). Given that the bootstrap version was persisted to // all the engines, it's now safe for us to bump the version setting // itself and start operating at the latest cluster version. @@ -521,7 +521,7 @@ func (s *initServer) attemptJoinTo( } // DiskClusterVersion returns the cluster version synthesized from disk. This -// is always non-zero since it falls back to the BinaryMinSupportedVersion. +// is always non-zero since it falls back to the MinSupportedVersion. func (s *initServer) DiskClusterVersion() clusterversion.ClusterVersion { return s.inspectedDiskState.clusterVersion } @@ -637,8 +637,8 @@ func newInitServerConfig( cfg Config, getDialOpts func(context.Context, string, rpc.ConnectionClass) ([]grpc.DialOption, error), ) initServerCfg { - binaryVersion := cfg.Settings.Version.BinaryVersion() - binaryMinSupportedVersion := cfg.Settings.Version.BinaryMinSupportedVersion() + binaryVersion := cfg.Settings.Version.LatestVersion() + binaryMinSupportedVersion := cfg.Settings.Version.MinSupportedVersion() if knobs := cfg.TestingKnobs.Server; knobs != nil { if overrideVersion := knobs.(*TestingKnobs).BinaryVersionOverride; overrideVersion != (roachpb.Version{}) { // We are customizing the cluster version. We can only bootstrap a fresh diff --git a/pkg/server/migration.go b/pkg/server/migration.go index ba021fe2b0c7..af20205c07a4 100644 --- a/pkg/server/migration.go +++ b/pkg/server/migration.go @@ -53,9 +53,9 @@ func (m *migrationServer) ValidateTargetClusterVersion( // We're validating the following: // // node's minimum supported version <= target version <= node's binary version - if targetCV.Less(versionSetting.BinaryMinSupportedVersion()) { + if targetCV.Less(versionSetting.MinSupportedVersion()) { msg := fmt.Sprintf("target cluster version %s less than binary's min supported version %s", - targetCV, versionSetting.BinaryMinSupportedVersion()) + targetCV, versionSetting.MinSupportedVersion()) log.Warningf(ctx, "%s", msg) return nil, errors.Newf("%s", redact.Safe(msg)) } @@ -72,9 +72,9 @@ func (m *migrationServer) ValidateTargetClusterVersion( // It would be a bit clearer to use negative internal versions, to be able // to surface more obvious errors. 
Alternatively we could simply construct // a better error message here. - if versionSetting.BinaryVersion().Less(targetCV.Version) { + if versionSetting.LatestVersion().Less(targetCV.Version) { msg := fmt.Sprintf("binary version %s less than target cluster version %s", - versionSetting.BinaryVersion(), targetCV) + versionSetting.LatestVersion(), targetCV) log.Warningf(ctx, "%s", msg) return nil, errors.Newf("%s", redact.Safe(msg)) } @@ -112,8 +112,8 @@ func bumpClusterVersion( versionSetting := st.Version prevCV, err := kvstorage.SynthesizeClusterVersionFromEngines( - ctx, engines, versionSetting.BinaryVersion(), - versionSetting.BinaryMinSupportedVersion(), + ctx, engines, versionSetting.LatestVersion(), + versionSetting.MinSupportedVersion(), ) if err != nil { return err diff --git a/pkg/server/node.go b/pkg/server/node.go index 6a1bda87f474..0885a903d7b7 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -634,7 +634,7 @@ func (n *Node) start( Locality: locality, LocalityAddress: localityAddress, ClusterName: clusterName, - ServerVersion: n.storeCfg.Settings.Version.BinaryVersion(), + ServerVersion: n.storeCfg.Settings.Version.LatestVersion(), BuildTag: build.GetInfo().Tag, StartedAt: n.startedAt, HTTPAddress: util.MakeUnresolvedAddr(httpAddr.Network(), httpAddr.String()), diff --git a/pkg/server/server.go b/pkg/server/server.go index 8549bdf8b7e2..9d4316b79d5e 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -1484,8 +1484,8 @@ func (s *topLevelServer) PreStart(ctx context.Context) error { inspectedDiskState, err := inspectEngines( ctx, s.engines, - s.cfg.Settings.Version.BinaryVersion(), - s.cfg.Settings.Version.BinaryMinSupportedVersion(), + s.cfg.Settings.Version.LatestVersion(), + s.cfg.Settings.Version.MinSupportedVersion(), ) if err != nil { return err diff --git a/pkg/server/server_controller_new_server.go b/pkg/server/server_controller_new_server.go index dece6aa48d27..9f1bcf62b5c7 100644 --- a/pkg/server/server_controller_new_server.go +++ b/pkg/server/server_controller_new_server.go @@ -188,7 +188,7 @@ func makeSharedProcessTenantServerConfig( // have to run all known migrations since then. So initialize // the version setting to the minimum supported version. 
if err := clusterversion.Initialize( - ctx, st.Version.BinaryMinSupportedVersion(), &st.SV, + ctx, st.Version.MinSupportedVersion(), &st.SV, ); err != nil { return BaseConfig{}, SQLConfig{}, err } diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go index ac72cc5fa50e..f00944b072e1 100644 --- a/pkg/server/server_sql.go +++ b/pkg/server/server_sql.go @@ -520,7 +520,7 @@ func (r *refreshInstanceSessionListener) OnSessionDeleted( r.cfg.AdvertiseAddr, r.cfg.SQLAdvertiseAddr, r.cfg.Locality, - r.cfg.Settings.Version.BinaryVersion(), + r.cfg.Settings.Version.LatestVersion(), nodeID, ); err != nil { log.Warningf(ctx, "failed to update instance with new session ID: %v", err) @@ -1511,7 +1511,7 @@ func (s *SQLServer) preStart( s.cfg.AdvertiseAddr, s.cfg.SQLAdvertiseAddr, s.distSQLServer.Locality, - s.execCfg.Settings.Version.BinaryVersion(), + s.execCfg.Settings.Version.LatestVersion(), nodeID, ) } @@ -1522,7 +1522,7 @@ func (s *SQLServer) preStart( s.cfg.AdvertiseAddr, s.cfg.SQLAdvertiseAddr, s.distSQLServer.Locality, - s.execCfg.Settings.Version.BinaryVersion(), + s.execCfg.Settings.Version.LatestVersion(), ) }) if err != nil { @@ -1660,20 +1660,20 @@ func (s *SQLServer) preStart( }); err != nil { return err } - if s.execCfg.Settings.Version.BinaryVersion().Less(tenantActiveVersion.Version) { + if s.execCfg.Settings.Version.LatestVersion().Less(tenantActiveVersion.Version) { return errors.WithHintf(errors.Newf("preventing SQL server from starting because its binary version "+ "is too low for the tenant active version: server binary version = %v, tenant active version = %v", - s.execCfg.Settings.Version.BinaryVersion(), tenantActiveVersion.Version), + s.execCfg.Settings.Version.LatestVersion(), tenantActiveVersion.Version), "use a tenant binary whose version is at least %v", tenantActiveVersion.Version) } // Prevent the server from starting if its minimum supported binary version is too high // for the tenant cluster version. 
- if tenantActiveVersion.Version.Less(s.execCfg.Settings.Version.BinaryMinSupportedVersion()) { + if tenantActiveVersion.Version.Less(s.execCfg.Settings.Version.MinSupportedVersion()) { return errors.WithHintf(errors.Newf("preventing SQL server from starting because its executable "+ "version is too new to run the current active logical version of the virtual cluster"), "finalize the virtual cluster version to at least %v or downgrade the"+ - "executable version to at most %v", s.execCfg.Settings.Version.BinaryMinSupportedVersion(), tenantActiveVersion.Version, + "executable version to at most %v", s.execCfg.Settings.Version.MinSupportedVersion(), tenantActiveVersion.Version, ) } diff --git a/pkg/server/settings_cache_test.go b/pkg/server/settings_cache_test.go index c46823b9c303..f1c85d01435c 100644 --- a/pkg/server/settings_cache_test.go +++ b/pkg/server/settings_cache_test.go @@ -117,8 +117,8 @@ func TestCachedSettingsServerRestart(t *testing.T) { inspectState, err := inspectEngines( context.Background(), s.Engines(), - s.ClusterSettings().Version.BinaryVersion(), - s.ClusterSettings().Version.BinaryMinSupportedVersion(), + s.ClusterSettings().Version.LatestVersion(), + s.ClusterSettings().Version.MinSupportedVersion(), ) require.NoError(t, err) diff --git a/pkg/server/status.go b/pkg/server/status.go index 3c0e09d67144..1f8a8831348c 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -1703,7 +1703,7 @@ func (s *statusServer) Profile( // If the request has a SenderVersion, then ensure the current node has the // same server version before collecting a profile. if req.SenderServerVersion != nil { - serverVersion := s.st.Version.BinaryVersion() + serverVersion := s.st.Version.LatestVersion() if !serverVersion.Equal(*req.SenderServerVersion) { return nil, errors.Newf("server version of the node being profiled %s != sender version %s", serverVersion.String(), req.SenderServerVersion.String()) diff --git a/pkg/server/tenant_migration.go b/pkg/server/tenant_migration.go index c1a6f208f4e7..4f7f31bad579 100644 --- a/pkg/server/tenant_migration.go +++ b/pkg/server/tenant_migration.go @@ -52,20 +52,20 @@ func validateTargetClusterVersion( targetCV *clusterversion.ClusterVersion, instanceID base.SQLInstanceID, ) error { - if targetCV.Less(tenantVersion.BinaryMinSupportedVersion()) { + if targetCV.Less(tenantVersion.MinSupportedVersion()) { err := errors.Newf("requested tenant cluster upgrade version %s is less than the "+ "binary's minimum supported version %s for SQL server instance %d", - targetCV, tenantVersion.BinaryMinSupportedVersion(), + targetCV, tenantVersion.MinSupportedVersion(), instanceID) log.Warningf(ctx, "%v", err) return err } - if tenantVersion.BinaryVersion().Less(targetCV.Version) { + if tenantVersion.LatestVersion().Less(targetCV.Version) { err := errors.Newf("sql server %d is running a binary version %s which is "+ "less than the attempted upgrade version %s", instanceID, - tenantVersion.BinaryVersion(), targetCV) + tenantVersion.LatestVersion(), targetCV) log.Warningf(ctx, "%v", err) return errors.WithHintf(err, "upgrade sql server %d binary to version %s (or higher) to allow tenant upgrade to succeed", diff --git a/pkg/server/testing_knobs.go b/pkg/server/testing_knobs.go index 126f4d1e7f6b..0dd31d66b2b1 100644 --- a/pkg/server/testing_knobs.go +++ b/pkg/server/testing_knobs.go @@ -69,7 +69,7 @@ type TestingKnobs struct { // Case 1: // ------ // If the test has not overridden the - // `cluster.Settings.Version.BinaryMinSupportedVersion`, then the cluster will + 
// `cluster.Settings.Version.MinSupportedVersion`, then the cluster will // be bootstrapped at `binaryMinSupportedVersion` (if this server is the one // bootstrapping the cluster). After all the servers in the test cluster have // been started, `SET CLUSTER SETTING version = BinaryVersionOverride` will be @@ -78,7 +78,7 @@ type TestingKnobs struct { // Case 2: // ------ // If the test has overridden the - // `cluster.Settings.Version.BinaryMinSupportedVersion` then it is not safe + // `cluster.Settings.Version.MinSupportedVersion` then it is not safe // for us to bootstrap at `binaryMinSupportedVersion` as it might be less than // the overridden minimum supported version. Furthermore, we do not have the // initial cluster data (system tables etc.) to bootstrap at the overridden diff --git a/pkg/settings/cluster/cluster_settings.go b/pkg/settings/cluster/cluster_settings.go index 1fd7ce1fda0a..efc053c690c6 100644 --- a/pkg/settings/cluster/cluster_settings.go +++ b/pkg/settings/cluster/cluster_settings.go @@ -136,39 +136,39 @@ func MakeClusterSettings() *Settings { return s } -// MakeTestingClusterSettings returns a Settings object that has its binary and -// minimum supported versions set to the baked in binary version. It also -// initializes the cluster version setting to the binary version. +// MakeTestingClusterSettings returns a Settings object that is initialized with +// the latest version. // // It is typically used for testing or one-off situations in which a Settings // object is needed, but cluster settings don't play a crucial role. func MakeTestingClusterSettings() *Settings { return MakeTestingClusterSettingsWithVersions( - clusterversion.TestingBinaryVersion, - clusterversion.TestingBinaryVersion, + clusterversion.Latest.Version(), + clusterversion.Latest.Version(), true /* initializeVersion */) } // MakeTestingClusterSettingsWithVersions returns a Settings object that has its -// binary and minimum supported versions set to the provided versions. -// It also can also initialize the cluster version setting to the specified -// binaryVersion. +// latest and minimum supported versions set to the provided versions. // -// It is typically used in tests that want to override the default binary and +// It can optionally initialize the cluster version setting to the specified +// latestVersion. +// +// It is typically used in tests that want to override the binary's latest and // minimum supported versions. func MakeTestingClusterSettingsWithVersions( - binaryVersion, binaryMinSupportedVersion roachpb.Version, initializeVersion bool, + latestVersion, minSupportedVersion roachpb.Version, initializeVersion bool, ) *Settings { s := &Settings{} sv := &s.SV s.Version = clusterversion.MakeVersionHandleWithOverride( - &s.SV, binaryVersion, binaryMinSupportedVersion) + &s.SV, latestVersion, minSupportedVersion) sv.Init(context.TODO(), s.Version) if initializeVersion { - // Initialize cluster version to specified binaryVersion. - if err := clusterversion.Initialize(context.TODO(), binaryVersion, &s.SV); err != nil { + // Initialize cluster version to specified latestVersion. 
+ if err := clusterversion.Initialize(context.TODO(), latestVersion, &s.SV); err != nil { log.Fatalf(context.TODO(), "unable to initialize version: %s", err) } } @@ -183,7 +183,7 @@ func TestingCloneClusterSettings(st *Settings) *Settings { ExternalIODir: st.ExternalIODir, } result.Version = clusterversion.MakeVersionHandleWithOverride( - &result.SV, st.Version.BinaryVersion(), st.Version.BinaryMinSupportedVersion(), + &result.SV, st.Version.LatestVersion(), st.Version.MinSupportedVersion(), ) result.SV.TestingCopyForServer(&st.SV, result.Version) return result diff --git a/pkg/sql/catalog/internal/catkv/system_database_cache.go b/pkg/sql/catalog/internal/catkv/system_database_cache.go index 18ff4c4c2a23..81989c1cf711 100644 --- a/pkg/sql/catalog/internal/catkv/system_database_cache.go +++ b/pkg/sql/catalog/internal/catkv/system_database_cache.go @@ -68,7 +68,7 @@ func NewSystemDatabaseCache(codec keys.SQLCodec, settings *cluster.Settings) *Sy } return nil }) - c.mu.m[settings.Version.BinaryVersion()] = &warm + c.mu.m[settings.Version.LatestVersion()] = &warm return c } diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index 87689a73d41f..62e0ddf04f4e 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -4937,7 +4937,7 @@ value if you rely on the HLC for accuracy.`, Types: tree.ParamTypes{}, ReturnType: tree.FixedReturnType(types.String), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - v := evalCtx.Settings.Version.BinaryVersion().String() + v := evalCtx.Settings.Version.LatestVersion().String() return tree.NewDString(v), nil }, Info: "Returns the version of CockroachDB this node is running.", diff --git a/pkg/sql/sqlinstance/sqlinstance.go b/pkg/sql/sqlinstance/sqlinstance.go index 6eaa4ccd495e..3e5fd6ea8e29 100644 --- a/pkg/sql/sqlinstance/sqlinstance.go +++ b/pkg/sql/sqlinstance/sqlinstance.go @@ -50,7 +50,7 @@ func (ii InstanceInfo) GetLocality() roachpb.Locality { // SafeFormat implements redact.SafeFormatter. func (ii InstanceInfo) SafeFormat(s interfaces.SafePrinter, verb rune) { s.Printf( - "Instance{RegionPrefix: %v, InstanceID: %d, SQLAddr: %v, RPCAddr: %v, SessionID: %s, Locality: %v, BinaryVersion: %v}", + "Instance{RegionPrefix: %v, InstanceID: %d, SQLAddr: %v, RPCAddr: %v, SessionID: %s, Locality: %v, LatestVersion: %v}", redact.SafeString(base64.StdEncoding.EncodeToString(ii.Region)), ii.InstanceID, ii.InstanceSQLAddr, diff --git a/pkg/storage/min_version_test.go b/pkg/storage/min_version_test.go index ed3c2333fee9..8fac016167a3 100644 --- a/pkg/storage/min_version_test.go +++ b/pkg/storage/min_version_test.go @@ -130,14 +130,14 @@ func TestMinVersion_IsNotEncrypted(t *testing.T) { EncryptionAtRest(nil)) require.NoError(t, err) defer p.Close() - require.NoError(t, p.SetMinVersion(st.Version.BinaryVersion())) + require.NoError(t, p.SetMinVersion(st.Version.LatestVersion())) // Reading the file directly through the unencrypted MemFS should // succeed and yield the correct version. 
v, ok, err := getMinVersion(fs, "") require.NoError(t, err) require.True(t, ok) - require.Equal(t, st.Version.BinaryVersion(), v) + require.Equal(t, st.Version.LatestVersion(), v) } func fauxNewEncryptedEnvFunc( diff --git a/pkg/storage/pebble.go b/pkg/storage/pebble.go index 1593bb9d68ea..fa4656ec5ca9 100644 --- a/pkg/storage/pebble.go +++ b/pkg/storage/pebble.go @@ -1233,18 +1233,18 @@ func NewPebble(ctx context.Context, cfg PebbleConfig) (p *Pebble, err error) { // sequence (so now some stores have v21.2, but others v22.1) you are // expected to run v22.1 again (hopefully without the crash this time) which // would then rewrite all the stores. - if v := cfg.Settings.Version; storeClusterVersion.Less(v.BinaryMinSupportedVersion()) { - if storeClusterVersion.Major < clusterversion.DevOffset && v.BinaryVersion().Major >= clusterversion.DevOffset { + if v := cfg.Settings.Version; storeClusterVersion.Less(v.MinSupportedVersion()) { + if storeClusterVersion.Major < clusterversion.DevOffset && v.LatestVersion().Major >= clusterversion.DevOffset { return nil, errors.Errorf( "store last used with cockroach non-development version v%s "+ "cannot be opened by development version v%s", - storeClusterVersion, v.BinaryVersion(), + storeClusterVersion, v.LatestVersion(), ) } return nil, errors.Errorf( "store last used with cockroach version v%s "+ "is too old for running version v%s (which requires data from v%s or later)", - storeClusterVersion, v.BinaryVersion(), v.BinaryMinSupportedVersion(), + storeClusterVersion, v.LatestVersion(), v.MinSupportedVersion(), ) } opts.ErrorIfNotExists = true @@ -1289,7 +1289,7 @@ func NewPebble(ctx context.Context, cfg PebbleConfig) (p *Pebble, err error) { storeClusterVersion = cfg.Settings.Version.ActiveVersionOrEmpty(ctx).Version if storeClusterVersion == (roachpb.Version{}) { // If there is no active version, use the minimum supported version. - storeClusterVersion = cfg.Settings.Version.BinaryMinSupportedVersion() + storeClusterVersion = cfg.Settings.Version.MinSupportedVersion() } } diff --git a/pkg/upgrade/doc.go b/pkg/upgrade/doc.go index 0ada1d0791a2..7e2ce62617c1 100644 --- a/pkg/upgrade/doc.go +++ b/pkg/upgrade/doc.go @@ -40,7 +40,7 @@ v24.1.0 and let's futher say that we're creating the cluster at version v23.2.0. On cluster creation, we'll run all permanent upgrades <= v23.2.0, and none of the non-permanent ones. Then, let's say that all the nodes are upgraded to -BinaryVersion=v24.1.0 binaries (and automatic version upgrades are turned off); +LatestVersion=v24.1.0 binaries (and automatic version upgrades are turned off); the cluster logical version remains v23.2.0. No upgrades are run. Then automatic upgrades are turned on, and `SET CLUSTER VERSION 24.1.0` is run in the background. At this point, all permanent and non-permanent upgrades > 23.2.0 and @@ -53,8 +53,8 @@ left in system.migrations. Upgrades need to be idempotent: they might be run multiple times as the jobs error or nodes crash in the middle of running one of the jobs. However, if an -upgrade has been run successfully by a binary with BinaryVersion=b, it is not -run again by a binary with a different BinaryVersion. This is a useful guarantee +upgrade has been run successfully by a binary with LatestVersion=b, it is not +run again by a binary with a different LatestVersion. 
This is a useful guarantee for permanent upgrades, as it allows the code for an upgrade to change between versions (for example in response to updated bootstrap schema), without needing to worry about making the upgrade work for cluster being upgraded. Consider the diff --git a/pkg/upgrade/upgradecluster/tenant_cluster.go b/pkg/upgrade/upgradecluster/tenant_cluster.go index 1310ea031d7f..3370d33bbba1 100644 --- a/pkg/upgrade/upgradecluster/tenant_cluster.go +++ b/pkg/upgrade/upgradecluster/tenant_cluster.go @@ -66,7 +66,7 @@ import ( // running at cluster version 20.1 (which is necessary when a 20.1 cluster is // upgraded to 20.2). // -// BinaryMinSupportedVersion BinaryVersion +// MinSupportedVersion LatestVersion // | | // v...........................................v // (possible range of active diff --git a/pkg/upgrade/upgrademanager/manager_external_test.go b/pkg/upgrade/upgrademanager/manager_external_test.go index cdf0888572e5..9425802711cd 100644 --- a/pkg/upgrade/upgrademanager/manager_external_test.go +++ b/pkg/upgrade/upgrademanager/manager_external_test.go @@ -465,7 +465,7 @@ func TestConcurrentMigrationAttempts(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - // We're going to be migrating from the BinaryMinSupportedVersion to imaginary future versions. + // We're going to be migrating from the MinSupportedVersion to imaginary future versions. current := clusterversion.TestingBinaryMinSupportedVersion versions := []roachpb.Version{current} for i := int32(1); i <= 4; i++ {
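Taken together, the hunks above reduce to one API change on clusterversion.Handle and the cluster-settings constructors: BinaryVersion()/binaryVersion become LatestVersion()/latestVersion, and BinaryMinSupportedVersion()/binaryMinSupportedVersion become MinSupportedVersion()/minSupportedVersion. Below is a minimal sketch of a caller exercising the renamed accessors; it uses only signatures that appear in this diff, while the package name and the describeVersions helper are hypothetical and not part of the patch.

package clusterversionexample // hypothetical package, not part of the patch

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/clusterversion"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
)

// describeVersions is a hypothetical helper showing the renamed accessors on
// clusterversion.Handle after this patch.
func describeVersions(ctx context.Context) (string, string, string) {
	st := cluster.MakeTestingClusterSettingsWithVersions(
		clusterversion.Latest.Version(),       // latest cluster version understood by this binary (formerly the "binary version")
		clusterversion.MinSupported.Version(), // earliest cluster version this binary can interoperate with
		true,                                  // initializeVersion: also initializes the active cluster version to the latest version
	)
	latest := st.Version.LatestVersion()             // formerly BinaryVersion()
	minSupported := st.Version.MinSupportedVersion() // formerly BinaryMinSupportedVersion()
	active := st.Version.ActiveVersion(ctx).Version  // unchanged by this patch
	return latest.String(), minSupported.String(), active.String()
}

Call sites that previously compared against BinaryVersion() or BinaryMinSupportedVersion() (heartbeats, migrations, store opening, restore planning) keep their logic unchanged; only the accessor and field names differ.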