Skip to content

Commit

Permalink
multitenant: append WITH REPLICATION STATUS columns to `SHOW TENANT…
Browse files Browse the repository at this point in the history
…` columns

Informs #87851

`WITH CAPABILITIES` will be adding another set of columns so append relevant
columns instead of creating all possible column sets ahead of time.

Release note: None
  • Loading branch information
ecwall committed Jan 19, 2023
1 parent f27fa9d commit f0b06e4
Show file tree
Hide file tree
Showing 2 changed files with 53 additions and 55 deletions.
6 changes: 3 additions & 3 deletions pkg/sql/catalog/colinfo/result_columns.go
Original file line number Diff line number Diff line change
Expand Up @@ -275,16 +275,16 @@ var ExportColumns = ResultColumns{
{Name: "bytes", Typ: types.Int},
}

// TenantColumns is the base set of result columns returned by every
// SHOW TENANT query. Variant-specific column sets (such as
// TenantColumnsWithReplication for WITH REPLICATION STATUS) are
// appended to these columns rather than redeclaring them.
var TenantColumns = ResultColumns{
{Name: "id", Typ: types.Int},
{Name: "name", Typ: types.String},
{Name: "status", Typ: types.String},
}

// TenantColumnsWithReplication is appended to TenantColumns for
// SHOW TENANT ... WITH REPLICATION STATUS queries.
var TenantColumnsWithReplication = ResultColumns{
{Name: "id", Typ: types.Int},
{Name: "name", Typ: types.String},
{Name: "status", Typ: types.String},
{Name: "source_tenant_name", Typ: types.String},
{Name: "source_cluster_uri", Typ: types.String},
{Name: "replication_job_id", Typ: types.Int},
Expand Down
102 changes: 50 additions & 52 deletions pkg/sql/show_tenant.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,10 +73,10 @@ func (p *planner) ShowTenant(ctx context.Context, n *tree.ShowTenant) (planNode,
tenantSpec: tspec,
withReplication: n.WithReplication,
}

node.columns = colinfo.TenantColumns
if n.WithReplication {
node.columns = colinfo.TenantColumnsWithReplication
} else {
node.columns = colinfo.TenantColumns
node.columns = append(node.columns, colinfo.TenantColumnsWithReplication...)
}

return node, nil
Expand Down Expand Up @@ -234,61 +234,59 @@ func (n *showTenantNode) Next(params runParams) (bool, error) {

func (n *showTenantNode) Values() tree.Datums {
v := n.values
tenantId := tree.NewDInt(tree.DInt(v.tenantInfo.ID))
tenantName := tree.NewDString(string(v.tenantInfo.Name))
tenantStatus := tree.NewDString(string(v.tenantStatus))
if !n.withReplication {
return tree.Datums{
tenantId,
tenantName,
tenantStatus,
}
tenantInfo := v.tenantInfo
result := tree.Datums{
tree.NewDInt(tree.DInt(tenantInfo.ID)),
tree.NewDString(string(tenantInfo.Name)),
tree.NewDString(string(v.tenantStatus)),
}

// This is a 'SHOW TENANT name WITH REPLICATION STATUS' command.
sourceTenantName := tree.DNull
sourceClusterUri := tree.DNull
replicationJobId := tree.NewDInt(tree.DInt(v.tenantInfo.TenantReplicationJobID))
replicatedTimestamp := tree.DNull
retainedTimestamp := tree.DNull
cutoverTimestamp := tree.DNull
if n.withReplication {
// This is a 'SHOW TENANT name WITH REPLICATION STATUS' command.
sourceTenantName := tree.DNull
sourceClusterUri := tree.DNull
replicationJobId := tree.NewDInt(tree.DInt(tenantInfo.TenantReplicationJobID))
replicatedTimestamp := tree.DNull
retainedTimestamp := tree.DNull
cutoverTimestamp := tree.DNull

if v.replicationInfo != nil {
sourceTenantName = tree.NewDString(string(v.replicationInfo.IngestionDetails.SourceTenantName))
sourceClusterUri = tree.NewDString(v.replicationInfo.IngestionDetails.StreamAddress)
if v.replicationInfo.ReplicationLagInfo != nil {
minIngested := v.replicationInfo.ReplicationLagInfo.MinIngestedTimestamp
// The latest fully replicated time. Truncating to the nearest microsecond
// because if we don't, then MakeDTimestamp rounds to the nearest
// microsecond. In that case a user may want to cutover to a rounded-up
// time, which is a time that we may never replicate to. Instead, we show
// a time that we know we replicated to.
replicatedTimestamp, _ = tree.MakeDTimestampTZ(minIngested.GoTime().Truncate(time.Microsecond), time.Nanosecond)
}
// The protected timestamp on the destination cluster. Same as with the
// replicatedTimestamp, we want to show a retained time that is within the
// window (retained to replicated) and not below it. We take a timestamp
// that is greater than the protected timestamp by a microsecond or less
// (it's not exactly ceil but close enough).
retainedCeil := v.protectedTimestamp.GoTime().Truncate(time.Microsecond).Add(time.Microsecond)
retainedTimestamp, _ = tree.MakeDTimestampTZ(retainedCeil, time.Nanosecond)
progress := v.replicationInfo.IngestionProgress
if progress != nil && !progress.CutoverTime.IsEmpty() {
cutoverTimestamp = eval.TimestampToDecimalDatum(progress.CutoverTime)
replicationInfo := v.replicationInfo
if replicationInfo != nil {
sourceTenantName = tree.NewDString(string(replicationInfo.IngestionDetails.SourceTenantName))
sourceClusterUri = tree.NewDString(replicationInfo.IngestionDetails.StreamAddress)
if replicationInfo.ReplicationLagInfo != nil {
minIngested := replicationInfo.ReplicationLagInfo.MinIngestedTimestamp
// The latest fully replicated time. Truncating to the nearest microsecond
// because if we don't, then MakeDTimestamp rounds to the nearest
// microsecond. In that case a user may want to cutover to a rounded-up
// time, which is a time that we may never replicate to. Instead, we show
// a time that we know we replicated to.
replicatedTimestamp, _ = tree.MakeDTimestampTZ(minIngested.GoTime().Truncate(time.Microsecond), time.Nanosecond)
}
// The protected timestamp on the destination cluster. Same as with the
// replicatedTimestamp, we want to show a retained time that is within the
// window (retained to replicated) and not below it. We take a timestamp
// that is greater than the protected timestamp by a microsecond or less
// (it's not exactly ceil but close enough).
retainedCeil := v.protectedTimestamp.GoTime().Truncate(time.Microsecond).Add(time.Microsecond)
retainedTimestamp, _ = tree.MakeDTimestampTZ(retainedCeil, time.Nanosecond)
progress := replicationInfo.IngestionProgress
if progress != nil && !progress.CutoverTime.IsEmpty() {
cutoverTimestamp = eval.TimestampToDecimalDatum(progress.CutoverTime)
}
}
}

return tree.Datums{
tenantId,
tenantName,
tenantStatus,
sourceTenantName,
sourceClusterUri,
replicationJobId,
replicatedTimestamp,
retainedTimestamp,
cutoverTimestamp,
result = append(result,
sourceTenantName,
sourceClusterUri,
replicationJobId,
replicatedTimestamp,
retainedTimestamp,
cutoverTimestamp,
)
}

return result
}

func (n *showTenantNode) Close(_ context.Context) {}

0 comments on commit f0b06e4

Please sign in to comment.