diff --git a/pkg/ccl/changefeedccl/cdceval/expr_eval.go b/pkg/ccl/changefeedccl/cdceval/expr_eval.go index afa9e473f05f..f4238779222c 100644 --- a/pkg/ccl/changefeedccl/cdceval/expr_eval.go +++ b/pkg/ccl/changefeedccl/cdceval/expr_eval.go @@ -293,7 +293,7 @@ func (e *exprEval) evalProjection( if err != nil { return cdcevent.Row{}, err } - if err := e.projection.SetValueDatumAt(e.evalCtx, i, d); err != nil { + if err := e.projection.SetValueDatumAt(ctx, e.evalCtx, i, d); err != nil { return cdcevent.Row{}, err } } diff --git a/pkg/ccl/changefeedccl/cdcevent/event.go b/pkg/ccl/changefeedccl/cdcevent/event.go index 875ad250f5da..1b3f39a36f71 100644 --- a/pkg/ccl/changefeedccl/cdcevent/event.go +++ b/pkg/ccl/changefeedccl/cdcevent/event.go @@ -557,7 +557,7 @@ func TestingGetFamilyIDFromKey( // MakeRowFromTuple converts a SQL datum produced by, for example, SELECT ROW(foo.*), // into the same kind of cdcevent.Row you'd get as a result of an insert, but without // the primary key. -func MakeRowFromTuple(evalCtx *eval.Context, t *tree.DTuple) Row { +func MakeRowFromTuple(ctx context.Context, evalCtx *eval.Context, t *tree.DTuple) Row { r := Projection{EventDescriptor: &EventDescriptor{}} names := t.ResolvedType().TupleLabels() for i, d := range t.D { @@ -568,10 +568,10 @@ func MakeRowFromTuple(evalCtx *eval.Context, t *tree.DTuple) Row { name = names[i] } r.AddValueColumn(name, d.ResolvedType()) - if err := r.SetValueDatumAt(evalCtx, i, d); err != nil { + if err := r.SetValueDatumAt(ctx, evalCtx, i, d); err != nil { if build.IsRelease() { - log.Warningf(context.Background(), "failed to set row value from tuple due to error %v", err) - _ = r.SetValueDatumAt(evalCtx, i, tree.DNull) + log.Warningf(ctx, "failed to set row value from tuple due to error %v", err) + _ = r.SetValueDatumAt(ctx, evalCtx, i, tree.DNull) } else { panic(err) } diff --git a/pkg/ccl/changefeedccl/cdcevent/event_test.go b/pkg/ccl/changefeedccl/cdcevent/event_test.go index d077580ae310..b2dbbbd08956 100644 --- a/pkg/ccl/changefeedccl/cdcevent/event_test.go +++ b/pkg/ccl/changefeedccl/cdcevent/event_test.go @@ -413,7 +413,7 @@ func TestMakeRowFromTuple(t *testing.T) { st := cluster.MakeTestingClusterSettings() evalCtx := eval.MakeTestingEvalContext(st) - rowFromUnlabeledTuple := MakeRowFromTuple(&evalCtx, unlabeledTuple) + rowFromUnlabeledTuple := MakeRowFromTuple(context.Background(), &evalCtx, unlabeledTuple) expectedCols := []struct { name string typ *types.T @@ -446,7 +446,7 @@ func TestMakeRowFromTuple(t *testing.T) { remainingCols = expectedCols - rowFromLabeledTuple := MakeRowFromTuple(&evalCtx, labeledTuple) + rowFromLabeledTuple := MakeRowFromTuple(context.Background(), &evalCtx, labeledTuple) require.NoError(t, rowFromLabeledTuple.ForEachColumn().Datum(func(d tree.Datum, col ResultColumn) error { current := remainingCols[0] diff --git a/pkg/ccl/changefeedccl/cdcevent/projection.go b/pkg/ccl/changefeedccl/cdcevent/projection.go index ee7e53d68bf7..6558be2b76ef 100644 --- a/pkg/ccl/changefeedccl/cdcevent/projection.go +++ b/pkg/ccl/changefeedccl/cdcevent/projection.go @@ -9,6 +9,8 @@ package cdcevent import ( + "context" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -66,7 +68,9 @@ func (p *Projection) AddValueColumn(name string, typ *types.T) { } // SetValueDatumAt sets value datum at specified position. 
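 // The position is relative to the value columns; the key column offset is
 // applied internally, and the datum is cast to the declared column type.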
-func (p *Projection) SetValueDatumAt(evalCtx *eval.Context, pos int, d tree.Datum) error { +func (p *Projection) SetValueDatumAt( + ctx context.Context, evalCtx *eval.Context, pos int, d tree.Datum, +) error { pos += len(p.keyCols) if pos >= len(p.datums) { return errors.AssertionFailedf("%d out of bounds", pos) @@ -78,7 +82,7 @@ func (p *Projection) SetValueDatumAt(evalCtx *eval.Context, pos int, d tree.Datu return pgerror.Newf(pgcode.DatatypeMismatch, "expected type %s for column %s@%d, found %s", col.Typ, col.Name, pos, d.ResolvedType()) } - cd, err := eval.PerformCast(evalCtx, d, col.Typ) + cd, err := eval.PerformCast(ctx, evalCtx, d, col.Typ) if err != nil { return errors.Wrapf(err, "expected type %s for column %s@%d, found %s", col.Typ, col.Name, pos, d.ResolvedType()) diff --git a/pkg/ccl/changefeedccl/cdcevent/projection_test.go b/pkg/ccl/changefeedccl/cdcevent/projection_test.go index 0d299a1f3210..e0c88c59a3de 100644 --- a/pkg/ccl/changefeedccl/cdcevent/projection_test.go +++ b/pkg/ccl/changefeedccl/cdcevent/projection_test.go @@ -29,8 +29,9 @@ func TestProjection(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) + ctx := context.Background() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) + defer s.Stopper().Stop(ctx) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE TYPE status AS ENUM ('open', 'closed', 'inactive')`) @@ -61,7 +62,7 @@ CREATE TABLE foo ( idx := 0 require.NoError(t, input.ForEachColumn().Datum(func(d tree.Datum, col ResultColumn) error { p.AddValueColumn(col.Name, col.Typ) - err := p.SetValueDatumAt(&evalCtx, idx, d) + err := p.SetValueDatumAt(ctx, &evalCtx, idx, d) idx++ return err })) @@ -76,9 +77,9 @@ CREATE TABLE foo ( input := TestingMakeEventRow(desc, 0, encDatums, false) p := MakeProjection(input.EventDescriptor) p.AddValueColumn("wrong_type", types.Int) - require.Regexp(t, "expected type int", p.SetValueDatumAt(&evalCtx, 0, tree.NewDString("fail"))) + require.Regexp(t, "expected type int", p.SetValueDatumAt(ctx, &evalCtx, 0, tree.NewDString("fail"))) // But we allow NULL. 
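 // NULL passes the type check regardless of the declared column type.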
- require.NoError(t, p.SetValueDatumAt(&evalCtx, 0, tree.DNull)) + require.NoError(t, p.SetValueDatumAt(ctx, &evalCtx, 0, tree.DNull)) }) t.Run("project_extra_column", func(t *testing.T) { @@ -87,12 +88,12 @@ CREATE TABLE foo ( idx := 0 require.NoError(t, input.ForEachColumn().Datum(func(d tree.Datum, col ResultColumn) error { p.AddValueColumn(col.Name, col.Typ) - err := p.SetValueDatumAt(&evalCtx, idx, d) + err := p.SetValueDatumAt(ctx, &evalCtx, idx, d) idx++ return err })) p.AddValueColumn("test", types.Int) - require.NoError(t, p.SetValueDatumAt(&evalCtx, idx, tree.NewDInt(5))) + require.NoError(t, p.SetValueDatumAt(ctx, &evalCtx, idx, tree.NewDInt(5))) pr, err := p.Project(input) require.NoError(t, err) diff --git a/pkg/ccl/changefeedccl/encoder_json.go b/pkg/ccl/changefeedccl/encoder_json.go index a8fade1d662d..ec3057183efd 100644 --- a/pkg/ccl/changefeedccl/encoder_json.go +++ b/pkg/ccl/changefeedccl/encoder_json.go @@ -415,7 +415,7 @@ func init() { Types: tree.VariadicType{FixedTypes: []*types.T{types.AnyTuple}, VarType: types.String}, ReturnType: tree.FixedReturnType(types.Bytes), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - row := cdcevent.MakeRowFromTuple(evalCtx, tree.MustBeDTuple(args[0])) + row := cdcevent.MakeRowFromTuple(ctx, evalCtx, tree.MustBeDTuple(args[0])) flags := make([]string, len(args)-1) for i, d := range args[1:] { flags[i] = string(tree.MustBeDString(d)) diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingest_manager.go b/pkg/ccl/streamingccl/streamingest/stream_ingest_manager.go index d418ddbe5d65..9ed03a234d4d 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingest_manager.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingest_manager.go @@ -27,16 +27,20 @@ type streamIngestManagerImpl struct{} // CompleteStreamIngestion implements streaming.StreamIngestManager interface. func (r *streamIngestManagerImpl) CompleteStreamIngestion( - evalCtx *eval.Context, txn *kv.Txn, ingestionJobID jobspb.JobID, cutoverTimestamp hlc.Timestamp, + ctx context.Context, + evalCtx *eval.Context, + txn *kv.Txn, + ingestionJobID jobspb.JobID, + cutoverTimestamp hlc.Timestamp, ) error { - return completeStreamIngestion(evalCtx, txn, ingestionJobID, cutoverTimestamp) + return completeStreamIngestion(ctx, evalCtx, txn, ingestionJobID, cutoverTimestamp) } // GetStreamIngestionStats implements streaming.StreamIngestManager interface. func (r *streamIngestManagerImpl) GetStreamIngestionStats( - evalCtx *eval.Context, txn *kv.Txn, ingestionJobID jobspb.JobID, + ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, ingestionJobID jobspb.JobID, ) (*streampb.StreamIngestionStats, error) { - return getStreamIngestionStats(evalCtx, txn, ingestionJobID) + return getStreamIngestionStats(ctx, evalCtx, txn, ingestionJobID) } func newStreamIngestManagerWithPrivilegesCheck( diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go index 25ccd3f5b051..bb7a39a6c9c8 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go @@ -34,10 +34,14 @@ import ( // completeStreamIngestion terminates the stream as of specified time. 
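 // It records the requested cutover timestamp in the ingestion job record,
 // which the running ingestion job then acts on.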
func completeStreamIngestion( - evalCtx *eval.Context, txn *kv.Txn, ingestionJobID jobspb.JobID, cutoverTimestamp hlc.Timestamp, + ctx context.Context, + evalCtx *eval.Context, + txn *kv.Txn, + ingestionJobID jobspb.JobID, + cutoverTimestamp hlc.Timestamp, ) error { jobRegistry := evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig).JobRegistry - return jobRegistry.UpdateJobWithTxn(evalCtx.Ctx(), ingestionJobID, txn, false, /* useReadLock */ + return jobRegistry.UpdateJobWithTxn(ctx, ingestionJobID, txn, false, /* useReadLock */ func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { // TODO(adityamaru): This should change in the future, a user should be // allowed to correct their cutover time if the process of reverting the job @@ -56,10 +60,10 @@ func completeStreamIngestion( } func getStreamIngestionStats( - evalCtx *eval.Context, txn *kv.Txn, ingestionJobID jobspb.JobID, + ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, ingestionJobID jobspb.JobID, ) (*streampb.StreamIngestionStats, error) { registry := evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig).JobRegistry - j, err := registry.LoadJobWithTxn(evalCtx.Ctx(), ingestionJobID, txn) + j, err := registry.LoadJobWithTxn(ctx, ingestionJobID, txn) if err != nil { return nil, err } @@ -95,17 +99,17 @@ func getStreamIngestionStats( stats.ReplicationLagInfo = lagInfo } - client, err := streamclient.GetFirstActiveClient(evalCtx.Ctx(), progress.GetStreamIngest().StreamAddresses) + client, err := streamclient.GetFirstActiveClient(ctx, progress.GetStreamIngest().StreamAddresses) if err != nil { return nil, err } - streamStatus, err := client.Heartbeat(evalCtx.Ctx(), streaming.StreamID(details.StreamID), hlc.MaxTimestamp) + streamStatus, err := client.Heartbeat(ctx, streaming.StreamID(details.StreamID), hlc.MaxTimestamp) if err != nil { stats.ProducerError = err.Error() } else { stats.ProducerStatus = &streamStatus } - return stats, client.Close(evalCtx.Ctx()) + return stats, client.Close(ctx) } type streamIngestionResumer struct { diff --git a/pkg/ccl/streamingccl/streamproducer/replication_manager.go b/pkg/ccl/streamingccl/streamproducer/replication_manager.go index e69036a6ef50..d7f156eb39b7 100644 --- a/pkg/ccl/streamingccl/streamproducer/replication_manager.go +++ b/pkg/ccl/streamingccl/streamproducer/replication_manager.go @@ -26,16 +26,20 @@ type replicationStreamManagerImpl struct{} // StartReplicationStream implements streaming.ReplicationStreamManager interface. func (r *replicationStreamManagerImpl) StartReplicationStream( - evalCtx *eval.Context, txn *kv.Txn, tenantID uint64, + ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, tenantID uint64, ) (streaming.StreamID, error) { - return startReplicationStreamJob(evalCtx, txn, tenantID) + return startReplicationStreamJob(ctx, evalCtx, txn, tenantID) } // HeartbeatReplicationStream implements streaming.ReplicationStreamManager interface. func (r *replicationStreamManagerImpl) HeartbeatReplicationStream( - evalCtx *eval.Context, streamID streaming.StreamID, frontier hlc.Timestamp, txn *kv.Txn, + ctx context.Context, + evalCtx *eval.Context, + streamID streaming.StreamID, + frontier hlc.Timestamp, + txn *kv.Txn, ) (streampb.StreamReplicationStatus, error) { - return heartbeatReplicationStream(evalCtx, streamID, frontier, txn) + return heartbeatReplicationStream(ctx, evalCtx, streamID, frontier, txn) } // StreamPartition implements streaming.ReplicationStreamManager interface. 
@@ -45,18 +49,22 @@ func (r *replicationStreamManagerImpl) StreamPartition( return streamPartition(evalCtx, streamID, opaqueSpec) } -// GetReplicationStreamSpec implements ReplicationStreamManager interface. +// GetReplicationStreamSpec implements streaming.ReplicationStreamManager interface. func (r *replicationStreamManagerImpl) GetReplicationStreamSpec( - evalCtx *eval.Context, txn *kv.Txn, streamID streaming.StreamID, + ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, streamID streaming.StreamID, ) (*streampb.ReplicationStreamSpec, error) { - return getReplicationStreamSpec(evalCtx, txn, streamID) + return getReplicationStreamSpec(ctx, evalCtx, txn, streamID) } // CompleteReplicationStream implements ReplicationStreamManager interface. func (r *replicationStreamManagerImpl) CompleteReplicationStream( - evalCtx *eval.Context, txn *kv.Txn, streamID streaming.StreamID, successfulIngestion bool, + ctx context.Context, + evalCtx *eval.Context, + txn *kv.Txn, + streamID streaming.StreamID, + successfulIngestion bool, ) error { - return completeReplicationStream(evalCtx, txn, streamID, successfulIngestion) + return completeReplicationStream(ctx, evalCtx, txn, streamID, successfulIngestion) } func newReplicationStreamManagerWithPrivilegesCheck( diff --git a/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go b/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go index 6fd6782d6481..357f9b9253cb 100644 --- a/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go +++ b/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go @@ -35,10 +35,10 @@ import ( // 1. Tracks the liveness of the replication stream consumption // 2. TODO(casper): Updates the protected timestamp for spans being replicated func startReplicationStreamJob( - evalCtx *eval.Context, txn *kv.Txn, tenantID uint64, + ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, tenantID uint64, ) (streaming.StreamID, error) { execConfig := evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig) - hasAdminRole, err := evalCtx.SessionAccessor.HasAdminRole(evalCtx.Ctx()) + hasAdminRole, err := evalCtx.SessionAccessor.HasAdminRole(ctx) if err != nil { return streaming.InvalidStreamID, err @@ -52,7 +52,7 @@ func startReplicationStreamJob( timeout := streamingccl.StreamReplicationJobLivenessTimeout.Get(&evalCtx.Settings.SV) ptsID := uuid.MakeV4() jr := makeProducerJobRecord(registry, tenantID, timeout, evalCtx.SessionData().User(), ptsID) - if _, err := registry.CreateAdoptableJobWithTxn(evalCtx.Ctx(), jr, jr.JobID, txn); err != nil { + if _, err := registry.CreateAdoptableJobWithTxn(ctx, jr, jr.JobID, txn); err != nil { return streaming.InvalidStreamID, err } @@ -67,7 +67,7 @@ func startReplicationStreamJob( pts := jobsprotectedts.MakeRecord(ptsID, int64(jr.JobID), statementTime, deprecatedSpansToProtect, jobsprotectedts.Jobs, targetToProtect) - if err := ptp.Protect(evalCtx.Ctx(), txn, pts); err != nil { + if err := ptp.Protect(ctx, txn, pts); err != nil { return streaming.InvalidStreamID, err } return streaming.StreamID(jr.JobID), nil @@ -155,7 +155,11 @@ func updateReplicationStreamProgress( // record to the specified frontier. If 'frontier' is hlc.MaxTimestamp, returns the producer job // progress without updating it. 
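 // The returned status is derived from the producer job's state and its
 // protected timestamp record.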
func heartbeatReplicationStream( - evalCtx *eval.Context, streamID streaming.StreamID, frontier hlc.Timestamp, txn *kv.Txn, + ctx context.Context, + evalCtx *eval.Context, + streamID streaming.StreamID, + frontier hlc.Timestamp, + txn *kv.Txn, ) (streampb.StreamReplicationStatus, error) { execConfig := evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig) timeout := streamingccl.StreamReplicationJobLivenessTimeout.Get(&evalCtx.Settings.SV) @@ -164,7 +168,7 @@ func heartbeatReplicationStream( // job progress. if frontier == hlc.MaxTimestamp { var status streampb.StreamReplicationStatus - pj, err := execConfig.JobRegistry.LoadJob(evalCtx.Ctx(), jobspb.JobID(streamID)) + pj, err := execConfig.JobRegistry.LoadJob(ctx, jobspb.JobID(streamID)) if jobs.HasJobNotFoundError(err) || testutils.IsError(err, "not found in system.jobs table") { status.StreamStatus = streampb.StreamReplicationStatus_STREAM_INACTIVE return status, nil @@ -174,7 +178,7 @@ func heartbeatReplicationStream( } status.StreamStatus = convertProducerJobStatusToStreamStatus(pj.Status()) payload := pj.Payload() - ptsRecord, err := execConfig.ProtectedTimestampProvider.GetRecord(evalCtx.Ctx(), txn, + ptsRecord, err := execConfig.ProtectedTimestampProvider.GetRecord(ctx, txn, payload.GetStreamReplication().ProtectedTimestampRecordID) // Nil protected timestamp indicates it was not created or has been released. if errors.Is(err, protectedts.ErrNotExists) { @@ -187,18 +191,18 @@ func heartbeatReplicationStream( return status, nil } - return updateReplicationStreamProgress(evalCtx.Ctx(), + return updateReplicationStreamProgress(ctx, expirationTime, execConfig.ProtectedTimestampProvider, execConfig.JobRegistry, streamID, frontier, txn) } // getReplicationStreamSpec gets a replication stream specification for the specified stream. 
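 // It fails if the producer job is no longer active, and partitions the
 // replicated spans across nodes using the DistSQL planner.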
func getReplicationStreamSpec( - evalCtx *eval.Context, txn *kv.Txn, streamID streaming.StreamID, + ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, streamID streaming.StreamID, ) (*streampb.ReplicationStreamSpec, error) { jobExecCtx := evalCtx.JobExecContext.(sql.JobExecContext) // Returns error if the replication stream is not active - j, err := jobExecCtx.ExecCfg().JobRegistry.LoadJob(evalCtx.Ctx(), jobspb.JobID(streamID)) + j, err := jobExecCtx.ExecCfg().JobRegistry.LoadJob(ctx, jobspb.JobID(streamID)) if err != nil { return nil, errors.Wrapf(err, "replication stream %d has error", streamID) } @@ -209,7 +213,7 @@ func getReplicationStreamSpec( // Partition the spans with SQLPlanner var noTxn *kv.Txn dsp := jobExecCtx.DistSQLPlanner() - planCtx := dsp.NewPlanningCtx(evalCtx.Ctx(), jobExecCtx.ExtendedEvalContext(), + planCtx := dsp.NewPlanningCtx(ctx, jobExecCtx.ExtendedEvalContext(), nil /* planner */, noTxn, sql.DistributionTypeSystemTenantOnly) details, ok := j.Details().(jobspb.StreamReplicationDetails) @@ -221,7 +225,7 @@ func getReplicationStreamSpec( for _, span := range replicatedSpans { spans = append(spans, *span) } - spanPartitions, err := dsp.PartitionSpans(evalCtx.Ctx(), planCtx, spans) + spanPartitions, err := dsp.PartitionSpans(ctx, planCtx, spans) if err != nil { return nil, err } @@ -250,11 +254,15 @@ func getReplicationStreamSpec( } func completeReplicationStream( - evalCtx *eval.Context, txn *kv.Txn, streamID streaming.StreamID, successfulIngestion bool, + ctx context.Context, + evalCtx *eval.Context, + txn *kv.Txn, + streamID streaming.StreamID, + successfulIngestion bool, ) error { registry := evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig).JobRegistry const useReadLock = false - return registry.UpdateJobWithTxn(evalCtx.Ctx(), jobspb.JobID(streamID), txn, useReadLock, + return registry.UpdateJobWithTxn(ctx, jobspb.JobID(streamID), txn, useReadLock, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { // Updates the stream ingestion status, make the job resumer exit running // when picking up the new status. diff --git a/pkg/security/password.go b/pkg/security/password.go index ca0d05fbac17..85e3b2787053 100644 --- a/pkg/security/password.go +++ b/pkg/security/password.go @@ -135,9 +135,7 @@ func GetConfiguredPasswordCost( // GetConfiguredPasswordHashMethod returns the configured hash method // to use before storing passwords provided in cleartext from clients. -func GetConfiguredPasswordHashMethod( - ctx context.Context, sv *settings.Values, -) (method password.HashMethod) { +func GetConfiguredPasswordHashMethod(sv *settings.Values) (method password.HashMethod) { return password.HashMethod(PasswordHashMethod.Get(sv)) } diff --git a/pkg/security/password/password_test.go b/pkg/security/password/password_test.go index 0a13de7b78c1..e827648f4091 100644 --- a/pkg/security/password/password_test.go +++ b/pkg/security/password/password_test.go @@ -57,7 +57,7 @@ func TestBCryptToSCRAMConversion(t *testing.T) { // Check conversion succeeds. 
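 // With hash auto-upgrade enabled, the bcrypt hash converts to SCRAM-SHA-256.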
autoUpgradePasswordHashesBool := security.AutoUpgradePasswordHashes.Get(&s.SV) - method := security.GetConfiguredPasswordHashMethod(ctx, &s.SV) + method := security.GetConfiguredPasswordHashMethod(&s.SV) converted, prevHash, newHashBytes, hashMethod, err := password.MaybeUpgradePasswordHash(ctx, autoUpgradePasswordHashesBool, method, cleartext, bh, nil, log.Infof) require.NoError(t, err) require.True(t, converted) @@ -72,7 +72,7 @@ func TestBCryptToSCRAMConversion(t *testing.T) { // Check that converted hash can't be converted further. autoUpgradePasswordHashesBool = security.AutoUpgradePasswordHashes.Get(&s.SV) - method = security.GetConfiguredPasswordHashMethod(ctx, &s.SV) + method = security.GetConfiguredPasswordHashMethod(&s.SV) ec, _, _, _, err := password.MaybeUpgradePasswordHash(ctx, autoUpgradePasswordHashesBool, method, cleartext, newHash, nil, log.Infof) require.NoError(t, err) diff --git a/pkg/sql/colexec/colexecbase/cast.eg.go b/pkg/sql/colexec/colexecbase/cast.eg.go index d55ce1850367..a55fa27fb763 100644 --- a/pkg/sql/colexec/colexecbase/cast.eg.go +++ b/pkg/sql/colexec/colexecbase/cast.eg.go @@ -1306,13 +1306,16 @@ func (c *castNativeToDatumOp) Next() coldata.Batch { if inputVec.Nulls().MaybeHasNulls() { for scratchIdx, outputIdx := range sel[:n] { { - var evalCtx *eval.Context = c.evalCtx + var ( + ctx context.Context = c.Ctx + evalCtx *eval.Context = c.evalCtx + ) converted := scratch[scratchIdx] if true && converted == tree.DNull { outputNulls.SetNull(outputIdx) continue } - res, err := eval.PerformCast(evalCtx, converted, toType) + res, err := eval.PerformCast(ctx, evalCtx, converted, toType) if err != nil { colexecerror.ExpectedError(err) } @@ -1322,13 +1325,16 @@ func (c *castNativeToDatumOp) Next() coldata.Batch { } else { for scratchIdx, outputIdx := range sel[:n] { { - var evalCtx *eval.Context = c.evalCtx + var ( + ctx context.Context = c.Ctx + evalCtx *eval.Context = c.evalCtx + ) converted := scratch[scratchIdx] if false && converted == tree.DNull { outputNulls.SetNull(outputIdx) continue } - res, err := eval.PerformCast(evalCtx, converted, toType) + res, err := eval.PerformCast(ctx, evalCtx, converted, toType) if err != nil { colexecerror.ExpectedError(err) } @@ -1342,9 +1348,10 @@ func (c *castNativeToDatumOp) Next() coldata.Batch { for idx := 0; idx < n; idx++ { { var ( - scratchIdx int = idx - outputIdx int = idx - evalCtx *eval.Context = c.evalCtx + ctx context.Context = c.Ctx + scratchIdx int = idx + outputIdx int = idx + evalCtx *eval.Context = c.evalCtx ) //gcassert:bce converted := scratch[scratchIdx] @@ -1352,7 +1359,7 @@ func (c *castNativeToDatumOp) Next() coldata.Batch { outputNulls.SetNull(outputIdx) continue } - res, err := eval.PerformCast(evalCtx, converted, toType) + res, err := eval.PerformCast(ctx, evalCtx, converted, toType) if err != nil { colexecerror.ExpectedError(err) } @@ -1363,9 +1370,10 @@ func (c *castNativeToDatumOp) Next() coldata.Batch { for idx := 0; idx < n; idx++ { { var ( - scratchIdx int = idx - outputIdx int = idx - evalCtx *eval.Context = c.evalCtx + ctx context.Context = c.Ctx + scratchIdx int = idx + outputIdx int = idx + evalCtx *eval.Context = c.evalCtx ) //gcassert:bce converted := scratch[scratchIdx] @@ -1373,7 +1381,7 @@ func (c *castNativeToDatumOp) Next() coldata.Batch { outputNulls.SetNull(outputIdx) continue } - res, err := eval.PerformCast(evalCtx, converted, toType) + res, err := eval.PerformCast(ctx, evalCtx, converted, toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12187,7 +12195,7 @@ func (c 
*castDatumBoolOp) Next() coldata.Batch { var r bool { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12217,7 +12225,7 @@ func (c *castDatumBoolOp) Next() coldata.Batch { var r bool { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12249,7 +12257,7 @@ func (c *castDatumBoolOp) Next() coldata.Batch { var r bool { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12279,7 +12287,7 @@ func (c *castDatumBoolOp) Next() coldata.Batch { var r bool { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12344,7 +12352,7 @@ func (c *castDatumInt2Op) Next() coldata.Batch { var r int16 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12374,7 +12382,7 @@ func (c *castDatumInt2Op) Next() coldata.Batch { var r int16 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12406,7 +12414,7 @@ func (c *castDatumInt2Op) Next() coldata.Batch { var r int16 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12436,7 +12444,7 @@ func (c *castDatumInt2Op) Next() coldata.Batch { var r int16 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12501,7 +12509,7 @@ func (c *castDatumInt4Op) Next() coldata.Batch { var r int32 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12531,7 +12539,7 @@ func (c *castDatumInt4Op) Next() coldata.Batch { var r int32 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12563,7 +12571,7 @@ func (c *castDatumInt4Op) Next() coldata.Batch { var r int32 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12593,7 +12601,7 @@ func (c *castDatumInt4Op) Next() coldata.Batch { var r int32 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12658,7 +12666,7 @@ func (c *castDatumIntOp) Next() coldata.Batch { var r int64 { - _castedDatum, err := 
eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12688,7 +12696,7 @@ func (c *castDatumIntOp) Next() coldata.Batch { var r int64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12720,7 +12728,7 @@ func (c *castDatumIntOp) Next() coldata.Batch { var r int64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12750,7 +12758,7 @@ func (c *castDatumIntOp) Next() coldata.Batch { var r int64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12815,7 +12823,7 @@ func (c *castDatumFloatOp) Next() coldata.Batch { var r float64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12845,7 +12853,7 @@ func (c *castDatumFloatOp) Next() coldata.Batch { var r float64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12877,7 +12885,7 @@ func (c *castDatumFloatOp) Next() coldata.Batch { var r float64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12907,7 +12915,7 @@ func (c *castDatumFloatOp) Next() coldata.Batch { var r float64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -12972,7 +12980,7 @@ func (c *castDatumDecimalOp) Next() coldata.Batch { var r apd.Decimal { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13002,7 +13010,7 @@ func (c *castDatumDecimalOp) Next() coldata.Batch { var r apd.Decimal { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13034,7 +13042,7 @@ func (c *castDatumDecimalOp) Next() coldata.Batch { var r apd.Decimal { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13064,7 +13072,7 @@ func (c *castDatumDecimalOp) Next() coldata.Batch { var r apd.Decimal { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13129,7 +13137,7 @@ func (c *castDatumDateOp) Next() coldata.Batch { var r int64 { - _castedDatum, err := eval.PerformCast(evalCtx, 
v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13159,7 +13167,7 @@ func (c *castDatumDateOp) Next() coldata.Batch { var r int64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13191,7 +13199,7 @@ func (c *castDatumDateOp) Next() coldata.Batch { var r int64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13221,7 +13229,7 @@ func (c *castDatumDateOp) Next() coldata.Batch { var r int64 { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13286,7 +13294,7 @@ func (c *castDatumTimestampOp) Next() coldata.Batch { var r time.Time { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13316,7 +13324,7 @@ func (c *castDatumTimestampOp) Next() coldata.Batch { var r time.Time { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13348,7 +13356,7 @@ func (c *castDatumTimestampOp) Next() coldata.Batch { var r time.Time { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13378,7 +13386,7 @@ func (c *castDatumTimestampOp) Next() coldata.Batch { var r time.Time { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13443,7 +13451,7 @@ func (c *castDatumIntervalOp) Next() coldata.Batch { var r duration.Duration { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13473,7 +13481,7 @@ func (c *castDatumIntervalOp) Next() coldata.Batch { var r duration.Duration { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13505,7 +13513,7 @@ func (c *castDatumIntervalOp) Next() coldata.Batch { var r duration.Duration { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13535,7 +13543,7 @@ func (c *castDatumIntervalOp) Next() coldata.Batch { var r duration.Duration { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13600,7 +13608,7 @@ func (c *castDatumStringOp) Next() coldata.Batch { var r []byte { - _castedDatum, err 
:= eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13629,7 +13637,7 @@ func (c *castDatumStringOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13660,7 +13668,7 @@ func (c *castDatumStringOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13689,7 +13697,7 @@ func (c *castDatumStringOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13753,7 +13761,7 @@ func (c *castDatumBytesOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13782,7 +13790,7 @@ func (c *castDatumBytesOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13813,7 +13821,7 @@ func (c *castDatumBytesOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13842,7 +13850,7 @@ func (c *castDatumBytesOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13906,7 +13914,7 @@ func (c *castDatumTimestamptzOp) Next() coldata.Batch { var r time.Time { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13936,7 +13944,7 @@ func (c *castDatumTimestamptzOp) Next() coldata.Batch { var r time.Time { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13968,7 +13976,7 @@ func (c *castDatumTimestamptzOp) Next() coldata.Batch { var r time.Time { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -13998,7 +14006,7 @@ func (c *castDatumTimestamptzOp) Next() coldata.Batch { var r time.Time { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14063,7 +14071,7 @@ func (c *castDatumUuidOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := 
eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14092,7 +14100,7 @@ func (c *castDatumUuidOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14123,7 +14131,7 @@ func (c *castDatumUuidOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14152,7 +14160,7 @@ func (c *castDatumUuidOp) Next() coldata.Batch { var r []byte { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14216,7 +14224,7 @@ func (c *castDatumJsonbOp) Next() coldata.Batch { var r json.JSON { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14245,7 +14253,7 @@ func (c *castDatumJsonbOp) Next() coldata.Batch { var r json.JSON { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14276,7 +14284,7 @@ func (c *castDatumJsonbOp) Next() coldata.Batch { var r json.JSON { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14305,7 +14313,7 @@ func (c *castDatumJsonbOp) Next() coldata.Batch { var r json.JSON { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14368,7 +14376,7 @@ func (c *castDatumDatumOp) Next() coldata.Batch { var r interface{} { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14403,7 +14411,7 @@ func (c *castDatumDatumOp) Next() coldata.Batch { var r interface{} { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14440,7 +14448,7 @@ func (c *castDatumDatumOp) Next() coldata.Batch { var r interface{} { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } @@ -14475,7 +14483,7 @@ func (c *castDatumDatumOp) Next() coldata.Batch { var r interface{} { - _castedDatum, err := eval.PerformCast(evalCtx, v.(tree.Datum), toType) + _castedDatum, err := eval.PerformCast(c.Ctx, evalCtx, v.(tree.Datum), toType) if err != nil { colexecerror.ExpectedError(err) } diff --git a/pkg/sql/colexec/colexecbase/cast_test.go b/pkg/sql/colexec/colexecbase/cast_test.go index fcab275f8ccb..85f1448ad660 
100644 --- a/pkg/sql/colexec/colexecbase/cast_test.go +++ b/pkg/sql/colexec/colexecbase/cast_test.go @@ -68,7 +68,7 @@ func TestRandomizedCast(t *testing.T) { // We don't allow any NULL datums to be generated, so disable this // ability in the RandDatum function. fromDatum := randgen.RandDatum(rng, from, false) - toDatum, err := eval.PerformCast(&evalCtx, fromDatum, to) + toDatum, err := eval.PerformCast(ctx, &evalCtx, fromDatum, to) var toPhys interface{} if err != nil { errorExpected = true diff --git a/pkg/sql/colexec/colexecbase/cast_tmpl.go b/pkg/sql/colexec/colexecbase/cast_tmpl.go index 151fa7e146af..db2ad96e1762 100644 --- a/pkg/sql/colexec/colexecbase/cast_tmpl.go +++ b/pkg/sql/colexec/colexecbase/cast_tmpl.go @@ -411,22 +411,22 @@ func (c *castNativeToDatumOp) Next() coldata.Batch { if sel != nil { if inputVec.Nulls().MaybeHasNulls() { for scratchIdx, outputIdx := range sel[:n] { - setNativeToDatumCast(outputCol, outputNulls, scratch, scratchIdx, outputIdx, toType, c.evalCtx, true, false) + setNativeToDatumCast(c.Ctx, outputCol, outputNulls, scratch, scratchIdx, outputIdx, toType, c.evalCtx, true, false) } } else { for scratchIdx, outputIdx := range sel[:n] { - setNativeToDatumCast(outputCol, outputNulls, scratch, scratchIdx, outputIdx, toType, c.evalCtx, false, false) + setNativeToDatumCast(c.Ctx, outputCol, outputNulls, scratch, scratchIdx, outputIdx, toType, c.evalCtx, false, false) } } } else { _ = scratch[n-1] if inputVec.Nulls().MaybeHasNulls() { for idx := 0; idx < n; idx++ { - setNativeToDatumCast(outputCol, outputNulls, scratch, idx, idx, toType, c.evalCtx, true, true) + setNativeToDatumCast(c.Ctx, outputCol, outputNulls, scratch, idx, idx, toType, c.evalCtx, true, true) } } else { for idx := 0; idx < n; idx++ { - setNativeToDatumCast(outputCol, outputNulls, scratch, idx, idx, toType, c.evalCtx, false, true) + setNativeToDatumCast(c.Ctx, outputCol, outputNulls, scratch, idx, idx, toType, c.evalCtx, false, true) } } } @@ -440,6 +440,7 @@ func (c *castNativeToDatumOp) Next() coldata.Batch { // execgen:inline // execgen:template func setNativeToDatumCast( + ctx context.Context, outputCol coldata.DatumVec, outputNulls *coldata.Nulls, scratch []tree.Datum, @@ -458,7 +459,7 @@ func setNativeToDatumCast( outputNulls.SetNull(outputIdx) continue } - res, err := eval.PerformCast(evalCtx, converted, toType) + res, err := eval.PerformCast(ctx, evalCtx, converted, toType) if err != nil { colexecerror.ExpectedError(err) } diff --git a/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go b/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go index 74af323aa18d..5ff53e380432 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go @@ -619,7 +619,7 @@ func getDatumToNativeCastFunc(nonDatumPhysicalRepresentation string) castFunc { return func(to, from, evalCtx, toType, _ string) string { convStr := ` { - _castedDatum, err := eval.PerformCast(%[3]s, %[2]s.(tree.Datum), %[4]s) + _castedDatum, err := eval.PerformCast(c.Ctx, %[3]s, %[2]s.(tree.Datum), %[4]s) if err != nil { colexecerror.ExpectedError(err) } @@ -633,7 +633,7 @@ func getDatumToNativeCastFunc(nonDatumPhysicalRepresentation string) castFunc { func datumToDatum(to, from, evalCtx, toType, _ string) string { convStr := ` { - _castedDatum, err := eval.PerformCast(%[3]s, %[2]s.(tree.Datum), %[4]s) + _castedDatum, err := eval.PerformCast(c.Ctx, %[3]s, %[2]s.(tree.Datum), %[4]s) if err != nil { colexecerror.ExpectedError(err) } diff --git 
a/pkg/sql/conn_executor_prepare.go b/pkg/sql/conn_executor_prepare.go index 2271951948b8..d0e46f7d4ed6 100644 --- a/pkg/sql/conn_executor_prepare.go +++ b/pkg/sql/conn_executor_prepare.go @@ -420,6 +420,7 @@ func (ex *connExecutor) execBind( } } d, err := pgwirebase.DecodeDatum( + ctx, ex.planner.EvalContext(), typ, qArgFormatCodes[i], diff --git a/pkg/sql/copy.go b/pkg/sql/copy.go index 58e1ea86ec1c..0a54e817ba07 100644 --- a/pkg/sql/copy.go +++ b/pkg/sql/copy.go @@ -705,6 +705,7 @@ func (c *copyMachine) readBinaryTuple(ctx context.Context) (readSoFar []byte, er return readSoFar, err } d, err := pgwirebase.DecodeDatum( + ctx, c.parsingEvalCtx, c.resultColumns[i].Typ, pgwirebase.FormatBinary, diff --git a/pkg/sql/create_role.go b/pkg/sql/create_role.go index a8fbbdcb6a3f..c65b24bb99c4 100644 --- a/pkg/sql/create_role.go +++ b/pkg/sql/create_role.go @@ -332,7 +332,7 @@ func (p *planner) checkPasswordAndGetHash( "Passwords must be %d characters or longer.", minLength) } - method := security.GetConfiguredPasswordHashMethod(ctx, &st.SV) + method := security.GetConfiguredPasswordHashMethod(&st.SV) cost, err := security.GetConfiguredPasswordCost(ctx, &st.SV, method) if err != nil { return hashedPassword, errors.HandleAsAssertionFailure(err) diff --git a/pkg/sql/importer/import_job.go b/pkg/sql/importer/import_job.go index 8dfacf76482c..d53deed9cb51 100644 --- a/pkg/sql/importer/import_job.go +++ b/pkg/sql/importer/import_job.go @@ -1378,7 +1378,7 @@ func writeNonDropDatabaseChange( ) ([]jobspb.JobID, error) { var job *jobs.Job var err error - if job, err = createNonDropDatabaseChangeJob(p.User(), desc.ID, jobDesc, p, txn); err != nil { + if job, err = createNonDropDatabaseChangeJob(ctx, p.User(), desc.ID, jobDesc, p, txn); err != nil { return nil, err } @@ -1397,6 +1397,7 @@ func writeNonDropDatabaseChange( } func createNonDropDatabaseChangeJob( + ctx context.Context, user username.SQLUsername, databaseID descpb.ID, jobDesc string, @@ -1416,7 +1417,7 @@ func createNonDropDatabaseChangeJob( jobID := p.ExecCfg().JobRegistry.MakeJobID() return p.ExecCfg().JobRegistry.CreateJobWithTxn( - p.ExtendedEvalContext().Ctx(), + ctx, jobRecord, jobID, txn, diff --git a/pkg/sql/importer/read_import_pgdump.go b/pkg/sql/importer/read_import_pgdump.go index d17d4349ae2f..08c367e7d871 100644 --- a/pkg/sql/importer/read_import_pgdump.go +++ b/pkg/sql/importer/read_import_pgdump.go @@ -363,6 +363,7 @@ func getSchemaByNameFromMap( } func createPostgresTables( + ctx context.Context, evalCtx *eval.Context, p sql.JobExecContext, createTbl map[schemaAndTableName]*tree.CreateTable, @@ -377,7 +378,7 @@ func createPostgresTables( if create == nil { continue } - schema, err := getSchemaByNameFromMap(evalCtx.Ctx(), schemaAndTableName, schemaNameToDesc, evalCtx.Settings.Version) + schema, err := getSchemaByNameFromMap(ctx, schemaAndTableName, schemaNameToDesc, evalCtx.Settings.Version) if err != nil { return nil, err } @@ -385,11 +386,11 @@ func createPostgresTables( // Bundle imports do not support user defined types, and so we nil out the // type resolver to protect against unexpected behavior on UDT resolution. 
semaCtxPtr := makeSemaCtxWithoutTypeResolver(p.SemaCtx()) - id, err := getNextPlaceholderDescID(evalCtx.Ctx(), p.ExecCfg()) + id, err := getNextPlaceholderDescID(ctx, p.ExecCfg()) if err != nil { return nil, err } - desc, err := MakeSimpleTableDescriptor(evalCtx.Ctx(), semaCtxPtr, p.ExecCfg().Settings, + desc, err := MakeSimpleTableDescriptor(ctx, semaCtxPtr, p.ExecCfg().Settings, create, parentDB, schema, id, fks, walltime) if err != nil { return nil, err @@ -403,6 +404,7 @@ func createPostgresTables( } func resolvePostgresFKs( + ctx context.Context, evalCtx *eval.Context, parentDB catalog.DatabaseDescriptor, tableFKs map[schemaAndTableName][]*tree.ForeignKeyConstraintTableDef, @@ -415,7 +417,7 @@ func resolvePostgresFKs( if desc == nil { continue } - schema, err := getSchemaByNameFromMap(evalCtx.Ctx(), schemaAndTableName, schemaNameToDesc, evalCtx.Settings.Version) + schema, err := getSchemaByNameFromMap(ctx, schemaAndTableName, schemaNameToDesc, evalCtx.Settings.Version) if err != nil { return err } @@ -431,7 +433,7 @@ func resolvePostgresFKs( constraint.Table.CatalogName = "defaultdb" } if err := sql.ResolveFK( - evalCtx.Ctx(), nil /* txn */, &fks.resolver, + ctx, nil /* txn */, &fks.resolver, parentDB, schema, desc, constraint, backrefs, sql.NewTable, tree.ValidationDefault, evalCtx, @@ -553,8 +555,9 @@ func readPostgresCreateTable( // Construct table descriptors. backrefs := make(map[descpb.ID]*tabledesc.Mutable) - tableDescs, err := createPostgresTables(evalCtx, p, schemaObjects.createTbl, fks, backrefs, - parentDB, walltime, schemaNameToDesc) + tableDescs, err := createPostgresTables( + ctx, evalCtx, p, schemaObjects.createTbl, fks, backrefs, parentDB, walltime, schemaNameToDesc, + ) if err != nil { return nil, nil, err } @@ -562,7 +565,7 @@ func readPostgresCreateTable( // Resolve FKs. err = resolvePostgresFKs( - evalCtx, parentDB, schemaObjects.tableFKs, fks, backrefs, schemaNameToDesc, + ctx, evalCtx, parentDB, schemaObjects.tableFKs, fks, backrefs, schemaNameToDesc, ) if err != nil { return nil, nil, err diff --git a/pkg/sql/opt/invertedidx/geo.go b/pkg/sql/opt/invertedidx/geo.go index 98fd20e53cf4..c2bbcdec60f6 100644 --- a/pkg/sql/opt/invertedidx/geo.go +++ b/pkg/sql/opt/invertedidx/geo.go @@ -749,7 +749,11 @@ func (g *geoDatumsToInvertedExpr) IndexedVarNodeFormatter(idx int) tree.NodeForm // NewGeoDatumsToInvertedExpr returns a new geoDatumsToInvertedExpr. func NewGeoDatumsToInvertedExpr( - evalCtx *eval.Context, colTypes []*types.T, expr tree.TypedExpr, config geoindex.Config, + ctx context.Context, + evalCtx *eval.Context, + colTypes []*types.T, + expr tree.TypedExpr, + config geoindex.Config, ) (invertedexpr.DatumsToInvertedExpr, error) { if config.IsEmpty() { return nil, fmt.Errorf("inverted joins are currently only supported for geospatial indexes") @@ -813,7 +817,7 @@ func NewGeoDatumsToInvertedExpr( // it for every row. var invertedExpr inverted.Expression if d, ok := nonIndexParam.(tree.Datum); ok { - invertedExpr = g.getSpanExpr(evalCtx.Ctx(), d, additionalParams, relationship, g.indexConfig) + invertedExpr = g.getSpanExpr(ctx, d, additionalParams, relationship, g.indexConfig) } else if funcExprCount == 1 { // Currently pre-filtering is limited to a single FuncExpr. 
preFilterRelationship = relationship diff --git a/pkg/sql/opt/invertedidx/inverted_index_expr.go b/pkg/sql/opt/invertedidx/inverted_index_expr.go index 46714fd539f3..e51725c1ecee 100644 --- a/pkg/sql/opt/invertedidx/inverted_index_expr.go +++ b/pkg/sql/opt/invertedidx/inverted_index_expr.go @@ -31,10 +31,14 @@ import ( // NewDatumsToInvertedExpr returns a new DatumsToInvertedExpr. func NewDatumsToInvertedExpr( - evalCtx *eval.Context, colTypes []*types.T, expr tree.TypedExpr, geoConfig geoindex.Config, + ctx context.Context, + evalCtx *eval.Context, + colTypes []*types.T, + expr tree.TypedExpr, + geoConfig geoindex.Config, ) (invertedexpr.DatumsToInvertedExpr, error) { if !geoConfig.IsEmpty() { - return NewGeoDatumsToInvertedExpr(evalCtx, colTypes, expr, geoConfig) + return NewGeoDatumsToInvertedExpr(ctx, evalCtx, colTypes, expr, geoConfig) } return NewJSONOrArrayDatumsToInvertedExpr(evalCtx, colTypes, expr) diff --git a/pkg/sql/opt/memo/logical_props_builder.go b/pkg/sql/opt/memo/logical_props_builder.go index 5dc07d24b5ab..095f0df657f3 100644 --- a/pkg/sql/opt/memo/logical_props_builder.go +++ b/pkg/sql/opt/memo/logical_props_builder.go @@ -11,6 +11,7 @@ package memo import ( + "context" "math" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" @@ -55,14 +56,14 @@ type logicalPropsBuilder struct { disableStats bool } -func (b *logicalPropsBuilder) init(evalCtx *eval.Context, mem *Memo) { +func (b *logicalPropsBuilder) init(ctx context.Context, evalCtx *eval.Context, mem *Memo) { // This initialization pattern ensures that fields are not unwittingly // reused. Field reuse must be explicit. *b = logicalPropsBuilder{ evalCtx: evalCtx, mem: mem, } - b.sb.init(evalCtx, mem.Metadata()) + b.sb.init(ctx, evalCtx, mem.Metadata()) } func (b *logicalPropsBuilder) clear() { diff --git a/pkg/sql/opt/memo/memo.go b/pkg/sql/opt/memo/memo.go index f8e812cd632e..9e1c0a95168f 100644 --- a/pkg/sql/opt/memo/memo.go +++ b/pkg/sql/opt/memo/memo.go @@ -183,7 +183,7 @@ type Memo struct { // information about the context in which it is compiled from the evalContext // argument. If any of that changes, then the memo must be invalidated (see the // IsStale method for more details). -func (m *Memo) Init(evalCtx *eval.Context) { +func (m *Memo) Init(ctx context.Context, evalCtx *eval.Context) { // This initialization pattern ensures that fields are not unwittingly // reused. Field reuse must be explicit. *m = Memo{ @@ -213,7 +213,7 @@ func (m *Memo) Init(evalCtx *eval.Context) { variableInequalityLookupJoinEnabled: evalCtx.SessionData().VariableInequalityLookupJoinEnabled, } m.metadata.Init() - m.logPropsBuilder.init(evalCtx, m) + m.logPropsBuilder.init(ctx, evalCtx, m) } // AllowUnconstrainedNonCoveringIndexScan indicates whether unconstrained @@ -226,8 +226,8 @@ func (m *Memo) AllowUnconstrainedNonCoveringIndexScan() bool { // with the perturb-cost OptTester flag in order to update the query plan tree // after optimization is complete with the real computed cost, not the perturbed // cost. 
-func (m *Memo) ResetLogProps(evalCtx *eval.Context) { - m.logPropsBuilder.init(evalCtx, m) +func (m *Memo) ResetLogProps(ctx context.Context, evalCtx *eval.Context) { + m.logPropsBuilder.init(ctx, evalCtx, m) } // NotifyOnNewGroup sets a callback function which is invoked each time we diff --git a/pkg/sql/opt/memo/multiplicity_builder_test.go b/pkg/sql/opt/memo/multiplicity_builder_test.go index 00b690fd0546..7c11848a48f3 100644 --- a/pkg/sql/opt/memo/multiplicity_builder_test.go +++ b/pkg/sql/opt/memo/multiplicity_builder_test.go @@ -11,6 +11,7 @@ package memo import ( + "context" "fmt" "testing" @@ -469,12 +470,12 @@ type testOpBuilder struct { } func makeOpBuilder(t *testing.T) testOpBuilder { - ctx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings()) + evalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings()) var mem Memo - mem.Init(&ctx) + mem.Init(context.Background(), &evalCtx) ob := testOpBuilder{ t: t, - evalCtx: &ctx, + evalCtx: &evalCtx, mem: &mem, cat: testcat.New(), } diff --git a/pkg/sql/opt/memo/statistics_builder.go b/pkg/sql/opt/memo/statistics_builder.go index f1c0b188fde8..2cf09000fb77 100644 --- a/pkg/sql/opt/memo/statistics_builder.go +++ b/pkg/sql/opt/memo/statistics_builder.go @@ -11,6 +11,7 @@ package memo import ( + "context" "math" "reflect" @@ -228,14 +229,16 @@ const ( // // See props/statistics.go for more details. type statisticsBuilder struct { + ctx context.Context evalCtx *eval.Context md *opt.Metadata } -func (sb *statisticsBuilder) init(evalCtx *eval.Context, md *opt.Metadata) { +func (sb *statisticsBuilder) init(ctx context.Context, evalCtx *eval.Context, md *opt.Metadata) { // This initialization pattern ensures that fields are not unwittingly // reused. Field reuse must be explicit. *sb = statisticsBuilder{ + ctx: ctx, evalCtx: evalCtx, md: md, } @@ -4594,9 +4597,9 @@ func (sb *statisticsBuilder) numConjunctsInConstraint( // RequestColStat causes a column statistic to be calculated on the relational // expression. This is used for testing. 
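 // It builds a statisticsBuilder over the expression's memo metadata and
 // requests the column statistic directly.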
-func RequestColStat(evalCtx *eval.Context, e RelExpr, cols opt.ColSet) { +func RequestColStat(ctx context.Context, evalCtx *eval.Context, e RelExpr, cols opt.ColSet) { var sb statisticsBuilder - sb.init(evalCtx, e.Memo().Metadata()) + sb.init(ctx, evalCtx, e.Memo().Metadata()) sb.colStat(cols, e) } @@ -4688,7 +4691,7 @@ func (sb *statisticsBuilder) buildStatsFromCheckConstraints( values, hasNullValue, _ = filterConstraint.CollectFirstColumnValues(sb.evalCtx) if hasNullValue { log.Infof( - sb.evalCtx.Ctx(), "null value seen in histogram built from check constraint: %s", filterConstraint.String(), + sb.ctx, "null value seen in histogram built from check constraint: %s", filterConstraint.String(), ) } } @@ -4738,7 +4741,7 @@ func (sb *statisticsBuilder) buildStatsFromCheckConstraints( } else { useHistogram = false log.Infof( - sb.evalCtx.Ctx(), "histogram could not be generated from check constraint due to error: %v", err, + sb.ctx, "histogram could not be generated from check constraint due to error: %v", err, ) } } diff --git a/pkg/sql/opt/memo/statistics_builder_test.go b/pkg/sql/opt/memo/statistics_builder_test.go index 69ed63affc90..96be121e817a 100644 --- a/pkg/sql/opt/memo/statistics_builder_test.go +++ b/pkg/sql/opt/memo/statistics_builder_test.go @@ -11,6 +11,7 @@ package memo import ( + "context" "testing" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -88,7 +89,7 @@ func TestGetStatsFromConstraint(t *testing.T) { } var mem Memo - mem.Init(&evalCtx) + mem.Init(context.Background(), &evalCtx) tn := tree.NewUnqualifiedTableName("sel") tab := catalog.Table(tn) tabID := mem.Metadata().AddTable(tab, tn) @@ -104,7 +105,7 @@ func TestGetStatsFromConstraint(t *testing.T) { } sb := &statisticsBuilder{} - sb.init(&evalCtx, mem.Metadata()) + sb.init(context.Background(), &evalCtx, mem.Metadata()) // Make the scan. scan := mem.MemoizeScan(&ScanPrivate{Table: tabID, Cols: cols}) diff --git a/pkg/sql/opt/norm/factory.go b/pkg/sql/opt/norm/factory.go index df56cd912afa..cfac9ad7543a 100644 --- a/pkg/sql/opt/norm/factory.go +++ b/pkg/sql/opt/norm/factory.go @@ -132,7 +132,7 @@ func (f *Factory) Init(ctx context.Context, evalCtx *eval.Context, catalog cat.C if mem == nil { mem = &memo.Memo{} } - mem.Init(evalCtx) + mem.Init(ctx, evalCtx) // This initialization pattern ensures that fields are not unwittingly // reused. Field reuse must be explicit. @@ -358,7 +358,7 @@ func (f *Factory) onMaxConstructorStackDepthExceeded() { if buildutil.CrdbTestBuild { panic(err) } - errorutil.SendReport(f.evalCtx.Ctx(), &f.evalCtx.Settings.SV, err) + errorutil.SendReport(f.ctx, &f.evalCtx.Settings.SV, err) } // onConstructRelational is called as a final step by each factory method that diff --git a/pkg/sql/opt/norm/fold_constants_funcs.go b/pkg/sql/opt/norm/fold_constants_funcs.go index 76821a7e37f6..eb81a41e4558 100644 --- a/pkg/sql/opt/norm/fold_constants_funcs.go +++ b/pkg/sql/opt/norm/fold_constants_funcs.go @@ -403,7 +403,7 @@ func (c *CustomFuncs) FoldAssignmentCast( } datum := memo.ExtractConstDatum(input) - result, err := eval.PerformAssignmentCast(c.f.evalCtx, datum, typ) + result, err := eval.PerformAssignmentCast(c.f.ctx, c.f.evalCtx, datum, typ) if err != nil { // Casts can require KV operations. KV errors are not safe to swallow. 
// Check if the error is a KV error, and, if so, propagate it rather diff --git a/pkg/sql/opt/norm/scalar_funcs.go b/pkg/sql/opt/norm/scalar_funcs.go index b32428d49aa1..2fc808f78586 100644 --- a/pkg/sql/opt/norm/scalar_funcs.go +++ b/pkg/sql/opt/norm/scalar_funcs.go @@ -139,12 +139,12 @@ func (c *CustomFuncs) UnifyComparison( // means we don't lose any information needed to generate spans, and combined // with monotonicity means that it's safe to convert the RHS to the type of // the LHS. - convertedDatum, err := eval.PerformCast(c.f.evalCtx, cnst.Value, desiredType) + convertedDatum, err := eval.PerformCast(c.f.ctx, c.f.evalCtx, cnst.Value, desiredType) if err != nil { return nil, false } - convertedBack, err := eval.PerformCast(c.f.evalCtx, convertedDatum, originalType) + convertedBack, err := eval.PerformCast(c.f.ctx, c.f.evalCtx, convertedDatum, originalType) if err != nil { return nil, false } diff --git a/pkg/sql/opt/testutils/opttester/opt_tester.go b/pkg/sql/opt/testutils/opttester/opt_tester.go index a5b95793650b..845232ab2b6d 100644 --- a/pkg/sql/opt/testutils/opttester/opt_tester.go +++ b/pkg/sql/opt/testutils/opttester/opt_tester.go @@ -842,7 +842,7 @@ func (ot *OptTester) postProcess(tb testing.TB, d *datadriven.TestData, e opt.Ex if rel, ok := e.(memo.RelExpr); ok { for _, cols := range ot.Flags.ColStats { - memo.RequestColStat(&ot.evalCtx, rel, cols) + memo.RequestColStat(ot.ctx, &ot.evalCtx, rel, cols) } } ot.checkExpectedRules(tb, d) @@ -2015,7 +2015,7 @@ func (ot *OptTester) createTableAs(name tree.TableName, rel memo.RelExpr) (*test // Make sure we have estimated stats for this column. colSet := opt.MakeColSet(col) - memo.RequestColStat(&ot.evalCtx, rel, colSet) + memo.RequestColStat(ot.ctx, &ot.evalCtx, rel, colSet) stat, ok := relProps.Statistics().ColStats.Lookup(colSet) if !ok { return nil, fmt.Errorf("could not find statistic for column %s", colName) @@ -2239,7 +2239,7 @@ func (ot *OptTester) optimizeExpr( return nil, err } if ot.Flags.PerturbCost != 0 { - o.Memo().ResetLogProps(&ot.evalCtx) + o.Memo().ResetLogProps(ot.ctx, &ot.evalCtx) o.RecomputeCost() } return root, nil diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index 60e633464058..9e6c223c9b7a 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -124,8 +124,7 @@ func (ef *execFactory) ConstructScan( scan := ef.planner.Scan() colCfg := makeScanColumnsConfig(table, params.NeededCols) - ctx := ef.planner.extendedEvalCtx.Ctx() - if err := scan.initTable(ctx, ef.planner, tabDesc, colCfg); err != nil { + if err := scan.initTable(ef.ctx, ef.planner, tabDesc, colCfg); err != nil { return nil, err } @@ -639,8 +638,7 @@ func (ef *execFactory) ConstructIndexJoin( tableScan := ef.planner.Scan() - ctx := ef.planner.extendedEvalCtx.Ctx() - if err := tableScan.initTable(ctx, ef.planner, tabDesc, colCfg); err != nil { + if err := tableScan.initTable(ef.ctx, ef.planner, tabDesc, colCfg); err != nil { return nil, err } @@ -701,8 +699,7 @@ func (ef *execFactory) ConstructLookupJoin( colCfg := makeScanColumnsConfig(table, lookupCols) tableScan := ef.planner.Scan() - ctx := ef.planner.extendedEvalCtx.Ctx() - if err := tableScan.initTable(ctx, ef.planner, tabDesc, colCfg); err != nil { + if err := tableScan.initTable(ef.ctx, ef.planner, tabDesc, colCfg); err != nil { return nil, err } @@ -787,8 +784,7 @@ func (ef *execFactory) constructVirtualTableLookupJoin( // Set up a scanNode that we won't actually use, just to get the needed // column analysis. 
colCfg := makeScanColumnsConfig(table, lookupCols) - ctx := ef.planner.extendedEvalCtx.Ctx() - if err := tableScan.initTable(ctx, ef.planner, tableDesc, colCfg); err != nil { + if err := tableScan.initTable(ef.ctx, ef.planner, tableDesc, colCfg); err != nil { return nil, err } tableScan.index = idx @@ -835,8 +831,7 @@ func (ef *execFactory) ConstructInvertedJoin( colCfg := makeScanColumnsConfig(table, lookupCols) tableScan := ef.planner.Scan() - ctx := ef.planner.extendedEvalCtx.Ctx() - if err := tableScan.initTable(ctx, ef.planner, tabDesc, colCfg); err != nil { + if err := tableScan.initTable(ef.ctx, ef.planner, tabDesc, colCfg); err != nil { return nil, err } tableScan.index = idx @@ -907,8 +902,7 @@ func (ef *execFactory) constructScanForZigzag( tableDesc := table.(*optTable).desc idxDesc := index.(*optIndex).idx scan := ef.planner.Scan() - ctx := ef.planner.extendedEvalCtx.Ctx() - if err := scan.initTable(ctx, ef.planner, tableDesc, colCfg); err != nil { + if err := scan.initTable(ef.ctx, ef.planner, tableDesc, colCfg); err != nil { return nil, nil, err } @@ -1315,8 +1309,6 @@ func (ef *execFactory) ConstructInsert( checkOrdSet exec.CheckOrdinalSet, autoCommit bool, ) (exec.Node, error) { - ctx := ef.planner.extendedEvalCtx.Ctx() - // Derive insert table and column descriptors. rowsNeeded := !returnColOrdSet.Empty() tabDesc := table.(*optTable).desc @@ -1325,7 +1317,7 @@ func (ef *execFactory) ConstructInsert( // Create the table inserter, which does the bulk of the work. internal := ef.planner.SessionData().Internal ri, err := row.MakeInserter( - ctx, + ef.ctx, ef.planner.txn, ef.planner.ExecCfg().Codec, tabDesc, @@ -1386,8 +1378,6 @@ func (ef *execFactory) ConstructInsertFastPath( fkChecks []exec.InsertFastPathFKCheck, autoCommit bool, ) (exec.Node, error) { - ctx := ef.planner.extendedEvalCtx.Ctx() - // Derive insert table and column descriptors. rowsNeeded := !returnColOrdSet.Empty() tabDesc := table.(*optTable).desc @@ -1396,7 +1386,7 @@ func (ef *execFactory) ConstructInsertFastPath( // Create the table inserter, which does the bulk of the work. internal := ef.planner.SessionData().Internal ri, err := row.MakeInserter( - ctx, + ef.ctx, ef.planner.txn, ef.planner.ExecCfg().Codec, tabDesc, @@ -1471,8 +1461,6 @@ func (ef *execFactory) ConstructUpdate( passthrough colinfo.ResultColumns, autoCommit bool, ) (exec.Node, error) { - ctx := ef.planner.extendedEvalCtx.Ctx() - // TODO(radu): the execution code has an annoying limitation that the fetch // columns must be a superset of the update columns, even when the "old" value // of a column is not necessary. The optimizer code for pruning columns is @@ -1498,7 +1486,7 @@ func (ef *execFactory) ConstructUpdate( // Create the table updater, which does the bulk of the work. internal := ef.planner.SessionData().Internal ru, err := row.MakeUpdater( - ctx, + ef.ctx, ef.planner.txn, ef.planner.ExecCfg().Codec, tabDesc, @@ -1588,8 +1576,6 @@ func (ef *execFactory) ConstructUpsert( checks exec.CheckOrdinalSet, autoCommit bool, ) (exec.Node, error) { - ctx := ef.planner.extendedEvalCtx.Ctx() - // Derive table and column descriptors. rowsNeeded := !returnColOrdSet.Empty() tabDesc := table.(*optTable).desc @@ -1600,7 +1586,7 @@ func (ef *execFactory) ConstructUpsert( // Create the table inserter, which does the bulk of the insert-related work. 
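// Illustrative sketch (not part of the diff; execFactoryLike is
// hypothetical): the deleted `ctx := ef.planner.extendedEvalCtx.Ctx()`
// lines above all rely on the factory already carrying the statement's
// context in an ef.ctx field, which the Construct* methods read directly:

package sketch

import "context"

type execFactoryLike struct {
	ctx context.Context
}

func (ef *execFactoryLike) constructScan(initTable func(context.Context) error) error {
	// Before this diff: ctx := ef.planner.extendedEvalCtx.Ctx(); initTable(ctx).
	return initTable(ef.ctx)
}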
internal := ef.planner.SessionData().Internal ri, err := row.MakeInserter( - ctx, + ef.ctx, ef.planner.txn, ef.planner.ExecCfg().Codec, tabDesc, @@ -1616,7 +1602,7 @@ func (ef *execFactory) ConstructUpsert( // Create the table updater, which does the bulk of the update-related work. ru, err := row.MakeUpdater( - ctx, + ef.ctx, ef.planner.txn, ef.planner.ExecCfg().Codec, tabDesc, @@ -2102,9 +2088,8 @@ func (ef *execFactory) ConstructCancelSessions(input exec.Node, ifExists bool) ( // ConstructCreateStatistics is part of the exec.Factory interface. func (ef *execFactory) ConstructCreateStatistics(cs *tree.CreateStats) (exec.Node, error) { - ctx := ef.planner.extendedEvalCtx.Ctx() if err := featureflag.CheckEnabled( - ctx, + ef.ctx, ef.planner.ExecCfg(), featureStatsEnabled, "ANALYZE/CREATE STATISTICS", diff --git a/pkg/sql/pgwire/encoding_test.go b/pkg/sql/pgwire/encoding_test.go index c23e89f0e410..41f82e0d6ce4 100644 --- a/pkg/sql/pgwire/encoding_test.go +++ b/pkg/sql/pgwire/encoding_test.go @@ -272,6 +272,7 @@ func TestEncodings(t *testing.T) { } d, err := pgwirebase.DecodeDatum( + ctx, &evalCtx, types.OidToType[tc.Oid], code, @@ -335,6 +336,7 @@ func TestExoticNumericEncodings(t *testing.T) { for i, c := range testCases { t.Run(fmt.Sprintf("%d_%s", i, c.Value), func(t *testing.T) { d, err := pgwirebase.DecodeDatum( + context.Background(), &evalCtx, types.Decimal, pgwirebase.FormatBinary, diff --git a/pkg/sql/pgwire/pgwirebase/encoding.go b/pkg/sql/pgwire/pgwirebase/encoding.go index 004fdb5be5a4..fa226345c5e9 100644 --- a/pkg/sql/pgwire/pgwirebase/encoding.go +++ b/pkg/sql/pgwire/pgwirebase/encoding.go @@ -13,6 +13,7 @@ package pgwirebase import ( "bufio" "bytes" + "context" "encoding/binary" "fmt" "io" @@ -313,7 +314,7 @@ func validateArrayDimensions(nDimensions int, nElements int) error { // a datum. If res is nil, then user defined types are not attempted // to be resolved. 
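// Illustrative sketch (not part of the diff): with the signature change
// below, callers now look like
//
//	d, err := pgwirebase.DecodeDatum(ctx, evalCtx, typ, code, b)
//
// and the nested decoders (decodeBinaryArray, decodeBinaryTuple) thread the
// same ctx back into DecodeDatum for each element. A reduced model of that
// recursion, with a hypothetical cancellation check; the real decoder
// instead passes ctx on to eval.ParseDOid and eval.PerformCast:

package sketch

import "context"

func decodeValueLike(ctx context.Context, b []byte, nesting int) (int, error) {
	if err := ctx.Err(); err != nil {
		return 0, err
	}
	if nesting == 0 {
		return len(b), nil
	}
	// Each nested element receives the same ctx, mirroring
	// decodeBinaryArray calling back into DecodeDatum.
	return decodeValueLike(ctx, b, nesting-1)
}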
func DecodeDatum( - evalCtx *eval.Context, typ *types.T, code FormatCode, b []byte, + ctx context.Context, evalCtx *eval.Context, typ *types.T, code FormatCode, b []byte, ) (tree.Datum, error) { id := typ.Oid() switch code { @@ -354,7 +355,7 @@ func DecodeDatum( oid.T_regnamespace, oid.T_regprocedure, oid.T_regdictionary: - return eval.ParseDOid(evalCtx, string(b), typ) + return eval.ParseDOid(ctx, evalCtx, string(b), typ) case oid.T_float4, oid.T_float8: f, err := strconv.ParseFloat(string(b), 64) if err != nil { @@ -512,7 +513,7 @@ func DecodeDatum( case FormatBinary: switch id { case oid.T_record: - return decodeBinaryTuple(evalCtx, b) + return decodeBinaryTuple(ctx, evalCtx, b) case oid.T_bool: if len(b) > 0 { switch b[0] { @@ -789,7 +790,7 @@ func DecodeDatum( return &tree.DBitArray{BitArray: ba}, err default: if typ.Family() == types.ArrayFamily { - return decodeBinaryArray(evalCtx, typ.ArrayContents(), b, code) + return decodeBinaryArray(ctx, evalCtx, typ.ArrayContents(), b, code) } } default: @@ -938,7 +939,7 @@ func pgBinaryToIPAddr(b []byte) (ipaddr.IPAddr, error) { } func decodeBinaryArray( - evalCtx *eval.Context, t *types.T, b []byte, code FormatCode, + ctx context.Context, evalCtx *eval.Context, t *types.T, b []byte, code FormatCode, ) (tree.Datum, error) { var hdr struct { Ndims int32 @@ -983,7 +984,7 @@ func decodeBinaryArray( continue } buf := r.Next(int(vlen)) - elem, err := DecodeDatum(evalCtx, t, code, buf) + elem, err := DecodeDatum(ctx, evalCtx, t, code, buf) if err != nil { return nil, err } @@ -996,7 +997,7 @@ func decodeBinaryArray( const tupleHeaderSize, oidSize, elementSize = 4, 4, 4 -func decodeBinaryTuple(evalCtx *eval.Context, b []byte) (tree.Datum, error) { +func decodeBinaryTuple(ctx context.Context, evalCtx *eval.Context, b []byte) (tree.Datum, error) { bufferLength := len(b) if bufferLength < tupleHeaderSize { @@ -1070,7 +1071,7 @@ func decodeBinaryTuple(evalCtx *eval.Context, b []byte) (tree.Datum, error) { return nil, getSyntaxError("insufficient bytes reading element for binary format. 
") } - colDatum, err := DecodeDatum(evalCtx, elementType, FormatBinary, b[bufferStartIdx:bufferEndIdx]) + colDatum, err := DecodeDatum(ctx, evalCtx, elementType, FormatBinary, b[bufferStartIdx:bufferEndIdx]) if err != nil { return nil, err diff --git a/pkg/sql/pgwire/pgwirebase/fuzz.go b/pkg/sql/pgwire/pgwirebase/fuzz.go index 72331e3c03eb..93dfd406e97c 100644 --- a/pkg/sql/pgwire/pgwirebase/fuzz.go +++ b/pkg/sql/pgwire/pgwirebase/fuzz.go @@ -44,7 +44,7 @@ func FuzzDecodeDatum(data []byte) int { evalCtx := eval.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) - _, err := DecodeDatum(evalCtx, typ, code, b) + _, err := DecodeDatum(context.Background(), evalCtx, typ, code, b) if err != nil { return 0 } diff --git a/pkg/sql/pgwire/types_test.go b/pkg/sql/pgwire/types_test.go index cc598e5ddcbe..6c99c75845d6 100644 --- a/pkg/sql/pgwire/types_test.go +++ b/pkg/sql/pgwire/types_test.go @@ -143,7 +143,7 @@ func TestIntArrayRoundTrip(t *testing.T) { evalCtx := eval.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) - got, err := pgwirebase.DecodeDatum(evalCtx, types.IntArray, pgwirebase.FormatText, b[4:]) + got, err := pgwirebase.DecodeDatum(context.Background(), evalCtx, types.IntArray, pgwirebase.FormatText, b[4:]) if err != nil { t.Fatal(err) } @@ -224,7 +224,7 @@ func TestByteArrayRoundTrip(t *testing.T) { evalCtx := eval.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) - got, err := pgwirebase.DecodeDatum(evalCtx, types.Bytes, pgwirebase.FormatText, b[4:]) + got, err := pgwirebase.DecodeDatum(context.Background(), evalCtx, types.Bytes, pgwirebase.FormatText, b[4:]) if err != nil { t.Fatal(err) } @@ -675,7 +675,7 @@ func BenchmarkDecodeBinaryDecimal(b *testing.B) { evalCtx := eval.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) b.StartTimer() - got, err := pgwirebase.DecodeDatum(evalCtx, types.Decimal, pgwirebase.FormatBinary, bytes) + got, err := pgwirebase.DecodeDatum(context.Background(), evalCtx, types.Decimal, pgwirebase.FormatBinary, bytes) b.StopTimer() if err != nil { b.Fatal(err) diff --git a/pkg/sql/physicalplan/expression.go b/pkg/sql/physicalplan/expression.go index 6f5f0d905e4a..454d0d8808e8 100644 --- a/pkg/sql/physicalplan/expression.go +++ b/pkg/sql/physicalplan/expression.go @@ -92,7 +92,7 @@ func MakeExpression( fmtCtx := execinfrapb.ExprFmtCtxBase(ctx, evalCtx) fmtCtx.FormatNode(expr) if log.V(1) { - log.Infof(evalCtx.Ctx(), "Expr %s:\n%s", fmtCtx.String(), tree.ExprDebugString(expr)) + log.Infof(ctx, "Expr %s:\n%s", fmtCtx.String(), tree.ExprDebugString(expr)) } expression.Expr = fmtCtx.CloseAndGetString() return expression, nil diff --git a/pkg/sql/rowexec/inverted_joiner.go b/pkg/sql/rowexec/inverted_joiner.go index 0c3b982cc6ec..1e8a988e7836 100644 --- a/pkg/sql/rowexec/inverted_joiner.go +++ b/pkg/sql/rowexec/inverted_joiner.go @@ -277,7 +277,7 @@ func newInvertedJoiner( return nil, err } ij.datumsToInvertedExpr, err = invertedidx.NewDatumsToInvertedExpr( - ij.EvalCtx, onExprColTypes, invertedExprHelper.Expr, ij.fetchSpec.GeoConfig, + ctx, ij.EvalCtx, onExprColTypes, invertedExprHelper.Expr, ij.fetchSpec.GeoConfig, ) if err != nil { return nil, err diff --git a/pkg/sql/rowexec/zigzagjoiner.go b/pkg/sql/rowexec/zigzagjoiner.go index 65a2299478bd..aa6524181921 100644 --- a/pkg/sql/rowexec/zigzagjoiner.go +++ b/pkg/sql/rowexec/zigzagjoiner.go @@ -347,7 +347,7 @@ func 
newZigzagJoiner( z.infos[i].fixedValues = fixedValues[i] } else { fv := &spec.Sides[i].FixedValues - if err = execinfra.HydrateTypesInDatumInfo(flowCtx.EvalCtx.Ctx(), &resolver, fv.Columns); err != nil { + if err = execinfra.HydrateTypesInDatumInfo(ctx, &resolver, fv.Columns); err != nil { return nil, err } z.infos[i].fixedValues, err = valuesSpecToEncDatum(fv) diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_owned_by.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_owned_by.go index 1e1e00632b17..3116f7955c2e 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_owned_by.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_owned_by.go @@ -41,7 +41,7 @@ func DropOwnedBy(b BuildCtx, n *tree.DropOwnedBy) { if role != b.SessionData().User() && !b.CurrentUserHasAdminOrIsMemberOf(role) { panic(pgerror.New(pgcode.InsufficientPrivilege, "permission denied to drop objects")) } - ok, err := b.CanPerformDropOwnedBy(b.EvalCtx().Ctx(), role) + ok, err := b.CanPerformDropOwnedBy(b, role) if err != nil { panic(err) } diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index f4843a69be5f..637062e2ac05 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -1051,7 +1051,7 @@ var regularBuiltins = map[string]builtinDefinition{ Types: tree.ArgTypes{{"val", types.String}}, ReturnType: tree.FixedReturnType(types.INet), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - inet, err := eval.PerformCast(evalCtx, args[0], types.INet) + inet, err := eval.PerformCast(ctx, evalCtx, args[0], types.INet) if err != nil { return nil, pgerror.WithCandidateCode(err, pgcode.InvalidTextRepresentation) } @@ -1967,7 +1967,7 @@ var regularBuiltins = map[string]builtinDefinition{ // PostgreSQL specifies that this variant first casts to the SQL string type, // and only then quotes. We can't use (Datum).String() directly. d := eval.UnwrapDatum(evalCtx, args[0]) - strD, err := eval.PerformCast(evalCtx, d, types.String) + strD, err := eval.PerformCast(ctx, evalCtx, d, types.String) if err != nil { return nil, err } @@ -2008,7 +2008,7 @@ var regularBuiltins = map[string]builtinDefinition{ // PostgreSQL specifies that this variant first casts to the SQL string type, // and only then quotes. We can't use (Datum).String() directly. 
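// Illustrative sketch (not part of the diff; the types are local stand-ins
// for eval.Context and tree.Datum): builtin overloads already receive ctx
// as the first parameter of Fn, and this diff makes them forward it, e.g.
// eval.PerformCast(ctx, evalCtx, ...) rather than the old
// eval.PerformCast(evalCtx, ...):

package sketch

import "context"

type evalCtxLike struct{}

type datumLike = any

// overloadFn mirrors the Fn signature visible throughout these hunks.
type overloadFn func(ctx context.Context, evalCtx *evalCtxLike, args []datumLike) (datumLike, error)

var castBuiltin overloadFn = func(ctx context.Context, evalCtx *evalCtxLike, args []datumLike) (datumLike, error) {
	// In the real overloads this is where eval.PerformCast(ctx, evalCtx,
	// args[0], types.String) or Planner.QueryRowEx(ctx, ...) would run.
	_ = ctx
	_ = evalCtx
	return args[0], nil
}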
d := eval.UnwrapDatum(evalCtx, args[0]) - strD, err := eval.PerformCast(evalCtx, d, types.String) + strD, err := eval.PerformCast(ctx, evalCtx, d, types.String) if err != nil { return nil, err } @@ -2167,11 +2167,11 @@ var regularBuiltins = map[string]builtinDefinition{ ReturnType: tree.FixedReturnType(types.Int), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { name := tree.MustBeDString(args[0]) - dOid, err := eval.ParseDOid(evalCtx, string(name), types.RegClass) + dOid, err := eval.ParseDOid(ctx, evalCtx, string(name), types.RegClass) if err != nil { return nil, err } - res, err := evalCtx.Sequence.IncrementSequenceByID(evalCtx.Ctx(), int64(dOid.Oid)) + res, err := evalCtx.Sequence.IncrementSequenceByID(ctx, int64(dOid.Oid)) if err != nil { return nil, err } @@ -2185,7 +2185,7 @@ var regularBuiltins = map[string]builtinDefinition{ ReturnType: tree.FixedReturnType(types.Int), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { oid := tree.MustBeDOid(args[0]) - res, err := evalCtx.Sequence.IncrementSequenceByID(evalCtx.Ctx(), int64(oid.Oid)) + res, err := evalCtx.Sequence.IncrementSequenceByID(ctx, int64(oid.Oid)) if err != nil { return nil, err } @@ -2207,11 +2207,11 @@ var regularBuiltins = map[string]builtinDefinition{ ReturnType: tree.FixedReturnType(types.Int), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { name := tree.MustBeDString(args[0]) - dOid, err := eval.ParseDOid(evalCtx, string(name), types.RegClass) + dOid, err := eval.ParseDOid(ctx, evalCtx, string(name), types.RegClass) if err != nil { return nil, err } - res, err := evalCtx.Sequence.GetLatestValueInSessionForSequenceByID(evalCtx.Ctx(), int64(dOid.Oid)) + res, err := evalCtx.Sequence.GetLatestValueInSessionForSequenceByID(ctx, int64(dOid.Oid)) if err != nil { return nil, err } @@ -2225,7 +2225,7 @@ var regularBuiltins = map[string]builtinDefinition{ ReturnType: tree.FixedReturnType(types.Int), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { oid := tree.MustBeDOid(args[0]) - res, err := evalCtx.Sequence.GetLatestValueInSessionForSequenceByID(evalCtx.Ctx(), int64(oid.Oid)) + res, err := evalCtx.Sequence.GetLatestValueInSessionForSequenceByID(ctx, int64(oid.Oid)) if err != nil { return nil, err } @@ -2268,14 +2268,14 @@ var regularBuiltins = map[string]builtinDefinition{ ReturnType: tree.FixedReturnType(types.Int), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { name := tree.MustBeDString(args[0]) - dOid, err := eval.ParseDOid(evalCtx, string(name), types.RegClass) + dOid, err := eval.ParseDOid(ctx, evalCtx, string(name), types.RegClass) if err != nil { return nil, err } newVal := tree.MustBeDInt(args[1]) if err := evalCtx.Sequence.SetSequenceValueByID( - evalCtx.Ctx(), uint32(dOid.Oid), int64(newVal), true /* isCalled */); err != nil { + ctx, uint32(dOid.Oid), int64(newVal), true /* isCalled */); err != nil { return nil, err } return args[1], nil @@ -2291,7 +2291,7 @@ var regularBuiltins = map[string]builtinDefinition{ oid := tree.MustBeDOid(args[0]) newVal := tree.MustBeDInt(args[1]) if err := evalCtx.Sequence.SetSequenceValueByID( - evalCtx.Ctx(), uint32(oid.Oid), int64(newVal), true /* isCalled */); err != nil { + ctx, uint32(oid.Oid), int64(newVal), true /* isCalled */); err != nil { return nil, err } return args[1], nil @@ -2307,7 +2307,7 @@ var regularBuiltins = map[string]builtinDefinition{ 
ReturnType: tree.FixedReturnType(types.Int), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { name := tree.MustBeDString(args[0]) - dOid, err := eval.ParseDOid(evalCtx, string(name), types.RegClass) + dOid, err := eval.ParseDOid(ctx, evalCtx, string(name), types.RegClass) if err != nil { return nil, err } @@ -2315,7 +2315,7 @@ var regularBuiltins = map[string]builtinDefinition{ newVal := tree.MustBeDInt(args[1]) if err := evalCtx.Sequence.SetSequenceValueByID( - evalCtx.Ctx(), uint32(dOid.Oid), int64(newVal), isCalled); err != nil { + ctx, uint32(dOid.Oid), int64(newVal), isCalled); err != nil { return nil, err } return args[1], nil @@ -2335,7 +2335,7 @@ var regularBuiltins = map[string]builtinDefinition{ newVal := tree.MustBeDInt(args[1]) if err := evalCtx.Sequence.SetSequenceValueByID( - evalCtx.Ctx(), uint32(oid.Oid), int64(newVal), isCalled); err != nil { + ctx, uint32(oid.Oid), int64(newVal), isCalled); err != nil { return nil, err } return args[1], nil @@ -4041,7 +4041,7 @@ value if you rely on the HLC for accuracy.`, ReturnType: tree.FixedReturnType(types.Bytes), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { uri := string(tree.MustBeDString(args[0])) - content, err := evalCtx.Planner.ExternalReadFile(evalCtx.Ctx(), uri) + content, err := evalCtx.Planner.ExternalReadFile(ctx, uri) return tree.NewDBytes(tree.DBytes(content)), err }, Info: "Read the content of the file at the supplied external storage URI", @@ -4059,7 +4059,7 @@ value if you rely on the HLC for accuracy.`, Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { data := tree.MustBeDBytes(args[0]) uri := string(tree.MustBeDString(args[1])) - if err := evalCtx.Planner.ExternalWriteFile(evalCtx.Ctx(), uri, []byte(data)); err != nil { + if err := evalCtx.Planner.ExternalWriteFile(ctx, uri, []byte(data)); err != nil { return nil, err } return tree.NewDInt(tree.DInt(len(data))), nil @@ -4938,11 +4938,11 @@ value if you rely on the HLC for accuracy.`, ) } res, err := evalCtx.CatalogBuiltins.EncodeTableIndexKey( - evalCtx.Ctx(), tableID, indexID, rowDatums, + ctx, tableID, indexID, rowDatums, func( - _ context.Context, d tree.Datum, t *types.T, + ctx context.Context, d tree.Datum, t *types.T, ) (tree.Datum, error) { - return eval.PerformCast(evalCtx, d, t) + return eval.PerformCast(ctx, evalCtx, d, t) }, ) if err != nil { @@ -5115,7 +5115,7 @@ value if you rely on the HLC for accuracy.`, return nil, errors.Newf("expected string value, got %T", args[0]) } msg := string(s) - log.Fatalf(evalCtx.Ctx(), "force_log_fatal(): %s", msg) + log.Fatalf(ctx, "force_log_fatal(): %s", msg) return nil, nil }, Info: "This function is used only by CockroachDB's developers for testing purposes.", @@ -5140,7 +5140,7 @@ value if you rely on the HLC for accuracy.`, elapsed := duration.MakeDuration(int64(evalCtx.StmtTimestamp.Sub(evalCtx.TxnTimestamp)), 0, 0) if elapsed.Compare(minDuration) < 0 { return nil, evalCtx.Txn.GenerateForcedRetryableError( - evalCtx.Ctx(), "forced by crdb_internal.force_retry()") + ctx, "forced by crdb_internal.force_retry()") } return tree.DZero, nil }, @@ -5404,7 +5404,7 @@ value if you rely on the HLC for accuracy.`, if args[0] == tree.DNull { return tree.DNull, nil } - resps, err := evalCtx.RangeStatsFetcher.RangeStats(evalCtx.Ctx(), + resps, err := evalCtx.RangeStatsFetcher.RangeStats(ctx, roachpb.Key(tree.MustBeDBytes(args[0]))) if err != nil { return nil, pgerror.Wrap(err, 
pgcode.InvalidParameterValue, "error fetching range stats") @@ -5612,7 +5612,7 @@ value if you rely on the HLC for accuracy.`, tableID := catid.DescID(tree.MustBeDInt(args[0])) indexID := catid.IndexID(tree.MustBeDInt(args[1])) g := tree.MustBeDGeography(args[2]) - n, err := evalCtx.CatalogBuiltins.NumGeographyInvertedIndexEntries(evalCtx.Ctx(), tableID, indexID, g) + n, err := evalCtx.CatalogBuiltins.NumGeographyInvertedIndexEntries(ctx, tableID, indexID, g) if err != nil { return nil, err } @@ -5636,7 +5636,7 @@ value if you rely on the HLC for accuracy.`, tableID := catid.DescID(tree.MustBeDInt(args[0])) indexID := catid.IndexID(tree.MustBeDInt(args[1])) g := tree.MustBeDGeometry(args[2]) - n, err := evalCtx.CatalogBuiltins.NumGeometryInvertedIndexEntries(evalCtx.Ctx(), tableID, indexID, g) + n, err := evalCtx.CatalogBuiltins.NumGeometryInvertedIndexEntries(ctx, tableID, indexID, g) if err != nil { return nil, err } @@ -5801,7 +5801,7 @@ value if you rely on the HLC for accuracy.`, if err != nil { return nil, err } - return eval.PerformAssignmentCast(evalCtx, val, targetType) + return eval.PerformAssignmentCast(ctx, evalCtx, val, targetType) }), Info: "This function is used internally to perform assignment casts during mutations.", // The volatility of an assignment cast depends on the argument @@ -6522,7 +6522,7 @@ table's zone configuration this will return NULL.`, Types: tree.ArgTypes{}, ReturnType: tree.FixedReturnType(types.Bool), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(evalCtx.Ctx()) + isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(ctx) if err != nil { return nil, err } @@ -6550,7 +6550,7 @@ table's zone configuration this will return NULL.`, Types: tree.ArgTypes{}, ReturnType: tree.FixedReturnType(types.Bool), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(evalCtx.Ctx()) + isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(ctx) if err != nil { return nil, err } @@ -6618,7 +6618,7 @@ table's zone configuration this will return NULL.`, ReturnType: tree.FixedReturnType(types.Bool), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { state := tree.MustBeDBytes(args[0]) - return evalCtx.Planner.DeserializeSessionState(evalCtx.Ctx(), tree.NewDBytes(state)) + return evalCtx.Planner.DeserializeSessionState(ctx, tree.NewDBytes(state)) }, Info: `This function deserializes the serialized variables into the current session.`, Volatility: volatility.Volatile, @@ -6693,7 +6693,7 @@ table's zone configuration this will return NULL.`, return nil, errInsufficientPriv } oid := tree.MustBeDOid(args[0]) - if err := evalCtx.Planner.RepairTTLScheduledJobForTable(evalCtx.Ctx(), int64(oid.Oid)); err != nil { + if err := evalCtx.Planner.RepairTTLScheduledJobForTable(ctx, int64(oid.Oid)); err != nil { return nil, err } return tree.DVoidDatum, nil @@ -6790,7 +6790,7 @@ table's zone configuration this will return NULL.`, Types: tree.ArgTypes{}, ReturnType: tree.FixedReturnType(types.Void), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - if err := evalCtx.Planner.RevalidateUniqueConstraintsInCurrentDB(evalCtx.Ctx()); err != nil { + if err := evalCtx.Planner.RevalidateUniqueConstraintsInCurrentDB(ctx); err != nil { return nil, err } return tree.DVoidDatum, nil @@ -6810,11 +6810,11 @@ in the current database. 
Returns an error if validation fails.`, ReturnType: tree.FixedReturnType(types.Void), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { name := tree.MustBeDString(args[0]) - dOid, err := eval.ParseDOid(evalCtx, string(name), types.RegClass) + dOid, err := eval.ParseDOid(ctx, evalCtx, string(name), types.RegClass) if err != nil { return nil, err } - if err := evalCtx.Planner.RevalidateUniqueConstraintsInTable(evalCtx.Ctx(), int(dOid.Oid)); err != nil { + if err := evalCtx.Planner.RevalidateUniqueConstraintsInTable(ctx, int(dOid.Oid)); err != nil { return nil, err } return tree.DVoidDatum, nil @@ -6835,12 +6835,12 @@ table. Returns an error if validation fails.`, Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { tableName := tree.MustBeDString(args[0]) constraintName := tree.MustBeDString(args[1]) - dOid, err := eval.ParseDOid(evalCtx, string(tableName), types.RegClass) + dOid, err := eval.ParseDOid(ctx, evalCtx, string(tableName), types.RegClass) if err != nil { return nil, err } if err = evalCtx.Planner.RevalidateUniqueConstraint( - evalCtx.Ctx(), int(dOid.Oid), string(constraintName), + ctx, int(dOid.Oid), string(constraintName), ); err != nil { return nil, err } @@ -6861,12 +6861,12 @@ table. Returns an error if validation fails.`, Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { tableName := tree.MustBeDString(args[0]) constraintName := tree.MustBeDString(args[1]) - dOid, err := eval.ParseDOid(evalCtx, string(tableName), types.RegClass) + dOid, err := eval.ParseDOid(ctx, evalCtx, string(tableName), types.RegClass) if err != nil { return nil, err } active, err := evalCtx.Planner.IsConstraintActive( - evalCtx.Ctx(), int(dOid.Oid), string(constraintName), + ctx, int(dOid.Oid), string(constraintName), ) if err != nil { return nil, err @@ -7135,7 +7135,7 @@ specified store on the node it's run from. One of 'mvccGC', 'merge', 'split', ReturnType: tree.FixedReturnType(types.Bool), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { hasViewActivity, err := evalCtx.SessionAccessor.HasRoleOption( - evalCtx.Ctx(), roleoption.VIEWACTIVITY) + ctx, roleoption.VIEWACTIVITY) if err != nil { return nil, err } @@ -7145,13 +7145,13 @@ specified store on the node it's run from. One of 'mvccGC', 'merge', 'split', "VIEWACTIVITY or ADMIN role option") } - isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(evalCtx.Ctx()) + isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(ctx) if err != nil { return nil, err } hasViewActivityRedacted, err := evalCtx.SessionAccessor.HasRoleOption( - evalCtx.Ctx(), roleoption.VIEWACTIVITYREDACTED) + ctx, roleoption.VIEWACTIVITYREDACTED) if err != nil { return nil, err } @@ -7167,7 +7167,7 @@ specified store on the node it's run from. One of 'mvccGC', 'merge', 'split', expiresAfter := time.Duration(tree.MustBeDInterval(args[3]).Nanos()) if err := evalCtx.StmtDiagnosticsRequestInserter( - evalCtx.Ctx(), + ctx, stmtFingerprint, samplingProbability, minExecutionLatency, @@ -7420,7 +7420,7 @@ var formatImpls = makeBuiltin(tree.FunctionProperties{Category: builtinconstants } formatStr := tree.MustBeDString(args[0]) formatArgs := args[1:] - str, err := pgformat.Format(evalCtx, string(formatStr), formatArgs...) + str, err := pgformat.Format(ctx, evalCtx, string(formatStr), formatArgs...) 
if err != nil { return nil, pgerror.Wrap(err, pgcode.InvalidParameterValue, "error parsing format string") } diff --git a/pkg/sql/sem/builtins/generator_builtins.go b/pkg/sql/sem/builtins/generator_builtins.go index b5da74f6406a..af5509acd9f1 100644 --- a/pkg/sql/sem/builtins/generator_builtins.go +++ b/pkg/sql/sem/builtins/generator_builtins.go @@ -122,7 +122,7 @@ var generators = map[string]builtinDefinition{ }, spanKeyIteratorType, func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (eval.ValueGenerator, error) { - isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(evalCtx.Ctx()) + isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(ctx) if err != nil { return nil, err } @@ -145,7 +145,7 @@ var generators = map[string]builtinDefinition{ }, spanKeyIteratorType, func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (eval.ValueGenerator, error) { - isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(evalCtx.Ctx()) + isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(ctx) if err != nil { return nil, err } @@ -1631,6 +1631,7 @@ type jsonPopulateRecordGenerator struct { target json.JSON wasCalled bool + ctx context.Context evalCtx *eval.Context } @@ -1640,13 +1641,17 @@ func (j jsonPopulateRecordGenerator) ResolvedType() *types.T { } // Start is part of the tree.ValueGenerator interface. -func (j *jsonPopulateRecordGenerator) Start(_ context.Context, _ *kv.Txn) error { return nil } +func (j *jsonPopulateRecordGenerator) Start(ctx context.Context, _ *kv.Txn) error { + j.ctx = ctx + return nil +} // Close is part of the tree.ValueGenerator interface. func (j *jsonPopulateRecordGenerator) Close(_ context.Context) {} // Next is part of the tree.ValueGenerator interface. -func (j *jsonPopulateRecordGenerator) Next(_ context.Context) (bool, error) { +func (j *jsonPopulateRecordGenerator) Next(ctx context.Context) (bool, error) { + j.ctx = ctx if !j.wasCalled { j.wasCalled = true return true, nil @@ -1656,7 +1661,7 @@ func (j *jsonPopulateRecordGenerator) Next(_ context.Context) (bool, error) { // Values is part of the tree.ValueGenerator interface. func (j jsonPopulateRecordGenerator) Values() (tree.Datums, error) { - if err := eval.PopulateRecordWithJSON(j.evalCtx, j.target, j.input.ResolvedType(), j.input); err != nil { + if err := eval.PopulateRecordWithJSON(j.ctx, j.evalCtx, j.target, j.input.ResolvedType(), j.input); err != nil { return nil, err } return j.input.D, nil @@ -1697,13 +1702,17 @@ type jsonPopulateRecordSetGenerator struct { func (j jsonPopulateRecordSetGenerator) ResolvedType() *types.T { return j.input.ResolvedType() } // Start is part of the tree.ValueGenerator interface. -func (j jsonPopulateRecordSetGenerator) Start(_ context.Context, _ *kv.Txn) error { return nil } +func (j jsonPopulateRecordSetGenerator) Start(ctx context.Context, _ *kv.Txn) error { + j.ctx = ctx + return nil +} // Close is part of the tree.ValueGenerator interface. func (j jsonPopulateRecordSetGenerator) Close(_ context.Context) {} // Next is part of the tree.ValueGenerator interface. 
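// Illustrative sketch (not part of the diff): Values() has no ctx parameter
// in the ValueGenerator interface, so these generators stash the ctx they
// receive in Start and Next for Values to use. One caveat worth noting in
// review: jsonPopulateRecordSetGenerator.Start above has a value receiver,
// so its j.ctx assignment is lost when Start returns; the pointer-receiver
// Next also stores ctx, which is what actually makes it visible to Values.
// The pattern, reduced:

package sketch

import "context"

type genLike struct {
	ctx  context.Context
	done bool
}

// Start and Next must use pointer receivers for the stashed ctx to survive
// the call.
func (g *genLike) Start(ctx context.Context) error { g.ctx = ctx; return nil }

func (g *genLike) Next(ctx context.Context) (bool, error) {
	g.ctx = ctx // refresh in case the caller's context changed
	if g.done {
		return false, nil
	}
	g.done = true
	return true, nil
}

func (g *genLike) Values() (any, error) {
	// g.ctx is available here even though Values takes no context.
	return g.ctx != nil, nil
}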
-func (j *jsonPopulateRecordSetGenerator) Next(_ context.Context) (bool, error) { +func (j *jsonPopulateRecordSetGenerator) Next(ctx context.Context) (bool, error) { + j.ctx = ctx if j.nextIdx >= j.target.Len() { return false, nil } @@ -1722,7 +1731,7 @@ func (j *jsonPopulateRecordSetGenerator) Values() (tree.Datums, error) { } output := tree.NewDTupleWithLen(j.input.ResolvedType(), j.input.D.Len()) copy(output.D, j.input.D) - if err := eval.PopulateRecordWithJSON(j.evalCtx, obj, j.input.ResolvedType(), output); err != nil { + if err := eval.PopulateRecordWithJSON(j.ctx, j.evalCtx, obj, j.input.ResolvedType(), output); err != nil { return nil, err } return output.D, nil @@ -1793,7 +1802,7 @@ func (j *jsonRecordGenerator) Next(ctx context.Context) (bool, error) { continue } v := iter.Value() - datum, err := eval.PopulateDatumWithJSON(j.evalCtx, v, j.types[idx]) + datum, err := eval.PopulateDatumWithJSON(ctx, j.evalCtx, v, j.types[idx]) if err != nil { return false, err } @@ -2375,7 +2384,7 @@ func makePayloadsForTraceGenerator( FROM spans, LATERAL crdb_internal.payloads_for_span(spans.span_id)` it, err := evalCtx.Planner.QueryIteratorEx( - evalCtx.Ctx(), + ctx, "crdb_internal.payloads_for_trace", sessiondata.NoSessionDataOverride, query, diff --git a/pkg/sql/sem/builtins/pg_builtins.go b/pkg/sql/sem/builtins/pg_builtins.go index 44ebf3f11c2a..398129c84dd4 100644 --- a/pkg/sql/sem/builtins/pg_builtins.go +++ b/pkg/sql/sem/builtins/pg_builtins.go @@ -214,7 +214,7 @@ func makePGGetIndexDef(argTypes tree.ArgTypes) tree.Overload { colNumber = *args[1].(*tree.DInt) } r, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_indexdef", + ctx, "pg_get_indexdef", sessiondata.NoSessionDataOverride, "SELECT indexdef FROM pg_catalog.pg_indexes WHERE crdb_oid = $1", args[0]) if err != nil { @@ -230,7 +230,7 @@ func makePGGetIndexDef(argTypes tree.ArgTypes) tree.Overload { } // The 3 argument variant for column number other than 0 returns the column name. 
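// Illustrative sketch (not part of the diff; queryRowFn is a hypothetical
// stand-in for Planner.QueryRowEx): the internal queries these builtins
// issue now run under the ctx handed to the builtin, as in the QueryRowEx
// calls above and below, and helpers such as getNameForArg take that ctx
// as their first argument and forward it:

package sketch

import "context"

type row []string

type queryRowFn func(ctx context.Context, opName, query string, args ...any) (row, error)

func nameForArgLike(ctx context.Context, queryRow queryRowFn, query string, arg any) (string, error) {
	// Mirrors getNameForArg after this diff: ctx flows straight into the
	// internal query instead of being unwrapped from the eval context.
	r, err := queryRow(ctx, "get-name-for-arg", query, arg)
	if err != nil || len(r) == 0 {
		return "", err
	}
	return r[0], nil
}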
r, err = evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_indexdef", + ctx, "pg_get_indexdef", sessiondata.NoSessionDataOverride, `SELECT ischema.column_name as pg_get_indexdef FROM information_schema.statistics AS ischema @@ -264,7 +264,7 @@ func makePGGetViewDef(argTypes tree.ArgTypes) tree.Overload { ReturnType: tree.FixedReturnType(types.String), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { r, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_viewdef", + ctx, "pg_get_viewdef", sessiondata.NoSessionDataOverride, `SELECT definition FROM pg_catalog.pg_views v @@ -295,7 +295,7 @@ func makePGGetConstraintDef(argTypes tree.ArgTypes) tree.Overload { ReturnType: tree.FixedReturnType(types.String), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { r, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_constraintdef", + ctx, "pg_get_constraintdef", sessiondata.NoSessionDataOverride, "SELECT condef FROM pg_catalog.pg_constraint WHERE oid=$1", args[0]) if err != nil { @@ -380,7 +380,7 @@ func makePGPrivilegeInquiryDef( var user username.SQLUsername if withUser { arg := eval.UnwrapDatum(evalCtx, args[0]) - userS, err := getNameForArg(evalCtx, arg, "pg_roles", "rolname") + userS, err := getNameForArg(ctx, evalCtx, arg, "pg_roles", "rolname") if err != nil { return nil, err } @@ -437,7 +437,9 @@ func makePGPrivilegeInquiryDef( // getNameForArg determines the object name for the specified argument, which // should be either an unwrapped STRING or an OID. If the object is not found, // the returned string will be empty. -func getNameForArg(evalCtx *eval.Context, arg tree.Datum, pgTable, pgCol string) (string, error) { +func getNameForArg( + ctx context.Context, evalCtx *eval.Context, arg tree.Datum, pgTable, pgCol string, +) (string, error) { var query string switch t := arg.(type) { case *tree.DString: @@ -447,7 +449,7 @@ func getNameForArg(evalCtx *eval.Context, arg tree.Datum, pgTable, pgCol string) default: return "", errors.AssertionFailedf("unexpected arg type %T", t) } - r, err := evalCtx.Planner.QueryRowEx(evalCtx.Ctx(), "get-name-for-arg", + r, err := evalCtx.Planner.QueryRowEx(ctx, "get-name-for-arg", sessiondata.NoSessionDataOverride, query, arg) if err != nil || r == nil { return "", err @@ -524,7 +526,7 @@ func makeToRegOverload(typ *types.T, helpText string) builtinDefinition { if int > 0 { return tree.DNull, nil } - typOid, err := eval.ParseDOid(evalCtx, string(typName), typ) + typOid, err := eval.ParseDOid(ctx, evalCtx, string(typName), typ) if err != nil { //nolint:returnerrcheck return tree.DNull, nil @@ -666,7 +668,7 @@ var pgBuiltins = map[string]builtinDefinition{ } } results, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_functiondef", + ctx, "pg_get_functiondef", sessiondata.NoSessionDataOverride, getFuncQuery, idToQuery, @@ -697,7 +699,7 @@ var pgBuiltins = map[string]builtinDefinition{ Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { funcOid := tree.MustBeDOid(args[0]) t, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_function_result", + ctx, "pg_get_function_result", sessiondata.NoSessionDataOverride, `SELECT prorettype::REGTYPE::TEXT FROM pg_proc WHERE oid=$1`, funcOid.Oid) if err != nil { @@ -725,7 +727,7 @@ var pgBuiltins = map[string]builtinDefinition{ Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { funcOid := tree.MustBeDOid(args[0]) t, err := 
evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_function_identity_arguments", + ctx, "pg_get_function_identity_arguments", sessiondata.NoSessionDataOverride, `SELECT array_agg(unnest(proargtypes)::REGTYPE::TEXT) FROM pg_proc WHERE oid=$1`, funcOid.Oid) if err != nil { @@ -791,7 +793,7 @@ var pgBuiltins = map[string]builtinDefinition{ if err != nil { return nil, err } - res, err := evalCtx.Sequence.GetSerialSequenceNameFromColumn(evalCtx.Ctx(), qualifiedName, tree.Name(columnName)) + res, err := evalCtx.Sequence.GetSerialSequenceNameFromColumn(ctx, qualifiedName, tree.Name(columnName)) if err != nil { return nil, err } @@ -821,7 +823,7 @@ var pgBuiltins = map[string]builtinDefinition{ return tree.NewDOid(0), nil } oid, errSafeToIgnore, err := evalCtx.Planner.ResolveOIDFromString( - evalCtx.Ctx(), types.RegNamespace, tree.NewDString(schema)) + ctx, types.RegNamespace, tree.NewDString(schema)) if err != nil { // If the OID lookup returns an UndefinedObject error, return 0 // instead. We can hit this path if the session created a temporary @@ -849,7 +851,7 @@ var pgBuiltins = map[string]builtinDefinition{ ReturnType: tree.FixedReturnType(types.Bool), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { schemaArg := eval.UnwrapDatum(evalCtx, args[0]) - schema, err := getNameForArg(evalCtx, schemaArg, "pg_namespace", "nspname") + schema, err := getNameForArg(ctx, evalCtx, schemaArg, "pg_namespace", "nspname") if err != nil { return nil, err } @@ -921,7 +923,7 @@ var pgBuiltins = map[string]builtinDefinition{ Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { oid := args[0] t, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_userbyid", + ctx, "pg_get_userbyid", sessiondata.NoSessionDataOverride, "SELECT rolname FROM pg_catalog.pg_roles WHERE oid=$1", oid) if err != nil { @@ -949,7 +951,7 @@ var pgBuiltins = map[string]builtinDefinition{ ReturnType: tree.FixedReturnType(types.String), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { r, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_sequence_parameters", + ctx, "pg_sequence_parameters", sessiondata.NoSessionDataOverride, `SELECT seqstart, seqmin, seqmax, seqincrement, seqcycle, seqcache, seqtypid `+ `FROM pg_catalog.pg_sequence WHERE seqrelid=$1`, args[0]) @@ -1044,7 +1046,7 @@ var pgBuiltins = map[string]builtinDefinition{ // TODO(jordanlewis): Really we'd like to query this directly // on pg_description and let predicate push-down do its job. 
r, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_coldesc", + ctx, "pg_get_coldesc", sessiondata.NoSessionDataOverride, ` SELECT comment FROM system.comments c @@ -1068,7 +1070,7 @@ WHERE c.type=$1::int AND c.object_id=$2::int AND c.sub_id=$3::int LIMIT 1 Types: tree.ArgTypes{{"object_oid", types.Oid}}, ReturnType: tree.FixedReturnType(types.String), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - return getPgObjDesc(evalCtx, "", args[0].(*tree.DOid).Oid) + return getPgObjDesc(ctx, evalCtx, "", args[0].(*tree.DOid).Oid) }, Info: notUsableInfo, Volatility: volatility.Stable, @@ -1077,7 +1079,9 @@ WHERE c.type=$1::int AND c.object_id=$2::int AND c.sub_id=$3::int LIMIT 1 Types: tree.ArgTypes{{"object_oid", types.Oid}, {"catalog_name", types.String}}, ReturnType: tree.FixedReturnType(types.String), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - return getPgObjDesc(evalCtx, + return getPgObjDesc( + ctx, + evalCtx, string(tree.MustBeDString(args[1])), args[0].(*tree.DOid).Oid, ) @@ -1092,7 +1096,7 @@ WHERE c.type=$1::int AND c.object_id=$2::int AND c.sub_id=$3::int LIMIT 1 Types: tree.ArgTypes{{"int", types.Int}}, ReturnType: tree.FixedReturnType(types.Oid), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - return eval.PerformCast(evalCtx, args[0], types.Oid) + return eval.PerformCast(ctx, evalCtx, args[0], types.Oid) }, Info: "Converts an integer to an OID.", Volatility: volatility.Immutable, @@ -1114,7 +1118,7 @@ WHERE c.type=$1::int AND c.object_id=$2::int AND c.sub_id=$3::int LIMIT 1 } r, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_shobjdesc", + ctx, "pg_get_shobjdesc", sessiondata.NoSessionDataOverride, fmt.Sprintf(` SELECT description @@ -1188,7 +1192,7 @@ SELECT description Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { oid := tree.MustBeDOid(args[0]) t, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_function_is_visible", + ctx, "pg_function_is_visible", sessiondata.NoSessionDataOverride, "SELECT * from pg_proc WHERE oid=$1 LIMIT 1", oid.Oid) if err != nil { @@ -1262,7 +1266,7 @@ SELECT description Types: tree.ArgTypes{{"reloid", types.Oid}, {"include_triggers", types.Bool}}, ReturnType: tree.FixedReturnType(types.Int4), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - ret, err := evalCtx.CatalogBuiltins.PGRelationIsUpdatable(evalCtx.Ctx(), tree.MustBeDOid(args[0])) + ret, err := evalCtx.CatalogBuiltins.PGRelationIsUpdatable(ctx, tree.MustBeDOid(args[0])) if err != nil { return nil, err } @@ -1283,7 +1287,7 @@ SELECT description }, ReturnType: tree.FixedReturnType(types.Bool), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { - ret, err := evalCtx.CatalogBuiltins.PGColumnIsUpdatable(evalCtx.Ctx(), tree.MustBeDOid(args[0]), tree.MustBeDInt(args[1])) + ret, err := evalCtx.CatalogBuiltins.PGColumnIsUpdatable(ctx, tree.MustBeDOid(args[0]), tree.MustBeDInt(args[1])) if err != nil { return nil, err } @@ -1303,8 +1307,8 @@ SELECT description durationNanos := int64(float64(*args[0].(*tree.DFloat)) * float64(1000000000)) dur := time.Duration(durationNanos) select { - case <-evalCtx.Ctx().Done(): - return nil, evalCtx.Ctx().Err() + case <-ctx.Done(): + return nil, ctx.Err() case <-time.After(dur): return tree.DBoolTrue, nil } @@ -1431,7 +1435,7 @@ SELECT description argTypeOpts{{"fdw", 
strOrOidTypes}}, func(ctx context.Context, evalCtx *eval.Context, args tree.Datums, user username.SQLUsername) (eval.HasAnyPrivilegeResult, error) { fdwArg := eval.UnwrapDatum(evalCtx, args[0]) - fdw, err := getNameForArg(evalCtx, fdwArg, "pg_foreign_data_wrapper", "fdwname") + fdw, err := getNameForArg(ctx, evalCtx, fdwArg, "pg_foreign_data_wrapper", "fdwname") if err != nil { return eval.HasNoPrivilege, err } @@ -1475,7 +1479,7 @@ SELECT description switch t := oidArg.(type) { case *tree.DString: var err error - oid, err = eval.ParseDOid(evalCtx, string(*t), types.RegProcedure) + oid, err = eval.ParseDOid(ctx, evalCtx, string(*t), types.RegProcedure) if err != nil { return eval.HasNoPrivilege, err } @@ -1522,7 +1526,7 @@ SELECT description argTypeOpts{{"language", strOrOidTypes}}, func(ctx context.Context, evalCtx *eval.Context, args tree.Datums, user username.SQLUsername) (eval.HasAnyPrivilegeResult, error) { langArg := eval.UnwrapDatum(evalCtx, args[0]) - lang, err := getNameForArg(evalCtx, langArg, "pg_language", "lanname") + lang, err := getNameForArg(ctx, evalCtx, langArg, "pg_language", "lanname") if err != nil { return eval.HasNoPrivilege, err } @@ -1561,7 +1565,7 @@ SELECT description func(ctx context.Context, evalCtx *eval.Context, args tree.Datums, user username.SQLUsername) (eval.HasAnyPrivilegeResult, error) { schemaArg := eval.UnwrapDatum(evalCtx, args[0]) databaseName := evalCtx.SessionData().Database - specifier, err := schemaHasPrivilegeSpecifier(evalCtx, schemaArg, databaseName) + specifier, err := schemaHasPrivilegeSpecifier(ctx, evalCtx, schemaArg, databaseName) if err != nil { return eval.HasNoPrivilege, err } @@ -1615,7 +1619,7 @@ SELECT description argTypeOpts{{"server", strOrOidTypes}}, func(ctx context.Context, evalCtx *eval.Context, args tree.Datums, user username.SQLUsername) (eval.HasAnyPrivilegeResult, error) { serverArg := eval.UnwrapDatum(evalCtx, args[0]) - server, err := getNameForArg(evalCtx, serverArg, "pg_foreign_server", "srvname") + server, err := getNameForArg(ctx, evalCtx, serverArg, "pg_foreign_server", "srvname") if err != nil { return eval.HasNoPrivilege, err } @@ -1688,7 +1692,7 @@ SELECT description argTypeOpts{{"tablespace", strOrOidTypes}}, func(ctx context.Context, evalCtx *eval.Context, args tree.Datums, user username.SQLUsername) (eval.HasAnyPrivilegeResult, error) { tablespaceArg := eval.UnwrapDatum(evalCtx, args[0]) - tablespace, err := getNameForArg(evalCtx, tablespaceArg, "pg_tablespace", "spcname") + tablespace, err := getNameForArg(ctx, evalCtx, tablespaceArg, "pg_tablespace", "spcname") if err != nil { return eval.HasNoPrivilege, err } @@ -1732,7 +1736,7 @@ SELECT description switch t := oidArg.(type) { case *tree.DString: var err error - oid, err = eval.ParseDOid(evalCtx, string(*t), types.RegType) + oid, err = eval.ParseDOid(ctx, evalCtx, string(*t), types.RegType) if err != nil { return eval.HasNoPrivilege, err } @@ -1740,7 +1744,7 @@ SELECT description oid = t } - typ, err := getNameForArg(evalCtx, oid, "pg_type", "typname") + typ, err := getNameForArg(ctx, evalCtx, oid, "pg_type", "typname") if err != nil { return eval.HasNoPrivilege, err } @@ -1772,7 +1776,7 @@ SELECT description argTypeOpts{{"role", strOrOidTypes}}, func(ctx context.Context, evalCtx *eval.Context, args tree.Datums, user username.SQLUsername) (eval.HasAnyPrivilegeResult, error) { roleArg := eval.UnwrapDatum(evalCtx, args[0]) - roleS, err := getNameForArg(evalCtx, roleArg, "pg_roles", "rolname") + roleS, err := getNameForArg(ctx, evalCtx, roleArg, "pg_roles", 
"rolname") if err != nil { return eval.HasNoPrivilege, err } @@ -2045,7 +2049,7 @@ SELECT description ReturnType: tree.FixedReturnType(types.Int), Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { r, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "information_schema._pg_index_position", + ctx, "information_schema._pg_index_position", sessiondata.NoSessionDataOverride, `SELECT (ss.a).n FROM (SELECT information_schema._pg_expandarray(indkey) AS a @@ -2205,7 +2209,9 @@ func getCatalogOidForComments(catalogName string) (id int, ok bool) { // getPgObjDesc queries pg_description for object comments. catalog_name, if not // empty, provides a constraint on which "system catalog" the comment is in. // System catalogs are things like pg_class, pg_type, pg_database, and so on. -func getPgObjDesc(evalCtx *eval.Context, catalogName string, oidVal oid.Oid) (tree.Datum, error) { +func getPgObjDesc( + ctx context.Context, evalCtx *eval.Context, catalogName string, oidVal oid.Oid, +) (tree.Datum, error) { classOidFilter := "" if catalogName != "" { classOid, ok := getCatalogOidForComments(catalogName) @@ -2216,7 +2222,7 @@ func getPgObjDesc(evalCtx *eval.Context, catalogName string, oidVal oid.Oid) (tr classOidFilter = fmt.Sprintf("AND classoid = %d", classOid) } r, err := evalCtx.Planner.QueryRowEx( - evalCtx.Ctx(), "pg_get_objdesc", + ctx, "pg_get_objdesc", sessiondata.NoSessionDataOverride, fmt.Sprintf(` SELECT description @@ -2295,7 +2301,7 @@ func columnHasPrivilegeSpecifier( } func schemaHasPrivilegeSpecifier( - evalCtx *eval.Context, schemaArg tree.Datum, databaseName string, + ctx context.Context, evalCtx *eval.Context, schemaArg tree.Datum, databaseName string, ) (eval.HasPrivilegeSpecifier, error) { specifier := eval.HasPrivilegeSpecifier{ SchemaDatabaseName: &databaseName, @@ -2307,7 +2313,7 @@ func schemaHasPrivilegeSpecifier( specifier.SchemaName = &s schemaIsRequired = true case *tree.DOid: - schemaName, err := getNameForArg(evalCtx, schemaArg, "pg_namespace", "nspname") + schemaName, err := getNameForArg(ctx, evalCtx, schemaArg, "pg_namespace", "nspname") if err != nil { return specifier, err } diff --git a/pkg/sql/sem/builtins/pgformat/format.go b/pkg/sql/sem/builtins/pgformat/format.go index b5900b5e5a5d..97e1520d12dc 100644 --- a/pkg/sql/sem/builtins/pgformat/format.go +++ b/pkg/sql/sem/builtins/pgformat/format.go @@ -11,6 +11,8 @@ package pgformat import ( + "context" + "github.com/cockroachdb/cockroach/pkg/sql/lexbase" "github.com/cockroachdb/cockroach/pkg/sql/sem/cast" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" @@ -51,12 +53,14 @@ func (p *pp) popInt() (v int, ok bool) { // Format formats according to a format specifier in the style of postgres format() // and returns the resulting string. 
-func Format(evalCtx *eval.Context, format string, a ...tree.Datum) (string, error) { +func Format( + ctx context.Context, evalCtx *eval.Context, format string, a ...tree.Datum, +) (string, error) { p := pp{ evalCtx: evalCtx, buf: evalCtx.FmtCtx(tree.FmtArrayToString), } - err := p.doPrintf(format, a) + err := p.doPrintf(ctx, format, a) if err != nil { return "", err } @@ -86,7 +90,7 @@ func parsenum(s string, start, end int) (num int, isnum bool, newi int) { return } -func (p *pp) printArg(arg tree.Datum, verb rune) (err error) { +func (p *pp) printArg(ctx context.Context, arg tree.Datum, verb rune) (err error) { var writeFunc func(*tree.FmtCtx) (numBytesWritten int) if arg == tree.DNull { switch verb { @@ -118,7 +122,7 @@ func (p *pp) printArg(arg tree.Datum, verb rune) (err error) { writeFunc = func(buf *tree.FmtCtx) int { lenBefore := buf.Len() var dStr tree.Datum - dStr, err = eval.PerformCast(p.evalCtx, arg, types.String) + dStr, err = eval.PerformCast(ctx, p.evalCtx, arg, types.String) // This shouldn't be possible--anything can be cast to // a string. err will be returned by printArg(). if err != nil { @@ -159,7 +163,7 @@ func (p *pp) printArg(arg tree.Datum, verb rune) (err error) { // intFromArg gets the argNumth element of a. On return, isInt reports whether the argument has integer type. func intFromArg( - evalCtx *eval.Context, a []tree.Datum, argNum int, + ctx context.Context, evalCtx *eval.Context, a []tree.Datum, argNum int, ) (num int, isInt bool, newArgNum int) { newArgNum = argNum if argNum < len(a) && argNum >= 0 { @@ -169,7 +173,7 @@ func intFromArg( return 0, true, argNum + 1 } if cast.ValidCast(datum.ResolvedType(), types.Int, cast.ContextImplicit) { - dInt, err := eval.PerformCast(evalCtx, datum, types.Int) + dInt, err := eval.PerformCast(ctx, evalCtx, datum, types.Int) if err == nil { num = int(tree.MustBeDInt(dInt)) isInt = true @@ -187,7 +191,7 @@ func intFromArg( // doPrintf is copied from golang's internal implementation of fmt, // but modified to use the sql function format()'s syntax for width // and positional arguments. -func (p *pp) doPrintf(format string, a []tree.Datum) error { +func (p *pp) doPrintf(ctx context.Context, format string, a []tree.Datum) error { end := len(format) argNum := 0 // we process one argument per non-trivial format formatLoop: @@ -223,7 +227,7 @@ formatLoop: if argNum < 0 { return errors.New("positions must be positive and 1-indexed") } - err := p.printArg(a[argNum], rune(c)) + err := p.printArg(ctx, a[argNum], rune(c)) if err != nil { return err } @@ -260,12 +264,12 @@ formatLoop: if rawArgNum < 1 { return errors.New("positions must be positive and 1-indexed") } - p.width, isNum, argNum = intFromArg(p.evalCtx, a, rawArgNum-1) + p.width, isNum, argNum = intFromArg(ctx, p.evalCtx, a, rawArgNum-1) if !isNum { return errors.New("non-numeric width") } } else { - p.width, isNum, argNum = intFromArg(p.evalCtx, a, argNum) + p.width, isNum, argNum = intFromArg(ctx, p.evalCtx, a, argNum) if !isNum { return errors.New("non-numeric width") } diff --git a/pkg/sql/sem/builtins/pgformat/format_test.go b/pkg/sql/sem/builtins/pgformat/format_test.go index 44be278d44ac..631da7acbd86 100644 --- a/pkg/sql/sem/builtins/pgformat/format_test.go +++ b/pkg/sql/sem/builtins/pgformat/format_test.go @@ -179,7 +179,7 @@ func TestFormatWithWeirdFormatStrings(t *testing.T) { } str := string(b) // Mostly just making sure no panics - _, err := pgformat.Format(evalContext, str, datums...) 
+			_, err := pgformat.Format(context.Background(), evalContext, str, datums...)
 			if err != nil {
 				require.Regexp(t, `position|width|not enough arguments|unrecognized verb|unterminated format`,
 					err.Error(), "input string was %s", str)
diff --git a/pkg/sql/sem/builtins/pgformat/fuzz.go b/pkg/sql/sem/builtins/pgformat/fuzz.go
index 1f30df34744d..0c52910c743d 100644
--- a/pkg/sql/sem/builtins/pgformat/fuzz.go
+++ b/pkg/sql/sem/builtins/pgformat/fuzz.go
@@ -14,6 +14,8 @@ package pgformat
 import (
+	"context"
+
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
 )
@@ -21,13 +23,13 @@ import (
 // FuzzFormat passes the input to pgformat.Format()
 // as both the format string and format arguments.
 func FuzzFormat(input []byte) int {
-	ctx := eval.MakeTestingEvalContext(nil)
+	evalCtx := eval.MakeTestingEvalContext(nil)
 	str := string(input)
 	args := make(tree.Datums, 16)
 	for i := range args {
 		args[i] = tree.NewDString(string(input))
 	}
-	_, err := Format(&ctx, str, args...)
+	_, err := Format(context.Background(), &evalCtx, str, args...)
 	if err == nil {
 		return 0
diff --git a/pkg/sql/sem/builtins/replication_builtins.go b/pkg/sql/sem/builtins/replication_builtins.go
index 77fdd7cad205..f01a2a99b42f 100644
--- a/pkg/sql/sem/builtins/replication_builtins.go
+++ b/pkg/sql/sem/builtins/replication_builtins.go
@@ -58,7 +58,7 @@ var replicationBuiltins = map[string]builtinDefinition{
 			ingestionJobID := jobspb.JobID(*args[0].(*tree.DInt))
 			cutoverTime := args[1].(*tree.DTimestampTZ).Time
 			cutoverTimestamp := hlc.Timestamp{WallTime: cutoverTime.UnixNano()}
-			err = mgr.CompleteStreamIngestion(evalCtx, evalCtx.Txn, ingestionJobID, cutoverTimestamp)
+			err = mgr.CompleteStreamIngestion(ctx, evalCtx, evalCtx.Txn, ingestionJobID, cutoverTimestamp)
 			if err != nil {
 				return nil, err
 			}
@@ -96,7 +96,7 @@ var replicationBuiltins = map[string]builtinDefinition{
 				return nil, err
 			}
 			ingestionJobID := int64(tree.MustBeDInt(args[0]))
-			stats, err := mgr.GetStreamIngestionStats(evalCtx, evalCtx.Txn, jobspb.JobID(ingestionJobID))
+			stats, err := mgr.GetStreamIngestionStats(ctx, evalCtx, evalCtx.Txn, jobspb.JobID(ingestionJobID))
 			if err != nil {
 				return nil, err
 			}
@@ -136,7 +136,7 @@ var replicationBuiltins = map[string]builtinDefinition{
 				return nil, err
 			}
 			ingestionJobID := int64(tree.MustBeDInt(args[0]))
-			stats, err := mgr.GetStreamIngestionStats(evalCtx, evalCtx.Txn, jobspb.JobID(ingestionJobID))
+			stats, err := mgr.GetStreamIngestionStats(ctx, evalCtx, evalCtx.Txn, jobspb.JobID(ingestionJobID))
 			if err != nil {
 				return nil, err
 			}
@@ -172,7 +172,7 @@ var replicationBuiltins = map[string]builtinDefinition{
 			if err != nil {
 				return nil, err
 			}
-			jobID, err := mgr.StartReplicationStream(evalCtx, evalCtx.Txn, uint64(tenantID))
+			jobID, err := mgr.StartReplicationStream(ctx, evalCtx, evalCtx.Txn, uint64(tenantID))
 			if err != nil {
 				return nil, err
 			}
@@ -210,7 +210,7 @@ var replicationBuiltins = map[string]builtinDefinition{
 				return nil, err
 			}
 			streamID := streaming.StreamID(int(tree.MustBeDInt(args[0])))
-			sps, err := mgr.HeartbeatReplicationStream(evalCtx, streamID, frontier, evalCtx.Txn)
+			sps, err := mgr.HeartbeatReplicationStream(ctx, evalCtx, streamID, frontier, evalCtx.Txn)
 			if err != nil {
 				return nil, err
 			}
@@ -275,7 +275,7 @@ var replicationBuiltins = map[string]builtinDefinition{
 			}
 			streamID := int64(tree.MustBeDInt(args[0]))
-			spec, err := mgr.GetReplicationStreamSpec(evalCtx, evalCtx.Txn, streaming.StreamID(streamID))
+			spec, err := mgr.GetReplicationStreamSpec(ctx, evalCtx, evalCtx.Txn, streaming.StreamID(streamID))
 			if err != nil {
 				return nil, err
 			}
@@ -311,8 +311,9 @@ var replicationBuiltins = map[string]builtinDefinition{
 			streamID := int64(tree.MustBeDInt(args[0]))
 			successfulIngestion := bool(tree.MustBeDBool(args[1]))
-			if err := mgr.CompleteReplicationStream(evalCtx, evalCtx.Txn,
-				streaming.StreamID(streamID), successfulIngestion); err != nil {
+			if err := mgr.CompleteReplicationStream(
+				ctx, evalCtx, evalCtx.Txn, streaming.StreamID(streamID), successfulIngestion,
+			); err != nil {
 				return nil, err
 			}
 			return tree.NewDInt(tree.DInt(streamID)), err
diff --git a/pkg/sql/sem/eval/binary_op.go b/pkg/sql/sem/eval/binary_op.go
index 70fc4d82363d..38d73570554d 100644
--- a/pkg/sql/sem/eval/binary_op.go
+++ b/pkg/sql/sem/eval/binary_op.go
@@ -223,7 +223,7 @@ func (e *evaluator) EvalConcatOp(
 	ctx context.Context, op *tree.ConcatOp, left, right tree.Datum,
 ) (tree.Datum, error) {
 	if op.Left == types.String {
-		casted, err := PerformCast(e.ctx(), right, types.String)
+		casted, err := PerformCast(ctx, e.ctx(), right, types.String)
 		if err != nil {
 			return nil, err
 		}
@@ -232,7 +232,7 @@ func (e *evaluator) EvalConcatOp(
 		), nil
 	}
 	if op.Right == types.String {
-		casted, err := PerformCast(e.ctx(), left, types.String)
+		casted, err := PerformCast(ctx, e.ctx(), left, types.String)
 		if err != nil {
 			return nil, err
 		}
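Before the cast.go hunks below, a minimal sketch of the call shape this patch converges on: the caller's context.Context travels next to the *eval.Context instead of being recovered from it. The helper name is hypothetical; only PerformCast's new signature comes from the patch.

    package example

    import (
    	"context"

    	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
    	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    	"github.com/cockroachdb/cockroach/pkg/sql/types"
    )

    // castToString is a hypothetical helper: the query's context.Context now
    // rides alongside the eval.Context in every PerformCast call.
    func castToString(ctx context.Context, evalCtx *eval.Context, d tree.Datum) (tree.Datum, error) {
    	return eval.PerformCast(ctx, evalCtx, d, types.String)
    }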
diff --git a/pkg/sql/sem/eval/cast.go b/pkg/sql/sem/eval/cast.go
index e50554f1fe59..9ef99b0fbe8e 100644
--- a/pkg/sql/sem/eval/cast.go
+++ b/pkg/sql/sem/eval/cast.go
@@ -59,8 +59,10 @@ func ReType(expr tree.TypedExpr, wantedType *types.T) (_ tree.TypedExpr, ok bool
 // PerformCast performs a cast from the provided Datum to the specified
 // types.T. The original datum is returned if its type is identical
 // to the specified type.
-func PerformCast(ctx *Context, d tree.Datum, t *types.T) (tree.Datum, error) {
-	ret, err := performCastWithoutPrecisionTruncation(ctx, d, t, true /* truncateWidth */)
+func PerformCast(
+	ctx context.Context, evalCtx *Context, d tree.Datum, t *types.T,
+) (tree.Datum, error) {
+	ret, err := performCastWithoutPrecisionTruncation(ctx, evalCtx, d, t, true /* truncateWidth */)
 	if err != nil {
 		return nil, err
 	}
@@ -75,14 +77,16 @@ func PerformCast(ctx *Context, d tree.Datum, t *types.T) (tree.Datum, error) {
 // or string values are too wide for the given type, rather than truncating the
 // value. The one exception to this is casts to the special "char" type which
 // are truncated.
-func PerformAssignmentCast(ctx *Context, d tree.Datum, t *types.T) (tree.Datum, error) {
+func PerformAssignmentCast(
+	ctx context.Context, evalCtx *Context, d tree.Datum, t *types.T,
+) (tree.Datum, error) {
 	if !cast.ValidCast(d.ResolvedType(), t, cast.ContextAssignment) {
 		return nil, pgerror.Newf(
 			pgcode.CannotCoerce,
 			"invalid assignment cast: %s -> %s", d.ResolvedType(), t,
 		)
 	}
-	d, err := performCastWithoutPrecisionTruncation(ctx, d, t, false /* truncateWidth */)
+	d, err := performCastWithoutPrecisionTruncation(ctx, evalCtx, d, t, false /* truncateWidth */)
 	if err != nil {
 		return nil, err
 	}
@@ -106,7 +110,7 @@ var (
 // and casting logic before this can happen.
 // See also: #55094.
 func performCastWithoutPrecisionTruncation(
-	ctx *Context, d tree.Datum, t *types.T, truncateWidth bool,
+	ctx context.Context, evalCtx *Context, d tree.Datum, t *types.T, truncateWidth bool,
 ) (tree.Datum, error) {
 	// No conversion is needed if d is NULL.
 	if d == tree.DNull {
@@ -395,7 +399,7 @@ func performCastWithoutPrecisionTruncation(
 			// the resolved type for a DFloat is always FLOAT8, meaning
 			// floatTyp.Width() will always return 64.
 			floatTyp := t.ResolvedType()
-			b := tree.PgwireFormatFloat(nil /* buf */, float64(*t), ctx.SessionData().DataConversionConfig, floatTyp)
+			b := tree.PgwireFormatFloat(nil /* buf */, float64(*t), evalCtx.SessionData().DataConversionConfig, floatTyp)
 			s = string(b)
 		case *tree.DInt:
 			if typ.Oid() == oid.T_char {
@@ -416,7 +420,7 @@ func performCastWithoutPrecisionTruncation(
 			s = tree.AsStringWithFlags(d, tree.FmtBareStrings)
 		case *tree.DTimestampTZ:
 			// Convert to context timezone for correct display.
-			ts, err := tree.MakeDTimestampTZ(t.In(ctx.GetLocation()), time.Microsecond)
+			ts, err := tree.MakeDTimestampTZ(t.In(evalCtx.GetLocation()), time.Microsecond)
 			if err != nil {
 				return nil, err
 			}
@@ -428,13 +432,13 @@ func performCastWithoutPrecisionTruncation(
 			s = tree.AsStringWithFlags(
 				d,
 				tree.FmtPgwireText,
-				tree.FmtDataConversionConfig(ctx.SessionData().DataConversionConfig),
+				tree.FmtDataConversionConfig(evalCtx.SessionData().DataConversionConfig),
 			)
 		case *tree.DArray:
 			s = tree.AsStringWithFlags(
 				d,
 				tree.FmtPgwireText,
-				tree.FmtDataConversionConfig(ctx.SessionData().DataConversionConfig),
+				tree.FmtDataConversionConfig(evalCtx.SessionData().DataConversionConfig),
 			)
 		case *tree.DInterval:
 			// When converting an interval to string, we need a string representation
@@ -443,7 +447,7 @@ func performCastWithoutPrecisionTruncation(
 			s = tree.AsStringWithFlags(
 				d,
 				tree.FmtPgwireText,
-				tree.FmtDataConversionConfig(ctx.SessionData().DataConversionConfig),
+				tree.FmtDataConversionConfig(evalCtx.SessionData().DataConversionConfig),
 			)
 		case *tree.DUuid:
 			s = t.UUID.String()
@@ -456,7 +460,7 @@ func performCastWithoutPrecisionTruncation(
 		case *tree.DBytes:
 			s = lex.EncodeByteArrayToRawBytes(
 				string(*t),
-				ctx.SessionData().DataConversionConfig.BytesEncodeFormat,
+				evalCtx.SessionData().DataConversionConfig.BytesEncodeFormat,
 				false, /* skipHexPrefix */
 			)
 		case *tree.DOid:
@@ -495,7 +499,7 @@ func performCastWithoutPrecisionTruncation(
 			if truncateWidth && t.Width() > 0 {
 				s = util.TruncateString(s, int(t.Width()))
 			}
-			return tree.NewDCollatedString(s, t.Locale(), &ctx.CollationEnv)
+			return tree.NewDCollatedString(s, t.Locale(), &evalCtx.CollationEnv)
 		}

 	case types.BytesFamily:
@@ -658,10 +662,10 @@ func performCastWithoutPrecisionTruncation(
 	case types.DateFamily:
 		switch d := d.(type) {
 		case *tree.DString:
-			res, _, err := tree.ParseDDate(ctx, string(*d))
+			res, _, err := tree.ParseDDate(evalCtx, string(*d))
 			return res, err
 		case *tree.DCollatedString:
-			res, _, err := tree.ParseDDate(ctx, d.Contents)
+			res, _, err := tree.ParseDDate(evalCtx, d.Contents)
 			return res, err
 		case *tree.DDate:
 			return d, nil
@@ -670,7 +674,7 @@ func performCastWithoutPrecisionTruncation(
 			t, err := pgdate.MakeDateFromUnixEpoch(int64(*d))
 			return tree.NewDDate(t), err
 		case *tree.DTimestampTZ:
-			return tree.NewDDateFromTime(d.Time.In(ctx.GetLocation()))
+			return tree.NewDDateFromTime(d.Time.In(evalCtx.GetLocation()))
 		case *tree.DTimestamp:
 			return tree.NewDDateFromTime(d.Time)
 		}
@@ -679,10 +683,10 @@ func performCastWithoutPrecisionTruncation(
 		roundTo := tree.TimeFamilyPrecisionToRoundDuration(t.Precision())
 		switch d := d.(type) {
 		case *tree.DString:
-			res, _, err := tree.ParseDTime(ctx, string(*d), roundTo)
+			res, _, err := tree.ParseDTime(evalCtx, string(*d), roundTo)
 			return res, err
 		case *tree.DCollatedString:
-			res, _, err := tree.ParseDTime(ctx, d.Contents, roundTo)
+			res, _, err := tree.ParseDTime(evalCtx, d.Contents, roundTo)
 			return res, err
 		case *tree.DTime:
 			return d.Round(roundTo), nil
@@ -692,7 +696,7 @@ func performCastWithoutPrecisionTruncation(
 			return tree.MakeDTime(timeofday.FromTime(d.Time).Round(roundTo)), nil
 		case *tree.DTimestampTZ:
 			// Strip time zone. Times don't carry their location.
-			stripped, err := d.EvalAtTimeZone(ctx.GetLocation())
+			stripped, err := d.EvalAtTimeZone(evalCtx.GetLocation())
 			if err != nil {
 				return nil, err
 			}
@@ -705,17 +709,17 @@ func performCastWithoutPrecisionTruncation(
 		roundTo := tree.TimeFamilyPrecisionToRoundDuration(t.Precision())
 		switch d := d.(type) {
 		case *tree.DString:
-			res, _, err := tree.ParseDTimeTZ(ctx, string(*d), roundTo)
+			res, _, err := tree.ParseDTimeTZ(evalCtx, string(*d), roundTo)
 			return res, err
 		case *tree.DCollatedString:
-			res, _, err := tree.ParseDTimeTZ(ctx, d.Contents, roundTo)
+			res, _, err := tree.ParseDTimeTZ(evalCtx, d.Contents, roundTo)
 			return res, err
 		case *tree.DTime:
-			return tree.NewDTimeTZFromLocation(timeofday.TimeOfDay(*d).Round(roundTo), ctx.GetLocation()), nil
+			return tree.NewDTimeTZFromLocation(timeofday.TimeOfDay(*d).Round(roundTo), evalCtx.GetLocation()), nil
 		case *tree.DTimeTZ:
 			return d.Round(roundTo), nil
 		case *tree.DTimestampTZ:
-			return tree.NewDTimeTZFromTime(d.Time.In(ctx.GetLocation()).Round(roundTo)), nil
+			return tree.NewDTimeTZFromTime(d.Time.In(evalCtx.GetLocation()).Round(roundTo)), nil
 		}

 	case types.TimestampFamily:
@@ -723,10 +727,10 @@ func performCastWithoutPrecisionTruncation(
 		roundTo := tree.TimeFamilyPrecisionToRoundDuration(t.Precision())
 		// TODO(knz): Timestamp from float, decimal.
 		switch d := d.(type) {
 		case *tree.DString:
-			res, _, err := tree.ParseDTimestamp(ctx, string(*d), roundTo)
+			res, _, err := tree.ParseDTimestamp(evalCtx, string(*d), roundTo)
 			return res, err
 		case *tree.DCollatedString:
-			res, _, err := tree.ParseDTimestamp(ctx, d.Contents, roundTo)
+			res, _, err := tree.ParseDTimestamp(evalCtx, d.Contents, roundTo)
 			return res, err
 		case *tree.DDate:
 			t, err := d.ToTime()
@@ -740,7 +744,7 @@ func performCastWithoutPrecisionTruncation(
 			return d.Round(roundTo)
 		case *tree.DTimestampTZ:
 			// Strip time zone. Timestamps don't carry their location.
-			stripped, err := d.EvalAtTimeZone(ctx.GetLocation())
+			stripped, err := d.EvalAtTimeZone(evalCtx.GetLocation())
 			if err != nil {
 				return nil, err
 			}
@@ -752,10 +756,10 @@ func performCastWithoutPrecisionTruncation(
 		// TODO(knz): TimestampTZ from float, decimal.
 		switch d := d.(type) {
 		case *tree.DString:
-			res, _, err := tree.ParseDTimestampTZ(ctx, string(*d), roundTo)
+			res, _, err := tree.ParseDTimestampTZ(evalCtx, string(*d), roundTo)
 			return res, err
 		case *tree.DCollatedString:
-			res, _, err := tree.ParseDTimestampTZ(ctx, d.Contents, roundTo)
+			res, _, err := tree.ParseDTimestampTZ(evalCtx, d.Contents, roundTo)
 			return res, err
 		case *tree.DDate:
 			t, err := d.ToTime()
@@ -763,11 +767,11 @@ func performCastWithoutPrecisionTruncation(
 				return nil, err
 			}
 			_, before := t.Zone()
-			_, after := t.In(ctx.GetLocation()).Zone()
+			_, after := t.In(evalCtx.GetLocation()).Zone()
 			return tree.MakeDTimestampTZ(t.Add(time.Duration(before-after)*time.Second), roundTo)
 		case *tree.DTimestamp:
 			_, before := d.Time.Zone()
-			_, after := d.Time.In(ctx.GetLocation()).Zone()
+			_, after := d.Time.In(evalCtx.GetLocation()).Zone()
 			return tree.MakeDTimestampTZ(d.Time.Add(time.Duration(before-after)*time.Second), roundTo)
 		case *tree.DInt:
 			return tree.MakeDTimestampTZ(timeutil.Unix(int64(*d), 0), roundTo)
@@ -782,9 +786,9 @@ func performCastWithoutPrecisionTruncation(
 		}
 		switch v := d.(type) {
 		case *tree.DString:
-			return tree.ParseDIntervalWithTypeMetadata(ctx.GetIntervalStyle(), string(*v), itm)
+			return tree.ParseDIntervalWithTypeMetadata(evalCtx.GetIntervalStyle(), string(*v), itm)
 		case *tree.DCollatedString:
-			return tree.ParseDIntervalWithTypeMetadata(ctx.GetIntervalStyle(), v.Contents, itm)
+			return tree.ParseDIntervalWithTypeMetadata(evalCtx.GetIntervalStyle(), v.Contents, itm)
 		case *tree.DInt:
 			return tree.NewDInterval(duration.FromInt64(int64(*v)), itm), nil
 		case *tree.DFloat:
@@ -834,7 +838,7 @@ func performCastWithoutPrecisionTruncation(
 	case types.ArrayFamily:
 		switch v := d.(type) {
 		case *tree.DString:
-			res, _, err := tree.ParseDArrayFromString(ctx, string(*v), t.ArrayContents())
+			res, _, err := tree.ParseDArrayFromString(evalCtx, string(*v), t.ArrayContents())
 			return res, err
 		case *tree.DArray:
 			dcast := tree.NewDArray(t.ArrayContents())
@@ -845,7 +849,7 @@ func performCastWithoutPrecisionTruncation(
 				ecast := tree.DNull
 				if e != tree.DNull {
 					var err error
-					ecast, err = PerformCast(ctx, e, t.ArrayContents())
+					ecast, err = PerformCast(ctx, evalCtx, e, t.ArrayContents())
 					if err != nil {
 						return nil, err
 					}
@@ -860,14 +864,14 @@ func performCastWithoutPrecisionTruncation(
 	case types.OidFamily:
 		switch v := d.(type) {
 		case *tree.DOid:
-			return performIntToOidCast(ctx.Ctx(), ctx.Planner, t, tree.DInt(v.Oid))
+			return performIntToOidCast(ctx, evalCtx.Planner, t, tree.DInt(v.Oid))
 		case *tree.DInt:
-			return performIntToOidCast(ctx.Ctx(), ctx.Planner, t, *v)
+			return performIntToOidCast(ctx, evalCtx.Planner, t, *v)
 		case *tree.DString:
 			if t.Oid() != oid.T_oid && string(*v) == tree.ZeroOidValue {
 				return tree.WrapAsZeroOid(t), nil
 			}
-			return ParseDOid(ctx, string(*v), t)
+			return ParseDOid(ctx, evalCtx, string(*v), t)
 		}
 	case types.TupleFamily:
 		switch v := d.(type) {
@@ -886,14 +890,14 @@ func performCastWithoutPrecisionTruncation(
 			ret := tree.NewDTupleWithLen(t, len(v.D))
 			for i := range v.D {
 				var err error
-				ret.D[i], err = PerformCast(ctx, v.D[i], t.TupleContents()[i])
+				ret.D[i], err = PerformCast(ctx, evalCtx, v.D[i], t.TupleContents()[i])
 				if err != nil {
 					return nil, err
 				}
 			}
 			return ret, nil
 		case *tree.DString:
-			res, _, err := tree.ParseDTupleFromString(ctx, string(*v), t)
+			res, _, err := tree.ParseDTupleFromString(evalCtx, string(*v), t)
 			return res, err
 		}
 	case types.VoidFamily:
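The two exported entry points above differ only in width handling, per their doc comments: PerformCast truncates over-wide values, while PerformAssignmentCast reports an error instead (except for "char"). A hedged sketch of that contrast; MakeVarChar and the exact error behavior are assumptions, not shown in the patch:

    package example

    import (
    	"context"

    	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
    	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    	"github.com/cockroachdb/cockroach/pkg/sql/types"
    )

    func castExamples(ctx context.Context, evalCtx *eval.Context) error {
    	// An explicit cast would be expected to truncate to the target width...
    	if _, err := eval.PerformCast(ctx, evalCtx, tree.NewDString("toolong"), types.MakeVarChar(3)); err != nil {
    		return err
    	}
    	// ...while an assignment cast should reject the over-wide value rather
    	// than silently truncating it.
    	_, err := eval.PerformAssignmentCast(ctx, evalCtx, tree.NewDString("toolong"), types.MakeVarChar(3))
    	return err
    }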
diff --git a/pkg/sql/sem/eval/cast_map_test.go b/pkg/sql/sem/eval/cast_map_test.go
index 21c63bcea437..ebf1f92c8569 100644
--- a/pkg/sql/sem/eval/cast_map_test.go
+++ b/pkg/sql/sem/eval/cast_map_test.go
@@ -11,6 +11,7 @@ package eval_test
 import (
+	"context"
 	"testing"

 	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
@@ -54,7 +55,7 @@ func TestCastMap(t *testing.T) {
 				}
 			}

-			_, err := eval.PerformCast(&evalCtx, srcDatum, tgtType)
+			_, err := eval.PerformCast(context.Background(), &evalCtx, srcDatum, tgtType)
 			// If the error is a CannotCoerce error, then PerformCast does not
 			// support casting from src to tgt. The one exception is negative
 			// integers to bit types which return the same error code (see the TODO
diff --git a/pkg/sql/sem/eval/context.go b/pkg/sql/sem/eval/context.go
index b7a08516a0d3..4a0877d03405 100644
--- a/pkg/sql/sem/eval/context.go
+++ b/pkg/sql/sem/eval/context.go
@@ -606,11 +606,6 @@ func (ec *Context) GetDateStyle() pgdate.DateStyle {
 	return ec.SessionData().GetDateStyle()
 }

-// Ctx returns the session's context.
-func (ec *Context) Ctx() context.Context {
-	return ec.Context
-}
-
 // BoundedStaleness returns true if this query uses bounded staleness.
 func (ec *Context) BoundedStaleness() bool {
 	return ec.AsOfSystemTime != nil &&
diff --git a/pkg/sql/sem/eval/expr.go b/pkg/sql/sem/eval/expr.go
index e10416707a82..440fc8d89fea 100644
--- a/pkg/sql/sem/eval/expr.go
+++ b/pkg/sql/sem/eval/expr.go
@@ -187,7 +187,7 @@ func (e *evaluator) EvalCastExpr(ctx context.Context, expr *tree.CastExpr) (tree
 		return d, nil
 	}
 	d = UnwrapDatum(e.ctx(), d)
-	return PerformCast(e.ctx(), d, expr.ResolvedType())
+	return PerformCast(ctx, e.ctx(), d, expr.ResolvedType())
 }

 func (e *evaluator) EvalCoalesceExpr(
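The context.go hunk above is the keystone of the patch: with eval.Context.Ctx() deleted, a context can no longer be fished out of the eval context, so every caller must accept one. A sketch of what a migrated call site looks like; the helper name is hypothetical, and the ParseDOid signature is taken from the parse_doid.go hunks further down:

    package example

    import (
    	"context"

    	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
    	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    	"github.com/cockroachdb/cockroach/pkg/sql/types"
    )

    // resolveRegType is hypothetical. Before this patch it could have reached
    // into the eval context via evalCtx.Ctx(); now the caller's ctx arrives as
    // an explicit parameter and is handed to the catalog lookups.
    func resolveRegType(ctx context.Context, evalCtx *eval.Context, s string) (*tree.DOid, error) {
    	return eval.ParseDOid(ctx, evalCtx, s, types.RegType)
    }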
diff --git a/pkg/sql/sem/eval/json.go b/pkg/sql/sem/eval/json.go
index a0008751f681..103de9c9ed01 100644
--- a/pkg/sql/sem/eval/json.go
+++ b/pkg/sql/sem/eval/json.go
@@ -11,6 +11,8 @@ package eval
 import (
+	"context"
+
 	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
 	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
@@ -22,7 +24,9 @@ import (
 // PopulateDatumWithJSON is used for the json to record function family, like
 // json_populate_record. It's less restrictive than the casting system, which
 // is why it's implemented separately.
-func PopulateDatumWithJSON(ctx *Context, j json.JSON, desiredType *types.T) (tree.Datum, error) {
+func PopulateDatumWithJSON(
+	ctx context.Context, evalCtx *Context, j json.JSON, desiredType *types.T,
+) (tree.Datum, error) {
 	if j == json.NullJSONValue {
 		return tree.DNull, nil
 	}
@@ -40,7 +44,7 @@ func PopulateDatumWithJSON(ctx *Context, j json.JSON, desiredType *types.T) (tre
 			if err != nil {
 				return nil, err
 			}
-			d.Array[i], err = PopulateDatumWithJSON(ctx, elt, elementTyp)
+			d.Array[i], err = PopulateDatumWithJSON(ctx, evalCtx, elt, elementTyp)
 			if err != nil {
 				return nil, err
 			}
@@ -51,7 +55,7 @@ func PopulateDatumWithJSON(ctx *Context, j json.JSON, desiredType *types.T) (tre
 		for i := range tup.D {
 			tup.D[i] = tree.DNull
 		}
-		err := PopulateRecordWithJSON(ctx, j, desiredType, tup)
+		err := PopulateRecordWithJSON(ctx, evalCtx, j, desiredType, tup)
 		return tup, err
 	}
 	var s string
@@ -68,7 +72,7 @@ func PopulateDatumWithJSON(ctx *Context, j json.JSON, desiredType *types.T) (tre
 	default:
 		s = j.String()
 	}
-	return PerformCast(ctx, tree.NewDString(s), desiredType)
+	return PerformCast(ctx, evalCtx, tree.NewDString(s), desiredType)
 }

 // PopulateRecordWithJSON is used for the json to record function family, like
@@ -81,7 +85,7 @@ func PopulateDatumWithJSON(ctx *Context, j json.JSON, desiredType *types.T) (tre
 // Each field will be set by a best-effort coercion to its type from the JSON
 // field. The logic is more permissive than casts.
 func PopulateRecordWithJSON(
-	ctx *Context, j json.JSON, desiredType *types.T, tup *tree.DTuple,
+	ctx context.Context, evalCtx *Context, j json.JSON, desiredType *types.T, tup *tree.DTuple,
 ) error {
 	if j.Type() != json.ObjectJSONType {
 		return pgerror.Newf(pgcode.InvalidParameterValue, "expected JSON object")
@@ -100,7 +104,7 @@ func PopulateRecordWithJSON(
 			// No value? Use the value that was already in the tuple.
 			continue
 		}
-		tup.D[i], err = PopulateDatumWithJSON(ctx, val, tupleTypes[i])
+		tup.D[i], err = PopulateDatumWithJSON(ctx, evalCtx, val, tupleTypes[i])
 		if err != nil {
 			return err
 		}
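A sketch of driving the json-to-record path above under the new signatures. The record type and JSON literal are illustrative; the NULL-prefill mirrors PopulateDatumWithJSON's own tuple branch:

    package example

    import (
    	"context"

    	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
    	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    	"github.com/cockroachdb/cockroach/pkg/sql/types"
    	"github.com/cockroachdb/cockroach/pkg/util/json"
    )

    func populateExample(ctx context.Context, evalCtx *eval.Context) (*tree.DTuple, error) {
    	recordType := types.MakeLabeledTuple(
    		[]*types.T{types.Int, types.String}, []string{"a", "b"},
    	)
    	j, err := json.ParseJSON(`{"a": 1, "b": "one"}`)
    	if err != nil {
    		return nil, err
    	}
    	// Fields default to NULL; each present JSON field is then coerced to
    	// its tuple element type, more permissively than a cast would be.
    	tup := tree.NewDTupleWithLen(recordType, 2)
    	for i := range tup.D {
    		tup.D[i] = tree.DNull
    	}
    	if err := eval.PopulateRecordWithJSON(ctx, evalCtx, j, recordType, tup); err != nil {
    		return nil, err
    	}
    	return tup, nil
    }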
diff --git a/pkg/sql/sem/eval/parse_doid.go b/pkg/sql/sem/eval/parse_doid.go
index a9b13e666fd2..d0969170a31b 100644
--- a/pkg/sql/sem/eval/parse_doid.go
+++ b/pkg/sql/sem/eval/parse_doid.go
@@ -11,6 +11,7 @@ package eval
 import (
+	"context"
 	"regexp"
 	"strings"

@@ -30,14 +31,14 @@ import (
 var pgSignatureRegexp = regexp.MustCompile(`^\s*([\w\."]+)\s*\((?:(?:\s*[\w"]+\s*,)*\s*[\w"]+)?\s*\)\s*$`)

 // ParseDOid parses and returns an Oid family datum.
-func ParseDOid(ctx *Context, s string, t *types.T) (*tree.DOid, error) {
+func ParseDOid(ctx context.Context, evalCtx *Context, s string, t *types.T) (*tree.DOid, error) {
 	// If it is an integer in string form, convert it as an int.
 	if _, err := tree.ParseDInt(strings.TrimSpace(s)); err == nil {
 		tmpOid, err := tree.ParseDOidAsInt(s)
 		if err != nil {
 			return nil, err
 		}
-		oidRes, errSafeToIgnore, err := ctx.Planner.ResolveOIDFromOID(ctx.Ctx(), t, tmpOid)
+		oidRes, errSafeToIgnore, err := evalCtx.Planner.ResolveOIDFromOID(ctx, t, tmpOid)
 		if err != nil {
 			if !errSafeToIgnore {
 				return nil, err
@@ -69,7 +70,7 @@ func ParseDOid(ctx *Context, s string, t *types.T) (*tree.DOid, error) {
 		for i := 0; i < len(substrs); i++ {
 			name.Parts[i] = substrs[len(substrs)-1-i]
 		}
-		funcDef, err := ctx.Planner.ResolveFunction(ctx.Ctx(), &name, &ctx.SessionData().SearchPath)
+		funcDef, err := evalCtx.Planner.ResolveFunction(ctx, &name, &evalCtx.SessionData().SearchPath)
 		if err != nil {
 			return nil, err
 		}
@@ -96,7 +97,7 @@ func ParseDOid(ctx *Context, s string, t *types.T) (*tree.DOid, error) {
 		}

 		un := fn.FuncName.ToUnresolvedObjectName().ToUnresolvedName()
-		fd, err := ctx.Planner.ResolveFunction(ctx.Ctx(), un, &ctx.SessionData().SearchPath)
+		fd, err := evalCtx.Planner.ResolveFunction(ctx, un, &evalCtx.SessionData().SearchPath)
 		if err != nil {
 			return nil, err
 		}
@@ -117,17 +118,17 @@ func ParseDOid(ctx *Context, s string, t *types.T) (*tree.DOid, error) {
 			}
 		}

-		argTypes, err := fn.InputArgTypes(ctx.Ctx(), ctx.Planner)
+		argTypes, err := fn.InputArgTypes(ctx, evalCtx.Planner)
 		if err != nil {
 			return nil, err
 		}
-		ol, err := fd.MatchOverload(argTypes, fn.FuncName.Schema(), &ctx.SessionData().SearchPath)
+		ol, err := fd.MatchOverload(argTypes, fn.FuncName.Schema(), &evalCtx.SessionData().SearchPath)
 		if err != nil {
 			return nil, err
 		}
 		return tree.NewDOidWithTypeAndName(ol.Oid, t, fd.Name), nil
 	case oid.T_regtype:
-		parsedTyp, err := ctx.Planner.GetTypeFromValidSQLSyntax(s)
+		parsedTyp, err := evalCtx.Planner.GetTypeFromValidSQLSyntax(s)
 		if err == nil {
 			return tree.NewDOidWithTypeAndName(
 				parsedTyp.Oid(), t, parsedTyp.SQLStandardName(),
@@ -148,8 +149,8 @@ func ParseDOid(ctx *Context, s string, t *types.T) (*tree.DOid, error) {
 		// Trim type modifiers, e.g. `numeric(10,3)` becomes `numeric`.
 		s = pgSignatureRegexp.ReplaceAllString(s, "$1")
-		dOid, errSafeToIgnore, missingTypeErr := ctx.Planner.ResolveOIDFromString(
-			ctx.Ctx(), t, tree.NewDString(tree.Name(s).Normalize()),
+		dOid, errSafeToIgnore, missingTypeErr := evalCtx.Planner.ResolveOIDFromString(
+			ctx, t, tree.NewDString(tree.Name(s).Normalize()),
 		)
 		if missingTypeErr == nil {
 			return dOid, nil
@@ -177,7 +178,7 @@ func ParseDOid(ctx *Context, s string, t *types.T) (*tree.DOid, error) {
 		if err != nil {
 			return nil, err
 		}
-		id, err := ctx.Planner.ResolveTableName(ctx.Ctx(), &tn)
+		id, err := evalCtx.Planner.ResolveTableName(ctx, &tn)
 		if err != nil {
 			return nil, err
 		}
@@ -185,7 +186,7 @@ func ParseDOid(ctx *Context, s string, t *types.T) (*tree.DOid, error) {
 		return tree.NewDOidWithTypeAndName(oid.Oid(id), t, tn.ObjectName.String()), nil

 	default:
-		d, _ /* errSafeToIgnore */, err := ctx.Planner.ResolveOIDFromString(ctx.Ctx(), t, tree.NewDString(s))
+		d, _ /* errSafeToIgnore */, err := evalCtx.Planner.ResolveOIDFromString(ctx, t, tree.NewDString(s))
 		return d, err
 	}
 }
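The reg* branches above all make the same swap: planner lookups move from ctx.Planner plus ctx.Ctx() to evalCtx.Planner plus the threaded ctx. A sketch of the regtype path specifically; the two-spelling equivalence is an assumption drawn from the visible control flow, not something the patch asserts:

    package example

    import (
    	"context"

    	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
    	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    	"github.com/cockroachdb/cockroach/pkg/sql/types"
    )

    // regtypeLookups: the regtype branch first tries full SQL type syntax via
    // the planner, then falls back to a name lookup with type modifiers
    // trimmed by pgSignatureRegexp, so both spellings should resolve to the
    // same OID.
    func regtypeLookups(ctx context.Context, evalCtx *eval.Context) (*tree.DOid, *tree.DOid, error) {
    	full, err := eval.ParseDOid(ctx, evalCtx, "numeric(10,3)", types.RegType)
    	if err != nil {
    		return nil, nil, err
    	}
    	bare, err := eval.ParseDOid(ctx, evalCtx, "numeric", types.RegType)
    	return full, bare, err
    }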
diff --git a/pkg/sql/sem/tree/expr_test.go b/pkg/sql/sem/tree/expr_test.go
index 958fe91672e5..6a418afbd0b9 100644
--- a/pkg/sql/sem/tree/expr_test.go
+++ b/pkg/sql/sem/tree/expr_test.go
@@ -89,7 +89,7 @@ func TestStringConcat(t *testing.T) {
 			continue
 		}
 		d := randgen.RandDatum(rng, typ, false /* nullOk */)
-		expected, err := eval.PerformCast(&evalCtx, d, types.String)
+		expected, err := eval.PerformCast(ctx, &evalCtx, d, types.String)
 		require.NoError(t, err)
 		concatOp := treebin.MakeBinaryOperator(treebin.Concat)
 		concatExprLeft := tree.NewTypedBinaryExpr(concatOp, tree.NewDString(""), d, types.String)
diff --git a/pkg/sql/user.go b/pkg/sql/user.go
index ec5b53105b7a..133ae15048cd 100644
--- a/pkg/sql/user.go
+++ b/pkg/sql/user.go
@@ -704,7 +704,7 @@ func MaybeUpgradeStoredPasswordHash(
 	// configuration.
 	autoUpgradePasswordHashesBool := security.AutoUpgradePasswordHashes.Get(&execCfg.Settings.SV)

-	hashMethod := security.GetConfiguredPasswordHashMethod(ctx, &execCfg.Settings.SV)
+	hashMethod := security.GetConfiguredPasswordHashMethod(&execCfg.Settings.SV)

 	converted, prevHash, newHash, newMethod, err := password.MaybeUpgradePasswordHash(ctx,
 		autoUpgradePasswordHashesBool, hashMethod, cleartext, currentHash,
diff --git a/pkg/sql/vars.go b/pkg/sql/vars.go
index f878acbfa641..531d9d5d97d2 100644
--- a/pkg/sql/vars.go
+++ b/pkg/sql/vars.go
@@ -1309,10 +1309,10 @@ var varGen = map[string]sessionVar{
 	// their own password hash algorithm.
 	`password_encryption`: {
 		Get: func(evalCtx *extendedEvalContext, _ *kv.Txn) (string, error) {
-			return security.GetConfiguredPasswordHashMethod(evalCtx.Ctx(), &evalCtx.Settings.SV).String(), nil
+			return security.GetConfiguredPasswordHashMethod(&evalCtx.Settings.SV).String(), nil
 		},
 		SetWithPlanner: func(ctx context.Context, p *planner, local bool, val string) error {
-			method := security.GetConfiguredPasswordHashMethod(ctx, &p.ExecCfg().Settings.SV)
+			method := security.GetConfiguredPasswordHashMethod(&p.ExecCfg().Settings.SV)
 			if val != method.String() {
 				return newCannotChangeParameterError("password_encryption")
 			}
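The user.go and vars.go hunks above go the opposite direction from the rest of the patch: GetConfiguredPasswordHashMethod only reads a cluster setting, so its context parameter is dropped rather than threaded through. A sketch of the resulting call shape; the helper name and settings variable are illustrative:

    package example

    import (
    	"github.com/cockroachdb/cockroach/pkg/security"
    	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
    )

    // hashMethodFor reads the configured password hash method straight from
    // the settings values; a pure settings read needs no context.Context.
    func hashMethodFor(st *cluster.Settings) string {
    	return security.GetConfiguredPasswordHashMethod(&st.SV).String()
    }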
diff --git a/pkg/streaming/api.go b/pkg/streaming/api.go
index 009616b60f71..2d96bbb468f4 100644
--- a/pkg/streaming/api.go
+++ b/pkg/streaming/api.go
@@ -43,6 +43,7 @@ var GetStreamIngestManagerHook func(ctx context.Context, evalCtx *eval.Context)
 type ReplicationStreamManager interface {
 	// StartReplicationStream starts a stream replication job for the specified tenant on the producer side.
 	StartReplicationStream(
+		ctx context.Context,
 		evalCtx *eval.Context,
 		txn *kv.Txn,
 		tenantID uint64,
@@ -53,6 +54,7 @@ type ReplicationStreamManager interface {
 	// progress and extends its life, and the new producer progress will be returned.
 	// If 'frontier' is hlc.MaxTimestamp, returns the producer progress without updating it.
 	HeartbeatReplicationStream(
+		ctx context.Context,
 		evalCtx *eval.Context,
 		streamID StreamID,
 		frontier hlc.Timestamp,
@@ -69,6 +71,7 @@ type ReplicationStreamManager interface {
 	// GetReplicationStreamSpec gets a stream replication spec on the producer side.
 	GetReplicationStreamSpec(
+		ctx context.Context,
 		evalCtx *eval.Context,
 		txn *kv.Txn,
 		streamID StreamID,
@@ -78,7 +81,11 @@ type ReplicationStreamManager interface {
 	// 'successfulIngestion' indicates whether the stream ingestion finished successfully and
 	// determines the fate of the producer job, succeeded or canceled.
 	CompleteReplicationStream(
-		evalCtx *eval.Context, txn *kv.Txn, streamID StreamID, successfulIngestion bool,
+		ctx context.Context,
+		evalCtx *eval.Context,
+		txn *kv.Txn,
+		streamID StreamID,
+		successfulIngestion bool,
 	) error
 }

@@ -87,6 +94,7 @@ type ReplicationStreamManager interface {
 type StreamIngestManager interface {
 	// CompleteStreamIngestion signals a running stream ingestion job to complete on the consumer side.
 	CompleteStreamIngestion(
+		ctx context.Context,
 		evalCtx *eval.Context,
 		txn *kv.Txn,
 		ingestionJobID jobspb.JobID,
@@ -95,6 +103,7 @@ type StreamIngestManager interface {
 	// GetStreamIngestionStats gets a statistics summary for a stream ingestion job.
 	GetStreamIngestionStats(
+		ctx context.Context,
 		evalCtx *eval.Context,
 		txn *kv.Txn,
 		ingestionJobID jobspb.JobID,