Skip to content

Commit

Permalink
Merge pull request #51031 from RaduBerinde/backport20.1-50974
Browse files Browse the repository at this point in the history
release-20.1: stmtdiagnostics: break into chunks
  • Loading branch information
RaduBerinde authored Jul 7, 2020
2 parents 1022d58 + df3d89b commit a565ade
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 12 deletions.
8 changes: 8 additions & 0 deletions pkg/sql/explain_bundle_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ import (
"context"
"fmt"
"io"
"math/rand"
"regexp"
"sort"
"strings"
Expand All @@ -42,6 +43,13 @@ func TestExplainAnalyzeDebug(t *testing.T) {
base := "statement.txt trace.json trace.txt trace-jaeger.json env.sql"
plans := "schema.sql opt.txt opt-v.txt opt-vv.txt plan.txt"

// Set a small chunk size to test splitting into chunks. The bundle files are
// on the order of 10KB.
r.Exec(t, fmt.Sprintf(
"SET CLUSTER SETTING sql.stmt_diagnostics.bundle_chunk_size = '%d'",
5000+rand.Intn(10000),
))

t.Run("basic", func(t *testing.T) {
rows := r.QueryStr(t, "EXPLAIN ANALYZE (DEBUG) SELECT * FROM abc WHERE c=1")
checkBundle(
Expand Down
39 changes: 27 additions & 12 deletions pkg/sql/stmtdiagnostics/statement_diagnostics.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,11 +33,23 @@ import (
"github.com/cockroachdb/errors"
)

var stmtDiagnosticsPollingInterval = settings.RegisterDurationSetting(
// pollingInterval controls how often the stmtdiagnostics.Registry polls for
// outstanding diagnostics requests. Per the setting description, a
// non-positive value disables polling entirely.
var pollingInterval = settings.RegisterDurationSetting(
	"sql.stmt_diagnostics.poll_interval",
	"rate at which the stmtdiagnostics.Registry polls for requests, set to zero to disable",
	10*time.Second)

// bundleChunkSize is the maximum size of a single row written to
// system.statement_bundle_chunks; larger bundles are split across multiple
// chunk rows. The validator rejects values too small to be practical.
var bundleChunkSize = settings.RegisterValidatedByteSizeSetting(
	"sql.stmt_diagnostics.bundle_chunk_size",
	"chunk size for statement diagnostic bundles",
	1024*1024, // 1 MiB default
	func(val int64) error {
		// Guard clause: anything at or above the floor is acceptable.
		const minChunkBytes = 16
		if val >= minChunkBytes {
			return nil
		}
		return errors.Errorf("chunk size must be at least 16 bytes")
	},
)

// Registry maintains a view on the statement fingerprints
// on which data is to be collected (i.e. system.statement_diagnostics_requests)
// and provides utilities for checking a query against this list and satisfying
Expand Down Expand Up @@ -101,7 +113,7 @@ func (r *Registry) poll(ctx context.Context) {
deadline time.Time
pollIntervalChanged = make(chan struct{}, 1)
maybeResetTimer = func() {
if interval := stmtDiagnosticsPollingInterval.Get(&r.st.SV); interval <= 0 {
if interval := pollingInterval.Get(&r.st.SV); interval <= 0 {
// Setting the interval to a non-positive value stops the polling.
timer.Stop()
} else {
Expand All @@ -122,7 +134,7 @@ func (r *Registry) poll(ctx context.Context) {
lastPoll = timeutil.Now()
}
)
stmtDiagnosticsPollingInterval.SetOnChange(&r.st.SV, func() {
pollingInterval.SetOnChange(&r.st.SV, func() {
select {
case pollIntervalChanged <- struct{}{}:
default:
Expand Down Expand Up @@ -387,27 +399,30 @@ func (r *Registry) insertStatementDiagnostics(
errorVal = tree.NewDString(collectionErr.Error())
}

bundleChunksVal := tree.DNull
if len(bundle) != 0 {
// Insert the bundle into system.statement_bundle_chunks.
// TODO(radu): split in chunks.
bundleChunksVal := tree.NewDArray(types.Int)
for len(bundle) > 0 {
chunkSize := int(bundleChunkSize.Get(&r.st.SV))
chunk := bundle
if len(chunk) > chunkSize {
chunk = chunk[:chunkSize]
}
bundle = bundle[len(chunk):]

// Insert the chunk into system.statement_bundle_chunks.
row, err := r.ie.QueryRowEx(
ctx, "stmt-bundle-chunks-insert", txn,
sqlbase.InternalExecutorSessionDataOverride{User: security.RootUser},
"INSERT INTO system.statement_bundle_chunks(description, data) VALUES ($1, $2) RETURNING id",
"statement diagnostics bundle",
tree.NewDBytes(tree.DBytes(bundle)),
tree.NewDBytes(tree.DBytes(chunk)),
)
if err != nil {
return err
}
chunkID := row[0].(*tree.DInt)

array := tree.NewDArray(types.Int)
if err := array.Append(chunkID); err != nil {
if err := bundleChunksVal.Append(chunkID); err != nil {
return err
}
bundleChunksVal = array
}

collectionTime := timeutil.Now()
Expand Down

0 comments on commit a565ade

Please sign in to comment.