sentry: conn_executor.go:645: panic while executing 1 statements: CREATE TABLE _ (_ STRING(20), _ INT8, _ INT8, _ STRING(20)) PARTITION BY LIST (_, _) (PARTITION _ VALUES IN (_), PARTITION _ VALUES IN (_), PARTITION _ VALUES IN (_), PARTITION _ VALUES IN (DEFAULT)): caused by <redacted> #37681

Closed
cockroach-teamcity opened this issue May 21, 2019 · 1 comment
Labels
C-bug: Code not up to spec/doc, specs & docs deemed correct. Solution expected to change code/behavior.
O-sentry: Originated from an in-the-wild panic report.
X-duplicate: Closed as a duplicate of another issue.

Comments

@cockroach-teamcity (Member)

This issue was autofiled by Sentry. It represents a crash or reported error on a live cluster with telemetry enabled.

Sentry link: https://sentry.io/organizations/cockroach-labs/issues/1036304297/?referrer=webhooks_plugin

Panic message:

conn_executor.go:645: panic while executing 1 statements: CREATE TABLE _ (_ STRING(20), _ INT8, _ INT8, _ STRING(20)) PARTITION BY LIST (_, _) (PARTITION _ VALUES IN (_), PARTITION _ VALUES IN (_), PARTITION _ VALUES IN (_), PARTITION _ VALUES IN (DEFAULT)): caused by <redacted>

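Every identifier and value in the statement is redacted, but its shape is recoverable: a four-column table with no explicit PRIMARY KEY (so its primary index is the single hidden rowid column) partitioned by list on two columns. A hedged reconstruction of a statement with the same shape; all table, column, and partition names and all tuple values are hypothetical stand-ins, and a local insecure cluster on localhost:26257 is assumed:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // CockroachDB speaks the postgres wire protocol
)

func main() {
	// Assumes a local insecure CockroachDB v2.1.6 node listening on 26257.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same shape as the redacted statement: four columns, no explicit
	// PRIMARY KEY, and a two-column PARTITION BY LIST. All names and
	// values below are guesses standing in for the redacted originals.
	_, err = db.Exec(`CREATE TABLE t (
		a STRING(20), b INT8, c INT8, d STRING(20)
	) PARTITION BY LIST (a, b) (
		PARTITION p1 VALUES IN (('x', 1)),
		PARTITION p2 VALUES IN (('y', 2)),
		PARTITION p3 VALUES IN (('z', 3)),
		PARTITION p4 VALUES IN (DEFAULT)
	)`)
	log.Print(err) // expected: a clean validation error; reported on v2.1.6: server-side panic
}
```

Whether this exact statement reproduces the crash is a guess; per the trace below, any PARTITION BY LIST whose column list is longer than the index being partitioned appears sufficient.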
Stacktrace (with inline code snippets):

r := recover()
h.ex.closeWrapper(ctx, r)
}()
in pkg/sql.(*Server).ServeConn.func1
/usr/local/go/src/runtime/asm_amd64.s#L572-L574 in runtime.call32
/usr/local/go/src/runtime/panic.go#L501-L503 in runtime.gopanic
/usr/local/go/src/runtime/panic.go#L34-L36 in runtime.panicslice
"declared partition columns (%s) do not match first %d columns in index being partitioned (%s)",
partitioningString(), n, strings.Join(indexDesc.ColumnNames[:n], ", "))
}
in pkg/ccl/partitionccl.createPartitioningImpl
return createPartitioningImpl(
ctx, evalCtx, tableDesc, indexDesc, partBy, 0 /* colOffset */)
in pkg/ccl/partitionccl.createPartitioning
}
return CreatePartitioningCCL(ctx, st, evalCtx, tableDesc, indexDesc, partBy)
}
in pkg/sql.CreatePartitioning
if n.PartitionBy != nil {
partitioning, err := CreatePartitioning(
ctx, st, evalCtx, &desc, &desc.PrimaryIndex, n.PartitionBy)
in pkg/sql.MakeTableDesc
params.p.runWithOptions(resolveFlags{skipCache: true}, func() {
ret, err = MakeTableDesc(
params.ctx,
in pkg/sql.makeTableDesc.func2
}
fn()
}
in pkg/sql.(*planner).runWithOptions
// See the comment at the start of MakeTableDesc() and resolveFK().
params.p.runWithOptions(resolveFlags{skipCache: true}, func() {
ret, err = MakeTableDesc(
in pkg/sql.makeTableDesc
affected = make(map[sqlbase.ID]*sqlbase.TableDescriptor)
desc, err = makeTableDesc(params, n.n, n.dbDesc.ID, id, creationTime, privs, affected)
}
in pkg/sql.(*createTableNode).startExec

if s, ok := n.(execStartable); ok {
return s.startExec(params)
}
in pkg/sql.startExec.func2

}
v.err = v.observer.leaveNode(name, plan)
}()
in pkg/sql.(*planVisitor).visitInternal.func1

}
}
in pkg/sql.(*planVisitor).visitInternal

}
v.visitInternal(plan, name)
return plan
in pkg/sql.(*planVisitor).visit
v := makePlanVisitor(ctx, observer)
v.visit(plan)
return v.err
in pkg/sql.walkPlan

}
return walkPlan(params.ctx, plan, o)
}
in pkg/sql.startExec
// This starts all of the nodes below this node.
if err := startExec(p.params, p.node); err != nil {
p.MoveToDraining(err)
in pkg/sql.(*planNodeToRowSource).Start
}
ctx = pb.self.Start(ctx)
Run(ctx, pb.self, pb.out.output)
in pkg/sql/distsqlrun.(*ProcessorBase).Run
if len(f.processors) > 0 {
f.processors[len(f.processors)-1].Run(ctx, nil)
}
in pkg/sql/distsqlrun.(*Flow).StartSync
// TODO(radu): this should go through the flow scheduler.
if err := flow.StartSync(ctx, func() {}); err != nil {
log.Fatalf(ctx, "unexpected error from syncFlow.Start(): %s "+
in pkg/sql.(*DistSQLPlanner).Run
dsp.FinalizePlan(planCtx, &physPlan)
dsp.Run(planCtx, txn, &physPlan, recv, evalCtx, nil /* finishedSetupFn */)
}
in pkg/sql.(*DistSQLPlanner).PlanAndRun
// the planner whether or not to plan remote table readers.
ex.server.cfg.DistSQLPlanner.PlanAndRun(
ctx, evalCtx, &planCtx, planner.txn, planner.curPlan.plan, recv)
in pkg/sql.(*connExecutor).execWithDistSQLEngine
ex.sessionTracing.TraceExecStart(ctx, "distributed")
err = ex.execWithDistSQLEngine(ctx, planner, stmt.AST.StatementType(), res, distributePlan)
} else {
in pkg/sql.(*connExecutor).dispatchToExecutionEngine
p.autoCommit = os.ImplicitTxn.Get() && !ex.server.cfg.TestingKnobs.DisableAutoCommit
if err := ex.dispatchToExecutionEngine(ctx, stmt, p, res); err != nil {
return nil, nil, err
in pkg/sql.(*connExecutor).execStmtInOpenState
case stateOpen:
ev, payload, err = ex.execStmtInOpenState(ctx, stmt, pinfo, res)
switch ev.(type) {
in pkg/sql.(*connExecutor).execStmt
ctx := withStatement(ex.Ctx(), ex.curStmt)
ev, payload, err = ex.execStmt(ctx, curStmt, stmtRes, nil /* pinfo */, pos)
if err != nil {
in pkg/sql.(*connExecutor).run
}()
return h.ex.run(ctx, s.pool, reserved, cancel)
}
in pkg/sql.(*Server).ServeConn
go func() {
writerErr = sqlServer.ServeConn(ctx, connHandler, reserved, cancelConn)
// TODO(andrei): Should we sometimes transmit the writerErr's to the
in pkg/sql/pgwire.(*conn).serveImpl.func4

pkg/sql/conn_executor.go in pkg/sql.(*Server).ServeConn.func1 at line 387
/usr/local/go/src/runtime/asm_amd64.s in runtime.call32 at line 573
/usr/local/go/src/runtime/panic.go in runtime.gopanic at line 502
/usr/local/go/src/runtime/panic.go in runtime.panicslice at line 35
pkg/ccl/partitionccl/partition.go in pkg/ccl/partitionccl.createPartitioningImpl at line 155
pkg/ccl/partitionccl/partition.go in pkg/ccl/partitionccl.createPartitioning at line 227
pkg/sql/create_table.go in pkg/sql.CreatePartitioning at line 815
pkg/sql/create_table.go in pkg/sql.MakeTableDesc at line 1126
pkg/sql/create_table.go in pkg/sql.makeTableDesc.func2 at line 1237
pkg/sql/resolver.go in pkg/sql.(*planner).runWithOptions at line 140
pkg/sql/create_table.go in pkg/sql.makeTableDesc at line 1236
pkg/sql/create_table.go in pkg/sql.(*createTableNode).startExec at line 142
pkg/sql/plan.go in pkg/sql.startExec.func2 at line 640
pkg/sql/walk.go in pkg/sql.(*planVisitor).visitInternal.func1 at line 134
pkg/sql/walk.go in pkg/sql.(*planVisitor).visitInternal at line 543
pkg/sql/walk.go in pkg/sql.(*planVisitor).visit at line 101
pkg/sql/walk.go in pkg/sql.walkPlan at line 65
pkg/sql/plan.go in pkg/sql.startExec at line 645
pkg/sql/plan_node_to_row_source.go in pkg/sql.(*planNodeToRowSource).Start at line 123
pkg/sql/distsqlrun/processors.go in pkg/sql/distsqlrun.(*ProcessorBase).Run at line 730
pkg/sql/distsqlrun/flow.go in pkg/sql/distsqlrun.(*Flow).StartSync at line 607
pkg/sql/distsql_running.go in pkg/sql.(*DistSQLPlanner).Run at line 253
pkg/sql/distsql_running.go in pkg/sql.(*DistSQLPlanner).PlanAndRun at line 758
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execWithDistSQLEngine at line 982
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).dispatchToExecutionEngine at line 824
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execStmtInOpenState at line 402
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execStmt at line 96
pkg/sql/conn_executor.go in pkg/sql.(*connExecutor).run at line 1118
pkg/sql/conn_executor.go in pkg/sql.(*Server).ServeConn at line 389
pkg/sql/pgwire/conn.go in pkg/sql/pgwire.(*conn).serveImpl.func4 at line 313
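The frames pin the failure: runtime.panicslice fires while the fmt call at pkg/ccl/partitionccl/partition.go line 155 is evaluating its arguments, i.e. while createPartitioningImpl is formatting its "declared partition columns (%s) do not match first %d columns in index being partitioned (%s)" error. That suggests indexDesc.ColumnNames[:n] slices past the end of the index's column list, presumably two declared partition columns against a one-column hidden rowid primary index, so the panic preempts the intended validation error. A minimal standalone sketch of that mechanism, not the actual CockroachDB code:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// With no explicit PRIMARY KEY, the primary index covers only the
	// hidden rowid column (hypothetical stand-in for indexDesc.ColumnNames).
	indexColumnNames := []string{"rowid"}

	// n mirrors the number of columns named in PARTITION BY LIST (_, _): two.
	n := 2

	// The arguments are evaluated before Errorf runs, so the slice
	// expression panics (runtime.panicslice) and the intended
	// "declared partition columns do not match" error is never built.
	err := fmt.Errorf(
		"declared partition columns do not match first %d columns in index being partitioned (%s)",
		n, strings.Join(indexColumnNames[:n], ", "))
	fmt.Println(err) // never reached: panic: slice bounds out of range
}
```

Capping n at len(indexColumnNames), or bounds-checking before formatting, would presumably turn the panic back into the intended error.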
Tag                 Value
Cockroach Release   v2.1.6
Cockroach SHA       f53d129
Platform            darwin amd64
Distribution        CCL
Environment         v2.1.6
Command             server
Go Version          go1.10.7
# of CPUs           8
# of Goroutines     218
@cockroach-teamcity added the C-bug and O-sentry labels May 21, 2019
@tim-o (Contributor) commented May 21, 2019

Duplicate of #37682. This was me verifying that the same bug is present in 19.1 and 2.1.7.

@tim-o closed this as completed May 21, 2019
@tim-o added the X-duplicate label May 21, 2019