// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"bytes"
"context"
"fmt"
"math"
"net"
"net/url"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"text/tabwriter"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/cli/clierror"
"github.com/cockroachdb/cockroach/pkg/cli/clierrorplus"
"github.com/cockroachdb/cockroach/pkg/cli/cliflagcfg"
"github.com/cockroachdb/cockroach/pkg/cli/cliflags"
"github.com/cockroachdb/cockroach/pkg/cli/exit"
"github.com/cockroachdb/cockroach/pkg/docs"
"github.com/cockroachdb/cockroach/pkg/geo/geos"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/security/clientsecopts"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/status"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/storage/fs"
"github.com/cockroachdb/cockroach/pkg/util/cgroups"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/grpcutil"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/logcrash"
"github.com/cockroachdb/cockroach/pkg/util/log/severity"
"github.com/cockroachdb/cockroach/pkg/util/sdnotify"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/sysutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/errors/oserror"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/pebble/vfs"
"github.com/cockroachdb/redact"
"github.com/spf13/cobra"
)
// debugTSImportFile is the path to a file (containing data coming from
// `./cockroach debug tsdump --format=raw`) that will be ingested upon server
// start. This is an experimental feature and may break clusters it is invoked
// against. The data will not display properly in the UI unless the source
// cluster had one store per node, with the store ID and node ID lining up.
// Additionally, the local server's stores and nodes must match this pattern as
// well. The only expected use case for this env var is against local
// single-node throwaway clusters, and consequently this variable is only used
// for the start-single-node command.
//
// To be able to visualize the timeseries data properly, a mapping file must be
// provided as well. This maps StoreIDs to the owning NodeID, i.e. the file
// looks like this (if s1 is on n3 and s2 is on n4):
// 1: 3
// 2: 4
// [...]
//
// See #64329 for details.
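//
// A hypothetical invocation (paths and store settings invented purely for
// illustration) could look like:
//
//	COCKROACH_DEBUG_TS_IMPORT_FILE=/tmp/tsdump.raw \
//	COCKROACH_DEBUG_TS_IMPORT_MAPPING_FILE=/tmp/tsdump-mapping.yaml \
//	./cockroach start-single-node --insecure --store=path=/tmp/throwaway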
var debugTSImportFile = envutil.EnvOrDefaultString("COCKROACH_DEBUG_TS_IMPORT_FILE", "")
var debugTSImportMappingFile = envutil.EnvOrDefaultString("COCKROACH_DEBUG_TS_IMPORT_MAPPING_FILE", "")
// startCmd starts a node by initializing the stores and joining
// the cluster.
var startCmd = &cobra.Command{
Use: "start",
Short: "start a node in a multi-node cluster",
Long: `
Start a CockroachDB node, which will export data from one or more
storage devices, specified via --store flags.
Specify the --join flag to point to another node or nodes that are
part of the same cluster. The other nodes do not need to be started
yet, and if the addresses of the other nodes to be added are not yet
known, it is legal for the first node to join itself.
To initialize the cluster, use 'cockroach init'.
`,
Example: ` cockroach start --insecure --store=attrs=ssd,path=/mnt/ssd1 --join=host:port,[host:port]`,
Args: cobra.NoArgs,
RunE: clierrorplus.MaybeShoutError(clierrorplus.MaybeDecorateError(runStartJoin)),
}
// startSingleNodeCmd starts a node by initializing the stores.
var startSingleNodeCmd = &cobra.Command{
Use: "start-single-node",
Short: "start a single-node cluster",
Long: `
Start a CockroachDB node, which will export data from one or more
storage devices, specified via --store flags.
The cluster will also be automatically initialized with
replication disabled (replication factor = 1).
`,
Example: ` cockroach start-single-node --insecure --store=attrs=ssd,path=/mnt/ssd1`,
Args: cobra.NoArgs,
RunE: clierrorplus.MaybeShoutError(clierrorplus.MaybeDecorateError(runStartSingleNode)),
}
// StartCmds lists the commands that start KV nodes as a server.
// This includes 'start' and 'start-single-node' but excludes
// the MT SQL server (not KV node) and 'demo' (not a server).
var StartCmds = []*cobra.Command{startCmd, startSingleNodeCmd}
// serverCmds lists the commands that start servers.
var serverCmds = append(StartCmds, mtStartSQLCmd)
// customLoggingSetupCmds lists the commands that call setupLogging()
// after other types of configuration.
var customLoggingSetupCmds = append(
serverCmds, debugCheckLogConfigCmd, demoCmd, mtStartSQLProxyCmd, mtTestDirectorySvr, statementBundleRecreateCmd,
)
func initBlockProfile() {
// Enable the block profile for a sample of mutex and channel operations.
// Smaller values provide more accurate profiles but are more
// expensive. 0 and 1 are special: 0 disables the block profile and
// 1 captures 100% of block events. For other values, the profiler
// will sample one event per X nanoseconds spent blocking.
//
// The block profile can be viewed with `pprof http://HOST:PORT/debug/pprof/block`
//
// The utility of the block profile (aka blocking profile) has diminished
// with the advent of the mutex profile. We currently leave the block profile
// disabled by default as it has a non-zero performance impact.
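//
// As a hypothetical example (not a recommendation), a rate of 10000000
// would record on average one sample per 10ms (10^7 ns) spent blocking:
//
//	COCKROACH_BLOCK_PROFILE_RATE=10000000 ./cockroach start ...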
d := envutil.EnvOrDefaultInt64("COCKROACH_BLOCK_PROFILE_RATE", 0)
runtime.SetBlockProfileRate(int(d))
}
func initMutexProfile() {
// Enable the mutex profile for a fraction of mutex contention events.
// Smaller values provide more accurate profiles but are more expensive. 0
// and 1 are special: 0 disables the mutex profile and 1 captures 100% of
// mutex contention events. For other values, the profiler will sample on
// average 1/X events.
//
// The mutex profile can be viewed with `pprof http://HOST:PORT/debug/pprof/mutex`
d := envutil.EnvOrDefaultInt("COCKROACH_MUTEX_PROFILE_RATE",
1000 /* 1 sample per 1000 mutex contention events */)
runtime.SetMutexProfileFraction(d)
}
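// initTraceDir ensures that the directory used for trace dumps exists,
// creating it if necessary. Failure to create it is deliberately not
// fatal: a warning is logged and traces simply will not be dumped, so
// that a non-writable working directory does not prevent startup.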
func initTraceDir(ctx context.Context, dir string) {
if dir == "" {
return
}
if err := os.MkdirAll(dir, 0755); err != nil {
// This is possible when running with only in-memory stores;
// in that case the start-up code sets the output directory
// to the current directory (.). If running the process
// from a directory which is not writable, we won't
// be able to create a sub-directory here.
log.Warningf(ctx, "cannot create trace dir; traces will not be dumped: %+v", err)
return
}
}
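// The following values back the byte-size CLI flags (--cache,
// --max-sql-memory, --max-disk-temp-storage, --max-tsdb-memory); each
// accepts either an absolute size or a percentage of total capacity.
// diskTempStorageSizeValue starts without a resolver because its
// percentage base (memory vs. disk) is only known once the first
// store's location is determined in initTempStorageConfig below.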
var cacheSizeValue = newBytesOrPercentageValue(&serverCfg.CacheSize, memoryPercentResolver)
var sqlSizeValue = newBytesOrPercentageValue(&serverCfg.MemoryPoolSize, memoryPercentResolver)
var diskTempStorageSizeValue = newBytesOrPercentageValue(nil /* v */, nil /* percentResolver */)
var tsdbSizeValue = newBytesOrPercentageValue(&serverCfg.TimeSeriesServerConfig.QueryMemoryMax, memoryPercentResolver)
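// initExternalIODir resolves the directory used for external IO (e.g.
// nodelocal storage for BACKUP/IMPORT). When no explicit directory was
// given, it defaults to the "extern" subdirectory of the first on-disk
// store; for example, with --store=path=/mnt/ssd1 it resolves to
// /mnt/ssd1/extern. The special value "disabled" (or an in-memory-only
// configuration without an explicit setting) leaves it empty.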
func initExternalIODir(ctx context.Context, firstStore base.StoreSpec) (string, error) {
externalIODir := startCtx.externalIODir
if externalIODir == "" && !firstStore.InMemory {
externalIODir = filepath.Join(firstStore.Path, "extern")
}
if externalIODir == "" || externalIODir == "disabled" {
return "", nil
}
if !filepath.IsAbs(externalIODir) {
return "", errors.Errorf("%s path must be absolute", cliflags.ExternalIODir.Name)
}
return externalIODir, nil
}
func initTempStorageConfig(
ctx context.Context, st *cluster.Settings, stopper *stop.Stopper, stores base.StoreSpecList,
) (base.TempStorageConfig, error) {
// Initialize the target directory for temporary storage. If encryption at
// rest is enabled in any fashion, we'll want temp storage to be encrypted
// too. To achieve this, we use the first encrypted store as temp dir
// target, if any. If we can't find one, we use the first StoreSpec in the
// list.
//
// While we look, we also clean up any abandoned temporary directories. We
// don't know which store spec was used previously—and it may change if
// encryption gets enabled after the fact—so we check each store.
var specIdx = 0
for i, spec := range stores.Specs {
if spec.IsEncrypted() {
// TODO(jackson): One store's EncryptionOptions may say to encrypt
// with a real key, while another store's may say to use key=plain.
// This provides no guarantee that we'll pick the encrypted one.
specIdx = i
}
if spec.InMemory {
continue
}
recordPath := filepath.Join(spec.Path, server.TempDirsRecordFilename)
if err := fs.CleanupTempDirs(recordPath); err != nil {
return base.TempStorageConfig{}, errors.Wrap(err,
"could not cleanup temporary directories from record file")
}
}
useStore := stores.Specs[specIdx]
var recordPath string
if !useStore.InMemory {
recordPath = filepath.Join(useStore.Path, server.TempDirsRecordFilename)
}
// The temp store size can depend on the location of the first regular store
// (if it's expressed as a percentage), so we resolve that flag here.
var tempStorePercentageResolver percentResolverFunc
if !useStore.InMemory {
dir := useStore.Path
// Create the store dir, if it doesn't exist. The dir is required to exist
// by diskPercentResolverFactory.
if err := os.MkdirAll(dir, 0755); err != nil {
return base.TempStorageConfig{}, errors.Wrapf(err, "failed to create dir for first store: %s", dir)
}
var err error
tempStorePercentageResolver, err = diskPercentResolverFactory(dir)
if err != nil {
return base.TempStorageConfig{}, errors.Wrapf(err, "failed to create resolver for: %s", dir)
}
} else {
tempStorePercentageResolver = memoryPercentResolver
}
var tempStorageMaxSizeBytes int64
if err := diskTempStorageSizeValue.Resolve(
&tempStorageMaxSizeBytes, tempStorePercentageResolver,
); err != nil {
return base.TempStorageConfig{}, err
}
if !diskTempStorageSizeValue.IsSet() {
// The default temp storage size is different when the temp
// storage is in memory (which occurs when no temp directory
// is specified and the first store is in memory).
if startCtx.tempDir == "" && useStore.InMemory {
tempStorageMaxSizeBytes = base.DefaultInMemTempStorageMaxSizeBytes
} else {
tempStorageMaxSizeBytes = base.DefaultTempStorageMaxSizeBytes
}
}
// Initialize a base.TempStorageConfig based on first store's spec and
// cli flags.
tempStorageConfig := base.TempStorageConfigFromEnv(
ctx,
st,
useStore,
startCtx.tempDir,
tempStorageMaxSizeBytes,
)
// Set temp directory to first store's path if the temp storage is not
// in memory.
tempDir := startCtx.tempDir
if tempDir == "" && !tempStorageConfig.InMemory {
tempDir = useStore.Path
}
// Create the temporary subdirectory for the temp engine.
{
var err error
if tempStorageConfig.Path, err = fs.CreateTempDir(tempDir, server.TempDirPrefix, stopper); err != nil {
return base.TempStorageConfig{}, errors.Wrap(err, "could not create temporary directory for temp storage")
}
}
// We record the new temporary directory in the record file (if it
// exists) for cleanup in case the node crashes.
if recordPath != "" {
if err := fs.RecordTempDir(recordPath, tempStorageConfig.Path); err != nil {
return base.TempStorageConfig{}, errors.Wrapf(
err,
"could not record temporary directory path to record file: %s",
recordPath,
)
}
}
return tempStorageConfig, nil
}
type newServerFn func(ctx context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverStartupInterface, error)
type serverStartupInterface interface {
serverShutdownInterface
// ClusterSettings retrieves this server's settings.
ClusterSettings() *cluster.Settings
// LogicalClusterID retrieves this server's logical cluster ID.
LogicalClusterID() uuid.UUID
// PreStart starts the server on the specified port(s) and
// initializes subsystems.
// It does not activate the pgwire listener over the network / unix
// socket, which is done by the AcceptClients() method. The separation
// between the two exists so that SQL initialization can take place
// before the first client is accepted.
PreStart(ctx context.Context) error
// StartDiagnostics starts periodic diagnostics reporting and update checking.
// NOTE: This is not called in PreStart so that it's disabled by default for
// testing.
StartDiagnostics(ctx context.Context)
// AcceptClients starts listening for incoming SQL clients over the network.
AcceptClients(ctx context.Context) error
// InitialStart returns whether this node is starting for the first time.
// This is (currently) used when displaying the server status report
// on the terminal & in logs. We know that some folks have automation
// that depends on certain strings displayed from this when orchestrating
// KV-only nodes.
InitialStart() bool
}
var errCannotUseJoin = errors.New("cannot use --join with 'cockroach start-single-node' -- use 'cockroach start' instead")
func runStartSingleNode(cmd *cobra.Command, args []string) error {
joinFlag := cliflagcfg.FlagSetForCmd(cmd).Lookup(cliflags.Join.Name)
if joinFlag.Changed {
return errCannotUseJoin
}
// Now actually set the flag as changed so that the start code
// doesn't warn that it was not set. This is all to let `start-single-node`
// get by without the use of --join flags.
joinFlag.Changed = true
// Make the node auto-init the cluster if not done already.
serverCfg.AutoInitializeCluster = true
// Allow passing in a timeseries file.
if debugTSImportFile != "" {
serverCfg.TestingKnobs.Server = &server.TestingKnobs{
ImportTimeseriesFile: debugTSImportFile,
ImportTimeseriesMappingFile: debugTSImportMappingFile,
}
}
return runStart(cmd, args, true /*startSingleNode*/)
}
func runStartJoin(cmd *cobra.Command, args []string) error {
return runStart(cmd, args, false /*startSingleNode*/)
}
// runStart starts the cockroach node using --store as the list of
// storage devices ("stores") on this machine and --join as the list
// of other active nodes used to join this node to the cockroach
// cluster, if this is its first time connecting.
//
// If the argument startSingleNode is set, the replication factor
// will be set to 1 in all zone configs (see initial_sql.go).
func runStart(cmd *cobra.Command, args []string, startSingleNode bool) (returnErr error) {
tBegin := timeutil.Now()
// First things first: if the user wants background processing,
// relinquish the terminal ASAP by forking and exiting.
//
// If executing in the background, the function returns ok == true in
// the parent process (regardless of err) and the parent exits at
// this point.
if ok, err := maybeRerunBackground(); ok {
return err
}
// Change the permission mask for all created files.
//
// We're considering everything produced by a cockroach node
// to potentially contain sensitive information, so it should
// not be world-readable.
disableOtherPermissionBits()
// Set up the signal handlers. This also ensures that any of these
// signals received beyond this point do not interrupt the startup
// sequence until the point signals are checked below.
// We want to set up signal handling before starting logging, because
// logging uses buffering, and we want to be able to sync
// the buffers in the signal handler below. If we started capturing
// signals later, some startup logging might be lost.
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, drainSignals...)
// Check for stores with full disks and exit with an informative exit
// code. This needs to happen early during start, before we perform any
// writes to the filesystem including log rotation. We need to guarantee
// that the process continues to exit with the Disk Full exit code. A
// flapping exit code can affect alerting, including the alerting
// performed within CockroachCloud.
if err := exitIfDiskFull(vfs.Default, serverCfg.Stores.Specs); err != nil {
return err
}
// If any store has something to say against a server start-up
// (e.g. previously detected corruption), listen to them now.
if err := serverCfg.Stores.PriorCriticalAlertError(); err != nil {
return clierror.NewError(err, exit.FatalError())
}
// Set up a cancellable context for the entire start command.
// The context will be canceled at the end.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// The context annotation ensures that server identifiers show up
// in the logging metadata as soon as they are known.
ambientCtx := serverCfg.AmbientCtx
// Annotate the context, and set up a tracing span for the start process.
//
// The context annotation ensures that server identifiers show up
// in the logging metadata as soon as they are known.
//
// The tracing span is because we want any logging happening beyond
// this point to be accounted to this start context, including
// logging related to the initialization of the logging
// infrastructure below. This span concludes when the startup
// goroutine started below has completed. TODO(andrei): we don't
// close the span on the early returns below.
var startupSpan *tracing.Span
ctx, startupSpan = ambientCtx.AnnotateCtxWithSpan(ctx, "server start")
// Set up the logging and profiling output.
//
// We want to do this as early as possible, because most of the code
// in CockroachDB may use logging, and until logging has been
// initialized log files will be created in $TMPDIR instead of their
// expected location.
//
// This initialization uses the various configuration parameters
// initialized by flag handling (before runStart was called). Any
// additional server configuration tweaks for the startup process
// must be necessarily non-logging-related, as logging parameters
// cannot be picked up beyond this point.
stopper, err := setupAndInitializeLoggingAndProfiling(ctx, cmd, true /* isServerCmd */)
if err != nil {
return err
}
stopper.SetTracer(serverCfg.BaseConfig.AmbientCtx.Tracer)
// We don't care about gRPC's fairly verbose logs in most client commands,
// but when actually starting a server, we enable them.
grpcutil.LowerSeverity(severity.WARNING)
// Tweak GOMAXPROCS if we're in a cgroup / container that has cpu limits set.
// The Go default for GOMAXPROCS is NumCPU(); however, this is less
// than ideal if the cgroup is limited to a number lower than that.
//
// TODO(bilal): various global settings have already been initialized based on
// GOMAXPROCS(0) by now.
cgroups.AdjustMaxProcs(ctx)
// Check the --join flag.
if !cliflagcfg.FlagSetForCmd(cmd).Lookup(cliflags.Join.Name).Changed {
err := errors.WithHint(
errors.New("no --join flags provided to 'cockroach start'"),
"Consider using 'cockroach init' or 'cockroach start-single-node' instead")
return err
}
// Now perform additional configuration tweaks specific to the start
// command.
st := serverCfg.BaseConfig.Settings
// Derive temporary/auxiliary directory specifications.
if st.ExternalIODir, err = initExternalIODir(ctx, serverCfg.Stores.Specs[0]); err != nil {
return err
}
if serverCfg.SQLConfig.TempStorageConfig, err = initTempStorageConfig(
ctx, st, stopper, serverCfg.Stores,
); err != nil {
return err
}
// Configure the default storage engine.
// NB: we only support one engine type for now.
if serverCfg.StorageEngine == enginepb.EngineTypeDefault {
serverCfg.StorageEngine = enginepb.EngineTypePebble
}
// Initialize the node's configuration from startup parameters.
// This also reads the part of the configuration that comes from
// environment variables.
if err := serverCfg.InitNode(ctx); err != nil {
return errors.Wrap(err, "failed to initialize node")
}
// The configuration is now ready to report to the user and the log
// file. We had to wait after InitNode() so that all configuration
// environment variables, which are reported too, have been read and
// registered.
reportConfiguration(ctx)
// ReadyFn will be called when the server has started listening on
// its network sockets, but perhaps before it has done bootstrapping
// and thus before Start() completes.
serverCfg.ReadyFn = func(waitForInit bool) { reportReadinessExternally(ctx, cmd, waitForInit) }
// DelayedBootstrapFn will be called if the bootstrap process is
// taking a bit long.
serverCfg.DelayedBootstrapFn = func() {
const msg = `The server appears to be unable to contact the other nodes in the cluster. Please try:
- starting the other nodes, if you haven't already;
- double-checking that the '--join' and '--listen'/'--advertise' flags are set up correctly;
- running the 'cockroach init' command if you are trying to initialize a new cluster.
If problems persist, please see %s.`
docLink := docs.URL("cluster-setup-troubleshooting.html")
if !startCtx.inBackground {
log.Ops.Shoutf(ctx, severity.WARNING, msg, docLink)
} else {
// Don't shout to stderr since the server will have detached by
// the time this function gets called.
log.Ops.Warningf(ctx, msg, docLink)
}
}
initGEOS(ctx)
const serverType redact.SafeString = "node"
// Beyond this point, the configuration is set and the server is
// ready to start.
// Run the rest of the startup process in a goroutine separate from
// the main goroutine to avoid preventing proper handling of signals
// if we get stuck on something during initialization (#10138).
newServerFn := func(_ context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverStartupInterface, error) {
return server.NewServer(serverCfg, stopper)
}
maybeRunInitialSQL := func(ctx context.Context, s serverStartupInterface) error {
// Run SQL for new clusters.
//
// TODO(knz): If/when we want auto-creation of an initial admin user,
// this can be achieved here.
return runInitialSQL(ctx, s.(*server.Server), startSingleNode, "" /* adminUser */, "" /* adminPassword */)
}
getS, srvStatus, serverStartupErrC := createAndStartServerAsync(ctx,
tBegin, &serverCfg, stopper, startupSpan, newServerFn, maybeRunInitialSQL, serverType)
return waitForShutdown(
// NB: we delay the access to s, as it is assigned
// asynchronously in a goroutine above.
getS,
stopper, serverStartupErrC, signalCh,
srvStatus)
}
// createAndStartServerAsync starts an async goroutine which instantiates
// the server and starts it.
// We run it in a separate goroutine because the instantiation and start
// could block, and we want to retain the option to start shutting down
// the process (e.g. via Ctrl+C on the terminal) even in that case.
// The shutdown logic thus starts running asynchronously, via waitForShutdown,
// concurrently with createAndStartServerAsync.
//
// The arguments are as follows:
// - tBegin: time when startup began; used to report statistics at the end of startup.
// - serverCfg: the server configuration.
// - stopper: the stopper used to start all the async tasks. This is the stopper
// used by the shutdown logic.
// - startupSpan: the tracing span for the context that was started earlier
// during startup. It needs to be finalized when the async goroutine completes.
// - newServerFn: a constructor function for the server object.
// - maybeRunInitialSQL: a callback that will be called after the server has
// initialized, but before it starts accepting clients.
// - serverType: a title used for the type of server. This is used
// when reporting the startup messages on the terminal & logs.
func createAndStartServerAsync(
ctx context.Context,
tBegin time.Time,
serverCfg *server.Config,
stopper *stop.Stopper,
startupSpan *tracing.Span,
newServerFn newServerFn,
maybeRunInitialSQL func(context.Context, serverStartupInterface) error,
serverType redact.SafeString,
) (getS func() serverShutdownInterface, srvStatus *serverStatus, serverStartupErrC <-chan error) {
var serverStatusMu serverStatus
var s serverStartupInterface
startupErrC := make(chan error, 1)
log.Ops.Infof(ctx, "starting cockroach %s", serverType)
go func() {
// Ensure that the log files see the startup messages immediately.
defer log.Flush()
// If anything goes dramatically wrong, use Go's panic/recover
// mechanism to intercept the panic and log the panic details to
// the error reporting server.
defer func() {
if s != nil {
// We only attempt to log the panic details if the server has
// actually been started successfully. If there's no server,
// we won't know enough to decide whether reporting is
// permitted.
logcrash.RecoverAndReportPanic(ctx, &s.ClusterSettings().SV)
}
}()
// When the start up goroutine completes, so can the start up span
// defined above.
defer startupSpan.Finish()
// Any error beyond this point should be reported through the
// serverStartupErrC defined above. However, in Go the code pattern "if err
// != nil { return err }" is more common. Expecting contributors
// to remember to write "if err != nil { serverStartupErrC <- err }" beyond
// this point is optimistic. To avoid any mistake, we capture all
// the error returns in a closure, and do the serverStartupErrC reporting,
// if needed, when that function returns.
if err := func() error {
// Instantiate the server.
var err error
s, err = newServerFn(ctx, *serverCfg, stopper)
if err != nil {
return errors.Wrap(err, "failed to start server")
}
// Have we already received a signal to terminate? If so, just
// stop here.
if serverStatusMu.shutdownInProgress() {
return nil
}
// Attempt to start the server.
if err := s.PreStart(ctx); err != nil {
if le := (*server.ListenError)(nil); errors.As(err, &le) {
const errorPrefix = "consider changing the port via --%s"
if le.Addr == serverCfg.Addr {
err = errors.Wrapf(err, errorPrefix, cliflags.ListenAddr.Name)
} else if le.Addr == serverCfg.HTTPAddr {
err = errors.Wrapf(err, errorPrefix, cliflags.ListenHTTPAddr.Name)
}
}
return errors.Wrap(err, "cockroach server exited with error")
}
// Server started, notify the shutdown monitor running concurrently.
if shutdownInProgress := serverStatusMu.setStarted(); shutdownInProgress {
// A shutdown was requested already, e.g. by sending SIGTERM to the process:
// waitForShutdown (which runs concurrently with this goroutine) has
// called serverStatusMu.startShutdown() already.
// However, because setStarted() had not been called before,
// waitForShutdown did not call Stop on the stopper.
// So we do it here.
stopper.Stop(ctx)
return nil
}
// After this point, if a shutdown is requested concurrently
// with the startup steps below, the stopper.Stop() method will
// be called by the shutdown goroutine, which in turn will cause
// all these startup steps to fail. So we do not need to look at
// the "shutdown status" in serverStatusMu any more.
// Start up the diagnostics reporting and update check loops.
// We don't do this in (*server.Server).Start() because we don't
// want this overhead and possible interference in tests.
if !cluster.TelemetryOptOut() {
s.StartDiagnostics(ctx)
}
initialStart := s.InitialStart()
if maybeRunInitialSQL != nil {
if err := maybeRunInitialSQL(ctx, s); err != nil {
return err
}
}
// Now let SQL clients in.
if err := s.AcceptClients(ctx); err != nil {
return err
}
// Now inform the user that the server is running and tell the
// user about its run-time derived parameters.
return reportServerInfo(ctx, tBegin, serverCfg, s.ClusterSettings(),
serverType, initialStart, s.LogicalClusterID())
}(); err != nil {
startupErrC <- err
}
}()
getS = func() serverShutdownInterface { return s }
serverStartupErrC = startupErrC
srvStatus = &serverStatusMu
return getS, srvStatus, serverStartupErrC
}
// serverStatus coordinates the async goroutine that starts the server
// up (e.g. in runStart) and the async goroutine that stops the server
// (in waitForShutdown).
//
// We need this intermediate coordination because it isn't safe to try
// to drain a server that doesn't exist or is in the middle of
// starting up, or to start a server after shutdown has begun.
//
// TODO(knz): clarify the transfer of ownership for the stopper.Stop
// call further, as per suggestion in https://github.com/cockroachdb/cockroach/issues/90233.
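//
// A sketch of the two possible interleavings:
//   - startup wins: setStarted() returns false; a later startShutdown()
//     returns true, and the shutdown goroutine proceeds with a graceful
//     drain of the started server.
//   - shutdown wins: startShutdown() returns false, so the shutdown
//     goroutine skips the drain (closing stopWithoutDrain); when the
//     startup goroutine later observes setStarted() == true, it calls
//     stopper.Stop itself.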
type serverStatus struct {
syncutil.Mutex
// started indicates that the server has started already. After
// started has become true, a graceful shutdown should use a soft
// drain.
started bool
// shutdownRequested indicates that shutdown has started
// already. After shutdownRequested has become true, server startup should
// stop.
shutdownRequested bool
}
// setStarted marks the server as started. It returns whether shutdown
// has been requested already. In that case, we know that the shutdown
// goroutine did not use the stopper, so the server startup can do
// that.
func (s *serverStatus) setStarted() bool {
s.Lock()
defer s.Unlock()
s.started = true
return s.shutdownRequested
}
// shutdownInProgress returns whether a shutdown has been requested
// already.
func (s *serverStatus) shutdownInProgress() bool {
s.Lock()
defer s.Unlock()
return s.shutdownRequested
}
// startShutdown registers the shutdown request and returns whether
// the server was started already.
func (s *serverStatus) startShutdown() bool {
s.Lock()
defer s.Unlock()
s.shutdownRequested = true
return s.started
}
// serverShutdownInterface is the subset of the APIs on a server
// object that's sufficient to run a server shutdown.
type serverShutdownInterface interface {
AnnotateCtx(context.Context) context.Context
Drain(ctx context.Context, verbose bool) (uint64, redact.RedactableString, error)
}
// waitForShutdown lets the server run asynchronously and waits for
// shutdown, either due to the server spontaneously shutting down
// (signaled by stopper), or due to a server error (signaled on
// serverStartupErrC), by receiving a signal (signaled by signalCh).
func waitForShutdown(
getS func() serverShutdownInterface,
stopper *stop.Stopper,
serverStartupErrC <-chan error,
signalCh <-chan os.Signal,
serverStatusMu *serverStatus,
) (returnErr error) {
// The remainder of the main function executes concurrently with the
// start up goroutine started above.
//
// It is concerned with determining when the server should stop
// because the main process is being shut down, e.g. via an RPC call
// or a signal.
// We'll want to log any shutdown activity against a separate span.
// We cannot use s.AnnotateCtx here because the server might not have
// been assigned yet (the goroutine above runs asynchronously).
shutdownCtx, shutdownSpan := serverCfg.AmbientCtx.AnnotateCtxWithSpan(context.Background(), "server shutdown")
defer shutdownSpan.Finish()
stopWithoutDrain := make(chan struct{}) // closed if interrupted very early
// Block until one of the signals above is received or the stopper
// is stopped externally (for example, via the quit endpoint).
select {
case err := <-serverStartupErrC:
// An error on serverStartupErrC signals that the early server startup failed.
returnErr = err
// At this point, we do not expect any application load, etc., and
// therefore we are OK with an expedited shutdown: pass false to
// shouldDrain.
startShutdownAsync(getS, stopper, serverStatusMu, stopWithoutDrain, false /* shouldDrain */)
// We do not return here, on purpose, because we want the common
// shutdown logic below to apply for this case as well.
case <-stopper.ShouldQuiesce():
// Receiving a signal on ShouldQuiesce means that a shutdown was
// requested via the Drain RPC. The RPC code takes ownership
// of calling stopper.Stop.
//
// We fall through to the common logic below so that an operator
// looking at a server running in the foreground on their terminal
// can see what is going on.
// StartAlwaysFlush both flushes and ensures that subsequent log
// writes are flushed too.
log.StartAlwaysFlush()
// We do not return here, on purpose, because we want the common
// shutdown logic below to apply for this case as well.
case sig := <-signalCh:
// We start flushing log writes from here, because if a
// signal was received there is a non-zero chance the sender of
// this signal will follow up with SIGKILL if the shutdown is not
// timely, and we don't want logs to be lost.
log.StartAlwaysFlush()
log.Ops.Infof(shutdownCtx, "received signal '%s'", sig)
switch sig {
case os.Interrupt:
// Graceful shutdown after an interrupt should cause the process
// to terminate with a non-zero exit code; however SIGTERM is
// "legitimate" and should be acknowledged with a success exit
// code. So we keep the error state here for later.
returnErr = clierror.NewErrorWithSeverity(
errors.New("interrupted"),
exit.Interrupted(),
// INFO because a single interrupt is rather innocuous.
severity.INFO,
)
if !startCtx.inBackground {
msgDouble := "Note: a second interrupt will skip graceful shutdown and terminate forcefully"
fmt.Fprintln(os.Stdout, msgDouble)
}
}
startShutdownAsync(getS, stopper, serverStatusMu, stopWithoutDrain, true /* shouldDrain */)
// Don't return: we're shutting down gracefully.
case <-log.FatalChan():
// A fatal error has occurred. Stop everything (gracelessly) to
// avoid serving incorrect data while the final log messages are
// being written.
// https://github.com/cockroachdb/cockroach/issues/23414
// TODO(bdarnell): This could be more graceless, for example by
// reaching into the server objects and closing all the
// connections while they're in use. That would be more in line
// with the expected effect of a log.Fatal.
stopper.Stop(shutdownCtx)
// The logging goroutine is now responsible for killing this
// process, so just block this goroutine.
select {}
}
// At this point, a signal has been received to shut down the
// process, and a goroutine is busy telling the server to drain and
// stop. From this point on, we just have to wait until the server
// indicates it has stopped.
const msgDrain = "initiating graceful shutdown of server"
log.Ops.Info(shutdownCtx, msgDrain)
if !startCtx.inBackground {
fmt.Fprintln(os.Stdout, msgDrain)
}
// Notify the user every 5 seconds of the shutdown progress.
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Ops.Infof(shutdownCtx, "%d running tasks", stopper.NumTasks())
case <-stopper.IsStopped():
return
case <-stopWithoutDrain:
return
}
}
}()
// Meanwhile, we don't want to wait too long either, in case the
// server is getting stuck and doesn't shut down in a timely manner.
//
// So we also pay attention to any additional signal received beyond
// this point (maybe some service monitor was impatient and sends
// another signal to hasten the shutdown process).
//
// If any such trigger to hasten occurs, we simply return, which
// will cause the process to exit and the server goroutines to be
// forcefully terminated.
const hardShutdownHint = " - node may take longer to restart & clients may need to wait for leases to expire"
for {
select {
case sig := <-signalCh:
switch sig {
case termSignal:
// Double SIGTERM, or SIGTERM after another signal: continue
// the graceful shutdown.
log.Ops.Infof(shutdownCtx, "received additional signal '%s'; continuing graceful shutdown", sig)
continue
}
// This new signal is not welcome, as it interferes with the graceful
// shutdown process.
log.Ops.Shoutf(shutdownCtx, severity.ERROR,
"received signal '%s' during shutdown, initiating hard shutdown%s",
redact.Safe(sig), redact.Safe(hardShutdownHint))
handleSignalDuringShutdown(sig)
panic("unreachable")
case <-stopper.IsStopped():
const msgDone = "server drained and shutdown completed"
log.Ops.Infof(shutdownCtx, msgDone)
if !startCtx.inBackground {
fmt.Fprintln(os.Stdout, msgDone)
}
case <-stopWithoutDrain:
const msgDone = "too early to drain; used hard shutdown instead"
log.Ops.Infof(shutdownCtx, msgDone)
if !startCtx.inBackground {
fmt.Fprintln(os.Stdout, msgDone)
}
}
break
}
return returnErr
}
// startShutdownAsync begins the process that stops the server, asynchronously.
//
// The shouldDrain argument indicates that the shutdown is happening
// some time after server startup has completed, and we are thus
// interested in being graceful to application load.
func startShutdownAsync(
getS func() serverShutdownInterface,
stopper *stop.Stopper,
serverStatusMu *serverStatus,
stopWithoutDrain chan struct{},
shouldDrain bool,
) {
// StartAlwaysFlush both flushes and ensures that subsequent log
// writes are flushed too.
log.StartAlwaysFlush()
// Start the draining process in a separate goroutine so that it
// runs concurrently with the timeout check in waitForShutdown().
go func() {
// The return value of startShutdown indicates whether the
// server has started already, and the graceful shutdown should
// call the Drain method. We cannot call Drain if the server has
// not started yet.
canUseDrain := serverStatusMu.startShutdown()
if !canUseDrain {
// The server has not started yet. We can't use the Drain() call.
close(stopWithoutDrain)
return
}
// Don't use ctx because this is in a goroutine that may
// still be running after shutdownCtx's span has been finished.
drainCtx := logtags.AddTag(getS().AnnotateCtx(context.Background()), "server drain process", nil)
if shouldDrain {
// Perform a graceful drain. We keep retrying forever, in
// case there are many range leases or some unavailability
// preventing progress. If the operator wants to expedite
// the shutdown, they will need to make it ungraceful
// via a 2nd signal.
var (