/*
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spanner
import (
"container/heap"
"container/list"
"context"
"fmt"
"log"
"math"
"math/rand"
"os"
"runtime/debug"
"strings"
"sync"
"time"
"cloud.google.com/go/internal/trace"
sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
"cloud.google.com/go/spanner/internal"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
octrace "go.opencensus.io/trace"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
const (
healthCheckIntervalMins = 50
multiplexSessionRefreshInterval = 7 * 24 * time.Hour
)
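// With the defaults above, each pooled session is health-checked roughly once
// every 50 minutes, and the multiplexed session is considered for refresh
// after about 7 days.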
// ActionOnInactiveTransactionKind describes the kind of action taken when there are inactive transactions.
type ActionOnInactiveTransactionKind int
const (
actionUnspecified ActionOnInactiveTransactionKind = iota
// NoAction action does not perform any action on inactive transactions.
NoAction
// Warn action logs inactive transactions. Any inactive transaction gets logged only once.
Warn
// Close action closes inactive transactions without logging.
Close
// WarnAndClose action logs and closes the inactive transactions.
WarnAndClose
)
// InactiveTransactionRemovalOptions configures the action to be taken on long-running (inactive) transactions.
type InactiveTransactionRemovalOptions struct {
// ActionOnInactiveTransaction is the action to take on inactive transactions.
// It can be one of Warn, Close, or WarnAndClose.
ActionOnInactiveTransaction ActionOnInactiveTransactionKind
// Long-running transactions will be cleaned up if the session pool
// utilisation is greater than the value below.
usedSessionsRatioThreshold float64
// A transaction is considered to be idle if it has not been used for
// a duration greater than the below value.
idleTimeThreshold time.Duration
// frequency for closing inactive transactions
executionFrequency time.Duration
// lastExecutionTime keeps track of the last time inactive transactions
// were removed by the maintainer task.
lastExecutionTime time.Time
}
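// As an illustrative sketch (not the full client wiring), a caller that wants
// leaked transactions to be both logged and closed could configure the pool as:
//
//	cfg := SessionPoolConfig{
//		InactiveTransactionRemovalOptions: InactiveTransactionRemovalOptions{
//			ActionOnInactiveTransaction: WarnAndClose,
//		},
//	}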
// sessionHandle provides safe access to a Cloud Spanner session for
// transactions. It is generated by sessionPool.take().
type sessionHandle struct {
// mu guarantees that the inner session object is returned / destroyed only
// once.
mu sync.Mutex
// session is a pointer to a session object. Transactions never need to
// access it directly.
session *session
// client is the RPC channel to Cloud Spanner. It is set only once during session acquisition.
client spannerClient
// checkoutTime is the time the session was checked out of the pool.
checkoutTime time.Time
// lastUseTime is the time the session was last used after being checked out of the pool.
lastUseTime time.Time
// trackedSessionHandle is the linked list node which links the session to
// the list of tracked session handles. trackedSessionHandle is only set if
// TrackSessionHandles has been enabled in the session pool configuration.
trackedSessionHandle *list.Element
// stack is the call stack of the goroutine that checked out the session
// from the pool. This can be used to track down session leak problems.
stack []byte
// eligibleForLongRunning indicates whether the inner session is eligible to be long-running.
eligibleForLongRunning bool
// isSessionLeakLogged ensures that the stack of a long-running inner session is logged only once.
isSessionLeakLogged bool
}
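// A sessionHandle typically flows through the pool as follows (sketch; error
// handling omitted):
//
//	sh, err := pool.take(ctx) // check a session out of the pool
//	client := sh.getClient()  // issue RPCs on the checked-out session
//	sh.recycle()              // return it, or sh.destroy() on fatal errors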
// recycle gives the inner session object back to its home session pool. It is
// safe to call recycle multiple times; only the first call takes effect.
func (sh *sessionHandle) recycle() {
sh.mu.Lock()
if sh.session == nil {
// sessionHandle has already been recycled.
sh.mu.Unlock()
return
}
p := sh.session.pool
tracked := sh.trackedSessionHandle
s := sh.session
sh.session = nil
sh.client = nil
sh.trackedSessionHandle = nil
sh.checkoutTime = time.Time{}
sh.lastUseTime = time.Time{}
sh.stack = nil
sh.mu.Unlock()
s.recycle()
if tracked != nil {
p.mu.Lock()
p.trackedSessionHandles.Remove(tracked)
p.mu.Unlock()
}
}
// getID gets the Cloud Spanner session ID from the internal session object.
// getID returns an empty string if the sessionHandle is nil or the inner
// session object has been released by recycle / destroy.
func (sh *sessionHandle) getID() string {
sh.mu.Lock()
defer sh.mu.Unlock()
if sh.session == nil {
// sessionHandle has already been recycled/destroyed.
return ""
}
return sh.session.getID()
}
// getClient gets the Cloud Spanner RPC client associated with the session ID
// in sessionHandle.
func (sh *sessionHandle) getClient() spannerClient {
sh.mu.Lock()
defer sh.mu.Unlock()
if sh.session == nil {
return nil
}
if sh.client != nil {
// Use the gRPC connection from the session handle
return sh.client
}
return sh.session.client
}
// getMetadata returns the metadata associated with the session in sessionHandle.
func (sh *sessionHandle) getMetadata() metadata.MD {
sh.mu.Lock()
defer sh.mu.Unlock()
if sh.session == nil {
return nil
}
return sh.session.md
}
// getTransactionID returns the transaction id in the session if available.
func (sh *sessionHandle) getTransactionID() transactionID {
sh.mu.Lock()
defer sh.mu.Unlock()
if sh.session == nil {
return nil
}
return sh.session.tx
}
// destroy destroys the inner session object. It is safe to call destroy
// multiple times; only the first call attempts to destroy the inner
// session object.
func (sh *sessionHandle) destroy() {
sh.mu.Lock()
s := sh.session
if s == nil {
// sessionHandle has already been recycled.
sh.mu.Unlock()
return
}
tracked := sh.trackedSessionHandle
sh.session = nil
sh.client = nil
sh.trackedSessionHandle = nil
sh.checkoutTime = time.Time{}
sh.lastUseTime = time.Time{}
sh.stack = nil
sh.mu.Unlock()
if tracked != nil {
p := s.pool
p.mu.Lock()
p.trackedSessionHandles.Remove(tracked)
p.mu.Unlock()
}
// Since a sessionHandle is always used by transactions, we can safely destroy the session with wasInUse=true.
s.destroy(false, true)
}
func (sh *sessionHandle) updateLastUseTime() {
sh.mu.Lock()
defer sh.mu.Unlock()
if sh.session != nil {
sh.lastUseTime = time.Now()
}
}
// session wraps a Cloud Spanner session ID through which transactions are
// created and executed.
type session struct {
// client is the RPC channel to Cloud Spanner. It is set only once during
// session's creation.
client spannerClient
// id is the unique id of the session in Cloud Spanner. It is set only once
// during session's creation.
id string
// pool is the session's home session pool where it was created. It is set
// only once during session's creation.
pool *sessionPool
// createTime is the timestamp of the session's creation. It is set only
// once during session's creation.
createTime time.Time
// logger is the logger configured for the Spanner client that created the
// session. If nil, logging will be directed to the standard logger.
logger *log.Logger
// mu protects the following fields from concurrent access: both
// healthcheck workers and transactions can modify them.
mu sync.Mutex
// valid marks the validity of a session.
valid bool
// hcIndex is the index of the session inside the global healthcheck queue.
// If hcIndex < 0, session has been unregistered from the queue.
hcIndex int
// idleList is the linked-list node that links the session to its home
// session pool's idle list. If idleList == nil, the session is not in the
// idle list.
idleList *list.Element
// nextCheck is the timestamp of next scheduled healthcheck of the session.
// It is maintained by the global health checker.
nextCheck time.Time
// checkingHealth is true if this session is currently being processed by
// the health checker. Must be modified under the health checker lock.
checkingHealth bool
// md is the Metadata to be sent with each request.
md metadata.MD
// tx contains the transaction id if the session has been prepared for
// write.
tx transactionID
// firstHCDone indicates whether the first health check is done or not.
firstHCDone bool
// isMultiplexed is true if the session is multiplexed.
isMultiplexed bool
}
// isValid returns true if the session is still valid for use.
func (s *session) isValid() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.valid
}
// isWritePrepared returns true if the session is prepared for write.
func (s *session) isWritePrepared() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.tx != nil
}
// String implements fmt.Stringer for session.
func (s *session) String() string {
s.mu.Lock()
defer s.mu.Unlock()
return fmt.Sprintf("<id=%v, hcIdx=%v, idleList=%p, valid=%v, create=%v, nextcheck=%v>",
s.id, s.hcIndex, s.idleList, s.valid, s.createTime, s.nextCheck)
}
// ping verifies if the session is still alive in Cloud Spanner.
func (s *session) ping() error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// Start parent span that doesn't record.
_, span := octrace.StartSpan(ctx, "cloud.google.com/go/spanner.ping", octrace.WithSampler(octrace.NeverSample()))
defer span.End()
// s.getID is safe even when s is invalid.
_, err := s.client.ExecuteSql(contextWithOutgoingMetadata(ctx, s.md, true), &sppb.ExecuteSqlRequest{
Session: s.getID(),
Sql: "SELECT 1",
})
return err
}
// setHcIndex atomically sets the session's index in the healthcheck queue and
// returns the old index.
func (s *session) setHcIndex(i int) int {
s.mu.Lock()
defer s.mu.Unlock()
oi := s.hcIndex
s.hcIndex = i
return oi
}
// setIdleList atomically sets the session's idle list link and returns the old
// link.
func (s *session) setIdleList(le *list.Element) *list.Element {
s.mu.Lock()
defer s.mu.Unlock()
old := s.idleList
s.idleList = le
return old
}
// invalidate marks a session as invalid and returns the old validity.
func (s *session) invalidate() bool {
s.mu.Lock()
defer s.mu.Unlock()
ov := s.valid
s.valid = false
return ov
}
// setNextCheck sets the timestamp for next healthcheck on the session.
func (s *session) setNextCheck(t time.Time) {
s.mu.Lock()
defer s.mu.Unlock()
s.nextCheck = t
}
// setTransactionID sets the transaction id in the session
func (s *session) setTransactionID(tx transactionID) {
s.mu.Lock()
defer s.mu.Unlock()
s.tx = tx
}
// getID returns the session ID which uniquely identifies the session in Cloud
// Spanner.
func (s *session) getID() string {
s.mu.Lock()
defer s.mu.Unlock()
return s.id
}
// getHcIndex returns the session's index into the global healthcheck priority
// queue.
func (s *session) getHcIndex() int {
s.mu.Lock()
defer s.mu.Unlock()
return s.hcIndex
}
// getIdleList returns the session's link in its home session pool's idle list.
func (s *session) getIdleList() *list.Element {
s.mu.Lock()
defer s.mu.Unlock()
return s.idleList
}
// getNextCheck returns the timestamp for next healthcheck on the session.
func (s *session) getNextCheck() time.Time {
s.mu.Lock()
defer s.mu.Unlock()
return s.nextCheck
}
// recycle returns the session to its home session pool.
func (s *session) recycle() {
s.setTransactionID(nil)
s.pool.mu.Lock()
if s.isMultiplexed {
s.pool.decNumMultiplexedInUseLocked(context.Background())
s.pool.mu.Unlock()
return
}
if !s.pool.recycleLocked(s) {
// s is rejected by its home session pool because it expired and the
// session pool currently has enough open sessions.
s.pool.mu.Unlock()
s.destroy(false, true)
s.pool.mu.Lock()
}
s.pool.decNumInUseLocked(context.Background())
s.pool.mu.Unlock()
}
// destroy removes the session from its home session pool, healthcheck queue
// and Cloud Spanner service.
func (s *session) destroy(isExpire, wasInUse bool) bool {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
return s.destroyWithContext(ctx, isExpire, wasInUse)
}
func (s *session) destroyWithContext(ctx context.Context, isExpire, wasInUse bool) bool {
// Remove s from session pool.
if !s.pool.remove(s, isExpire, wasInUse) {
return false
}
// Unregister s from healthcheck queue.
s.pool.hc.unregister(s)
return true
}
func (s *session) delete(ctx context.Context) {
// Ignore the error because even if we fail to explicitly destroy the
// session, it will be eventually garbage collected by Cloud Spanner.
err := s.client.DeleteSession(contextWithOutgoingMetadata(ctx, s.md, true), &sppb.DeleteSessionRequest{Name: s.getID()})
// Do not log DeadlineExceeded errors when deleting sessions, as these do
// not indicate anything the user can or should act upon.
if err != nil && ErrCode(err) != codes.DeadlineExceeded {
logf(s.logger, "Failed to delete session %v. Error: %v", s.getID(), err)
}
}
// SessionPoolConfig stores configurations of a session pool.
type SessionPoolConfig struct {
// MaxOpened is the maximum number of opened sessions allowed by the session
// pool. If the client tries to open a session and there are already
// MaxOpened sessions, it will block until one becomes available or the
// context passed to the client method is canceled or times out.
//
// Defaults to NumChannels * 100.
MaxOpened uint64
// MinOpened is the minimum number of opened sessions that the session pool
// tries to maintain. The session pool won't continue to expire sessions if the
// number of opened connections drops below MinOpened. However, if a session
// is found to be broken, it will still be evicted from the session pool, so
// it is possible for the number of opened sessions to drop below MinOpened.
//
// Defaults to 100.
MinOpened uint64
// MaxIdle is the maximum number of idle sessions that are allowed in the
// session pool.
//
// Defaults to 0.
MaxIdle uint64
// MaxBurst is the maximum number of concurrent session creation requests.
//
// Deprecated: MaxBurst exists for historical compatibility and should not
// be used. MaxBurst was used to limit the number of sessions that the
// session pool could create within a time frame. This was an early safety
// valve to prevent a client from overwhelming the backend if a large number
// of sessions was suddenly needed. The session pool would then pause the
// creation of sessions for a while. Such a pause is no longer needed and
// the implementation has been removed from the pool.
//
// Defaults to 10.
MaxBurst uint64
// incStep is the number of sessions to create in one batch when at least
// one more session is needed.
//
// Defaults to 25.
incStep uint64
// WriteSessions is the fraction of sessions we try to keep prepared for
// write.
//
// Deprecated: The session pool no longer prepares a fraction of the sessions with a read/write transaction.
// This setting therefore does not have any meaning anymore, and may be removed in the future.
//
// Defaults to 0.2.
WriteSessions float64
// HealthCheckWorkers is number of workers used by health checker for this
// pool.
//
// Defaults to 10.
HealthCheckWorkers int
// HealthCheckInterval is how often the health checker pings a session.
//
// Defaults to 50m.
HealthCheckInterval time.Duration
// MultiplexSessionCheckInterval is the interval at which the multiplexed
// session is checked to see whether it needs to be refreshed.
//
// Defaults to 10m.
MultiplexSessionCheckInterval time.Duration
// TrackSessionHandles determines whether the session pool will keep track
// of the stacktrace of the goroutines that take sessions from the pool.
// This setting can be used to track down session leak problems.
//
// Defaults to false.
TrackSessionHandles bool
// healthCheckSampleInterval is how often the health checker samples live
// sessions (for use in maintaining the session pool size).
//
// Defaults to 1m.
healthCheckSampleInterval time.Duration
// sessionLabels for the sessions created in the session pool.
sessionLabels map[string]string
InactiveTransactionRemovalOptions
}
// DefaultSessionPoolConfig is the default configuration for the session pool
// that will be used for a Spanner client, unless the user supplies a specific
// session pool config.
var DefaultSessionPoolConfig = SessionPoolConfig{
MinOpened: 100,
MaxOpened: numChannels * 100,
MaxBurst: 10,
incStep: 25,
WriteSessions: 0.2,
HealthCheckWorkers: 10,
HealthCheckInterval: healthCheckIntervalMins * time.Minute,
InactiveTransactionRemovalOptions: InactiveTransactionRemovalOptions{
ActionOnInactiveTransaction: Warn,
executionFrequency: 2 * time.Minute,
idleTimeThreshold: 60 * time.Minute,
usedSessionsRatioThreshold: 0.95,
},
}
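// Callers can override these defaults when creating a client. A minimal
// sketch, assuming the usual NewClientWithConfig entry point:
//
//	client, err := NewClientWithConfig(ctx, database, ClientConfig{
//		SessionPoolConfig: SessionPoolConfig{
//			MinOpened: 10,
//			MaxOpened: 400,
//		},
//	})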
// errMinOpenedGTMaxOpened returns an error for SessionPoolConfig.MaxOpened < SessionPoolConfig.MinOpened when SessionPoolConfig.MaxOpened is set.
func errMinOpenedGTMaxOpened(maxOpened, minOpened uint64) error {
return spannerErrorf(codes.InvalidArgument,
"require SessionPoolConfig.MaxOpened >= SessionPoolConfig.MinOpened, got %d and %d", maxOpened, minOpened)
}
// errWriteFractionOutOfRange returns an error for
// SessionPoolConfig.WriteSessions < 0 or SessionPoolConfig.WriteSessions > 1.
func errWriteFractionOutOfRange(writeFraction float64) error {
return spannerErrorf(codes.InvalidArgument,
"require SessionPoolConfig.WriteSessions >= 0.0 && SessionPoolConfig.WriteSessions <= 1.0, got %.2f", writeFraction)
}
// errHealthCheckWorkersNegative returns error for
// SessionPoolConfig.HealthCheckWorkers < 0
func errHealthCheckWorkersNegative(workers int) error {
return spannerErrorf(codes.InvalidArgument,
"require SessionPoolConfig.HealthCheckWorkers >= 0, got %d", workers)
}
// errHealthCheckIntervalNegative returns error for
// SessionPoolConfig.HealthCheckInterval < 0
func errHealthCheckIntervalNegative(interval time.Duration) error {
return spannerErrorf(codes.InvalidArgument,
"require SessionPoolConfig.HealthCheckInterval >= 0, got %v", interval)
}
// validate verifies that the SessionPoolConfig is good for use.
func (spc *SessionPoolConfig) validate() error {
if spc.MinOpened > spc.MaxOpened && spc.MaxOpened > 0 {
return errMinOpenedGTMaxOpened(spc.MaxOpened, spc.MinOpened)
}
if spc.HealthCheckWorkers < 0 {
return errHealthCheckWorkersNegative(spc.HealthCheckWorkers)
}
if spc.HealthCheckInterval < 0 {
return errHealthCheckIntervalNegative(spc.HealthCheckInterval)
}
return nil
}
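// For example, a config with MinOpened greater than a non-zero MaxOpened is
// rejected (illustrative):
//
//	cfg := SessionPoolConfig{MinOpened: 200, MaxOpened: 100}
//	err := cfg.validate() // errMinOpenedGTMaxOpened: InvalidArgument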
type muxSessionCreateRequest struct {
ctx context.Context
force bool
}
// sessionPool creates and caches Cloud Spanner sessions.
type sessionPool struct {
// mu protects sessionPool from concurrent access.
mu sync.Mutex
// valid marks the validity of the session pool.
valid bool
// sc is used to create the sessions for the pool.
sc *sessionClient
// trackedSessionHandles contains all sessions handles that have been
// checked out of the pool. The list is only filled if TrackSessionHandles
// has been enabled.
trackedSessionHandles list.List
// idleList caches idle sessions. Sessions in this list can be
// allocated for use.
idleList list.List
// multiplexSessionClientCounter is the counter for the multiplexed session client.
multiplexSessionClientCounter int
// clientPool is a pool of Cloud Spanner grpc clients.
clientPool []spannerClient
// multiplexedSession contains the multiplexed session
multiplexedSession *session
// mayGetSession is for broadcasting that session retrieval/creation may
// proceed.
mayGetSession chan struct{}
// multiplexedSessionReq is the channel through which multiplexed session creation requests (if any) are sent to the background goroutine.
multiplexedSessionReq chan muxSessionCreateRequest
// mayGetMultiplexedSession is for broadcasting that multiplexed session retrieval is possible.
mayGetMultiplexedSession chan bool
// sessionCreationError is the last error that occurred during session
// creation and is propagated to any waiters waiting for a session.
sessionCreationError error
// multiplexedSessionCreationError is the error that occurred during multiplexed session
// creation for the first time and is propagated to any waiters waiting for a session.
multiplexedSessionCreationError error
// numOpened is the total number of open sessions from the session pool.
numOpened uint64
// createReqs is the number of ongoing session creation requests.
createReqs uint64
// numWaiters is the number of processes waiting for a session to
// become available.
numWaiters uint64
// disableBackgroundPrepareSessions indicates that the BeginTransaction
// call for a read/write transaction failed with a permanent error, such as
// PermissionDenied or `Database not found`. Further background calls to
// prepare sessions will be disabled.
disableBackgroundPrepareSessions bool
// configuration of the session pool.
SessionPoolConfig
// hc is the health checker
hc *healthChecker
// rand is a separately sourced random generator.
rand *rand.Rand
// numInUse is the number of sessions that are currently in use (checked out
// from the session pool).
numInUse uint64
// maxNumInUse is the maximum number of sessions in use concurrently in the
// current 10 minute interval.
maxNumInUse uint64
// lastResetTime is the start time of the window for recording maxNumInUse.
lastResetTime time.Time
// numSessions is the number of sessions that are idle for read/write.
numSessions uint64
// mw is the maintenance window containing statistics for the max number of
// sessions checked out of the pool during the last 10 minutes.
mw *maintenanceWindow
// tagMap is a map of all tags that are associated with the emitted metrics.
tagMap *tag.Map
// numOfLeakedSessionsRemoved is the number of leaked sessions removed from the session pool.
// It is only tracked when ActionOnInactiveTransaction is WarnAndClose or Close in InactiveTransactionRemovalOptions.
numOfLeakedSessionsRemoved uint64
otConfig *openTelemetryConfig
// enableMultiplexSession is a flag to enable multiplexed session.
enableMultiplexSession bool
}
// newSessionPool creates a new session pool.
func newSessionPool(sc *sessionClient, config SessionPoolConfig) (*sessionPool, error) {
if err := config.validate(); err != nil {
return nil, err
}
if config.HealthCheckWorkers == 0 {
// With 10 workers and assuming average latency of 5ms for
// BeginTransaction, we will be able to prepare 2000 tx/sec in advance.
// If the rate of takeWriteSession is more than that, it will degrade to
// doing BeginTransaction inline.
//
// TODO: consider resizing the worker pool dynamically according to the load.
config.HealthCheckWorkers = 10
}
if config.HealthCheckInterval == 0 {
config.HealthCheckInterval = healthCheckIntervalMins * time.Minute
}
if config.healthCheckSampleInterval == 0 {
config.healthCheckSampleInterval = time.Minute
}
if config.ActionOnInactiveTransaction == actionUnspecified {
config.ActionOnInactiveTransaction = DefaultSessionPoolConfig.ActionOnInactiveTransaction
}
if config.idleTimeThreshold == 0 {
config.idleTimeThreshold = DefaultSessionPoolConfig.idleTimeThreshold
}
if config.executionFrequency == 0 {
config.executionFrequency = DefaultSessionPoolConfig.executionFrequency
}
if config.usedSessionsRatioThreshold == 0 {
config.usedSessionsRatioThreshold = DefaultSessionPoolConfig.usedSessionsRatioThreshold
}
if config.MultiplexSessionCheckInterval == 0 {
config.MultiplexSessionCheckInterval = 10 * time.Minute
}
isMultiplexed := strings.ToLower(os.Getenv("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"))
if isMultiplexed != "" && isMultiplexed != "true" && isMultiplexed != "false" {
return nil, spannerErrorf(codes.InvalidArgument, "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS must be either true or false")
}
pool := &sessionPool{
sc: sc,
valid: true,
mayGetSession: make(chan struct{}),
mayGetMultiplexedSession: make(chan bool),
multiplexedSessionReq: make(chan muxSessionCreateRequest),
SessionPoolConfig: config,
mw: newMaintenanceWindow(config.MaxOpened),
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
otConfig: sc.otConfig,
enableMultiplexSession: isMultiplexed == "true",
}
_, instance, database, err := parseDatabaseName(sc.database)
if err != nil {
return nil, err
}
// Errors should not prevent initializing the session pool.
ctx, err := tag.New(context.Background(),
tag.Upsert(tagKeyClientID, sc.id),
tag.Upsert(tagKeyDatabase, database),
tag.Upsert(tagKeyInstance, instance),
tag.Upsert(tagKeyLibVersion, internal.Version),
)
if err != nil {
logf(pool.sc.logger, "Failed to create tag map, error: %v", err)
}
pool.tagMap = tag.FromContext(ctx)
// On a GCE VM within the same region, a healthcheck ping takes on average
// 10ms to finish. Given a 5-minute interval and 10 healthcheck workers, a
// healthChecker can effectively maintain
// 100 checks_per_worker/sec * 10 workers * 300 seconds = 300K sessions.
pool.hc = newHealthChecker(config.HealthCheckInterval, config.MultiplexSessionCheckInterval, config.HealthCheckWorkers, config.healthCheckSampleInterval, pool)
// First initialize the pool before we indicate that the healthchecker is
// ready. This prevents the maintainer from starting before the pool has
// been initialized, which means that we guarantee that the initial
// sessions are created using BatchCreateSessions.
if config.MinOpened > 0 {
numSessions := minUint64(config.MinOpened, math.MaxInt32)
if err := pool.initPool(numSessions); err != nil {
return nil, err
}
}
if pool.enableMultiplexSession {
go pool.createMultiplexedSession()
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
pool.multiplexedSessionReq <- muxSessionCreateRequest{force: true, ctx: ctx}
// listen for the session to be created
go func() {
select {
case <-ctx.Done():
cancel()
return
// wait for the session to be created
case <-pool.mayGetMultiplexedSession:
}
}()
}
pool.recordStat(context.Background(), MaxAllowedSessionsCount, int64(config.MaxOpened))
err = registerSessionPoolOTMetrics(pool)
if err != nil {
logf(pool.sc.logger, "Error when registering session pool metrics in OpenTelemetry, error: %v", err)
}
close(pool.hc.ready)
return pool, nil
}
func (p *sessionPool) recordStat(ctx context.Context, m *stats.Int64Measure, n int64, tags ...tag.Tag) {
ctx = tag.NewContext(ctx, p.tagMap)
mutators := make([]tag.Mutator, len(tags))
for i, t := range tags {
mutators[i] = tag.Upsert(t.Key, t.Value)
}
ctx, err := tag.New(ctx, mutators...)
if err != nil {
logf(p.sc.logger, "Failed to tag metrics, error: %v", err)
}
recordStat(ctx, m, n)
}
type recordOTStatOption struct {
attr []attribute.KeyValue
}
func (p *sessionPool) recordOTStat(ctx context.Context, m metric.Int64Counter, val int64, option recordOTStatOption) {
if m != nil {
attrs := p.otConfig.attributeMap
if len(option.attr) > 0 {
attrs = option.attr
}
m.Add(ctx, val, metric.WithAttributes(attrs...))
}
}
func (p *sessionPool) getRatioOfSessionsInUseLocked() float64 {
maxSessions := p.MaxOpened
if maxSessions == 0 {
return 0
}
return float64(p.numInUse) / float64(maxSessions)
}
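// For example, with MaxOpened = 400 and 384 sessions checked out, the ratio is
// 384/400 = 0.96, which exceeds the default usedSessionsRatioThreshold of 0.95
// and triggers the scan for long-running sessions below.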
// getLongRunningSessionsLocked returns the session handles that are unexpectedly long-running.
func (p *sessionPool) getLongRunningSessionsLocked() []*sessionHandle {
usedSessionsRatio := p.getRatioOfSessionsInUseLocked()
var longRunningSessions []*sessionHandle
if usedSessionsRatio > p.usedSessionsRatioThreshold {
element := p.trackedSessionHandles.Front()
for element != nil {
sh := element.Value.(*sessionHandle)
sh.mu.Lock()
if sh.session == nil {
// sessionHandle has already been recycled/destroyed.
sh.mu.Unlock()
element = element.Next()
continue
}
diff := time.Since(sh.lastUseTime)
if !sh.eligibleForLongRunning && diff.Seconds() >= p.idleTimeThreshold.Seconds() {
if (p.ActionOnInactiveTransaction == Warn || p.ActionOnInactiveTransaction == WarnAndClose) && !sh.isSessionLeakLogged {
if p.ActionOnInactiveTransaction == Warn {
if sh.stack != nil {
logf(p.sc.logger, "session %s checked out of pool at %s is long running due to possible session leak for goroutine: \n%s", sh.session.getID(), sh.checkoutTime.Format(time.RFC3339), sh.stack)
} else {
logf(p.sc.logger, "session %s checked out of pool at %s is long running due to possible session leak for goroutine: \nEnable SessionPoolConfig.TrackSessionHandles to get stack trace associated with the session", sh.session.getID(), sh.checkoutTime.Format(time.RFC3339))
}
sh.isSessionLeakLogged = true
} else if p.ActionOnInactiveTransaction == WarnAndClose {
if sh.stack != nil {
logf(p.sc.logger, "session %s checked out of pool at %s is long running and will be removed due to possible session leak for goroutine: \n%s", sh.session.getID(), sh.checkoutTime.Format(time.RFC3339), sh.stack)
} else {
logf(p.sc.logger, "session %s checked out of pool at %s is long running and will be removed due to possible session leak for goroutine: \nEnable SessionPoolConfig.TrackSessionHandles to get stack trace associated with the session", sh.session.getID(), sh.checkoutTime.Format(time.RFC3339))
}
}
}
if p.ActionOnInactiveTransaction == WarnAndClose || p.ActionOnInactiveTransaction == Close {
longRunningSessions = append(longRunningSessions, sh)
}
}
sh.mu.Unlock()
element = element.Next()
}
}
return longRunningSessions
}
// removeLongRunningSessions removes and/or logs sessions that are unexpectedly long-running.
func (p *sessionPool) removeLongRunningSessions() {
p.mu.Lock()
longRunningSessions := p.getLongRunningSessionsLocked()
p.mu.Unlock()
// destroy long-running sessions
if p.ActionOnInactiveTransaction == WarnAndClose || p.ActionOnInactiveTransaction == Close {
var leakedSessionsRemovedCount uint64
for _, sh := range longRunningSessions {
// Remove the inner session from the pool to reduce the probability of two
// processes trying to use the same session at the same time.
sh.destroy()
leakedSessionsRemovedCount++
}
p.mu.Lock()
p.numOfLeakedSessionsRemoved += leakedSessionsRemovedCount
p.mu.Unlock()
}
}
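// Note that the handles are destroyed after releasing p.mu: sh.destroy()
// re-acquires the pool lock (via session.destroy and the pool's remove), so
// calling it with p.mu held would deadlock.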
func (p *sessionPool) initPool(numSessions uint64) error {
p.mu.Lock()
defer p.mu.Unlock()
return p.growPoolLocked(numSessions, true)
}
func (p *sessionPool) growPoolLocked(numSessions uint64, distributeOverChannels bool) error {
// Take budget before the actual session creation.
numSessions = minUint64(numSessions, math.MaxInt32)
p.numOpened += uint64(numSessions)
p.recordStat(context.Background(), OpenSessionCount, int64(p.numOpened))
p.createReqs += uint64(numSessions)
// Asynchronously create a batch of sessions for the pool.
return p.sc.batchCreateSessions(int32(numSessions), distributeOverChannels, p)
}
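// createMultiplexedSession runs as a background goroutine and serves requests
// arriving on multiplexedSessionReq. A request with force set, or the absence
// of a cached multiplexed session, triggers a CreateSession RPC through the
// session client; otherwise the goroutine only signals
// mayGetMultiplexedSession so that the waiter can reuse the cached session.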
func (p *sessionPool) createMultiplexedSession() {
for c := range p.multiplexedSessionReq {
p.mu.Lock()
sess := p.multiplexedSession
p.mu.Unlock()
if c.force || sess == nil {
p.mu.Lock()
p.sc.mu.Lock()
client, err := p.sc.nextClient()
p.sc.mu.Unlock()
p.mu.Unlock()
if err != nil {
// If we can't get a client, we can't create a session.
p.mu.Lock()
p.multiplexedSessionCreationError = err
p.mu.Unlock()
p.mayGetMultiplexedSession <- true
continue
}
p.sc.executeCreateMultiplexedSession(c.ctx, client, p.sc.md, p)
continue
}
select {
case p.mayGetMultiplexedSession <- true:
case <-c.ctx.Done():
return
}
}
}
// sessionReady is executed by the SessionClient when a session has been
// created and is ready to use. This method adds the new session to the
// pool and decreases the number of sessions that are being created.
func (p *sessionPool) sessionReady(ctx context.Context, s *session) {
p.mu.Lock()
defer p.mu.Unlock()
// Clear any session creation error.
if s.isMultiplexed {
s.pool = p
p.multiplexedSession = s
p.multiplexedSessionCreationError = nil
p.recordStat(context.Background(), OpenSessionCount, int64(1), tag.Tag{Key: tagKeyIsMultiplexed, Value: "true"})
p.recordStat(context.Background(), SessionsCount, 1, tagNumSessions, tag.Tag{Key: tagKeyIsMultiplexed, Value: "true"})
// either notify the waiting goroutine or skip if no one is waiting
select {
case p.mayGetMultiplexedSession <- true:
case <-ctx.Done():
return
}
return
}
p.sessionCreationError = nil
// Set this pool as the home pool of the session and register it with the
// health checker.
s.pool = p
p.hc.register(s)
p.createReqs--
// Insert the session at a random position in the pool to prevent all
// sessions affiliated with a channel from being placed sequentially in
// the pool.
if p.idleList.Len() > 0 {
pos := rand.Intn(p.idleList.Len())
before := p.idleList.Front()
for i := 0; i < pos; i++ {
before = before.Next()
}
s.setIdleList(p.idleList.InsertBefore(s, before))
} else {
s.setIdleList(p.idleList.PushBack(s))
}
p.incNumSessionsLocked(context.Background())
// Notify other waiters blocking on session creation.
close(p.mayGetSession)
p.mayGetSession = make(chan struct{})
}
// sessionCreationFailed is called by the SessionClient when the creation of one
// or more requested sessions finished with an error. sessionCreationFailed will
// decrease the number of sessions being created and notify any waiters that
// the session creation failed.
func (p *sessionPool) sessionCreationFailed(ctx context.Context, err error, numSessions int32, isMultiplexed bool) {
p.mu.Lock()
defer p.mu.Unlock()
if isMultiplexed {
// Ignore the error if a multiplexed session is already present
if p.multiplexedSession != nil {
p.multiplexedSessionCreationError = nil
select {
case p.mayGetMultiplexedSession <- true:
case <-ctx.Done():
return
}
return
}
p.recordStat(context.Background(), OpenSessionCount, int64(0), tag.Tag{Key: tagKeyIsMultiplexed, Value: "true"})
p.multiplexedSessionCreationError = err
select {
case p.mayGetMultiplexedSession <- true:
case <-ctx.Done():
return
}
return
}
p.createReqs -= uint64(numSessions)
p.numOpened -= uint64(numSessions)
p.recordStat(context.Background(), OpenSessionCount, int64(p.numOpened), tag.Tag{Key: tagKeyIsMultiplexed, Value: "false"})
// Notify other waiters blocking on session creation.
p.sessionCreationError = err