clientv3/integration: add TestBalancerUnderServerShutdownImmutable #8779
@@ -179,3 +179,58 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie
		t.Fatal(err)
	}
}

func TestBalancerUnderServerShutdownGetLinearizable(t *testing.T) {
	testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "foo")
		return err
	}, 7*time.Second) // give enough time for leader election, balancer switch
}

func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) {
	testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "foo", clientv3.WithSerializable())
		return err
	}, 2*time.Second)
}

// testBalancerUnderServerShutdownImmutable expects that when the member of
// the pinned endpoint is shut down, the balancer switches its endpoints
// and all subsequent range requests succeed with new endpoints.
func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:               3,
		SkipCreatingClient: true,
	})
	defer clus.Terminate(t)

	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	// pin eps[0]
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[0]}})
	if err != nil {
		t.Errorf("failed to create client: %v", err)
	}
	defer cli.Close()

	// wait for eps[0] to be pinned
	waitPinReady(t, cli)
Review thread on the waitPinReady(t, cli) line:

> does waitPin return an error?

> It's fatal inside.

> probably should rename to mustWaitPinReady later.
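For reference, a minimal sketch of what a renamed mustWaitPinReady helper might look like, assuming the same imports as this test file; the helper body, the probe key, and the 10-second bound are assumptions, not the actual implementation in this package:

// mustWaitPinReady is a hypothetical sketch of the suggested rename: it
// issues one Get through the client and fails the test (rather than
// returning an error) if no endpoint gets pinned in time.
func mustWaitPinReady(t *testing.T, cli *clientv3.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	// any key works; we only care that a request makes it through the balancer
	_, err := cli.Get(ctx, "foo")
	cancel()
	if err != nil {
		t.Fatal(err)
	}
}

The "must" prefix follows the common Go convention that the function fails the test itself instead of returning an error, which answers the reviewer's question at the call site.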
	// add all eps to the list, so that when the originally pinned one fails
	// the client can switch to other available eps
	cli.SetEndpoints(eps...)

	// shut down eps[0]
	clus.Members[0].Terminate(t)

	// the balancer switches to other endpoints when eps[0] is explicitly
	// shut down, so the following request should succeed
	cctx, ccancel := context.WithTimeout(context.Background(), timeout)
	err = op(cli, cctx)
	ccancel()
	if err != nil {
		t.Errorf("failed to finish range request in time %v (timeout %v)", err, timeout)
	}
}
Review comment on the diff:

> as @jpbetz mentioned in another PR, we should define the leader election
> timeout as a const somewhere, and the normal waiting timeout too, so this
> could be rewritten as electionWait + retryWait.
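A minimal sketch of that suggestion, assuming the same imports as this test file; the const names and the 5s/2s split are assumptions (they sum to the 7 seconds the PR hard-codes), not values defined anywhere in this PR:

// Hypothetical package-level timeouts, per the review suggestion.
const (
	// electionWait bounds how long a new leader election may take
	// after a member is shut down.
	electionWait = 5 * time.Second
	// retryWait bounds how long the balancer may take to switch to
	// another endpoint and retry the request.
	retryWait = 2 * time.Second
)

// The linearizable test above could then drop its magic number:
func TestBalancerUnderServerShutdownGetLinearizable(t *testing.T) {
	testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "foo")
		return err
	}, electionWait+retryWait) // replaces the hard-coded 7*time.Second
}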