// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !cluster_proxy

package integration

import (
	"context"
	"fmt"
	"testing"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// TestV3WatchRestoreSnapshotUnsync tests whether a slow follower can restore
// from a leader snapshot and still be notified on watchers from an old
// revision that were originally created in the synced watcher group.
// TODO: fix panic with gRPC proxy "panic: watcher current revision should not exceed current revision"
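//
// The test runs in three phases: create a watcher on member 0, partition
// member 0 and write enough keys that the leader compacts its raft log (so
// recovery requires a snapshot rather than log replay), then heal the
// partition and verify the restored watcher still receives every event from
// its start revision onward.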
func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
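	// SnapshotCount: 10 triggers a snapshot after every 10 applied entries,
	// and SnapshotCatchUpEntries: 5 retains only the 5 most recent raft log
	// entries after compaction, so a follower that falls further behind can
	// only be recovered via snapshot.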
	clus := NewClusterV3(t, &ClusterConfig{
		Size:                   3,
		SnapshotCount:          10,
		SnapshotCatchUpEntries: 5,
	})
	defer clus.Terminate(t)
	// Spawn a watcher before the partition so it is placed in the synced
	// watcher group.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, errW := toGRPC(clus.Client(0)).Watch.Watch(ctx)
	if errW != nil {
		t.Fatal(errW)
	}
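	// StartRevision: 5 is ahead of the store's current revision, so the new
	// watcher starts out in the synced watcher group.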
	if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5}}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Fatalf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}
	// Isolate member 0 from the rest of the cluster and wait for the
	// remaining members to elect a new leader.
	clus.Members[0].InjectPartition(t, clus.Members[1:]...)
	clus.waitLeader(t, clus.Members[1:])
	time.Sleep(2 * time.Second)
	kvc := toGRPC(clus.Client(1)).KV
	// Write enough keys to trigger a snapshot from the leader to the
	// partitioned follower.
	for i := 0; i < 15; i++ {
		_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
		if err != nil {
			t.Errorf("#%d: couldn't put key (%v)", i, err)
		}
	}
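	// The 15 puts advance the store from revision 1 to revision 16. With
	// SnapshotCount 10 the leader snapshots and compacts its raft log along
	// the way, keeping only the last 5 entries, so member 0 is too far
	// behind to catch up from the log alone.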
	// Heal the partition; the leader then sends a snapshot to the slow
	// follower, which applies it via the watchable store's Restore.
	clus.Members[0].RecoverPartition(t, clus.Members[1:]...)
	clus.WaitLeader(t)
	time.Sleep(2 * time.Second)

	// The slow follower now applies the leader snapshot and should still be
	// able to notify old-revision watchers in the unsynced group. Make sure
	// the restore operation correctly moves watchers between the synced and
	// unsynced watcher groups.
	errc := make(chan error)
	go func() {
		cresp, cerr := wStream.Recv()
		if cerr != nil {
			errc <- cerr
			return
		}
		// from start revision 5 to the latest revision 16, inclusive: 12 events
		if len(cresp.Events) != 12 {
			errc <- fmt.Errorf("expected 12 events, got %+v", cresp.Events)
			return
		}
		errc <- nil
	}()
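	// Recv blocks until the watch response arrives, so it runs in the
	// goroutine above and races against the timeout below.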
	select {
	case <-time.After(10 * time.Second):
		t.Fatal("took too long to receive events from restored watcher")
	case err := <-errc:
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
	}
}