tests/framework: address golangci var-naming issues [remove (*Member) GRPCURL()] #17655

Merged
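The diff below is a mechanical rename: the field GrpcURL becomes GRPCURL (the spelling golangci-lint's revive var-naming rule expects for initialisms), which makes the (*Member) GRPCURL() accessor redundant, so it is deleted and every call site reads the field directly. A minimal, self-contained Go sketch of the before/after shape (not etcd's actual Member type, which carries many more fields; the endpoint string is illustrative):

    package main

    import "fmt"

    // Before (flagged by var-naming and suppressed with a revive directive):
    //
    //     type Member struct {
    //         //revive:disable-next-line:var-naming
    //         GrpcURL string
    //     }
    //
    //     func (m *Member) GRPCURL() string { return m.GrpcURL }

    // After: the exported field spells the initialism in full caps, so the
    // accessor adds nothing and callers read the field directly.
    type Member struct {
        GRPCURL string
    }

    func main() {
        m := Member{GRPCURL: "unix://localhost:2379"} // illustrative endpoint value
        // Call sites change from m.GRPCURL() to m.GRPCURL.
        fmt.Println(m.GRPCURL)
    }

The rest of the diff is the fallout of that rename across the integration test framework and the tests that consume it.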
39 changes: 18 additions & 21 deletions tests/framework/integration/cluster.go
@@ -236,7 +236,7 @@ func (c *Cluster) Launch(t testutil.TB) {
c.WaitMembersMatch(t, c.ProtoMembers())
c.waitVersion()
for _, m := range c.Members {
-t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL())
+t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL)
}
}

@@ -564,9 +564,8 @@ type Member struct {

GRPCServerOpts []grpc.ServerOption
GRPCServer *grpc.Server
-//revive:disable-next-line:var-naming
-GrpcURL string
-GRPCBridge *bridge
+GRPCURL string
+GRPCBridge *bridge

// ServerClient is a clientv3 that directly calls the etcdserver.
ServerClient *clientv3.Client
@@ -588,8 +587,6 @@ type Member struct {
LogObserver *testutils.LogObserver
}

-func (m *Member) GRPCURL() string { return m.GrpcURL }
-
type MemberConfig struct {
Name string
UniqNumber int64
@@ -791,8 +788,8 @@ func (m *Member) listenGRPC() error {
return fmt.Errorf("failed to parse grpc listen port from address %s (%v)", addr, err)
}
m.Port = port
-m.GrpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), addr)
-m.Logger.Info("LISTEN GRPC SUCCESS", zap.String("grpcAddr", m.GrpcURL), zap.String("m.Name", m.Name),
+m.GRPCURL = fmt.Sprintf("%s://%s", m.clientScheme(), addr)
+m.Logger.Info("LISTEN GRPC SUCCESS", zap.String("grpcAddr", m.GRPCURL), zap.String("m.Name", m.Name),
zap.String("workdir", wd), zap.String("port", m.Port))

if m.UseBridge {
@@ -838,7 +835,7 @@ func (m *Member) addBridge() (*bridge, error) {

addr := bridgeListener.Addr().String()
m.Logger.Info("LISTEN BRIDGE SUCCESS", zap.String("grpc-address", addr), zap.String("member", m.Name))
-m.GrpcURL = m.clientScheme() + "://" + addr
+m.GRPCURL = m.clientScheme() + "://" + addr
return m.GRPCBridge, nil
}

@@ -893,12 +890,12 @@ func (m *Member) ID() types.ID { return m.Server.MemberID() }

// NewClientV3 creates a new grpc client connection to the member
func NewClientV3(m *Member) (*clientv3.Client, error) {
-if m.GrpcURL == "" {
+if m.GRPCURL == "" {
return nil, fmt.Errorf("member not configured for grpc")
}

cfg := clientv3.Config{
-Endpoints: []string{m.GrpcURL},
+Endpoints: []string{m.GRPCURL},
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
MaxCallSendMsgSize: m.ClientMaxCallSendMsgSize,
@@ -960,7 +957,7 @@ func (m *Member) Launch() error {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-zap.String("grpc-url", m.GrpcURL),
+zap.String("grpc-url", m.GRPCURL),
)
var err error
if m.Server, err = etcdserver.NewServer(m.ServerConfig); err != nil {
@@ -1102,7 +1099,7 @@ func (m *Member) Launch() error {
}
m.ServerClosers = append(m.ServerClosers, closer)
}
-if m.GrpcURL != "" && m.Client == nil {
+if m.GRPCURL != "" && m.Client == nil {
m.Client, err = NewClientV3(m)
if err != nil {
return err
@@ -1114,7 +1111,7 @@
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-zap.String("grpc-url", m.GrpcURL),
+zap.String("grpc-url", m.GRPCURL),
)
return nil
}
@@ -1230,7 +1227,7 @@ func (m *Member) Stop(_ testutil.TB) {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-zap.String("grpc-url", m.GrpcURL),
+zap.String("grpc-url", m.GRPCURL),
)
m.Close()
m.ServerClosers = nil
@@ -1239,7 +1236,7 @@ func (m *Member) Stop(_ testutil.TB) {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-zap.String("grpc-url", m.GrpcURL),
+zap.String("grpc-url", m.GRPCURL),
)
}

@@ -1264,7 +1261,7 @@ func (m *Member) Restart(t testutil.TB) error {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-zap.String("grpc-url", m.GrpcURL),
+zap.String("grpc-url", m.GRPCURL),
)
newPeerListeners := make([]net.Listener, 0)
for _, ln := range m.PeerListeners {
@@ -1289,7 +1286,7 @@ func (m *Member) Restart(t testutil.TB) error {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-zap.String("grpc-url", m.GrpcURL),
+zap.String("grpc-url", m.GRPCURL),
zap.Error(err),
)
return err
@@ -1302,7 +1299,7 @@ func (m *Member) Terminate(t testutil.TB) {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-zap.String("grpc-url", m.GrpcURL),
+zap.String("grpc-url", m.GRPCURL),
)
m.Close()
if !m.KeepDataDirTerminate {
@@ -1315,7 +1312,7 @@ func (m *Member) Terminate(t testutil.TB) {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
-zap.String("grpc-url", m.GrpcURL),
+zap.String("grpc-url", m.GRPCURL),
)
}

@@ -1449,7 +1446,7 @@ func (c *Cluster) Client(i int) *clientv3.Client {
func (c *Cluster) Endpoints() []string {
var endpoints []string
for _, m := range c.Members {
-endpoints = append(endpoints, m.GrpcURL)
+endpoints = append(endpoints, m.GRPCURL)
}
return endpoints
}
4 changes: 2 additions & 2 deletions tests/integration/clientv3/connectivity/black_hole_test.go
@@ -42,7 +42,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
})
defer clus.Terminate(t)

-eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}
+eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL}

ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
@@ -174,7 +174,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien
})
defer clus.Terminate(t)

-eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}
+eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL}

ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
12 changes: 6 additions & 6 deletions tests/integration/clientv3/connectivity/dial_test.go
@@ -59,7 +59,7 @@ func TestDialTLSExpired(t *testing.T) {
}
// expect remote errors "tls: bad certificate"
_, err = integration2.NewClient(t, clientv3.Config{
-Endpoints: []string{clus.Members[0].GRPCURL()},
+Endpoints: []string{clus.Members[0].GRPCURL},
DialTimeout: 3 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
TLS: tls,
@@ -77,7 +77,7 @@ func TestDialTLSNoConfig(t *testing.T) {
defer clus.Terminate(t)
// expect "signed by unknown authority"
c, err := integration2.NewClient(t, clientv3.Config{
-Endpoints: []string{clus.Members[0].GRPCURL()},
+Endpoints: []string{clus.Members[0].GRPCURL},
DialTimeout: time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
})
@@ -110,7 +110,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
// get endpoint list
eps := make([]string, 3)
for i := range eps {
-eps[i] = clus.Members[i].GRPCURL()
+eps[i] = clus.Members[i].GRPCURL
}
toKill := rand.Intn(len(eps))

@@ -151,7 +151,7 @@ func TestSwitchSetEndpoints(t *testing.T) {
defer clus.Terminate(t)

// get non partitioned members endpoints
-eps := []string{clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
+eps := []string{clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}

cli := clus.Client(0)
clus.Members[0].InjectPartition(t, clus.Members[1:]...)
@@ -172,7 +172,7 @@ func TestRejectOldCluster(t *testing.T) {
defer clus.Terminate(t)

cfg := clientv3.Config{
-Endpoints: []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()},
+Endpoints: []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL},
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
RejectOldCluster: true,
@@ -214,7 +214,7 @@ func TestSetEndpointAndPut(t *testing.T) {
clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2})
defer clus.Terminate(t)

-clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL())
+clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL)
_, err := clus.Client(1).Put(context.TODO(), "foo", "bar")
if err != nil && !strings.Contains(err.Error(), "closing") {
t.Fatal(err)
10 changes: 5 additions & 5 deletions tests/integration/clientv3/connectivity/network_partition_test.go
@@ -115,7 +115,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
})
defer clus.Terminate(t)

-eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
+eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}

// expect pin eps[0]
ccfg := clientv3.Config{
@@ -169,7 +169,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T
Size: 3,
})
defer clus.Terminate(t)
-eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
+eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}

lead := clus.WaitLeader(t)

@@ -224,7 +224,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
})
defer clus.Terminate(t)

-eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
+eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}

target := clus.WaitLeader(t)
if !isolateLeader {
@@ -284,7 +284,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
defer clus.Terminate(t)
leaderIndex := clus.WaitLeader(t)
// get a follower endpoint
-eps := []string{clus.Members[(leaderIndex+1)%3].GRPCURL()}
+eps := []string{clus.Members[(leaderIndex+1)%3].GRPCURL}
ccfg := clientv3.Config{
Endpoints: eps,
DialTimeout: 10 * time.Second,
@@ -302,7 +302,7 @@
// add other endpoints for later endpoint switch
cli.SetEndpoints(eps...)
time.Sleep(time.Second * 2)
-conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCURL())
+conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCURL)
if err != nil {
t.Fatal(err)
}
10 changes: 5 additions & 5 deletions tests/integration/clientv3/connectivity/server_shutdown_test.go
@@ -38,7 +38,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
})
defer clus.Terminate(t)

-eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
+eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}

lead := clus.WaitLeader(t)

@@ -149,7 +149,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie
})
defer clus.Terminate(t)

-eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
+eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}

// pin eps[0]
cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
@@ -206,7 +206,7 @@ func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Cl
})
defer clus.Terminate(t)

-eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
+eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}

// pin eps[0]
cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
@@ -283,9 +283,9 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl

clus := integration2.NewCluster(t, cfg)
defer clus.Terminate(t)
-eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}
+eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL}
if linearizable {
-eps = append(eps, clus.Members[2].GRPCURL())
+eps = append(eps, clus.Members[2].GRPCURL)
}

lead := clus.WaitLeader(t)
6 changes: 3 additions & 3 deletions tests/integration/clientv3/kv_test.go
@@ -797,7 +797,7 @@ func TestKVForLearner(t *testing.T) {
// 1. clus.Members[3] is the newly added learner member, which was appended to clus.Members
// 2. we are using member's grpcAddr instead of clientURLs as the endpoint for clientv3.Config,
// because the implementation of integration test has diverged from embed/etcd.go.
-learnerEp := clus.Members[3].GRPCURL()
+learnerEp := clus.Members[3].GRPCURL
cfg := clientv3.Config{
Endpoints: []string{learnerEp},
DialTimeout: 5 * time.Second,
@@ -870,7 +870,7 @@ func TestBalancerSupportLearner(t *testing.T) {
}

// clus.Members[3] is the newly added learner member, which was appended to clus.Members
-learnerEp := clus.Members[3].GRPCURL()
+learnerEp := clus.Members[3].GRPCURL
cfg := clientv3.Config{
Endpoints: []string{learnerEp},
DialTimeout: 5 * time.Second,
@@ -890,7 +890,7 @@
}
t.Logf("Expected: Read from learner error: %v", err)

-eps := []string{learnerEp, clus.Members[0].GRPCURL()}
+eps := []string{learnerEp, clus.Members[0].GRPCURL}
cli.SetEndpoints(eps...)
if _, err := cli.Get(context.Background(), "foo"); err != nil {
t.Errorf("expect no error (balancer should retry when request to learner fails), got error: %v", err)
6 changes: 3 additions & 3 deletions tests/integration/clientv3/maintenance_test.go
@@ -60,7 +60,7 @@ func TestMaintenanceHashKV(t *testing.T) {
if _, err := cli.Get(context.TODO(), "foo"); err != nil {
t.Fatal(err)
}
-hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCURL(), 0)
+hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCURL, 0)
if err != nil {
t.Fatal(err)
}
@@ -87,7 +87,7 @@ func TestCompactionHash(t *testing.T) {
t.Fatal(err)
}

-testutil.TestCompactionHash(context.Background(), t, hashTestCase{cc, clus.Members[0].GRPCURL()}, 1000)
+testutil.TestCompactionHash(context.Background(), t, hashTestCase{cc, clus.Members[0].GRPCURL}, 1000)
}

type hashTestCase struct {
@@ -406,7 +406,7 @@ func TestMaintenanceStatus(t *testing.T) {

eps := make([]string, 3)
for i := 0; i < 3; i++ {
-eps[i] = clus.Members[i].GRPCURL()
+eps[i] = clus.Members[i].GRPCURL
}

t.Logf("Creating client...")
2 changes: 1 addition & 1 deletion tests/integration/clientv3/metrics_test.go
@@ -73,7 +73,7 @@ func TestV3ClientMetrics(t *testing.T) {
defer clus.Terminate(t)

cfg := clientv3.Config{
-Endpoints: []string{clus.Members[0].GRPCURL()},
+Endpoints: []string{clus.Members[0].GRPCURL},
DialOptions: []grpc.DialOption{
grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),