diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..e207c0b3cc --- /dev/null +++ b/Dockerfile @@ -0,0 +1,51 @@ +FROM ubuntu:xenial + +LABEL maintainer="HugeGraph Docker Maintainers " + +ENV PKG_URL https://github.com/hugegraph + +# 1. Install needed dependencies of GraphServer & RocksDB +RUN set -x \ + && apt-get -q update \ + && apt-get -q install -y --no-install-recommends --no-install-suggests \ + curl \ + lsof \ + g++ \ + gcc \ + openjdk-8-jdk \ + && apt-get clean + # && rm -rf /var/lib/apt/lists/* + +# 2. Init HugeGraph Server +# (Optional) You can set the ip of github to speed up the local build +# && echo "192.30.253.112 github.com\n151.101.44.249 github.global.ssl.fastly.net" >> /etc/hosts \ +ENV SERVER_VERSION 0.10.4 +RUN set -e \ + && mkdir -p /root/hugegraph-server \ + && curl -L -S ${PKG_URL}/hugegraph/releases/download/v${SERVER_VERSION}/hugegraph-${SERVER_VERSION}.tar.gz -o /root/server.tar.gz \ + && tar xzf /root/server.tar.gz --strip-components 1 -C /root/hugegraph-server \ + && rm /root/server.tar.gz \ + && cd /root/hugegraph-server/ \ + && sed -i "s/^restserver.url.*$/restserver.url=http:\/\/0.0.0.0:8080/g" ./conf/rest-server.properties \ + && sed -n '63p' ./bin/start-hugegraph.sh | grep "&" > /dev/null && sed -i 63{s/\&$/#/g} ./bin/start-hugegraph.sh \ + && sed -n '74p' ./bin/start-hugegraph.sh | grep "exit" > /dev/null && sed -i 74{s/^/#/g} ./bin/start-hugegraph.sh \ + && ./bin/init-store.sh + +# 3. 
Prepare for HugeGraph Studio +ENV STUDIO_VERSION 0.10.0 +# (Optional) You can set the ip of github to speed up the local build +# && echo "192.30.253.112 github.com\n151.101.44.249 github.global.ssl.fastly.net" >> /etc/hosts \ +RUN set -e \ + && mkdir -p /root/hugegraph-studio \ + && curl -L -S ${PKG_URL}/hugegraph-studio/releases/download/v${STUDIO_VERSION}/hugegraph-studio-${STUDIO_VERSION}.tar.gz -o /root/studio.tar.gz \ + && tar xzf /root/studio.tar.gz --strip-components 1 -C /root/hugegraph-studio \ + && rm /root/studio.tar.gz \ + && cd /root/hugegraph-studio/ \ + && sed -i "s/^studio.server.host.*$/studio.server.host=0.0.0.0/g" ./conf/hugegraph-studio.properties \ + && sed -i "s/^graph.server.host.*$/graph.server.host=0.0.0.0/g" ./conf/hugegraph-studio.properties + +EXPOSE 8080 8088 +WORKDIR /root +VOLUME /root + +ENTRYPOINT ["./hugegraph-server/bin/start-hugegraph.sh"] diff --git a/README.md b/README.md index b2f4e96a7e..8fecc3cf5e 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,6 @@ HugeGraph is licensed under Apache 2.0 License. ## Thanks HugeGraph relies on the [TinkerPop](http://tinkerpop.apache.org) framework, we refer to the storage structure of Titan and the schema definition of DataStax. -Thanks to Tinkerpop, thanks to Titan, thanks to DataStax. Thanks to all other organizations or authors who contributed to the project. +Thanks to TinkerPop, thanks to Titan, thanks to DataStax. Thanks to all other organizations or authors who contributed to the project. You are welcome to contribute to HugeGraph, and we are looking forward to working with you to build an excellent open source community. 
diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/raft/RaftAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/raft/RaftAPI.java new file mode 100644 index 0000000000..8508ffeae8 --- /dev/null +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/raft/RaftAPI.java @@ -0,0 +1,195 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package com.baidu.hugegraph.api.raft; + +import java.util.List; +import java.util.Map; + +import javax.annotation.security.RolesAllowed; +import javax.inject.Singleton; +import javax.ws.rs.Consumes; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; + +import org.slf4j.Logger; + +import com.baidu.hugegraph.HugeException; +import com.baidu.hugegraph.HugeGraph; +import com.baidu.hugegraph.api.API; +import com.baidu.hugegraph.api.filter.StatusFilter.Status; +import com.baidu.hugegraph.backend.store.raft.RaftGroupManager; +import com.baidu.hugegraph.core.GraphManager; +import com.baidu.hugegraph.util.Log; +import com.codahale.metrics.annotation.Timed; +import com.google.common.collect.ImmutableMap; + +@Path("graphs/{graph}/raft") +@Singleton +public class RaftAPI extends API { + + private static final Logger LOG = Log.logger(RaftAPI.class); + + @GET + @Timed + @Path("list-peers") + @Consumes(APPLICATION_JSON) + @Produces(APPLICATION_JSON_WITH_CHARSET) + @RolesAllowed({"admin"}) + public Map> listPeers(@Context GraphManager manager, + @PathParam("graph") String graph, + @QueryParam("group") + @DefaultValue("default") + String group) { + LOG.debug("Graph [{}] prepare to get leader", graph); + + HugeGraph g = graph(manager, graph); + RaftGroupManager raftManager = raftGroupManager(g, group, "list-peers"); + List peers = raftManager.listPeers(); + return ImmutableMap.of(raftManager.group(), peers); + } + + @GET + @Timed + @Path("get-leader") + @Consumes(APPLICATION_JSON) + @Produces(APPLICATION_JSON_WITH_CHARSET) + @RolesAllowed({"admin"}) + public Map getLeader(@Context GraphManager manager, + @PathParam("graph") String graph, + @QueryParam("group") + @DefaultValue("default") + String group) { + LOG.debug("Graph [{}] prepare to get leader", graph); + + HugeGraph g = graph(manager, 
graph); + RaftGroupManager raftManager = raftGroupManager(g, group, "get-leader"); + String leaderId = raftManager.getLeader(); + return ImmutableMap.of(raftManager.group(), leaderId); + } + + @POST + @Timed + @Status(Status.OK) + @Path("transfer-leader") + @Consumes(APPLICATION_JSON) + @Produces(APPLICATION_JSON_WITH_CHARSET) + @RolesAllowed({"admin"}) + public Map transferLeader(@Context GraphManager manager, + @PathParam("graph") String graph, + @QueryParam("group") + @DefaultValue("default") + String group, + @QueryParam("endpoint") + String endpoint) { + LOG.debug("Graph [{}] prepare to transfer leader to: {}", + graph, endpoint); + + HugeGraph g = graph(manager, graph); + RaftGroupManager raftManager = raftGroupManager(g, group, + "transfer-leader"); + String leaderId = raftManager.transferLeaderTo(endpoint); + return ImmutableMap.of(raftManager.group(), leaderId); + } + + @POST + @Timed + @Status(Status.OK) + @Path("set-leader") + @Consumes(APPLICATION_JSON) + @Produces(APPLICATION_JSON_WITH_CHARSET) + @RolesAllowed({"admin"}) + public Map setLeader(@Context GraphManager manager, + @PathParam("graph") String graph, + @QueryParam("group") + @DefaultValue("default") + String group, + @QueryParam("endpoint") + String endpoint) { + LOG.debug("Graph [{}] prepare to set leader to: {}", + graph, endpoint); + + HugeGraph g = graph(manager, graph); + RaftGroupManager raftManager = raftGroupManager(g, group, "set-leader"); + String leaderId = raftManager.setLeader(endpoint); + return ImmutableMap.of(raftManager.group(), leaderId); + } + + @POST + @Timed + @Status(Status.OK) + @Path("add-peer") + @Consumes(APPLICATION_JSON) + @Produces(APPLICATION_JSON_WITH_CHARSET) + @RolesAllowed({"admin"}) + public Map addPeer(@Context GraphManager manager, + @PathParam("graph") String graph, + @QueryParam("group") + @DefaultValue("default") + String group, + @QueryParam("endpoint") + String endpoint) { + LOG.debug("Graph [{}] prepare to add peer: {}", graph, endpoint); + + 
HugeGraph g = graph(manager, graph); + RaftGroupManager raftManager = raftGroupManager(g, group, "add-peer"); + String peerId = raftManager.addPeer(endpoint); + return ImmutableMap.of(raftManager.group(), peerId); + } + + @POST + @Timed + @Status(Status.OK) + @Path("remove-peer") + @Consumes(APPLICATION_JSON) + @Produces(APPLICATION_JSON_WITH_CHARSET) + @RolesAllowed({"admin"}) + public Map removePeer(@Context GraphManager manager, + @PathParam("graph") String graph, + @QueryParam("group") + @DefaultValue("default") + String group, + @QueryParam("endpoint") + String endpoint) { + LOG.debug("Graph [{}] prepare to remove peer: {}", graph, endpoint); + + HugeGraph g = graph(manager, graph); + RaftGroupManager raftManager = raftGroupManager(g, group, + "remove-peer"); + String peerId = raftManager.removePeer(endpoint); + return ImmutableMap.of(raftManager.group(), peerId); + } + + private static RaftGroupManager raftGroupManager(HugeGraph graph, + String group, + String operation) { + RaftGroupManager raftManager = graph.raftGroupManager(group); + if (raftManager == null) { + throw new HugeException("Allowed %s operation only when " + + "working on raft mode", operation); + } + return raftManager; + } +} diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/CountAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/CountAPI.java index f2c2bd7a6e..05817a4b9a 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/CountAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/CountAPI.java @@ -43,6 +43,7 @@ import com.baidu.hugegraph.server.RestServer; import com.baidu.hugegraph.structure.HugeVertex; import com.baidu.hugegraph.traversal.algorithm.CountTraverser; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.Log; @@ -77,7 +78,7 @@ public String post(@Context 
GraphManager manager, request.dedupSize); HugeGraph g = graph(manager, graph); - List steps = step(g, request); + List steps = steps(g, request); CountTraverser traverser = new CountTraverser(g); long count = traverser.count(sourceId, steps, request.containsTraversed, request.dedupSize); @@ -85,10 +86,9 @@ public String post(@Context GraphManager manager, return manager.serializer(g).writeMap(ImmutableMap.of("count", count)); } - private static List step(HugeGraph graph, - CountRequest request) { + private static List steps(HugeGraph graph, CountRequest request) { int stepSize = request.steps.size(); - List steps = new ArrayList<>(stepSize); + List steps = new ArrayList<>(stepSize); for (Step step : request.steps) { steps.add(step.jsonToStep(graph)); } @@ -136,10 +136,9 @@ public String toString() { this.degree, this.skipDegree); } - private CountTraverser.Step jsonToStep(HugeGraph graph) { - return new CountTraverser.Step(graph, this.direction, this.labels, - this.properties, this.degree, - this.skipDegree); + private EdgeStep jsonToStep(HugeGraph graph) { + return new EdgeStep(graph, this.direction, this.labels, + this.properties, this.degree, this.skipDegree); } } } diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/CustomizedPathsAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/CustomizedPathsAPI.java index e49f808e0c..41321871da 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/CustomizedPathsAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/CustomizedPathsAPI.java @@ -51,6 +51,7 @@ import com.baidu.hugegraph.server.RestServer; import com.baidu.hugegraph.traversal.algorithm.CustomizePathsTraverser; import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; +import com.baidu.hugegraph.traversal.algorithm.steps.WeightedEdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.Log; @@ 
-87,7 +88,7 @@ public String post(@Context GraphManager manager, HugeGraph g = graph(manager, graph); Iterator sources = request.sources.vertices(g); - List steps = step(g, request); + List steps = step(g, request); boolean sorted = request.sortBy != SortBy.NONE; CustomizePathsTraverser traverser = new CustomizePathsTraverser(g); @@ -116,10 +117,10 @@ public String post(@Context GraphManager manager, return manager.serializer(g).writePaths("paths", paths, false, iter); } - private static List step(HugeGraph graph, - PathRequest req) { + private static List step(HugeGraph graph, + PathRequest req) { int stepSize = req.steps.size(); - List steps = new ArrayList<>(stepSize); + List steps = new ArrayList<>(stepSize); for (Step step : req.steps) { steps.add(step.jsonToStep(graph)); } @@ -181,15 +182,11 @@ public String toString() { this.sample); } - private CustomizePathsTraverser.Step jsonToStep(HugeGraph g) { - return new CustomizePathsTraverser.Step(g, this.direction, - this.labels, - this.properties, - this.degree, - this.skipDegree, - this.weightBy, - this.defaultWeight, - this.sample); + private WeightedEdgeStep jsonToStep(HugeGraph g) { + return new WeightedEdgeStep(g, this.direction, this.labels, + this.properties, this.degree, + this.skipDegree, this.weightBy, + this.defaultWeight, this.sample); } } diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/FusiformSimilarityAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/FusiformSimilarityAPI.java index e6ef2858b7..dda9845fab 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/FusiformSimilarityAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/FusiformSimilarityAPI.java @@ -100,13 +100,11 @@ public String post(@Context GraphManager manager, Iterator sources = request.sources.vertices(g); E.checkArgument(sources != null && sources.hasNext(), "The source vertices can't be empty"); - EdgeLabel edgeLabel = request.label 
== null ? - null : g.edgeLabel(request.label); FusiformSimilarityTraverser traverser = new FusiformSimilarityTraverser(g); SimilarsMap result = traverser.fusiformSimilarity( - sources, request.direction, edgeLabel, + sources, request.direction, request.label, request.minNeighbors, request.alpha, request.minSimilars, request.top, request.groupProperty, request.minGroups, diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/JaccardSimilarityAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/JaccardSimilarityAPI.java index c7ee87525f..ebc4334147 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/JaccardSimilarityAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/JaccardSimilarityAPI.java @@ -19,12 +19,12 @@ package com.baidu.hugegraph.api.traversers; -import java.util.Map; - import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_CAPACITY; import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_DEGREE; import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_LIMIT; +import java.util.Map; + import javax.inject.Singleton; import javax.ws.rs.Consumes; import javax.ws.rs.DefaultValue; @@ -45,7 +45,7 @@ import com.baidu.hugegraph.core.GraphManager; import com.baidu.hugegraph.server.RestServer; import com.baidu.hugegraph.structure.HugeVertex; -import com.baidu.hugegraph.traversal.algorithm.EdgeStep; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.JaccardSimilarTraverser; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; @@ -81,9 +81,12 @@ public String get(@Context GraphManager manager, Directions dir = Directions.convert(EdgeAPI.parseDirection(direction)); HugeGraph g = graph(manager, graph); - JaccardSimilarTraverser traverser = new JaccardSimilarTraverser(g); - double similarity = traverser.jaccardSimilarity(sourceId, 
targetId, dir, - edgeLabel, degree); + double similarity; + try (JaccardSimilarTraverser traverser = + new JaccardSimilarTraverser(g)) { + similarity = traverser.jaccardSimilarity(sourceId, targetId, + dir, edgeLabel, degree); + } return JsonUtil.toJson(ImmutableMap.of("jaccard_similarity", similarity)); } @@ -113,10 +116,12 @@ public String post(@Context GraphManager manager, EdgeStep step = step(g, request.step); - JaccardSimilarTraverser traverser = new JaccardSimilarTraverser(g); - Map results = traverser.jaccardSimilars(sourceId, step, - request.top, - request.capacity); + Map results; + try (JaccardSimilarTraverser traverser = + new JaccardSimilarTraverser(g)) { + results = traverser.jaccardSimilars(sourceId, step, request.top, + request.capacity); + } return manager.serializer(g).writeMap(results); } diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/KneighborAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/KneighborAPI.java index 77822e7c3b..d5120b7c63 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/KneighborAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/KneighborAPI.java @@ -51,9 +51,9 @@ import com.baidu.hugegraph.core.GraphManager; import com.baidu.hugegraph.server.RestServer; import com.baidu.hugegraph.structure.HugeVertex; -import com.baidu.hugegraph.traversal.algorithm.KneighborTraverser; -import com.baidu.hugegraph.traversal.algorithm.EdgeStep; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; +import com.baidu.hugegraph.traversal.algorithm.KneighborTraverser; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.Log; @@ -90,9 +90,11 @@ public String get(@Context GraphManager manager, HugeGraph g = graph(manager, graph); - KneighborTraverser traverser = new KneighborTraverser(g); - Set ids = 
traverser.kneighbor(source, dir, edgeLabel, depth, - degree, limit); + Set ids; + try (KneighborTraverser traverser = new KneighborTraverser(g)) { + ids = traverser.kneighbor(source, dir, edgeLabel, + depth, degree, limit); + } return manager.serializer(g).writeList("vertices", ids); } @@ -124,10 +126,12 @@ public String post(@Context GraphManager manager, EdgeStep step = step(g, request.step); - KneighborTraverser traverser = new KneighborTraverser(g); - Set results = traverser.customizedKneighbor( - sourceId, step, request.maxDepth, - request.limit); + Set results; + try (KneighborTraverser traverser = new KneighborTraverser(g)) { + results = traverser.customizedKneighbor(sourceId, step, + request.maxDepth, + request.limit); + } Set neighbors = new HashSet<>(); for (HugeTraverser.Node node : results) { diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/KoutAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/KoutAPI.java index eb7aa401f8..adb95a6b75 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/KoutAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/KoutAPI.java @@ -19,6 +19,11 @@ package com.baidu.hugegraph.api.traversers; +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_CAPACITY; +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_DEGREE; +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_ELEMENTS_LIMIT; +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_PATHS_LIMIT; + import java.util.ArrayList; import java.util.HashSet; import java.util.Iterator; @@ -47,21 +52,16 @@ import com.baidu.hugegraph.core.GraphManager; import com.baidu.hugegraph.server.RestServer; import com.baidu.hugegraph.structure.HugeVertex; -import com.baidu.hugegraph.traversal.algorithm.KoutTraverser; -import com.baidu.hugegraph.traversal.algorithm.EdgeStep; +import 
com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; +import com.baidu.hugegraph.traversal.algorithm.HugeTraverser.Node; +import com.baidu.hugegraph.traversal.algorithm.KoutTraverser; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.Log; import com.codahale.metrics.annotation.Timed; import com.fasterxml.jackson.annotation.JsonProperty; -import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.Node; -import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_CAPACITY; -import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_DEGREE; -import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_ELEMENTS_LIMIT; -import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_PATHS_LIMIT; - @Path("graphs/{graph}/traversers/kout") @Singleton public class KoutAPI extends TraverserAPI { @@ -96,9 +96,11 @@ public String get(@Context GraphManager manager, HugeGraph g = graph(manager, graph); - KoutTraverser traverser = new KoutTraverser(g); - Set ids = traverser.kout(sourceId, dir, edgeLabel, depth, - nearest, degree, capacity, limit); + Set ids; + try (KoutTraverser traverser = new KoutTraverser(g)) { + ids = traverser.kout(sourceId, dir, edgeLabel, depth, + nearest, degree, capacity, limit); + } return manager.serializer(g).writeList("vertices", ids); } @@ -132,11 +134,14 @@ public String post(@Context GraphManager manager, EdgeStep step = step(g, request.step); - KoutTraverser traverser = new KoutTraverser(g); - Set results = traverser.customizedKout( - sourceId, step, request.maxDepth, - request.nearest, request.capacity, - request.limit); + Set results; + try (KoutTraverser traverser = new KoutTraverser(g)) { + results = traverser.customizedKout(sourceId, step, + request.maxDepth, + request.nearest, + request.capacity, + request.limit); + } Set neighbors = new 
HashSet<>(); for (HugeTraverser.Node node : results) { diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/MultiNodeShortestPathAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/MultiNodeShortestPathAPI.java index c3cebec720..642724f000 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/MultiNodeShortestPathAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/MultiNodeShortestPathAPI.java @@ -19,6 +19,8 @@ package com.baidu.hugegraph.api.traversers; +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_CAPACITY; + import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -40,7 +42,7 @@ import com.baidu.hugegraph.backend.query.QueryResults; import com.baidu.hugegraph.core.GraphManager; import com.baidu.hugegraph.server.RestServer; -import com.baidu.hugegraph.traversal.algorithm.EdgeStep; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; import com.baidu.hugegraph.traversal.algorithm.MultiNodeShortestPathTraverser; import com.baidu.hugegraph.util.E; @@ -48,8 +50,6 @@ import com.codahale.metrics.annotation.Timed; import com.fasterxml.jackson.annotation.JsonProperty; -import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_CAPACITY; - @Path("graphs/{graph}/traversers/multinodeshortestpath") @Singleton public class MultiNodeShortestPathAPI extends TraverserAPI { @@ -80,12 +80,13 @@ public String post(@Context GraphManager manager, EdgeStep step = step(g, request.step); - MultiNodeShortestPathTraverser traverser = - new MultiNodeShortestPathTraverser(g); List paths; - paths = traverser.multiNodeShortestPath(vertices, step, - request.maxDepth, - request.capacity); + try (MultiNodeShortestPathTraverser traverser = + new MultiNodeShortestPathTraverser(g)) { + paths = traverser.multiNodeShortestPath(vertices, step, + request.maxDepth, + 
request.capacity); + } if (!request.withVertex) { return manager.serializer(g).writePaths("paths", paths, false); diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/NeighborRankAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/NeighborRankAPI.java index a5754fe4ef..3ad6bc58f1 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/NeighborRankAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/NeighborRankAPI.java @@ -67,7 +67,7 @@ public String neighborRank(@Context GraphManager manager, "The source of rank request can't be null"); E.checkArgument(request.steps != null && !request.steps.isEmpty(), "The steps of rank request can't be empty"); - E.checkArgument(request.steps.size() <= Long.valueOf(DEFAULT_MAX_DEPTH), + E.checkArgument(request.steps.size() <= Long.parseLong(DEFAULT_MAX_DEPTH), "The steps length of rank request can't exceed %s", DEFAULT_MAX_DEPTH); E.checkArgument(request.alpha > 0 && request.alpha <= 1.0, @@ -107,7 +107,7 @@ private static class RankRequest { @JsonProperty("alpha") private double alpha; @JsonProperty("capacity") - public long capacity = Long.valueOf(DEFAULT_CAPACITY); + public long capacity = Long.parseLong(DEFAULT_CAPACITY); @Override public String toString() { @@ -124,11 +124,11 @@ private static class Step { @JsonProperty("labels") public List labels; @JsonProperty("degree") - public long degree = Long.valueOf(DEFAULT_DEGREE); + public long degree = Long.parseLong(DEFAULT_DEGREE); @JsonProperty("skip_degree") public long skipDegree = 0L; @JsonProperty("top") - public int top = Integer.valueOf(DEFAULT_PATHS_LIMIT); + public int top = Integer.parseInt(DEFAULT_PATHS_LIMIT); public static final int DEFAULT_CAPACITY_PER_LAYER = 100000; diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/PathsAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/PathsAPI.java index 27ea55b2eb..3564825fd5 100644 --- 
a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/PathsAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/PathsAPI.java @@ -50,7 +50,7 @@ import com.baidu.hugegraph.core.GraphManager; import com.baidu.hugegraph.server.RestServer; import com.baidu.hugegraph.traversal.algorithm.CollectionPathsTraverser; -import com.baidu.hugegraph.traversal.algorithm.EdgeStep; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; import com.baidu.hugegraph.traversal.algorithm.PathsTraverser; import com.baidu.hugegraph.type.define.Directions; diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/SingleSourceShortestPathAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/SingleSourceShortestPathAPI.java index f31a65354c..5a87d413f5 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/SingleSourceShortestPathAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/SingleSourceShortestPathAPI.java @@ -91,7 +91,8 @@ public String get(@Context GraphManager manager, sourceId, dir, edgeLabel, weight, degree, skipDegree, capacity, limit); Iterator iterator = QueryResults.emptyIterator(); - if (withVertex) { + assert paths != null; + if (!paths.isEmpty() && withVertex) { iterator = g.vertices(paths.vertices().toArray()); } return manager.serializer(g).writeWeightedPaths(paths, iterator); diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/TemplatePathsAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/TemplatePathsAPI.java index c0e13d8fc0..fd84b8d1dd 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/TemplatePathsAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/TemplatePathsAPI.java @@ -19,6 +19,9 @@ package com.baidu.hugegraph.api.traversers; +import static 
com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_CAPACITY; +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_PATHS_LIMIT; + import java.util.ArrayList; import java.util.HashSet; import java.util.Iterator; @@ -43,14 +46,12 @@ import com.baidu.hugegraph.server.RestServer; import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; import com.baidu.hugegraph.traversal.algorithm.TemplatePathsTraverser; +import com.baidu.hugegraph.traversal.algorithm.steps.RepeatEdgeStep; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.Log; import com.codahale.metrics.annotation.Timed; import com.fasterxml.jackson.annotation.JsonProperty; -import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_CAPACITY; -import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_PATHS_LIMIT; - @Path("graphs/{graph}/traversers/templatepaths") @Singleton public class TemplatePathsAPI extends TraverserAPI { @@ -81,8 +82,7 @@ public String post(@Context GraphManager manager, HugeGraph g = graph(manager, graph); Iterator sources = request.sources.vertices(g); Iterator targets = request.targets.vertices(g); - List steps = - steps(g, request.steps); + List steps = steps(g, request.steps); TemplatePathsTraverser traverser = new TemplatePathsTraverser(g); Set paths; @@ -105,16 +105,22 @@ public String post(@Context GraphManager manager, return manager.serializer(g).writePaths("paths", paths, false, iter); } - private static List steps( - HugeGraph g, List steps) { - List edgeSteps = - new ArrayList<>(steps.size()); - for (RepeatEdgeStep step : steps) { + private static List steps(HugeGraph g, + List steps) { + List edgeSteps = new ArrayList<>(steps.size()); + for (TemplatePathStep step : steps) { edgeSteps.add(repeatEdgeStep(g, step)); } return edgeSteps; } + private static RepeatEdgeStep repeatEdgeStep(HugeGraph graph, + TemplatePathStep step) { + return new RepeatEdgeStep(graph, step.direction, step.labels, + 
step.properties, step.degree, + step.skipDegree, step.maxTimes); + } + private static class Request { @JsonProperty("sources") @@ -122,7 +128,7 @@ private static class Request { @JsonProperty("targets") public Vertices targets; @JsonProperty("steps") - public List steps; + public List steps; @JsonProperty("with_ring") public boolean withRing = false; @JsonProperty("capacity") @@ -143,14 +149,14 @@ public String toString() { } } - protected static class RepeatEdgeStep extends Step { + protected static class TemplatePathStep extends Step { @JsonProperty("max_times") public int maxTimes = 1; @Override public String toString() { - return String.format("RepeatEdgeStep{direction=%s,labels=%s," + + return String.format("TemplatePathStep{direction=%s,labels=%s," + "properties=%s,degree=%s,skipDegree=%s," + "maxTimes=%s}", this.direction, this.labels, this.properties, diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/TraverserAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/TraverserAPI.java index c9af8595da..d6141d95e4 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/TraverserAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/TraverserAPI.java @@ -24,8 +24,7 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.api.API; -import com.baidu.hugegraph.traversal.algorithm.EdgeStep; -import com.baidu.hugegraph.traversal.algorithm.TemplatePathsTraverser; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.fasterxml.jackson.annotation.JsonProperty; @@ -38,16 +37,6 @@ protected static EdgeStep step(HugeGraph graph, Step step) { step.degree, step.skipDegree); } - protected static TemplatePathsTraverser.RepeatEdgeStep repeatEdgeStep( - HugeGraph graph, TemplatePathsAPI.RepeatEdgeStep step) { - return new TemplatePathsTraverser.RepeatEdgeStep(graph, step.direction, - step.labels, - 
step.properties, - step.degree, - step.skipDegree, - step.maxTimes); - } - protected static class Step { @JsonProperty("direction") diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/WeightedShortestPathAPI.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/WeightedShortestPathAPI.java index bcebc05ff6..24805a9069 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/WeightedShortestPathAPI.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/api/traversers/WeightedShortestPathAPI.java @@ -94,7 +94,8 @@ public String get(@Context GraphManager manager, sourceId, targetId, dir, edgeLabel, weight, degree, skipDegree, capacity); Iterator iterator = QueryResults.emptyIterator(); - if (withVertex) { + if (path != null && withVertex) { + assert !path.node().path().isEmpty(); iterator = g.vertices(path.node().path().toArray()); } return manager.serializer(g).writeWeightedPath(path, iterator); diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/HugeFactoryAuthProxy.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/HugeFactoryAuthProxy.java index b74263d8b2..375afb5849 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/HugeFactoryAuthProxy.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/HugeFactoryAuthProxy.java @@ -114,8 +114,8 @@ private static void registerPrivateActions() { Reflection.registerMethodsToFilter(com.baidu.hugegraph.StandardHugeGraph.class, "waitUntilAllTasksCompleted", "lambda$0", "closeTx", "access$2", "access$3", "access$4", "access$5", "serializer", "loadSystemStore", "loadSchemaStore", "loadGraphStore", "analyzer", "access$8", "loadStoreProvider", "graphTransaction", "schemaTransaction", "openSchemaTransaction", "checkGraphNotClosed", "openSystemTransaction", "openGraphTransaction", "systemTransaction", "access$9", "access$10", "access$11", "access$12", "access$13", "access$14", "access$15", "access$16", "access$17", "access$18", 
"access$6", "access$7"); Reflection.registerFieldsToFilter(c("com.baidu.hugegraph.StandardHugeGraph$StandardHugeGraphParams"), "graph", "this$0"); Reflection.registerMethodsToFilter(c("com.baidu.hugegraph.StandardHugeGraph$StandardHugeGraphParams"), "graph", "access$1"); - Reflection.registerFieldsToFilter(c("com.baidu.hugegraph.StandardHugeGraph$TinkerpopTransaction"), "refs", "opened", "transactions", "this$0", "$assertionsDisabled"); - Reflection.registerMethodsToFilter(c("com.baidu.hugegraph.StandardHugeGraph$TinkerpopTransaction"), "resetState", "lambda$0", "access$1", "access$2", "access$3", "access$0", "setOpened", "doCommit", "verifyOpened", "doRollback", "doClose", "destroyTransaction", "graphTransaction", "schemaTransaction", "systemTransaction", "doOpen", "setClosed", "getOrNewTransaction", "lambda$1"); + Reflection.registerFieldsToFilter(c("com.baidu.hugegraph.StandardHugeGraph$TinkerPopTransaction"), "refs", "opened", "transactions", "this$0", "$assertionsDisabled"); + Reflection.registerMethodsToFilter(c("com.baidu.hugegraph.StandardHugeGraph$TinkerPopTransaction"), "resetState", "lambda$0", "access$1", "access$2", "access$3", "access$0", "setOpened", "doCommit", "verifyOpened", "doRollback", "doClose", "destroyTransaction", "graphTransaction", "schemaTransaction", "systemTransaction", "doOpen", "setClosed", "getOrNewTransaction", "lambda$1"); Reflection.registerFieldsToFilter(org.apache.tinkerpop.gremlin.structure.util.AbstractThreadLocalTransaction.class, "readWriteConsumerInternal", "closeConsumerInternal", "transactionListeners"); Reflection.registerMethodsToFilter(org.apache.tinkerpop.gremlin.structure.util.AbstractThreadLocalTransaction.class, "doClose", "fireOnCommit", "fireOnRollback", "doReadWrite", "lambda$fireOnRollback$1", "lambda$fireOnCommit$0"); Reflection.registerFieldsToFilter(org.apache.tinkerpop.gremlin.structure.util.AbstractTransaction.class, "g"); diff --git 
a/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/HugeGraphAuthProxy.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/HugeGraphAuthProxy.java index 29459a38b9..37bcf7873c 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/HugeGraphAuthProxy.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/HugeGraphAuthProxy.java @@ -62,6 +62,7 @@ import com.baidu.hugegraph.backend.query.Query; import com.baidu.hugegraph.backend.store.BackendFeatures; import com.baidu.hugegraph.backend.store.BackendStoreSystemInfo; +import com.baidu.hugegraph.backend.store.raft.RaftGroupManager; import com.baidu.hugegraph.config.HugeConfig; import com.baidu.hugegraph.exception.NotSupportException; import com.baidu.hugegraph.iterator.FilterIterator; @@ -631,6 +632,12 @@ public UserManager userManager() { return this.userManager; } + @Override + public RaftGroupManager raftGroupManager(String group) { + this.verifyAdminPermission(); + return this.hugegraph.raftGroupManager(group); + } + @Override public void initBackend() { verifyAdminPermission(); diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/StandardAuthenticator.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/StandardAuthenticator.java index 4682cd847f..0e0d215c05 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/StandardAuthenticator.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/auth/StandardAuthenticator.java @@ -65,7 +65,7 @@ private String inputPassword() { return new String(chars); } else { System.out.print(prompt); - @SuppressWarnings("resource") + @SuppressWarnings("resource") // just wrapper of System.in Scanner scanner = new Scanner(System.in); return scanner.nextLine(); } diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/config/ServerOptions.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/config/ServerOptions.java index c194939956..f5a5e3c289 100644 --- 
a/hugegraph-api/src/main/java/com/baidu/hugegraph/config/ServerOptions.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/config/ServerOptions.java @@ -222,30 +222,21 @@ public static synchronized ServerOptions instance() { "hugegraph:9fd95c9c-711b-415b-b85f-d4df46ba5c31" ); - public static final ConfigOption SERVER_KEYSTORE_FILE = + public static final ConfigOption SSL_KEYSTORE_FILE = new ConfigOption<>( - "ssl.server_keystore_file", + "ssl.keystore_file", "The path of server keystore file used when https " + "protocol is enabled.", disallowEmpty(), - "server.keystore" + "conf/hugegraph-server.keystore" ); - public static final ConfigOption SERVER_KEYSTORE_PASSWORD = + public static final ConfigOption SSL_KEYSTORE_PASSWORD = new ConfigOption<>( - "ssl.server_keystore_password", - "The password of the path of the server keystore file " + - "used when the https protocol is enabled.", + "ssl.keystore_password", + "The password of the server keystore file " + + "when the https protocol is enabled.", null, - "" - ); - - public static final ConfigOption SERVER_PROTOCOL = - new ConfigOption<>( - "server.protocol", - "The protocol of rest-server, allowed values are: " + - "http or https.", - allowValues("http", "https"), - "http" + "hugegraph" ); } diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/core/GraphManager.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/core/GraphManager.java index d50df8c056..82269a28b0 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/core/GraphManager.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/core/GraphManager.java @@ -72,6 +72,7 @@ public GraphManager(HugeConfig conf) { this.loadGraphs(conf.getMap(ServerOptions.GRAPHS)); // this.installLicense(conf, ""); + // Raft will load snapshot firstly then launch election and replay log this.waitGraphsStarted(); this.checkBackendVersionOrExit(); this.serverStarted(conf); diff --git 
a/hugegraph-api/src/main/java/com/baidu/hugegraph/serializer/JsonSerializer.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/serializer/JsonSerializer.java index e9dc3f2713..04680da683 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/serializer/JsonSerializer.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/serializer/JsonSerializer.java @@ -278,14 +278,19 @@ public String writeSimilars(SimilarsMap similars, @Override public String writeWeightedPath(NodeWithWeight path, Iterator vertices) { - return JsonUtil.toJson(ImmutableMap.of("path", path.toMap(), + Map pathMap = path == null ? + ImmutableMap.of() : path.toMap(); + return JsonUtil.toJson(ImmutableMap.of("path", pathMap, "vertices", vertices)); } @Override public String writeWeightedPaths(WeightedPaths paths, Iterator vertices) { - return JsonUtil.toJson(ImmutableMap.of("paths", paths.toMap(), + Map> pathMap = paths == null ? + ImmutableMap.of() : + paths.toMap(); + return JsonUtil.toJson(ImmutableMap.of("paths", pathMap, "vertices", vertices)); } diff --git a/hugegraph-api/src/main/java/com/baidu/hugegraph/server/RestServer.java b/hugegraph-api/src/main/java/com/baidu/hugegraph/server/RestServer.java index 2cb2cbbabd..2895083039 100644 --- a/hugegraph-api/src/main/java/com/baidu/hugegraph/server/RestServer.java +++ b/hugegraph-api/src/main/java/com/baidu/hugegraph/server/RestServer.java @@ -70,23 +70,26 @@ public void start() throws IOException { } private HttpServer configHttpServer(URI uri, ResourceConfig rc) { + String protocol = uri.getScheme(); final HttpServer server; - String protocol = this.conf.get(ServerOptions.SERVER_PROTOCOL); - String keystoreServerFile = this.conf.get(ServerOptions.SERVER_KEYSTORE_FILE); - String keystoreServerPassword = this.conf.get(ServerOptions.SERVER_KEYSTORE_PASSWORD); if (protocol != null && protocol.equals("https")) { SSLContextConfigurator sslContext = new SSLContextConfigurator(); - // set up security context - 
sslContext.setKeyStoreFile(keystoreServerFile); - sslContext.setKeyStorePass(keystoreServerPassword); - SSLEngineConfigurator sslEngineConfigurator = new SSLEngineConfigurator(sslContext) - .setClientMode(false) - .setWantClientAuth(true); + String keystoreFile = this.conf.get( + ServerOptions.SSL_KEYSTORE_FILE); + String keystorePass = this.conf.get( + ServerOptions.SSL_KEYSTORE_PASSWORD); + sslContext.setKeyStoreFile(keystoreFile); + sslContext.setKeyStorePass(keystorePass); + SSLEngineConfigurator sslConfig = new SSLEngineConfigurator( + sslContext); + sslConfig.setClientMode(false); + sslConfig.setWantClientAuth(true); server = GrizzlyHttpServerFactory.createHttpServer(uri, rc, true, - sslEngineConfigurator); + sslConfig); } else { server = GrizzlyHttpServerFactory.createHttpServer(uri, rc, false); } + Collection listeners = server.getListeners(); E.checkState(listeners.size() > 0, "Http Server should have some listeners, but now is none"); diff --git a/hugegraph-core/pom.xml b/hugegraph-core/pom.xml index dca7010094..1aa6354f47 100644 --- a/hugegraph-core/pom.xml +++ b/hugegraph-core/pom.xml @@ -19,7 +19,7 @@ com.baidu.hugegraph hugegraph-common - 1.7.8 + 1.8.1 diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/HugeGraph.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/HugeGraph.java index acd6ecca45..66f65eabd0 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/HugeGraph.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/HugeGraph.java @@ -36,6 +36,7 @@ import com.baidu.hugegraph.backend.query.Query; import com.baidu.hugegraph.backend.store.BackendFeatures; import com.baidu.hugegraph.backend.store.BackendStoreSystemInfo; +import com.baidu.hugegraph.backend.store.raft.RaftGroupManager; import com.baidu.hugegraph.schema.EdgeLabel; import com.baidu.hugegraph.schema.IndexLabel; import com.baidu.hugegraph.schema.PropertyKey; @@ -150,6 +151,7 @@ public interface HugeGraph extends Graph { public UserManager userManager(); public 
TaskScheduler taskScheduler(); + public RaftGroupManager raftGroupManager(String group); public void proxy(HugeGraph graph); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/StandardHugeGraph.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/StandardHugeGraph.java index cf0a82016c..adfb8d60d6 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/StandardHugeGraph.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/StandardHugeGraph.java @@ -54,6 +54,8 @@ import com.baidu.hugegraph.backend.store.BackendStore; import com.baidu.hugegraph.backend.store.BackendStoreProvider; import com.baidu.hugegraph.backend.store.BackendStoreSystemInfo; +import com.baidu.hugegraph.backend.store.raft.RaftBackendStoreProvider; +import com.baidu.hugegraph.backend.store.raft.RaftGroupManager; import com.baidu.hugegraph.backend.store.ram.RamTable; import com.baidu.hugegraph.backend.tx.GraphTransaction; import com.baidu.hugegraph.backend.tx.SchemaTransaction; @@ -95,7 +97,7 @@ public class StandardHugeGraph implements HugeGraph { public static final Class[] PROTECT_CLASSES = { StandardHugeGraph.class, StandardHugeGraph.StandardHugeGraphParams.class, - StandardHugeGraph.TinkerpopTransaction.class, + TinkerPopTransaction.class, StandardHugeGraph.Txs.class, StandardHugeGraph.SysTransaction.class }; @@ -125,7 +127,7 @@ public class StandardHugeGraph implements HugeGraph { private final HugeFeatures features; private final BackendStoreProvider storeProvider; - private final TinkerpopTransaction tx; + private final TinkerPopTransaction tx; private final RamTable ramtable; @@ -173,7 +175,7 @@ public StandardHugeGraph(HugeConfig config) { throw new HugeException(message); } - this.tx = new TinkerpopTransaction(this); + this.tx = new TinkerPopTransaction(this); SnowflakeIdGenerator.init(this.params); @@ -305,6 +307,17 @@ public void truncateBackend() { this.storeProvider.initSystemInfo(this); this.serverStarted(this.serverInfoManager().selfServerId(), 
this.serverInfoManager().selfServerRole()); + /* + * Take the initiative to generate a snapshot, it can avoid this + * situation: when the server restart need to read the database + * (such as checkBackendVersionInfo), it happens that raft replays + * the truncate log, at the same time, the store has been cleared + * (truncate) but init-store has not been completed, which will + * cause reading errors. + * When restarting, load the snapshot first and then read backend, + * will not encounter such an intermediate state. + */ + this.storeProvider.writeSnapshot(); } finally { LockUtil.unlock(this.name, LockUtil.GRAPH_LOCK); } @@ -841,6 +854,16 @@ public UserManager userManager() { return this.userManager; } + @Override + public RaftGroupManager raftGroupManager(String group) { + if (!(this.storeProvider instanceof RaftBackendStoreProvider)) { + return null; + } + RaftBackendStoreProvider provider = + ((RaftBackendStoreProvider) this.storeProvider); + return provider.raftNodeManager(group); + } + @Override public HugeConfig configuration() { return this.configuration; @@ -863,7 +886,7 @@ public boolean sameAs(HugeGraph graph) { @Override public long now() { - return ((TinkerpopTransaction) this.tx()).openedTime(); + return ((TinkerPopTransaction) this.tx()).openedTime(); } private void closeTx() { @@ -1021,7 +1044,7 @@ public RamTable ramtable() { } } - private class TinkerpopTransaction extends AbstractThreadLocalTransaction { + private class TinkerPopTransaction extends AbstractThreadLocalTransaction { // Times opened from upper layer private final AtomicInteger refs; @@ -1030,7 +1053,7 @@ private class TinkerpopTransaction extends AbstractThreadLocalTransaction { // Backend transactions private final ThreadLocal transactions; - public TinkerpopTransaction(Graph graph) { + public TinkerPopTransaction(Graph graph) { super(graph); this.refs = new AtomicInteger(); @@ -1114,7 +1137,7 @@ protected void doClose() { @Override public String toString() { - return 
String.format("TinkerpopTransaction{opened=%s, txs=%s}", + return String.format("TinkerPopTransaction{opened=%s, txs=%s}", this.opened.get(), this.transactions.get()); } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/AbstractCache.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/AbstractCache.java index 8243b0360b..14d251e515 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/AbstractCache.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/AbstractCache.java @@ -34,6 +34,9 @@ public abstract class AbstractCache implements Cache { public static final int DEFAULT_SIZE = 1 * MB; public static final int MAX_INIT_CAP = 100 * MB; + public static final String ACTION_INVALID = "invalid"; + public static final String ACTION_CLEAR = "clear"; + protected static final Logger LOG = Log.logger(Cache.class); private volatile long hits = 0L; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/CachedGraphTransaction.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/CachedGraphTransaction.java index 056f2c3fce..1d1a5bcdc3 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/CachedGraphTransaction.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/CachedGraphTransaction.java @@ -19,6 +19,9 @@ package com.baidu.hugegraph.backend.cache; +import static com.baidu.hugegraph.backend.cache.AbstractCache.ACTION_CLEAR; +import static com.baidu.hugegraph.backend.cache.AbstractCache.ACTION_INVALID; + import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -138,7 +141,7 @@ private void listenChanges() { this.graph(), event); event.checkArgs(String.class, HugeType.class, Id.class); Object[] args = event.args(); - if ("invalid".equals(args[0])) { + if (ACTION_INVALID.equals(args[0])) { HugeType type = (HugeType) args[1]; Id id = (Id) args[2]; if (type.isVertex()) { @@ -153,7 
+156,7 @@ private void listenChanges() { this.edgesCache.clear(); } return true; - } else if ("clear".equals(args[0])) { + } else if (ACTION_CLEAR.equals(args[0])) { this.verticesCache.clear(); this.edgesCache.clear(); return true; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/CachedSchemaTransaction.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/CachedSchemaTransaction.java index 8f7c1fde54..91926b5d41 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/CachedSchemaTransaction.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/cache/CachedSchemaTransaction.java @@ -19,6 +19,9 @@ package com.baidu.hugegraph.backend.cache; +import static com.baidu.hugegraph.backend.cache.AbstractCache.ACTION_CLEAR; +import static com.baidu.hugegraph.backend.cache.AbstractCache.ACTION_INVALID; + import java.util.ArrayList; import java.util.List; import java.util.Set; @@ -104,7 +107,7 @@ private void listenChanges() { this.graph(), event); event.checkArgs(String.class, HugeType.class, Id.class); Object[] args = event.args(); - if ("invalid".equals(args[0])) { + if (ACTION_INVALID.equals(args[0])) { HugeType type = (HugeType) args[1]; Id id = (Id) args[2]; this.arrayCaches.remove(type, id); @@ -122,7 +125,7 @@ private void listenChanges() { this.nameCache.invalidate(prefixedName); } return true; - } else if ("clear".equals(args[0])) { + } else if (ACTION_CLEAR.equals(args[0])) { this.clearCache(); return true; } @@ -193,7 +196,7 @@ protected void addSchema(SchemaElement schema) { @SuppressWarnings("unchecked") protected T getSchema(HugeType type, Id id) { // try get from optimized array cache - if (id.number() && id.asLong() > 0) { + if (id.number() && id.asLong() > 0L) { SchemaElement value = this.arrayCaches.get(type, id); if (value != null) { return (T) value; @@ -320,14 +323,19 @@ public void updateIfNeeded(V schema) { return; } Id id = schema.id(); - if (id.number() && id.asLong() > 0) { 
+ if (id.number() && id.asLong() > 0L) { this.set(schema.type(), id, schema); } } public V get(HugeType type, Id id) { - assert id.number() && id.asLong() > 0 : id; - int key = (int) id.asLong(); + assert id.number(); + long longId = id.asLong(); + if (longId <= 0L) { + assert false : id; + return null; + } + int key = (int) longId; if (key >= this.size) { return null; } @@ -346,8 +354,13 @@ public V get(HugeType type, Id id) { } public void set(HugeType type, Id id, V value) { - assert id.number() && id.asLong() > 0 : id; - int key = (int) id.asLong(); + assert id.number(); + long longId = id.asLong(); + if (longId <= 0L) { + assert false : id; + return; + } + int key = (int) longId; if (key >= this.size) { return; } @@ -371,8 +384,12 @@ public void set(HugeType type, Id id, V value) { } public void remove(HugeType type, Id id) { - assert id.number() && id.asLong() > 0 : id; - int key = (int) id.asLong(); + assert id.number(); + long longId = id.asLong(); + if (longId <= 0L) { + return; + } + int key = (int) longId; V value = null; if (key >= this.size) { return; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/serializer/BinarySerializer.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/serializer/BinarySerializer.java index 085994a979..9d95d5e37f 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/serializer/BinarySerializer.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/serializer/BinarySerializer.java @@ -1092,6 +1092,7 @@ private T readEnum(HugeKeys key, } private void writeLong(HugeKeys key, long value) { + @SuppressWarnings("resource") BytesBuffer buffer = new BytesBuffer(8); buffer.writeVLong(value); this.entry.column(formatColumnName(key), buffer.bytes()); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/AbstractBackendStoreProvider.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/AbstractBackendStoreProvider.java index 
2b42eee852..7f5a44920c 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/AbstractBackendStoreProvider.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/AbstractBackendStoreProvider.java @@ -150,6 +150,16 @@ public void initSystemInfo(HugeGraph graph) { LOG.debug("Graph '{}' system info has been initialized", this.graph); } + @Override + public void writeSnapshot() { + // TODO: to be implemented + } + + @Override + public void readSnapshot() { + // TODO: to be implemented + } + @Override public BackendStore loadSchemaStore(final String name) { LOG.debug("The '{}' StoreProvider load SchemaStore '{}'", diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/BackendSessionPool.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/BackendSessionPool.java index c22f7a324f..dcf1bd593b 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/BackendSessionPool.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/BackendSessionPool.java @@ -94,7 +94,7 @@ private void detectSession(BackendSession session) { session.update(); } - public Pair closeSession() { + private Pair closeSession() { int sessionCount = this.sessionCount.get(); if (sessionCount <= 0) { assert sessionCount == 0 : sessionCount; @@ -104,7 +104,7 @@ public Pair closeSession() { assert sessionCount > 0 : sessionCount; BackendSession session = this.threadLocalSession.get(); if (session == null) { - LOG.warn("Current session has ever been closed: {}", this); + LOG.debug("Current session has ever been closed: {}", this); return Pair.of(sessionCount, -1); } @@ -128,13 +128,13 @@ public Pair closeSession() { return Pair.of(this.sessionCount.decrementAndGet(), ref); } - protected void forceResetSessions() { + public void forceResetSessions() { for (BackendSession session : this.sessions.values()) { session.reset(); } } - public void close() { + public boolean close() { Pair result = 
Pair.of(-1, -1); try { result = this.closeSession(); @@ -146,6 +146,7 @@ public void close() { LOG.debug("Now(after close({})) session count is: {}, " + "current session reference is: {}", this, result.getLeft(), result.getRight()); + return result.getLeft() == 0; } public boolean closed() { diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/BackendStoreProvider.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/BackendStoreProvider.java index 6f838b3df0..e7f815b64e 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/BackendStoreProvider.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/BackendStoreProvider.java @@ -54,6 +54,10 @@ public interface BackendStoreProvider { public void initSystemInfo(HugeGraph graph); + public void writeSnapshot(); + + public void readSnapshot(); + public void listen(EventListener listener); public void unlisten(EventListener listener); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftBackendStore.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftBackendStore.java index 42d652138a..3515f12b9e 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftBackendStore.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftBackendStore.java @@ -36,8 +36,8 @@ import com.baidu.hugegraph.backend.store.BackendMutation; import com.baidu.hugegraph.backend.store.BackendStore; import com.baidu.hugegraph.backend.store.BackendStoreProvider; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType; import com.baidu.hugegraph.config.HugeConfig; import com.baidu.hugegraph.type.HugeType; import 
com.baidu.hugegraph.util.Log; @@ -124,6 +124,9 @@ public void truncate() { @Override public void mutate(BackendMutation mutation) { + if (mutation.isEmpty()) { + return; + } // Just add to local buffer this.getOrNewBatch().add(mutation); } @@ -142,6 +145,7 @@ public Number queryNumber(Query query) { @Override public void beginTx() { + // Don't write raft log, commitTx(in statemachine) will call beginTx } @Override @@ -182,16 +186,6 @@ public long getCounter(HugeType type) { return (Long) this.queryByRaft(type, o -> this.store.getCounter(type)); } - @Override - public void writeSnapshot(String snapshotPath) { - this.store.writeSnapshot(snapshotPath); - } - - @Override - public void readSnapshot(String snapshotPath) { - this.store.readSnapshot(snapshotPath); - } - private Object submitAndWait(StoreAction action, byte[] data) { StoreType type = this.context.storeType(this.store()); return this.submitAndWait(new StoreCommand(type, action, data)); @@ -203,11 +197,11 @@ private Object submitAndWait(StoreCommand command) { } private Object queryByRaft(Object query, Function func) { - if (!this.context.isSafeRead()) { + if (this.node().selfIsLeader() || !this.context.isSafeRead()) { return func.apply(query); } - RaftClosure future = new RaftClosure(); + RaftClosure future = new RaftClosure<>(); ReadIndexClosure readIndexClosure = new ReadIndexClosure() { @Override public void run(Status status, long index, byte[] reqCtx) { @@ -223,10 +217,10 @@ public void run(Status status, long index, byte[] reqCtx) { this.node().node().readIndex(BytesUtil.EMPTY_BYTES, readIndexClosure); try { return future.waitFinished(); - } catch (Throwable t) { + } catch (Throwable e) { LOG.warn("Failed to execute query {} with read-index: {}", query, future.status()); - throw new BackendException("Failed to execute query", t); + throw new BackendException("Failed to execute query", e); } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftBackendStoreProvider.java 
b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftBackendStoreProvider.java index 6ef7ece63e..aeb40eae56 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftBackendStoreProvider.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftBackendStoreProvider.java @@ -24,13 +24,13 @@ import org.slf4j.Logger; -import com.alipay.sofa.jraft.rpc.RpcServer; import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.HugeGraphParams; import com.baidu.hugegraph.backend.store.BackendStore; import com.baidu.hugegraph.backend.store.BackendStoreProvider; import com.baidu.hugegraph.backend.store.BackendStoreSystemInfo; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType; import com.baidu.hugegraph.event.EventHub; import com.baidu.hugegraph.event.EventListener; import com.baidu.hugegraph.util.E; @@ -55,7 +55,10 @@ public RaftBackendStoreProvider(BackendStoreProvider provider, this.schemaStore = null; this.graphStore = null; this.systemStore = null; - this.registerRpcRequestProcessors(); + } + + public RaftGroupManager raftNodeManager(String group) { + return this.context.raftNodeManager(group); } private Set stores() { @@ -63,12 +66,6 @@ private Set stores() { this.systemStore); } - private void registerRpcRequestProcessors() { - RpcServer rpcServer = this.context.rpcServer(); - rpcServer.registerProcessor(new StoreCommandRequestProcessor( - this.context)); - } - private void checkOpened() { E.checkState(this.graph() != null && this.schemaStore != null && @@ -133,7 +130,9 @@ public void open(String name) { @Override public void waitStoreStarted() { this.context.initRaftNode(); + LOG.info("The raft node is initialized"); this.context.waitRaftNodeStarted(); + LOG.info("The raft store is started"); } @Override @@ -190,6 +189,20 @@ 
public void initSystemInfo(HugeGraph graph) { LOG.debug("Graph '{}' system info has been initialized", this.graph()); } + @Override + public void writeSnapshot() { + StoreCommand command = new StoreCommand(StoreType.ALL, + StoreAction.SNAPSHOT, null); + StoreClosure closure = new StoreClosure(command); + this.context.node().submitAndWait(command, closure); + LOG.debug("Graph '{}' has written snapshot", this.graph()); + } + + @Override + public void readSnapshot() { + // How to read snapshot by jraft explicitly? + } + + @Override + public void listen(EventListener listener) { + this.provider.listen(listener); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftClosure.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftClosure.java index cd4b786cbf..d45e860c3b 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftClosure.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftClosure.java @@ -34,18 +34,18 @@ import com.baidu.hugegraph.backend.BackendException; import com.baidu.hugegraph.util.Log; -public class RaftClosure implements Closure { +public class RaftClosure implements Closure { private static final Logger LOG = Log.logger(StoreClosure.class); - private final CompletableFuture future; + private final CompletableFuture> future; public RaftClosure() { this.future = new CompletableFuture<>(); } - public Object waitFinished() throws Throwable { - RaftResult result = this.get(); + public T waitFinished() throws Throwable { + RaftResult result = this.get(); if (result.status().isOk()) { return result.callback().get(); } else { @@ -57,22 +57,24 @@ public Status status() { return this.get().status(); } - private RaftResult get() { + private RaftResult get() { try { return this.future.get(WAIT_RAFT_LOG_TIMEOUT, TimeUnit.MILLISECONDS); - } catch (InterruptedException | ExecutionException e) { + } catch (ExecutionException e) { throw new
BackendException("ExecutionException", e); + } catch (InterruptedException e) { + throw new BackendException("InterruptedException", e); } catch (TimeoutException e) { throw new BackendException("Wait closure timeout"); } } - public void complete(Status status, Supplier callback) { - this.future.complete(new RaftResult(status, callback)); + public void complete(Status status, Supplier callback) { + this.future.complete(new RaftResult<>(status, callback)); } public void failure(Status status, Throwable exception) { - this.future.complete(new RaftResult(status, exception)); + this.future.complete(new RaftResult<>(status, exception)); } @Override diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftGroupManager.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftGroupManager.java new file mode 100644 index 0000000000..c15ca4e89d --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftGroupManager.java @@ -0,0 +1,39 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package com.baidu.hugegraph.backend.store.raft; + +import java.util.List; + +public interface RaftGroupManager { + + public String group(); + + public List listPeers(); + + public String getLeader(); + + public String transferLeaderTo(String endpoint); + + public String setLeader(String endpoint); + + public String addPeer(String endpoint); + + public String removePeer(String endpoint); +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftGroupManagerImpl.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftGroupManagerImpl.java new file mode 100644 index 0000000000..28fa6eeb00 --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftGroupManagerImpl.java @@ -0,0 +1,156 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package com.baidu.hugegraph.backend.store.raft; + +import java.util.List; +import java.util.stream.Collectors; + +import com.alipay.sofa.jraft.Node; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.entity.PeerId; +import com.baidu.hugegraph.backend.BackendException; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse; +import com.baidu.hugegraph.backend.store.raft.rpc.RpcForwarder; +import com.baidu.hugegraph.util.E; +import com.google.protobuf.Message; + +public class RaftGroupManagerImpl implements RaftGroupManager { + + private final String group; + private final RaftNode raftNode; + private final RpcForwarder rpcForwarder; + + public RaftGroupManagerImpl(RaftSharedContext context) { + this.group = context.group(); + this.raftNode = context.node(); + this.rpcForwarder = context.rpcForwarder(); + } + + @Override + public String group() { + return this.group; + } + + @Override + public List listPeers() { + if (this.raftNode.selfIsLeader()) { + List peerIds = this.raftNode.node().listPeers(); + return peerIds.stream().map(PeerId::toString) + .collect(Collectors.toList()); + } + // If current node is not leader, forward request to leader + ListPeersRequest request = ListPeersRequest.getDefaultInstance(); + try { + RaftClosure future; + future = this.forwardToLeader(request); + ListPeersResponse response = future.waitFinished(); + return response.getEndpointsList(); + } catch (Throwable e) { + throw new BackendException("Failed to list peers", e); + } + } + + @Override + public String getLeader() { + PeerId leaderId = this.raftNode.leaderId(); + E.checkState(leaderId != null, + "There is no leader for raft group %s", this.group); + return leaderId.toString(); + } + + 
@Override + public String transferLeaderTo(String endpoint) { + PeerId peerId = PeerId.parsePeer(endpoint); + Status status = this.raftNode.node().transferLeadershipTo(peerId); + if (!status.isOk()) { + throw new BackendException( + "Failed to transfer leader to '%s', raft error: %s", + endpoint, status.getErrorMsg()); + } + return peerId.toString(); + } + + @Override + public String setLeader(String endpoint) { + PeerId newLeaderId = PeerId.parsePeer(endpoint); + Node node = this.raftNode.node(); + // If expected endpoint has already been raft leader + if (node.getLeaderId().equals(newLeaderId)) { + return newLeaderId.toString(); + } + if (this.raftNode.selfIsLeader()) { + // If current node is the leader, transfer directly + this.transferLeaderTo(endpoint); + } else { + // If current node is not leader, forward request to leader + SetLeaderRequest request = SetLeaderRequest.newBuilder() + .setEndpoint(endpoint) + .build(); + try { + RaftClosure future; + future = this.forwardToLeader(request); + future.waitFinished(); + } catch (Throwable e) { + throw new BackendException("Failed to set leader to '%s'", + e, endpoint); + } + } + return newLeaderId.toString(); + } + + @Override + public String addPeer(String endpoint) { + E.checkArgument(this.raftNode.selfIsLeader(), + "Operation add-peer can only be executed on leader"); + PeerId peerId = PeerId.parsePeer(endpoint); + RaftClosure future = new RaftClosure<>(); + try { + this.raftNode.node().addPeer(peerId, future); + future.waitFinished(); + } catch (Throwable e) { + throw new BackendException("Failed to add peer '%s'", e, endpoint); + } + return peerId.toString(); + } + + @Override + public String removePeer(String endpoint) { + E.checkArgument(this.raftNode.selfIsLeader(), + "Operation remove-peer can only be executed on leader"); + PeerId peerId = PeerId.parsePeer(endpoint); + RaftClosure future = new RaftClosure<>(); + try { + this.raftNode.node().removePeer(peerId, future); + future.waitFinished(); + } catch
(Throwable e) { + throw new BackendException("Failed to remove peer '%s'", + e, endpoint); + } + return peerId.toString(); + } + + private RaftClosure forwardToLeader(Message request) { + PeerId leaderId = this.raftNode.leaderId(); + return this.rpcForwarder.forwardToLeader(leaderId, request); + } +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftNode.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftNode.java index 6a0fac6d3b..9a7b3ee978 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftNode.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftNode.java @@ -20,13 +20,12 @@ package com.baidu.hugegraph.backend.store.raft; import static com.baidu.hugegraph.backend.store.raft.RaftSharedContext.BUSY_SLEEP_FACTOR; -import static com.baidu.hugegraph.backend.store.raft.RaftSharedContext.WAIT_RPC_TIMEOUT; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import org.slf4j.Logger; @@ -34,25 +33,16 @@ import com.alipay.sofa.jraft.RaftGroupService; import com.alipay.sofa.jraft.Status; import com.alipay.sofa.jraft.closure.ReadIndexClosure; -import com.alipay.sofa.jraft.core.NodeImpl; import com.alipay.sofa.jraft.core.Replicator.ReplicatorStateListener; import com.alipay.sofa.jraft.entity.PeerId; import com.alipay.sofa.jraft.entity.Task; import com.alipay.sofa.jraft.error.RaftError; import com.alipay.sofa.jraft.option.NodeOptions; -import com.alipay.sofa.jraft.rpc.ClientService; -import com.alipay.sofa.jraft.rpc.RpcResponseClosure; import com.alipay.sofa.jraft.rpc.RpcServer; import com.alipay.sofa.jraft.util.BytesUtil; -import com.alipay.sofa.jraft.util.Endpoint; import 
com.baidu.hugegraph.backend.BackendException; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse; -import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.LZ4Util; import com.baidu.hugegraph.util.Log; -import com.google.protobuf.Message; -import com.google.protobuf.ZeroByteStringHelper; public class RaftNode { @@ -61,11 +51,8 @@ public class RaftNode { private final RaftSharedContext context; private final Node node; private final StoreStateMachine stateMachine; - private final AtomicLong leaderTerm; - - private final Object electedLock; - private volatile boolean elected; - private volatile boolean started; + private final AtomicReference leaderInfo; + private final AtomicBoolean started; private final AtomicInteger busyCounter; public RaftNode(RaftSharedContext context) { @@ -77,10 +64,8 @@ public RaftNode(RaftSharedContext context) { throw new BackendException("Failed to init raft node", e); } this.node.addReplicatorStateListener(new RaftNodeStateListener()); - this.leaderTerm = new AtomicLong(-1); - this.electedLock = new Object(); - this.elected = false; - this.started = false; + this.leaderInfo = new AtomicReference<>(LeaderInfo.NO_LEADER); + this.started = new AtomicBoolean(false); this.busyCounter = new AtomicInteger(); } @@ -97,21 +82,35 @@ public PeerId nodeId() { } public PeerId leaderId() { - return this.node.getLeaderId(); + return this.leaderInfo.get().leaderId; } - public boolean isRaftLeader() { - return this.leaderTerm.get() > 0; + public boolean selfIsLeader() { + return this.leaderInfo.get().selfIsLeader; } - public void leaderTerm(long term) { - this.leaderTerm.set(term); + public void onLeaderInfoChange(PeerId leaderId, boolean selfIsLeader) { + leaderId = leaderId != null ? 
leaderId.copy() : null; + this.leaderInfo.set(new LeaderInfo(leaderId, selfIsLeader)); } public void shutdown() { this.node.shutdown(); } + public void snapshot() { + if (!this.context.useSnapshot()) { + return; + } + RaftClosure future = new RaftClosure<>(); + try { + this.node().snapshot(future); + future.waitFinished(); + } catch (Throwable e) { + throw new BackendException("Failed to generate snapshot", e); + } + } + private Node initRaftNode() throws IOException { NodeOptions nodeOptions = this.context.nodeOptions(); nodeOptions.setFsm(this.stateMachine); @@ -129,10 +128,11 @@ private Node initRaftNode() throws IOException { private void submitCommand(StoreCommand command, StoreClosure closure) { // Wait leader elected - this.waitLeaderElected(RaftSharedContext.NO_TIMEOUT); - - if (!this.isRaftLeader()) { - this.forwardToLeader(command, closure); + LeaderInfo leaderInfo = this.waitLeaderElected( + RaftSharedContext.NO_TIMEOUT); + if (!leaderInfo.selfIsLeader) { + this.context.rpcForwarder().forwardToLeader(leaderInfo.leaderId, + command, closure); return; } // Sleep a while when raft node is busy @@ -143,6 +143,7 @@ private void submitCommand(StoreCommand command, StoreClosure closure) { // compress return BytesBuffer ByteBuffer buffer = LZ4Util.compress(command.data(), RaftSharedContext.BLOCK_SIZE) + .forReadWritten() .asByteBuffer(); LOG.debug("The bytes size of command(compressed) {} is {}", command.action(), buffer.limit()); @@ -159,48 +160,39 @@ public Object submitAndWait(StoreCommand command, StoreClosure future) { * in forwardToLeader, written like this to simplify the code */ return future.waitFinished(); - } catch (RuntimeException e) { - throw e; - } catch (Throwable t) { - throw new BackendException(t); - } - } - - public void onElected(boolean value) { - synchronized(this.electedLock) { - this.elected = value; - this.electedLock.notify(); + } catch (Throwable e) { + throw new BackendException("Failed to wait store command %s", + e, command); } } - 
protected void waitLeaderElected(int timeout) { + protected LeaderInfo waitLeaderElected(int timeout) { String group = this.context.group(); - if (this.node.getLeaderId() != null) { - return; + LeaderInfo leaderInfo = this.leaderInfo.get(); + if (leaderInfo.leaderId != null) { + return leaderInfo; } long beginTime = System.currentTimeMillis(); - synchronized(this.electedLock) { - while (!this.elected) { - try { - this.electedLock.wait(RaftSharedContext.POLL_INTERVAL); - } catch (InterruptedException e) { - throw new BackendException( - "Interrupted while waiting raft group '%s' " + - "election", e, group); - } - if (this.elected) { - break; - } - long consumedTime = System.currentTimeMillis() - beginTime; - if (timeout > 0 && consumedTime >= timeout) { - throw new BackendException( - "Wait raft group '{}' election timeout({}ms)", - group, consumedTime); - } - LOG.warn("Waiting raft group '{}' election cost {}s", - group, consumedTime / 1000.0); + while (leaderInfo.leaderId == null) { + try { + Thread.sleep(RaftSharedContext.POLL_INTERVAL); + } catch (InterruptedException e) { + throw new BackendException( + "Waiting for raft group '%s' election is interrupted", + e, group); } + long consumedTime = System.currentTimeMillis() - beginTime; + if (timeout > 0 && consumedTime >= timeout) { + throw new BackendException( + "Waiting for raft group '%s' election timeout(%sms)", + group, consumedTime); + } + LOG.warn("Waiting for raft group '{}' election cost {}s", + group, consumedTime / 1000.0); + leaderInfo = this.leaderInfo.get(); + assert leaderInfo != null; } + return leaderInfo; } protected void waitStarted(int timeout) { @@ -208,17 +200,13 @@ protected void waitStarted(int timeout) { ReadIndexClosure readIndexClosure = new ReadIndexClosure() { @Override public void run(Status status, long index, byte[] reqCtx) { - if (status.isOk()) { - RaftNode.this.started = true; - } else { - RaftNode.this.started = false; - } + RaftNode.this.started.set(status.isOk()); } }; long 
beginTime = System.currentTimeMillis(); while (true) { this.node.readIndex(BytesUtil.EMPTY_BYTES, readIndexClosure); - if (this.started) { + if (this.started.get()) { break; } try { @@ -230,10 +218,10 @@ public void run(Status status, long index, byte[] reqCtx) { long consumedTime = System.currentTimeMillis() - beginTime; if (timeout > 0 && consumedTime >= timeout) { throw new BackendException( - "Wait raft group '{}' heartbeat timeout({}ms)", + "Waiting for raft group '%s' heartbeat timeout(%sms)", group, consumedTime); } - LOG.warn("Waiting raft group '{}' heartbeat cost {}s", + LOG.warn("Waiting for raft group '{}' heartbeat cost {}s", group, consumedTime / 1000.0); } } @@ -243,7 +231,6 @@ private void waitIfBusy() { if (counter <= 0) { return; } - // TODO:should sleep or throw exception directly? // It may lead many thread sleep, but this is exactly what I want long time = counter * BUSY_SLEEP_FACTOR; LOG.info("The node {} will sleep {} ms", this.node, time); @@ -256,68 +243,14 @@ private void waitIfBusy() { if (this.busyCounter.get() > 0) { synchronized (this) { if (this.busyCounter.get() > 0) { - this.busyCounter.decrementAndGet(); + counter = this.busyCounter.decrementAndGet(); + LOG.info("Decrease busy counter: [{}]", counter); } } } } } - private void forwardToLeader(StoreCommand command, StoreClosure closure) { - assert !this.isRaftLeader(); - PeerId leaderId = this.node.getLeaderId(); - E.checkNotNull(leaderId, "leader id"); - LOG.debug("The node {} forward request to leader {}", - this.node.getNodeId(), leaderId); - - StoreCommandRequest.Builder builder = StoreCommandRequest.newBuilder(); - builder.setType(command.type()); - builder.setAction(command.action()); - builder.setData(ZeroByteStringHelper.wrap(command.data())); - StoreCommandRequest request = builder.build(); - - RpcResponseClosure responseClosure; - responseClosure = new RpcResponseClosure() { - @Override - public void setResponse(StoreCommandResponse resp) { - if (resp.getStatus()) { - 
LOG.debug("StoreCommandResponse status ok"); - closure.complete(Status.OK(), () -> null); - } else { - LOG.debug("StoreCommandResponse status error"); - Status status = new Status(RaftError.UNKNOWN, - "fowared request failed"); - closure.failure(status, new BackendException( - "Current node isn't leader, leader is " + - "[%s], failed to forward request to " + - "leader: %s", leaderId, resp.getMessage())); - } - } - - @Override - public void run(Status status) { - closure.run(status); - } - }; - this.waitRpc(leaderId.getEndpoint(), request, responseClosure); - } - - private void waitRpc(Endpoint endpoint, Message request, - RpcResponseClosure done) { - ClientService rpcClient = ((NodeImpl) this.node).getRpcService(); - E.checkNotNull(rpcClient, "rpc client"); - E.checkNotNull(endpoint, "leader endpoint"); - try { - rpcClient.invokeWithDone(endpoint, request, done, WAIT_RPC_TIMEOUT) - .get(); - } catch (InterruptedException e) { - throw new BackendException("Invoke rpc request was interrupted, " + - "please try again later", e); - } catch (ExecutionException e) { - throw new BackendException("Failed to invoke rpc request", e); - } - } - @Override public String toString() { return String.format("[%s-%s]", this.context.group(), this.nodeId()); @@ -348,7 +281,7 @@ public void onError(PeerId peer, Status status) { if (this.isWriteBufferOverflow(status)) { // increment busy counter int count = RaftNode.this.busyCounter.incrementAndGet(); - LOG.info("Busy counter: [{}]", count); + LOG.info("Increase busy counter: [{}]", count); } } @@ -359,9 +292,37 @@ private boolean isWriteBufferOverflow(Status status) { status.getErrorMsg().contains(expectMsg); } + /** + * Maybe useful in the future + */ + @SuppressWarnings("unused") + private boolean isRpcTimeout(Status status) { + String expectMsg = "Invoke timeout"; + return RaftError.EINTERNAL == status.getRaftError() && + status.getErrorMsg() != null && + status.getErrorMsg().contains(expectMsg); + } + @Override public void 
onDestroyed(PeerId peer) { LOG.warn("Replicator {} prepare to offline", peer); } } + + /** + * Jraft Node.getLeaderId() and Node.isLeader() is not always consistent, + * We define this class to manage leader info by ourselves + */ + private static class LeaderInfo { + + private static final LeaderInfo NO_LEADER = new LeaderInfo(null, false); + + private final PeerId leaderId; + private final boolean selfIsLeader; + + public LeaderInfo(PeerId leaderId, boolean selfIsLeader) { + this.leaderId = leaderId; + this.selfIsLeader = selfIsLeader; + } + } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftRequests.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftRequests.java deleted file mode 100644 index 34207a9e28..0000000000 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftRequests.java +++ /dev/null @@ -1,1490 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: hugegraph-core/src/main/resources/raft.proto - -package com.baidu.hugegraph.backend.store.raft; - -public final class RaftRequests { - private RaftRequests() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - } - /** - * Protobuf enum {@code com.baidu.hugegraph.backend.store.raft.StoreType} - */ - public enum StoreType - implements com.google.protobuf.ProtocolMessageEnum { - /** - * SCHEMA = 0; - */ - SCHEMA(0, 0), - /** - * GRAPH = 1; - */ - GRAPH(1, 1), - /** - * SYSTEM = 2; - */ - SYSTEM(2, 2), - /** - * SIZE = 3; - */ - SIZE(3, 3), - ; - - /** - * SCHEMA = 0; - */ - public static final int SCHEMA_VALUE = 0; - /** - * GRAPH = 1; - */ - public static final int GRAPH_VALUE = 1; - /** - * SYSTEM = 2; - */ - public static final int SYSTEM_VALUE = 2; - /** - * SIZE = 3; - */ - public static final int SIZE_VALUE = 3; - - - public final int getNumber() { return value; } - - public static StoreType valueOf(int value) { - switch (value) { - case 0: 
return SCHEMA; - case 1: return GRAPH; - case 2: return SYSTEM; - case 3: return SIZE; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public StoreType findValueByNumber(int number) { - return StoreType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.getDescriptor().getEnumTypes().get(0); - } - - private static final StoreType[] VALUES = values(); - - public static StoreType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private StoreType(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:com.baidu.hugegraph.backend.store.raft.StoreType) - } - - /** - * Protobuf enum {@code com.baidu.hugegraph.backend.store.raft.StoreAction} - */ - public enum StoreAction - implements com.google.protobuf.ProtocolMessageEnum { - /** - * NONE = 0; - */ - NONE(0, 0), - /** - * INIT = 1; - */ - INIT(1, 1), - /** - * CLEAR = 2; - */ - CLEAR(2, 2), - /** - * TRUNCATE = 3; - */ - TRUNCATE(3, 3), - /** - * BEGIN_TX = 10; - */ - BEGIN_TX(4, 10), - /** - * COMMIT_TX = 11; - */ - COMMIT_TX(5, 11), - /** - * ROLLBACK_TX = 12; - */ - ROLLBACK_TX(6, 12), - 
/** - * MUTATE = 20; - */ - MUTATE(7, 20), - /** - * INCR_COUNTER = 21; - */ - INCR_COUNTER(8, 21), - /** - * QUERY = 30; - */ - QUERY(9, 30), - ; - - /** - * NONE = 0; - */ - public static final int NONE_VALUE = 0; - /** - * INIT = 1; - */ - public static final int INIT_VALUE = 1; - /** - * CLEAR = 2; - */ - public static final int CLEAR_VALUE = 2; - /** - * TRUNCATE = 3; - */ - public static final int TRUNCATE_VALUE = 3; - /** - * BEGIN_TX = 10; - */ - public static final int BEGIN_TX_VALUE = 10; - /** - * COMMIT_TX = 11; - */ - public static final int COMMIT_TX_VALUE = 11; - /** - * ROLLBACK_TX = 12; - */ - public static final int ROLLBACK_TX_VALUE = 12; - /** - * MUTATE = 20; - */ - public static final int MUTATE_VALUE = 20; - /** - * INCR_COUNTER = 21; - */ - public static final int INCR_COUNTER_VALUE = 21; - /** - * QUERY = 30; - */ - public static final int QUERY_VALUE = 30; - - - public final int getNumber() { return value; } - - public static StoreAction valueOf(int value) { - switch (value) { - case 0: return NONE; - case 1: return INIT; - case 2: return CLEAR; - case 3: return TRUNCATE; - case 10: return BEGIN_TX; - case 11: return COMMIT_TX; - case 12: return ROLLBACK_TX; - case 20: return MUTATE; - case 21: return INCR_COUNTER; - case 30: return QUERY; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public StoreAction findValueByNumber(int number) { - return StoreAction.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor 
- getDescriptor() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.getDescriptor().getEnumTypes().get(1); - } - - private static final StoreAction[] VALUES = values(); - - public static StoreAction valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private StoreAction(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:com.baidu.hugegraph.backend.store.raft.StoreAction) - } - - public interface StoreCommandRequestOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - */ - boolean hasType(); - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - */ - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType getType(); - - // required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - */ - boolean hasAction(); - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - */ - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction getAction(); - - // required bytes data = 3; - /** - * required bytes data = 3; - */ - boolean hasData(); - /** - * required bytes data = 3; - */ - com.google.protobuf.ByteString getData(); - } - /** - * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.StoreCommandRequest} - */ - public static final class StoreCommandRequest extends - com.google.protobuf.GeneratedMessage - implements StoreCommandRequestOrBuilder { - // Use StoreCommandRequest.newBuilder() to construct. 
- private StoreCommandRequest(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private StoreCommandRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final StoreCommandRequest defaultInstance; - public static StoreCommandRequest getDefaultInstance() { - return defaultInstance; - } - - public StoreCommandRequest getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private StoreCommandRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType value = com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - type_ = value; - } - break; - } - case 16: { - int rawValue = input.readEnum(); - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction value = com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(2, rawValue); - } else { - bitField0_ |= 0x00000002; - 
action_ = value; - } - break; - } - case 26: { - bitField0_ |= 0x00000004; - data_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest.class, com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public StoreCommandRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new StoreCommandRequest(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - public static final int TYPE_FIELD_NUMBER = 1; - private com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType type_; - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - */ - public boolean 
hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - */ - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType getType() { - return type_; - } - - // required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - public static final int ACTION_FIELD_NUMBER = 2; - private com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction action_; - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - */ - public boolean hasAction() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - */ - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction getAction() { - return action_; - } - - // required bytes data = 3; - public static final int DATA_FIELD_NUMBER = 3; - private com.google.protobuf.ByteString data_; - /** - * required bytes data = 3; - */ - public boolean hasData() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required bytes data = 3; - */ - public com.google.protobuf.ByteString getData() { - return data_; - } - - private void initFields() { - type_ = com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType.SCHEMA; - action_ = com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction.NONE; - data_ = com.google.protobuf.ByteString.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasAction()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasData()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - 
getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, type_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeEnum(2, action_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, data_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, type_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(2, action_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, data_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - 
public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.StoreCommandRequest} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest.class, com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest.Builder.class); - } - - // Construct using com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - type_ = 
com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType.SCHEMA; - bitField0_ = (bitField0_ & ~0x00000001); - action_ = com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction.NONE; - bitField0_ = (bitField0_ & ~0x00000002); - data_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_descriptor; - } - - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest getDefaultInstanceForType() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest.getDefaultInstance(); - } - - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest build() { - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest buildPartial() { - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest result = new com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.type_ = type_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.action_ = action_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.data_ = data_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if 
(other instanceof com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest) { - return mergeFrom((com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest other) { - if (other == com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest.getDefaultInstance()) return this; - if (other.hasType()) { - setType(other.getType()); - } - if (other.hasAction()) { - setAction(other.getAction()); - } - if (other.hasData()) { - setData(other.getData()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasType()) { - - return false; - } - if (!hasAction()) { - - return false; - } - if (!hasData()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - private com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType type_ = com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType.SCHEMA; - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * 
required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - */ - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType getType() { - return type_; - } - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - */ - public Builder setType(com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - type_ = value; - onChanged(); - return this; - } - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreType type = 1; - */ - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000001); - type_ = com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType.SCHEMA; - onChanged(); - return this; - } - - // required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - private com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction action_ = com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction.NONE; - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - */ - public boolean hasAction() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - */ - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction getAction() { - return action_; - } - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - */ - public Builder setAction(com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - action_ = value; - onChanged(); - return this; - } - /** - * required .com.baidu.hugegraph.backend.store.raft.StoreAction action = 2; - */ - public Builder clearAction() { - bitField0_ = (bitField0_ & ~0x00000002); - action_ = com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction.NONE; - onChanged(); - return this; - } - - // 
required bytes data = 3; - private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes data = 3; - */ - public boolean hasData() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required bytes data = 3; - */ - public com.google.protobuf.ByteString getData() { - return data_; - } - /** - * required bytes data = 3; - */ - public Builder setData(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - data_ = value; - onChanged(); - return this; - } - /** - * required bytes data = 3; - */ - public Builder clearData() { - bitField0_ = (bitField0_ & ~0x00000004); - data_ = getDefaultInstance().getData(); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:com.baidu.hugegraph.backend.store.raft.StoreCommandRequest) - } - - static { - defaultInstance = new StoreCommandRequest(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:com.baidu.hugegraph.backend.store.raft.StoreCommandRequest) - } - - public interface StoreCommandResponseOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool status = 1; - /** - * required bool status = 1; - */ - boolean hasStatus(); - /** - * required bool status = 1; - */ - boolean getStatus(); - - // optional string message = 2; - /** - * optional string message = 2; - */ - boolean hasMessage(); - /** - * optional string message = 2; - */ - java.lang.String getMessage(); - /** - * optional string message = 2; - */ - com.google.protobuf.ByteString - getMessageBytes(); - } - /** - * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.StoreCommandResponse} - */ - public static final class StoreCommandResponse extends - com.google.protobuf.GeneratedMessage - implements StoreCommandResponseOrBuilder { - // Use StoreCommandResponse.newBuilder() to construct. 
- private StoreCommandResponse(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private StoreCommandResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final StoreCommandResponse defaultInstance; - public static StoreCommandResponse getDefaultInstance() { - return defaultInstance; - } - - public StoreCommandResponse getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private StoreCommandResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - status_ = input.readBool(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - message_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return 
com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse.class, com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public StoreCommandResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new StoreCommandResponse(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required bool status = 1; - public static final int STATUS_FIELD_NUMBER = 1; - private boolean status_; - /** - * required bool status = 1; - */ - public boolean hasStatus() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bool status = 1; - */ - public boolean getStatus() { - return status_; - } - - // optional string message = 2; - public static final int MESSAGE_FIELD_NUMBER = 2; - private java.lang.Object message_; - /** - * optional string message = 2; - */ - public boolean hasMessage() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string message = 2; - */ - public java.lang.String getMessage() { - java.lang.Object ref = message_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - 
(com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - message_ = s; - } - return s; - } - } - /** - * optional string message = 2; - */ - public com.google.protobuf.ByteString - getMessageBytes() { - java.lang.Object ref = message_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - message_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - status_ = false; - message_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasStatus()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, status_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getMessageBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, status_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getMessageBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - 
} - - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return 
PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.StoreCommandResponse} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - 
com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse.class, com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse.Builder.class); - } - - // Construct using com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - status_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - message_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_descriptor; - } - - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse getDefaultInstanceForType() { - return com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse.getDefaultInstance(); - } - - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse build() { - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse buildPartial() { - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse result = new com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse(this); - int from_bitField0_ = 
bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.status_ = status_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.message_ = message_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse) { - return mergeFrom((com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse other) { - if (other == com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse.getDefaultInstance()) return this; - if (other.hasStatus()) { - setStatus(other.getStatus()); - } - if (other.hasMessage()) { - bitField0_ |= 0x00000002; - message_ = other.message_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasStatus()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required bool status = 1; - private boolean status_ ; - /** - * required 
bool status = 1; - */ - public boolean hasStatus() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bool status = 1; - */ - public boolean getStatus() { - return status_; - } - /** - * required bool status = 1; - */ - public Builder setStatus(boolean value) { - bitField0_ |= 0x00000001; - status_ = value; - onChanged(); - return this; - } - /** - * required bool status = 1; - */ - public Builder clearStatus() { - bitField0_ = (bitField0_ & ~0x00000001); - status_ = false; - onChanged(); - return this; - } - - // optional string message = 2; - private java.lang.Object message_ = ""; - /** - * optional string message = 2; - */ - public boolean hasMessage() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string message = 2; - */ - public java.lang.String getMessage() { - java.lang.Object ref = message_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - message_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string message = 2; - */ - public com.google.protobuf.ByteString - getMessageBytes() { - java.lang.Object ref = message_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - message_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string message = 2; - */ - public Builder setMessage( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - message_ = value; - onChanged(); - return this; - } - /** - * optional string message = 2; - */ - public Builder clearMessage() { - bitField0_ = (bitField0_ & ~0x00000002); - message_ = getDefaultInstance().getMessage(); - onChanged(); - return this; - } - /** - * optional string message = 2; - */ - public Builder setMessageBytes( - 
com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - message_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:com.baidu.hugegraph.backend.store.raft.StoreCommandResponse) - } - - static { - defaultInstance = new StoreCommandResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:com.baidu.hugegraph.backend.store.raft.StoreCommandResponse) - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n,hugegraph-core/src/main/resources/raft" + - ".proto\022&com.baidu.hugegraph.backend.stor" + - "e.raft\"\251\001\n\023StoreCommandRequest\022?\n\004type\030\001" + - " \002(\01621.com.baidu.hugegraph.backend.store" + - ".raft.StoreType\022C\n\006action\030\002 \002(\01623.com.ba" + - "idu.hugegraph.backend.store.raft.StoreAc" + - "tion\022\014\n\004data\030\003 \002(\014\"7\n\024StoreCommandRespon" + - "se\022\016\n\006status\030\001 \002(\010\022\017\n\007message\030\002 \001(\t*8\n\tS" + - "toreType\022\n\n\006SCHEMA\020\000\022\t\n\005GRAPH\020\001\022\n\n\006SYSTE" + - 
"M\020\002\022\010\n\004SIZE\020\003*\221\001\n\013StoreAction\022\010\n\004NONE\020\000\022", - "\010\n\004INIT\020\001\022\t\n\005CLEAR\020\002\022\014\n\010TRUNCATE\020\003\022\014\n\010BE" + - "GIN_TX\020\n\022\r\n\tCOMMIT_TX\020\013\022\017\n\013ROLLBACK_TX\020\014" + - "\022\n\n\006MUTATE\020\024\022\020\n\014INCR_COUNTER\020\025\022\t\n\005QUERY\020" + - "\036B6\n&com.baidu.hugegraph.backend.store.r" + - "aftB\014RaftRequests" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandRequest_descriptor, - new java.lang.String[] { "Type", "Action", "Data", }); - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_com_baidu_hugegraph_backend_store_raft_StoreCommandResponse_descriptor, - new java.lang.String[] { "Status", "Message", }); - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - }, assigner); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftResult.java 
b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftResult.java index 747368ecc3..51cfaf4b17 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftResult.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftResult.java @@ -24,13 +24,13 @@ import com.alipay.sofa.jraft.Status; import com.baidu.hugegraph.util.E; -public class RaftResult { +public class RaftResult { private final Status status; - private final Supplier callback; + private final Supplier callback; private final Throwable exception; - public RaftResult(Status status, Supplier callback) { + public RaftResult(Status status, Supplier callback) { this(status, callback, null); } @@ -38,7 +38,7 @@ public RaftResult(Status status, Throwable exception) { this(status, () -> null, exception); } - public RaftResult(Status status, Supplier callback, + public RaftResult(Status status, Supplier callback, Throwable exception) { E.checkNotNull(status, "status"); E.checkNotNull(callback, "callback"); @@ -51,7 +51,7 @@ public Status status() { return this.status; } - public Supplier callback() { + public Supplier callback() { return this.callback; } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftSharedContext.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftSharedContext.java index f6a49786ba..d76cb41c97 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftSharedContext.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/RaftSharedContext.java @@ -19,12 +19,11 @@ package com.baidu.hugegraph.backend.store.raft; +import static com.baidu.hugegraph.backend.cache.AbstractCache.ACTION_CLEAR; + import java.io.File; import java.io.IOException; import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; import java.util.concurrent.BlockingQueue; import 
java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; @@ -37,21 +36,26 @@ import com.alipay.sofa.jraft.conf.Configuration; import com.alipay.sofa.jraft.entity.PeerId; -import com.alipay.sofa.jraft.option.CliOptions; import com.alipay.sofa.jraft.option.NodeOptions; import com.alipay.sofa.jraft.option.RaftOptions; import com.alipay.sofa.jraft.rpc.RaftRpcServerFactory; import com.alipay.sofa.jraft.rpc.RpcServer; +import com.alipay.sofa.jraft.rpc.impl.BoltRaftRpcFactory; import com.alipay.sofa.jraft.util.NamedThreadFactory; import com.alipay.sofa.jraft.util.ThreadPoolUtil; import com.baidu.hugegraph.HugeException; import com.baidu.hugegraph.HugeGraphParams; import com.baidu.hugegraph.backend.id.Id; import com.baidu.hugegraph.backend.store.BackendStore; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType; +import com.baidu.hugegraph.backend.store.raft.rpc.ListPeersProcessor; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType; +import com.baidu.hugegraph.backend.store.raft.rpc.RpcForwarder; +import com.baidu.hugegraph.backend.store.raft.rpc.SetLeaderProcessor; +import com.baidu.hugegraph.backend.store.raft.rpc.StoreCommandProcessor; import com.baidu.hugegraph.config.CoreOptions; import com.baidu.hugegraph.config.HugeConfig; import com.baidu.hugegraph.event.EventHub; +import com.baidu.hugegraph.testutil.Whitebox; import com.baidu.hugegraph.type.HugeType; import com.baidu.hugegraph.type.define.GraphMode; import com.baidu.hugegraph.util.E; @@ -72,7 +76,7 @@ public final class RaftSharedContext { // compress block size public static final int BLOCK_SIZE = 4096; - private static final String DEFAULT_GROUP = "default-group"; + public static final String DEFAULT_GROUP = "default"; private final HugeGraphParams params; private final String schemaStoreName; @@ -86,6 +90,8 @@ public final class RaftSharedContext { private final ExecutorService backendExecutor; private RaftNode raftNode; + private 
RaftGroupManager raftGroupManager; + private RpcForwarder rpcForwarder; public RaftSharedContext(HugeGraphParams params) { this.params = params; @@ -94,7 +100,7 @@ public RaftSharedContext(HugeGraphParams params) { this.schemaStoreName = config.get(CoreOptions.STORE_SCHEMA); this.graphStoreName = config.get(CoreOptions.STORE_GRAPH); this.systemStoreName = config.get(CoreOptions.STORE_SYSTEM); - this.stores = new RaftBackendStore[StoreType.SIZE.getNumber()]; + this.stores = new RaftBackendStore[StoreType.ALL.getNumber()]; this.rpcServer = this.initAndStartRpcServer(); if (config.get(CoreOptions.RAFT_SAFE_READ)) { int readIndexThreads = config.get(CoreOptions.RAFT_READ_INDEX_THREADS); @@ -111,19 +117,27 @@ public RaftSharedContext(HugeGraphParams params) { this.backendExecutor = this.createBackendExecutor(backendThreads); this.raftNode = null; + this.raftGroupManager = null; + this.rpcForwarder = null; + this.registerRpcRequestProcessors(); + } + + private void registerRpcRequestProcessors() { + this.rpcServer.registerProcessor(new StoreCommandProcessor(this)); + this.rpcServer.registerProcessor(new SetLeaderProcessor(this)); + this.rpcServer.registerProcessor(new ListPeersProcessor(this)); } public void initRaftNode() { this.raftNode = new RaftNode(this); - CliOptions cliOptions = new CliOptions(); - cliOptions.setTimeoutMs(WAIT_LEADER_TIMEOUT); - cliOptions.setMaxRetry(1); + this.rpcForwarder = new RpcForwarder(this.raftNode); + this.raftGroupManager = new RaftGroupManagerImpl(this); } public void waitRaftNodeStarted() { RaftNode node = this.node(); node.waitLeaderElected(RaftSharedContext.WAIT_LEADER_TIMEOUT); - if (node.isRaftLeader()) { + if (node.selfIsLeader()) { node.waitStarted(RaftSharedContext.NO_TIMEOUT); } } @@ -137,6 +151,21 @@ public RaftNode node() { return this.raftNode; } + public RpcForwarder rpcForwarder() { + return this.rpcForwarder; + } + + public RaftGroupManager raftNodeManager(String group) { + E.checkArgument(DEFAULT_GROUP.equals(group), + 
"The group must be '%s' now, actual is '%s'", + DEFAULT_GROUP, group); + return this.raftGroupManager; + } + + public RpcServer rpcServer() { + return this.rpcServer; + } + public String group() { return DEFAULT_GROUP; } @@ -156,6 +185,10 @@ public StoreType storeType(String store) { } } + protected RaftBackendStore[] stores() { + return this.stores; + } + public BackendStore originStore(StoreType storeType) { RaftBackendStore raftStore = this.stores[storeType.getNumber()]; E.checkState(raftStore != null, @@ -163,14 +196,6 @@ public BackendStore originStore(StoreType storeType) { return raftStore.originStore(); } - public Collection originStores() { - List originStores = new ArrayList<>(); - for (RaftBackendStore store : this.stores) { - originStores.add(store.originStore()); - } - return originStores; - } - public NodeOptions nodeOptions() throws IOException { HugeConfig config = this.config(); PeerId selfId = new PeerId(); @@ -232,7 +257,13 @@ public NodeOptions nodeOptions() throws IOException { return nodeOptions; } - public void notifyCache(HugeType type, Id id) { + public void clearCache() { + // Just choose two representatives used to represent schema and graph + this.notifyCache(ACTION_CLEAR, HugeType.VERTEX_LABEL, null); + this.notifyCache(ACTION_CLEAR, HugeType.VERTEX, null); + } + + public void notifyCache(String action, HugeType type, Id id) { EventHub eventHub; if (type.isGraph()) { eventHub = this.params.graphEventHub(); @@ -243,9 +274,9 @@ public void notifyCache(HugeType type, Id id) { } try { // How to avoid update cache from server info - eventHub.notify(Events.CACHE, "invalid", type, id); + eventHub.notify(Events.CACHE, action, type, id); } catch (RejectedExecutionException e) { - // pass + LOG.warn("Can't update cache due to EventHub is too busy"); } } @@ -262,8 +293,8 @@ public boolean isSafeRead() { return this.config().get(CoreOptions.RAFT_SAFE_READ); } - public RpcServer rpcServer() { - return this.rpcServer; + public boolean useSnapshot() { 
+ return this.config().get(CoreOptions.RAFT_USE_SNAPSHOT); } public ExecutorService snapshotExecutor() { @@ -283,6 +314,13 @@ private HugeConfig config() { } private RpcServer initAndStartRpcServer() { + Whitebox.setInternalState( + BoltRaftRpcFactory.class, "CHANNEL_WRITE_BUF_LOW_WATER_MARK", + this.config().get(CoreOptions.RAFT_RPC_BUF_LOW_WATER_MARK)); + Whitebox.setInternalState( + BoltRaftRpcFactory.class, "CHANNEL_WRITE_BUF_HIGH_WATER_MARK", + this.config().get(CoreOptions.RAFT_RPC_BUF_HIGH_WATER_MARK)); + PeerId serverId = new PeerId(); serverId.parse(this.config().get(CoreOptions.RAFT_ENDPOINT)); RpcServer rpcServer = RaftRpcServerFactory.createAndStartRaftRpcServer( @@ -291,7 +329,6 @@ private RpcServer initAndStartRpcServer() { return rpcServer; } - @SuppressWarnings("unused") private ExecutorService createReadIndexExecutor(int coreThreads) { int maxThreads = coreThreads << 2; String name = "store-read-index-callback"; @@ -320,7 +357,7 @@ private static ExecutorService newPool(int coreThreads, int maxThreads, BlockingQueue workQueue = new LinkedBlockingQueue<>(); return ThreadPoolUtil.newBuilder() .poolName(name) - .enableMetric(true) + .enableMetric(false) .coreThreads(coreThreads) .maximumThreads(maxThreads) .keepAliveSeconds(300L) diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreClosure.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreClosure.java index 76cf0c4526..5a8a2078e0 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreClosure.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreClosure.java @@ -19,14 +19,9 @@ package com.baidu.hugegraph.backend.store.raft; -import org.slf4j.Logger; - import com.baidu.hugegraph.util.E; -import com.baidu.hugegraph.util.Log; - -public class StoreClosure extends RaftClosure { - private static final Logger LOG = Log.logger(StoreClosure.class); +public class StoreClosure extends 
RaftClosure { private final StoreCommand command; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreCommand.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreCommand.java index 95accfe294..f239d0f103 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreCommand.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreCommand.java @@ -20,8 +20,8 @@ package com.baidu.hugegraph.backend.store.raft; import com.baidu.hugegraph.backend.serializer.BytesBuffer; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType; public class StoreCommand { @@ -30,8 +30,14 @@ public class StoreCommand { private final StoreType type; private final StoreAction action; private final byte[] data; + private final boolean forwarded; public StoreCommand(StoreType type, StoreAction action, byte[] data) { + this(type, action, data, false); + } + + public StoreCommand(StoreType type, StoreAction action, + byte[] data, boolean forwarded) { this.type = type; this.action = action; if (data == null) { @@ -42,6 +48,7 @@ public StoreCommand(StoreType type, StoreAction action, byte[] data) { } this.data[0] = (byte) this.type.getNumber(); this.data[1] = (byte) this.action.getNumber(); + this.forwarded = forwarded; } public StoreType type() { @@ -56,6 +63,10 @@ public byte[] data() { return this.data; } + public boolean forwarded() { + return this.forwarded; + } + public static void writeHeader(BytesBuffer buffer) { buffer.write((byte) 0); buffer.write((byte) 0); @@ -72,4 +83,10 @@ public static StoreCommand fromBytes(byte[] bytes) { StoreAction action = StoreAction.valueOf(bytes[1]); return new StoreCommand(type, action, bytes); } + + @Override + 
public String toString() { + return String.format("StoreCommand{type=%s, action=%s", + this.type.name(), this.action.name()); + } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreSnapshotFile.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreSnapshotFile.java index 0b8ddf31a6..d0a1aef063 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreSnapshotFile.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreSnapshotFile.java @@ -37,7 +37,6 @@ import com.alipay.sofa.jraft.storage.snapshot.SnapshotReader; import com.alipay.sofa.jraft.storage.snapshot.SnapshotWriter; import com.alipay.sofa.jraft.util.CRC64; -import com.baidu.hugegraph.backend.store.BackendStore; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.Log; import com.baidu.hugegraph.util.ZipUtil; @@ -49,22 +48,27 @@ public class StoreSnapshotFile { private static final String SNAPSHOT_DIR = "ss"; private static final String SNAPSHOT_ARCHIVE = "ss.zip"; - public void save(BackendStore store, SnapshotWriter writer, - Closure done, ExecutorService executor) { + private final RaftBackendStore[] stores; + + public StoreSnapshotFile(RaftBackendStore[] stores) { + this.stores = stores; + } + + public void save(SnapshotWriter writer, Closure done, + ExecutorService executor) { String writerPath = writer.getPath(); String snapshotPath = Paths.get(writerPath, SNAPSHOT_DIR).toString(); try { - this.doSnapshotSave(store, snapshotPath) - .whenComplete((metaBuilder, throwable) -> { - if (throwable == null) { + this.doSnapshotSave(snapshotPath).whenComplete((metaBuilder, t) -> { + if (t == null) { executor.execute(() -> compressSnapshot(writer, metaBuilder, done)); } else { - LOG.error("Failed to save snapshot, path={}, files={}, {}.", - writerPath, writer.listFiles(), throwable); + LOG.error("Failed to save snapshot, path={}, files={}", + writerPath, writer.listFiles(), t); 
done.run(new Status(RaftError.EIO, "Failed to save snapshot at %s, error is %s", - writerPath, throwable.getMessage())); + writerPath, t.getMessage())); } }); } catch (Throwable t) { @@ -76,17 +80,17 @@ public void save(BackendStore store, SnapshotWriter writer, } } - public boolean load(BackendStore store, SnapshotReader reader) { + public boolean load(SnapshotReader reader) { LocalFileMeta meta = (LocalFileMeta) reader.getFileMeta(SNAPSHOT_ARCHIVE); String readerPath = reader.getPath(); if (meta == null) { - LOG.error("Can't find snapshot file, path={}.", readerPath); + LOG.error("Can't find snapshot archive file, path={}.", readerPath); return false; } String snapshotPath = Paths.get(readerPath, SNAPSHOT_DIR).toString(); try { this.decompressSnapshot(readerPath, meta); - this.doSnapshotLoad(store, snapshotPath); + this.doSnapshotLoad(snapshotPath); File tmp = new File(snapshotPath); // Delete the decompressed temporary file. If the deletion fails // (although it is a small probability event), it may affect the @@ -94,7 +98,7 @@ public boolean load(BackendStore store, SnapshotReader reader) { // is to terminate the state machine immediately. Users can choose // to manually delete and restart according to the log information. 
if (tmp.exists()) { - FileUtils.forceDelete(new File(snapshotPath)); + FileUtils.forceDelete(tmp); } return true; } catch (Throwable t) { @@ -104,15 +108,22 @@ public boolean load(BackendStore store, SnapshotReader reader) { } } - public CompletableFuture doSnapshotSave( - BackendStore store, - String snapshotPath) { - store.writeSnapshot(snapshotPath); + private CompletableFuture doSnapshotSave( + String snapshotPath) { + for (RaftBackendStore store : this.stores) { + String parentPath = Paths.get(snapshotPath, store.store()) + .toString(); + store.originStore().writeSnapshot(parentPath); + } return CompletableFuture.completedFuture(LocalFileMeta.newBuilder()); } - public void doSnapshotLoad(BackendStore store, String snapshotPath) { - store.readSnapshot(snapshotPath); + private void doSnapshotLoad(String snapshotPath) { + for (RaftBackendStore store : this.stores) { + String parentPath = Paths.get(snapshotPath, store.store()) + .toString(); + store.originStore().readSnapshot(parentPath); + } } private void compressSnapshot(SnapshotWriter writer, diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreStateMachine.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreStateMachine.java index b7cdd5f9b8..670cd6a627 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreStateMachine.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreStateMachine.java @@ -19,16 +19,16 @@ package com.baidu.hugegraph.backend.store.raft; -import java.util.EnumMap; +import static com.baidu.hugegraph.backend.cache.AbstractCache.ACTION_INVALID; + import java.util.List; -import java.util.Map; -import java.util.function.BiConsumer; import org.slf4j.Logger; import com.alipay.sofa.jraft.Closure; import com.alipay.sofa.jraft.Iterator; import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; import com.alipay.sofa.jraft.core.StateMachineAdapter; 
import com.alipay.sofa.jraft.entity.LeaderChangeContext; import com.alipay.sofa.jraft.error.RaftError; @@ -42,8 +42,8 @@ import com.baidu.hugegraph.backend.store.BackendMutation; import com.baidu.hugegraph.backend.store.BackendStore; import com.baidu.hugegraph.backend.store.raft.RaftBackendStore.IncrCounter; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType; import com.baidu.hugegraph.type.HugeType; import com.baidu.hugegraph.type.define.GraphMode; import com.baidu.hugegraph.util.LZ4Util; @@ -55,13 +55,10 @@ public class StoreStateMachine extends StateMachineAdapter { private final RaftSharedContext context; private final StoreSnapshotFile snapshotFile; - private final Map> funcs; public StoreStateMachine(RaftSharedContext context) { this.context = context; - this.snapshotFile = new StoreSnapshotFile(); - this.funcs = new EnumMap<>(StoreAction.class); - this.registerCommands(); + this.snapshotFile = new StoreSnapshotFile(context.stores()); } private BackendStore store(StoreType type) { @@ -72,41 +69,18 @@ private RaftNode node() { return this.context.node(); } - private void registerCommands() { - this.register(StoreAction.TRUNCATE, (store, buffer) -> { - store.truncate(); - }); - // clear - this.register(StoreAction.CLEAR, (store, buffer) -> { - boolean clearSpace = buffer.read() > 0; - store.clear(clearSpace); - }); - this.register(StoreAction.BEGIN_TX, (store, buffer) -> store.beginTx()); - this.register(StoreAction.COMMIT_TX, (store, buffer) -> { - List ms = StoreSerializer.readMutations(buffer); - store.beginTx(); - for (BackendMutation mutation : ms) { - store.mutate(mutation); - // update cache on follower when graph run in general mode - if (this.context.graphMode() == GraphMode.NONE) { - 
this.updateCacheIfNeeded(mutation); - } - } - store.commitTx(); - }); - this.register(StoreAction.ROLLBACK_TX, (store, buffer) -> { - store.rollbackTx(); - }); - // increase counter - this.register(StoreAction.INCR_COUNTER, (store, buffer) -> { - IncrCounter counter = StoreSerializer.readIncrCounter(buffer); - store.increaseCounter(counter.type(), counter.increment()); - }); - } - - private void updateCacheIfNeeded(BackendMutation mutation) { - // Only follower need to update cache from store to tx - if (this.node().isRaftLeader()) { + private void updateCacheIfNeeded(BackendMutation mutation, + boolean forwarded) { + // Update cache only when graph run in general mode + if (this.context.graphMode() != GraphMode.NONE) { + return; + } + /* + * 1. Follower need to update cache from store to tx + * 2. If request come from leader, cache will be updated by upper layer + * 3. If request is forwarded by follower, need to update cache + */ + if (!forwarded && this.node().selfIsLeader()) { return; } for (HugeType type : mutation.types()) { @@ -116,50 +90,45 @@ private void updateCacheIfNeeded(BackendMutation mutation) { for (java.util.Iterator it = mutation.mutation(type); it.hasNext();) { BackendEntry entry = it.next().entry(); - this.context.notifyCache(type, entry.originId()); + this.context.notifyCache(ACTION_INVALID, type, entry.originId()); } } } - private void register(StoreAction action, - BiConsumer func) { - this.funcs.put(action, func); - } - @Override public void onApply(Iterator iter) { - LOG.debug("Node role: {}", this.node().isRaftLeader() ? + LOG.debug("Node role: {}", this.node().selfIsLeader() ? 
"leader" : "follower"); StoreClosure closure = null; try { while (iter.hasNext()) { - StoreType type; - StoreAction action; - BytesBuffer buffer; closure = (StoreClosure) iter.done(); if (closure != null) { // Leader just take it out from the closure - buffer = BytesBuffer.wrap(closure.command().data()); - } else { - // Follower need readMutation data - buffer = LZ4Util.decompress(iter.getData().array(), - RaftSharedContext.BLOCK_SIZE); - } - // The first two bytes are StoreType and StoreAction - type = StoreType.valueOf(buffer.read()); - action = StoreAction.valueOf(buffer.read()); - if (closure != null) { - // Closure is null on follower node + StoreCommand command = closure.command(); + BytesBuffer buffer = BytesBuffer.wrap(command.data()); + // The first two bytes are StoreType and StoreAction + StoreType type = StoreType.valueOf(buffer.read()); + StoreAction action = StoreAction.valueOf(buffer.read()); + boolean forwarded = command.forwarded(); // Let the producer thread to handle it closure.complete(Status.OK(), () -> { - return this.applyCommand(type, action, buffer); + this.applyCommand(type, action, buffer, forwarded); + return null; }); } else { + // Follower need readMutation data + byte[] bytes = iter.getData().array(); // Follower seems no way to wait future // Let the backend thread do it directly this.context.backendExecutor().submit(() -> { + BytesBuffer buffer = LZ4Util.decompress(bytes, + RaftSharedContext.BLOCK_SIZE); + buffer.forReadWritten(); + StoreType type = StoreType.valueOf(buffer.read()); + StoreAction action = StoreAction.valueOf(buffer.read()); try { - this.applyCommand(type, action, buffer); + this.applyCommand(type, action, buffer, false); } catch (Throwable e) { LOG.error("Failed to execute backend command: {}", action, e); @@ -182,68 +151,97 @@ public void onApply(Iterator iter) { } } - private Object applyCommand(StoreType type, StoreAction action, - BytesBuffer buffer) { - BackendStore store = this.store(type); - BiConsumer func = 
this.funcs.get(action); - func.accept(store, buffer); - return null; + private void applyCommand(StoreType type, StoreAction action, + BytesBuffer buffer, boolean forwarded) { + BackendStore store = type != StoreType.ALL ? this.store(type) : null; + switch (action) { + case CLEAR: + boolean clearSpace = buffer.read() > 0; + store.clear(clearSpace); + this.context.clearCache(); + break; + case TRUNCATE: + store.truncate(); + this.context.clearCache(); + break; + case SNAPSHOT: + assert store == null; + this.node().snapshot(); + break; + case BEGIN_TX: + store.beginTx(); + break; + case COMMIT_TX: + List ms = StoreSerializer.readMutations(buffer); + // RaftBackendStore doesn't write raft log for beginTx + store.beginTx(); + for (BackendMutation mutation : ms) { + store.mutate(mutation); + this.updateCacheIfNeeded(mutation, forwarded); + } + store.commitTx(); + break; + case ROLLBACK_TX: + store.rollbackTx(); + break; + // increase counter + case INCR_COUNTER: + IncrCounter counter = StoreSerializer.readIncrCounter(buffer); + store.increaseCounter(counter.type(), counter.increment()); + break; + default: + throw new IllegalArgumentException("Invalid action " + action); + } } @Override public void onSnapshotSave(SnapshotWriter writer, Closure done) { LOG.debug("The node {} start snapshot save", this.node().nodeId()); - for (BackendStore store : this.context.originStores()) { - this.snapshotFile.save(store, writer, done, - this.context.snapshotExecutor()); - } + this.snapshotFile.save(writer, done, this.context.snapshotExecutor()); } @Override public boolean onSnapshotLoad(SnapshotReader reader) { - if (this.node().isRaftLeader()) { + if (this.node() != null && this.node().selfIsLeader()) { LOG.warn("Leader is not supposed to load snapshot."); return false; } - LOG.debug("The node {} start snapshot load", this.node().nodeId()); - for (BackendStore store : this.context.originStores()) { - if (!this.snapshotFile.load(store, reader)) { - return false; - } - } - return 
true; + return this.snapshotFile.load(reader); } @Override public void onLeaderStart(long term) { LOG.info("The node {} become to leader", this.node().nodeId()); - this.node().leaderTerm(term); - this.node().onElected(true); + this.node().onLeaderInfoChange(this.node().nodeId(), true); super.onLeaderStart(term); } @Override public void onLeaderStop(Status status) { LOG.info("The node {} abdicated from leader", this.node().nodeId()); - this.node().leaderTerm(-1); - this.node().onElected(false); + this.node().onLeaderInfoChange(null, false); super.onLeaderStop(status); } @Override public void onStartFollowing(LeaderChangeContext ctx) { LOG.info("The node {} become to follower", this.node().nodeId()); - this.node().onElected(true); + this.node().onLeaderInfoChange(ctx.getLeaderId(), false); super.onStartFollowing(ctx); } @Override public void onStopFollowing(LeaderChangeContext ctx) { LOG.info("The node {} abdicated from follower", this.node().nodeId()); - this.node().onElected(false); + this.node().onLeaderInfoChange(null, false); super.onStopFollowing(ctx); } + @Override + public void onConfigurationCommitted(Configuration conf) { + super.onConfigurationCommitted(conf); + } + @Override public void onError(final RaftException e) { LOG.error("Raft error: {}", e.getMessage(), e); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/ListPeersProcessor.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/ListPeersProcessor.java new file mode 100644 index 0000000000..328cf3c108 --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/ListPeersProcessor.java @@ -0,0 +1,77 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package com.baidu.hugegraph.backend.store.raft.rpc; + +import org.slf4j.Logger; + +import com.alipay.sofa.jraft.rpc.RpcRequestClosure; +import com.alipay.sofa.jraft.rpc.RpcRequestProcessor; +import com.baidu.hugegraph.backend.store.raft.RaftGroupManager; +import com.baidu.hugegraph.backend.store.raft.RaftSharedContext; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse; +import com.baidu.hugegraph.util.Log; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Message; + +public class ListPeersProcessor + extends RpcRequestProcessor { + + private static final Logger LOG = Log.logger(ListPeersProcessor.class); + + private final RaftSharedContext context; + + public ListPeersProcessor(RaftSharedContext context) { + super(null, null); + this.context = context; + } + + @Override + public Message processRequest(ListPeersRequest request, + RpcRequestClosure done) { + LOG.debug("Processing ListPeersRequest {}", request.getClass()); + RaftGroupManager nodeManager = this.context.raftNodeManager( + RaftSharedContext.DEFAULT_GROUP); + try { + CommonResponse common = CommonResponse.newBuilder() + .setStatus(true) + .build(); + return ListPeersResponse.newBuilder() + 
.setCommon(common) + .addAllEndpoints(nodeManager.listPeers()) + .build(); + } catch (Throwable e) { + CommonResponse common = CommonResponse.newBuilder() + .setStatus(false) + .setMessage(e.toString()) + .build(); + return ListPeersResponse.newBuilder() + .setCommon(common) + .addAllEndpoints(ImmutableList.of()) + .build(); + } + } + + @Override + public String interest() { + return ListPeersRequest.class.getName(); + } +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/RaftRequests.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/RaftRequests.java new file mode 100644 index 0000000000..6bcc9aa702 --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/RaftRequests.java @@ -0,0 +1,4143 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: hugegraph-core/src/main/resources/raft.proto + +package com.baidu.hugegraph.backend.store.raft.rpc; + +@SuppressWarnings("unused") +public final class RaftRequests { + private RaftRequests() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf enum {@code com.baidu.hugegraph.backend.store.raft.rpc.StoreType} + */ + public enum StoreType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * SCHEMA = 0; + */ + SCHEMA(0, 0), + /** + * GRAPH = 1; + */ + GRAPH(1, 1), + /** + * SYSTEM = 2; + */ + SYSTEM(2, 2), + /** + * ALL = 3; + */ + ALL(3, 3), + ; + + /** + * SCHEMA = 0; + */ + public static final int SCHEMA_VALUE = 0; + /** + * GRAPH = 1; + */ + public static final int GRAPH_VALUE = 1; + /** + * SYSTEM = 2; + */ + public static final int SYSTEM_VALUE = 2; + /** + * ALL = 3; + */ + public static final int ALL_VALUE = 3; + + + public final int getNumber() { return value; } + + public static StoreType valueOf(int value) { + switch (value) { + case 0: return SCHEMA; + case 1: return GRAPH; + case 2: return SYSTEM; + case 3: return ALL; 
+ default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public StoreType findValueByNumber(int number) { + return StoreType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.getDescriptor().getEnumTypes().get(0); + } + + private static final StoreType[] VALUES = values(); + + public static StoreType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private StoreType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:com.baidu.hugegraph.backend.store.raft.rpc.StoreType) + } + + /** + * Protobuf enum {@code com.baidu.hugegraph.backend.store.raft.rpc.StoreAction} + */ + public enum StoreAction + implements com.google.protobuf.ProtocolMessageEnum { + /** + * NONE = 0; + */ + NONE(0, 0), + /** + * INIT = 1; + */ + INIT(1, 1), + /** + * CLEAR = 2; + */ + CLEAR(2, 2), + /** + * TRUNCATE = 3; + */ + TRUNCATE(3, 3), + /** + * SNAPSHOT = 4; + */ + SNAPSHOT(4, 4), + /** + * BEGIN_TX = 10; + */ + BEGIN_TX(5, 10), + /** + * COMMIT_TX = 11; + */ + COMMIT_TX(6, 11), + /** + * ROLLBACK_TX = 12; + */ + ROLLBACK_TX(7, 12), + /** + * MUTATE = 20; + */ + 
MUTATE(8, 20), + /** + * INCR_COUNTER = 21; + */ + INCR_COUNTER(9, 21), + /** + * QUERY = 30; + */ + QUERY(10, 30), + ; + + /** + * NONE = 0; + */ + public static final int NONE_VALUE = 0; + /** + * INIT = 1; + */ + public static final int INIT_VALUE = 1; + /** + * CLEAR = 2; + */ + public static final int CLEAR_VALUE = 2; + /** + * TRUNCATE = 3; + */ + public static final int TRUNCATE_VALUE = 3; + /** + * SNAPSHOT = 4; + */ + public static final int SNAPSHOT_VALUE = 4; + /** + * BEGIN_TX = 10; + */ + public static final int BEGIN_TX_VALUE = 10; + /** + * COMMIT_TX = 11; + */ + public static final int COMMIT_TX_VALUE = 11; + /** + * ROLLBACK_TX = 12; + */ + public static final int ROLLBACK_TX_VALUE = 12; + /** + * MUTATE = 20; + */ + public static final int MUTATE_VALUE = 20; + /** + * INCR_COUNTER = 21; + */ + public static final int INCR_COUNTER_VALUE = 21; + /** + * QUERY = 30; + */ + public static final int QUERY_VALUE = 30; + + + public final int getNumber() { return value; } + + public static StoreAction valueOf(int value) { + switch (value) { + case 0: return NONE; + case 1: return INIT; + case 2: return CLEAR; + case 3: return TRUNCATE; + case 4: return SNAPSHOT; + case 10: return BEGIN_TX; + case 11: return COMMIT_TX; + case 12: return ROLLBACK_TX; + case 20: return MUTATE; + case 21: return INCR_COUNTER; + case 30: return QUERY; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public StoreAction findValueByNumber(int number) { + return StoreAction.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return 
getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.getDescriptor().getEnumTypes().get(1); + } + + private static final StoreAction[] VALUES = values(); + + public static StoreAction valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private StoreAction(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:com.baidu.hugegraph.backend.store.raft.rpc.StoreAction) + } + + public interface StoreCommandRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + */ + boolean hasType(); + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + */ + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType getType(); + + // required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + */ + boolean hasAction(); + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + */ + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction getAction(); + + // required bytes data = 3; + /** + * required bytes data = 3; + */ + boolean hasData(); + /** + * required bytes data = 3; + */ + com.google.protobuf.ByteString getData(); + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.StoreCommandRequest} + */ + public static final class StoreCommandRequest extends + 
com.google.protobuf.GeneratedMessage + implements StoreCommandRequestOrBuilder { + // Use StoreCommandRequest.newBuilder() to construct. + private StoreCommandRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StoreCommandRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StoreCommandRequest defaultInstance; + public static StoreCommandRequest getDefaultInstance() { + return defaultInstance; + } + + public StoreCommandRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StoreCommandRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType value = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + type_ = value; + } + break; + } + case 16: { + int rawValue = input.readEnum(); + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction value = 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + action_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + data_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StoreCommandRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StoreCommandRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + 
public static final int TYPE_FIELD_NUMBER = 1; + private com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType type_; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType getType() { + return type_; + } + + // required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + public static final int ACTION_FIELD_NUMBER = 2; + private com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction action_; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + */ + public boolean hasAction() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction getAction() { + return action_; + } + + // required bytes data = 3; + public static final int DATA_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString data_; + /** + * required bytes data = 3; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes data = 3; + */ + public com.google.protobuf.ByteString getData() { + return data_; + } + + private void initFields() { + type_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType.SCHEMA; + action_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction.NONE; + data_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasAction()) 
{ + memoizedIsInitialized = 0; + return false; + } + if (!hasData()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, action_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, data_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, type_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, action_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, data_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.StoreCommandRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest.Builder.class); + } + + // Construct using com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + 
maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + type_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType.SCHEMA; + bitField0_ = (bitField0_ & ~0x00000001); + action_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction.NONE; + bitField0_ = (bitField0_ & ~0x00000002); + data_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_descriptor; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest getDefaultInstanceForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest.getDefaultInstance(); + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest build() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest buildPartial() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest result = new com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000002) == 
0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.action_ = action_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.data_ = data_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest) { + return mergeFrom((com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest other) { + if (other == com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasAction()) { + setAction(other.getAction()); + } + if (other.hasData()) { + setData(other.getData()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasType()) { + + return false; + } + if (!hasAction()) { + + return false; + } + if (!hasData()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required 
.com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + private com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType type_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType.SCHEMA; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType getType() { + return type_; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + */ + public Builder setType(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreType type = 1; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType.SCHEMA; + onChanged(); + return this; + } + + // required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + private com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction action_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction.NONE; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + */ + public boolean hasAction() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction getAction() { + return action_; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + */ + public Builder 
setAction(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + action_ = value; + onChanged(); + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.StoreAction action = 2; + */ + public Builder clearAction() { + bitField0_ = (bitField0_ & ~0x00000002); + action_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction.NONE; + onChanged(); + return this; + } + + // required bytes data = 3; + private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes data = 3; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes data = 3; + */ + public com.google.protobuf.ByteString getData() { + return data_; + } + /** + * required bytes data = 3; + */ + public Builder setData(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + data_ = value; + onChanged(); + return this; + } + /** + * required bytes data = 3; + */ + public Builder clearData() { + bitField0_ = (bitField0_ & ~0x00000004); + data_ = getDefaultInstance().getData(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:com.baidu.hugegraph.backend.store.raft.rpc.StoreCommandRequest) + } + + static { + defaultInstance = new StoreCommandRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:com.baidu.hugegraph.backend.store.raft.rpc.StoreCommandRequest) + } + + public interface StoreCommandResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool status = 1; + /** + * required bool status = 1; + */ + boolean hasStatus(); + /** + * required bool status = 1; + */ + boolean getStatus(); + + // optional string message = 2; + /** + * optional string message = 2; + */ + boolean 
hasMessage(); + /** + * optional string message = 2; + */ + java.lang.String getMessage(); + /** + * optional string message = 2; + */ + com.google.protobuf.ByteString + getMessageBytes(); + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.StoreCommandResponse} + */ + public static final class StoreCommandResponse extends + com.google.protobuf.GeneratedMessage + implements StoreCommandResponseOrBuilder { + // Use StoreCommandResponse.newBuilder() to construct. + private StoreCommandResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StoreCommandResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StoreCommandResponse defaultInstance; + public static StoreCommandResponse getDefaultInstance() { + return defaultInstance; + } + + public StoreCommandResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StoreCommandResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + status_ = input.readBool(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + message_ = input.readBytes(); + 
break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StoreCommandResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StoreCommandResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool status = 1; + public static final int STATUS_FIELD_NUMBER = 1; + private boolean status_; + /** + * required bool status = 1; + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool status = 1; + */ + public boolean getStatus() { + return status_; + } + + // optional string message = 2; + public 
static final int MESSAGE_FIELD_NUMBER = 2; + private java.lang.Object message_; + /** + * optional string message = 2; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string message = 2; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + message_ = s; + } + return s; + } + } + /** + * optional string message = 2; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + status_ = false; + message_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasStatus()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, status_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getMessageBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + 
.computeBoolSize(1, status_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getMessageBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
PARSER.parseFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.StoreCommandResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + 
getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse.Builder.class); + } + + // Construct using com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + status_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + message_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_descriptor; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse getDefaultInstanceForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse.getDefaultInstance(); + } + + public 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse build() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse buildPartial() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse result = new com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.status_ = status_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.message_ = message_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse) { + return mergeFrom((com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse other) { + if (other == com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse.getDefaultInstance()) return this; + if (other.hasStatus()) { + setStatus(other.getStatus()); + } + if (other.hasMessage()) { + bitField0_ |= 0x00000002; + message_ = other.message_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasStatus()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool status = 1; + private boolean status_ ; + /** + * required bool status = 1; + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool status = 1; + */ + public boolean getStatus() { + return status_; + } + /** + * required bool status = 1; + */ + public Builder setStatus(boolean value) { + bitField0_ |= 0x00000001; + status_ = value; + onChanged(); + return this; + } + /** + * required bool status = 1; + */ + public Builder clearStatus() { + bitField0_ = (bitField0_ & ~0x00000001); + status_ = false; + onChanged(); + return this; + } + + // optional string message = 2; + private java.lang.Object message_ = ""; + /** + * optional string message = 2; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string message = 2; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + message_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string message = 2; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + 
message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string message = 2; + */ + public Builder setMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + message_ = value; + onChanged(); + return this; + } + /** + * optional string message = 2; + */ + public Builder clearMessage() { + bitField0_ = (bitField0_ & ~0x00000002); + message_ = getDefaultInstance().getMessage(); + onChanged(); + return this; + } + /** + * optional string message = 2; + */ + public Builder setMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + message_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:com.baidu.hugegraph.backend.store.raft.rpc.StoreCommandResponse) + } + + static { + defaultInstance = new StoreCommandResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:com.baidu.hugegraph.backend.store.raft.rpc.StoreCommandResponse) + } + + public interface CommonResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool status = 1; + /** + * required bool status = 1; + */ + boolean hasStatus(); + /** + * required bool status = 1; + */ + boolean getStatus(); + + // optional string message = 2; + /** + * optional string message = 2; + */ + boolean hasMessage(); + /** + * optional string message = 2; + */ + java.lang.String getMessage(); + /** + * optional string message = 2; + */ + com.google.protobuf.ByteString + getMessageBytes(); + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse} + */ + public static final class CommonResponse extends + com.google.protobuf.GeneratedMessage + implements CommonResponseOrBuilder { + // Use CommonResponse.newBuilder() to construct. 
+ private CommonResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private CommonResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final CommonResponse defaultInstance; + public static CommonResponse getDefaultInstance() { + return defaultInstance; + } + + public CommonResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CommonResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + status_ = input.readBool(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + message_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public CommonResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CommonResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool status = 1; + public static final int STATUS_FIELD_NUMBER = 1; + private boolean status_; + /** + * required bool status = 1; + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool status = 1; + */ + public boolean getStatus() { + return status_; + } + + // optional string message = 2; + public static final int MESSAGE_FIELD_NUMBER = 2; + private java.lang.Object message_; + /** + * optional string message = 2; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string message = 2; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + 
java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + message_ = s; + } + return s; + } + } + /** + * optional string message = 2; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + status_ = false; + message_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasStatus()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, status_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getMessageBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, status_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getMessageBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, 
extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.class, 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder.class); + } + + // Construct using com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + status_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + message_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_descriptor; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse getDefaultInstanceForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance(); + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse build() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse buildPartial() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse result = new com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + 
to_bitField0_ |= 0x00000001; + } + result.status_ = status_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.message_ = message_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse) { + return mergeFrom((com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse other) { + if (other == com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance()) return this; + if (other.hasStatus()) { + setStatus(other.getStatus()); + } + if (other.hasMessage()) { + bitField0_ |= 0x00000002; + message_ = other.message_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasStatus()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool status = 1; + private boolean status_ ; + /** + * required bool status = 1; + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) == 0x00000001); + 
} + /** + * required bool status = 1; + */ + public boolean getStatus() { + return status_; + } + /** + * required bool status = 1; + */ + public Builder setStatus(boolean value) { + bitField0_ |= 0x00000001; + status_ = value; + onChanged(); + return this; + } + /** + * required bool status = 1; + */ + public Builder clearStatus() { + bitField0_ = (bitField0_ & ~0x00000001); + status_ = false; + onChanged(); + return this; + } + + // optional string message = 2; + private java.lang.Object message_ = ""; + /** + * optional string message = 2; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string message = 2; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + message_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string message = 2; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string message = 2; + */ + public Builder setMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + message_ = value; + onChanged(); + return this; + } + /** + * optional string message = 2; + */ + public Builder clearMessage() { + bitField0_ = (bitField0_ & ~0x00000002); + message_ = getDefaultInstance().getMessage(); + onChanged(); + return this; + } + /** + * optional string message = 2; + */ + public Builder setMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 
0x00000002; + message_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse) + } + + static { + defaultInstance = new CommonResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse) + } + + public interface ListPeersRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.ListPeersRequest} + */ + public static final class ListPeersRequest extends + com.google.protobuf.GeneratedMessage + implements ListPeersRequestOrBuilder { + // Use ListPeersRequest.newBuilder() to construct. + private ListPeersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListPeersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListPeersRequest defaultInstance; + public static ListPeersRequest getDefaultInstance() { + return defaultInstance; + } + + public ListPeersRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListPeersRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + 
extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListPeersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListPeersRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + 
getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.ListPeersRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + 
getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest.Builder.class); + } + + // Construct using com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_descriptor; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest getDefaultInstanceForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest.getDefaultInstance(); + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest build() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest buildPartial() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest result = new com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest) { + return mergeFrom((com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest other) { + if (other == com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:com.baidu.hugegraph.backend.store.raft.rpc.ListPeersRequest) + } + + static { + defaultInstance = new ListPeersRequest(true); + defaultInstance.initFields(); + } + + // 
@@protoc_insertion_point(class_scope:com.baidu.hugegraph.backend.store.raft.rpc.ListPeersRequest) + } + + public interface ListPeersResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + boolean hasCommon(); + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse getCommon(); + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder getCommonOrBuilder(); + + // repeated string endpoints = 2; + /** + * repeated string endpoints = 2; + */ + java.util.List + getEndpointsList(); + /** + * repeated string endpoints = 2; + */ + int getEndpointsCount(); + /** + * repeated string endpoints = 2; + */ + java.lang.String getEndpoints(int index); + /** + * repeated string endpoints = 2; + */ + com.google.protobuf.ByteString + getEndpointsBytes(int index); + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.ListPeersResponse} + */ + public static final class ListPeersResponse extends + com.google.protobuf.GeneratedMessage + implements ListPeersResponseOrBuilder { + // Use ListPeersResponse.newBuilder() to construct. 
+ private ListPeersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListPeersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListPeersResponse defaultInstance; + public static ListPeersResponse getDefaultInstance() { + return defaultInstance; + } + + public ListPeersResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListPeersResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = common_.toBuilder(); + } + common_ = input.readMessage(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(common_); + common_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + endpoints_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + 
endpoints_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + endpoints_ = new com.google.protobuf.UnmodifiableLazyStringList(endpoints_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListPeersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListPeersResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + public static final int COMMON_FIELD_NUMBER = 1; + private com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse 
common_; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public boolean hasCommon() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse getCommon() { + return common_; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder getCommonOrBuilder() { + return common_; + } + + // repeated string endpoints = 2; + public static final int ENDPOINTS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList endpoints_; + /** + * repeated string endpoints = 2; + */ + public java.util.List + getEndpointsList() { + return endpoints_; + } + /** + * repeated string endpoints = 2; + */ + public int getEndpointsCount() { + return endpoints_.size(); + } + /** + * repeated string endpoints = 2; + */ + public java.lang.String getEndpoints(int index) { + return endpoints_.get(index); + } + /** + * repeated string endpoints = 2; + */ + public com.google.protobuf.ByteString + getEndpointsBytes(int index) { + return endpoints_.getByteString(index); + } + + private void initFields() { + common_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance(); + endpoints_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasCommon()) { + memoizedIsInitialized = 0; + return false; + } + if (!getCommon().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws 
java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, common_); + } + for (int i = 0; i < endpoints_.size(); i++) { + output.writeBytes(2, endpoints_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, common_); + } + { + int dataSize = 0; + for (int i = 0; i < endpoints_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(endpoints_.getByteString(i)); + } + size += dataSize; + size += 1 * getEndpointsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse 
parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse prototype) { + return 
newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.ListPeersResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse.Builder.class); + } + + // Construct using com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getCommonFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (commonBuilder_ == null) { + common_ = 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance(); + } else { + commonBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + endpoints_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_descriptor; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse getDefaultInstanceForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse.getDefaultInstance(); + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse build() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse buildPartial() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse result = new com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (commonBuilder_ == null) { + result.common_ = common_; + } else { + result.common_ = commonBuilder_.build(); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + endpoints_ = new com.google.protobuf.UnmodifiableLazyStringList( + endpoints_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.endpoints_ = endpoints_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse) { + return mergeFrom((com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse other) { + if (other == com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse.getDefaultInstance()) return this; + if (other.hasCommon()) { + mergeCommon(other.getCommon()); + } + if (!other.endpoints_.isEmpty()) { + if (endpoints_.isEmpty()) { + endpoints_ = other.endpoints_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureEndpointsIsMutable(); + endpoints_.addAll(other.endpoints_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasCommon()) { + + return false; + } + if (!getCommon().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.ListPeersResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + private com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse common_ = 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder> commonBuilder_; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public boolean hasCommon() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse getCommon() { + if (commonBuilder_ == null) { + return common_; + } else { + return commonBuilder_.getMessage(); + } + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public Builder setCommon(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse value) { + if (commonBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + common_ = value; + onChanged(); + } else { + commonBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public Builder setCommon( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder builderForValue) { + if (commonBuilder_ == null) { + common_ = builderForValue.build(); + onChanged(); + } else { + commonBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public Builder mergeCommon(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse value) { + if (commonBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 
0x00000001) && + common_ != com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance()) { + common_ = + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.newBuilder(common_).mergeFrom(value).buildPartial(); + } else { + common_ = value; + } + onChanged(); + } else { + commonBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public Builder clearCommon() { + if (commonBuilder_ == null) { + common_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance(); + onChanged(); + } else { + commonBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder getCommonBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getCommonFieldBuilder().getBuilder(); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder getCommonOrBuilder() { + if (commonBuilder_ != null) { + return commonBuilder_.getMessageOrBuilder(); + } else { + return common_; + } + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder> + getCommonFieldBuilder() { + if (commonBuilder_ == null) { + commonBuilder_ = new com.google.protobuf.SingleFieldBuilder< + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse, 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder>( + common_, + getParentForChildren(), + isClean()); + common_ = null; + } + return commonBuilder_; + } + + // repeated string endpoints = 2; + private com.google.protobuf.LazyStringList endpoints_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureEndpointsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + endpoints_ = new com.google.protobuf.LazyStringArrayList(endpoints_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated string endpoints = 2; + */ + public java.util.List + getEndpointsList() { + return java.util.Collections.unmodifiableList(endpoints_); + } + /** + * repeated string endpoints = 2; + */ + public int getEndpointsCount() { + return endpoints_.size(); + } + /** + * repeated string endpoints = 2; + */ + public java.lang.String getEndpoints(int index) { + return endpoints_.get(index); + } + /** + * repeated string endpoints = 2; + */ + public com.google.protobuf.ByteString + getEndpointsBytes(int index) { + return endpoints_.getByteString(index); + } + /** + * repeated string endpoints = 2; + */ + public Builder setEndpoints( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureEndpointsIsMutable(); + endpoints_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string endpoints = 2; + */ + public Builder addEndpoints( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureEndpointsIsMutable(); + endpoints_.add(value); + onChanged(); + return this; + } + /** + * repeated string endpoints = 2; + */ + public Builder addAllEndpoints( + java.lang.Iterable values) { + ensureEndpointsIsMutable(); + super.addAll(values, endpoints_); + onChanged(); + return this; + } + /** + * repeated string endpoints = 2; + */ + public Builder 
clearEndpoints() { + endpoints_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * repeated string endpoints = 2; + */ + public Builder addEndpointsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureEndpointsIsMutable(); + endpoints_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:com.baidu.hugegraph.backend.store.raft.rpc.ListPeersResponse) + } + + static { + defaultInstance = new ListPeersResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:com.baidu.hugegraph.backend.store.raft.rpc.ListPeersResponse) + } + + public interface SetLeaderRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string endpoint = 1; + /** + * required string endpoint = 1; + */ + boolean hasEndpoint(); + /** + * required string endpoint = 1; + */ + java.lang.String getEndpoint(); + /** + * required string endpoint = 1; + */ + com.google.protobuf.ByteString + getEndpointBytes(); + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.SetLeaderRequest} + */ + public static final class SetLeaderRequest extends + com.google.protobuf.GeneratedMessage + implements SetLeaderRequestOrBuilder { + // Use SetLeaderRequest.newBuilder() to construct. 
+ private SetLeaderRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SetLeaderRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SetLeaderRequest defaultInstance; + public static SetLeaderRequest getDefaultInstance() { + return defaultInstance; + } + + public SetLeaderRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SetLeaderRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + endpoint_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SetLeaderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SetLeaderRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string endpoint = 1; + public static final int ENDPOINT_FIELD_NUMBER = 1; + private java.lang.Object endpoint_; + /** + * required string endpoint = 1; + */ + public boolean hasEndpoint() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string endpoint = 1; + */ + public java.lang.String getEndpoint() { + java.lang.Object ref = endpoint_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + endpoint_ = s; + } + return s; + } + } + /** + * required string endpoint = 1; + */ + public com.google.protobuf.ByteString + getEndpointBytes() { + java.lang.Object ref = endpoint_; + if (ref instanceof java.lang.String) { + 
com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + endpoint_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + endpoint_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasEndpoint()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getEndpointBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getEndpointBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); 
+ } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.SetLeaderRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest.Builder.class); + } + + // Construct using com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + 
private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + endpoint_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_descriptor; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest getDefaultInstanceForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest.getDefaultInstance(); + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest build() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest buildPartial() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest result = new com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.endpoint_ = endpoint_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest) { + return mergeFrom((com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest other) { + if 
(other == com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest.getDefaultInstance()) return this; + if (other.hasEndpoint()) { + bitField0_ |= 0x00000001; + endpoint_ = other.endpoint_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasEndpoint()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string endpoint = 1; + private java.lang.Object endpoint_ = ""; + /** + * required string endpoint = 1; + */ + public boolean hasEndpoint() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string endpoint = 1; + */ + public java.lang.String getEndpoint() { + java.lang.Object ref = endpoint_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + endpoint_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string endpoint = 1; + */ + public com.google.protobuf.ByteString + getEndpointBytes() { + java.lang.Object ref = endpoint_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + endpoint_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + 
/** + * required string endpoint = 1; + */ + public Builder setEndpoint( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + endpoint_ = value; + onChanged(); + return this; + } + /** + * required string endpoint = 1; + */ + public Builder clearEndpoint() { + bitField0_ = (bitField0_ & ~0x00000001); + endpoint_ = getDefaultInstance().getEndpoint(); + onChanged(); + return this; + } + /** + * required string endpoint = 1; + */ + public Builder setEndpointBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + endpoint_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:com.baidu.hugegraph.backend.store.raft.rpc.SetLeaderRequest) + } + + static { + defaultInstance = new SetLeaderRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:com.baidu.hugegraph.backend.store.raft.rpc.SetLeaderRequest) + } + + public interface SetLeaderResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + boolean hasCommon(); + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse getCommon(); + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder getCommonOrBuilder(); + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.SetLeaderResponse} + */ + public static final class SetLeaderResponse extends + com.google.protobuf.GeneratedMessage + implements SetLeaderResponseOrBuilder { + // Use SetLeaderResponse.newBuilder() 
to construct. + private SetLeaderResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SetLeaderResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SetLeaderResponse defaultInstance; + public static SetLeaderResponse getDefaultInstance() { + return defaultInstance; + } + + public SetLeaderResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SetLeaderResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = common_.toBuilder(); + } + common_ = input.readMessage(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(common_); + common_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SetLeaderResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SetLeaderResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + public static final int COMMON_FIELD_NUMBER = 1; + private com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse common_; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public boolean hasCommon() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse getCommon() { + return common_; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder getCommonOrBuilder() { + return common_; + } + + private void initFields() { + common_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasCommon()) { + memoizedIsInitialized = 0; + return false; + } + if (!getCommon().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, common_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, common_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return 
PARSER.parseFrom(input); + } + public static com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code com.baidu.hugegraph.backend.store.raft.rpc.SetLeaderResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse.class, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse.Builder.class); + } + + // Construct using com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse.newBuilder() + 
private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getCommonFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (commonBuilder_ == null) { + common_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance(); + } else { + commonBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_descriptor; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse getDefaultInstanceForType() { + return com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse.getDefaultInstance(); + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse build() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse buildPartial() { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse result = new com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (commonBuilder_ 
== null) { + result.common_ = common_; + } else { + result.common_ = commonBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse) { + return mergeFrom((com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse other) { + if (other == com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse.getDefaultInstance()) return this; + if (other.hasCommon()) { + mergeCommon(other.getCommon()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasCommon()) { + + return false; + } + if (!getCommon().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + private com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse common_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance(); + private 
com.google.protobuf.SingleFieldBuilder< + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder> commonBuilder_; + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public boolean hasCommon() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse getCommon() { + if (commonBuilder_ == null) { + return common_; + } else { + return commonBuilder_.getMessage(); + } + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public Builder setCommon(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse value) { + if (commonBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + common_ = value; + onChanged(); + } else { + commonBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public Builder setCommon( + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder builderForValue) { + if (commonBuilder_ == null) { + common_ = builderForValue.build(); + onChanged(); + } else { + commonBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public Builder mergeCommon(com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse value) { + if (commonBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + common_ != 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance()) { + common_ = + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.newBuilder(common_).mergeFrom(value).buildPartial(); + } else { + common_ = value; + } + onChanged(); + } else { + commonBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public Builder clearCommon() { + if (commonBuilder_ == null) { + common_ = com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.getDefaultInstance(); + onChanged(); + } else { + commonBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder getCommonBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getCommonFieldBuilder().getBuilder(); + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + public com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder getCommonOrBuilder() { + if (commonBuilder_ != null) { + return commonBuilder_.getMessageOrBuilder(); + } else { + return common_; + } + } + /** + * required .com.baidu.hugegraph.backend.store.raft.rpc.CommonResponse common = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder> + getCommonFieldBuilder() { + if (commonBuilder_ == null) { + commonBuilder_ = new com.google.protobuf.SingleFieldBuilder< + com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse, 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse.Builder, com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponseOrBuilder>( + common_, + getParentForChildren(), + isClean()); + common_ = null; + } + return commonBuilder_; + } + + // @@protoc_insertion_point(builder_scope:com.baidu.hugegraph.backend.store.raft.rpc.SetLeaderResponse) + } + + static { + defaultInstance = new SetLeaderResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:com.baidu.hugegraph.backend.store.raft.rpc.SetLeaderResponse) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n,hugegraph-core/src/main/resources/raft" + + ".proto\022*com.baidu.hugegraph.backend.stor" + + "e.raft.rpc\"\261\001\n\023StoreCommandRequest\022C\n\004ty" + + "pe\030\001 \002(\01625.com.baidu.hugegraph.backend.s" + + "tore.raft.rpc.StoreType\022G\n\006action\030\002 \002(\0162" + + "7.com.baidu.hugegraph.backend.store.raft" + + ".rpc.StoreAction\022\014\n\004data\030\003 \002(\014\"7\n\024StoreC" + + "ommandResponse\022\016\n\006status\030\001 \002(\010\022\017\n\007messag" + + "e\030\002 \001(\t\"1\n\016CommonResponse\022\016\n\006status\030\001 \002(" + + "\010\022\017\n\007message\030\002 \001(\t\"\022\n\020ListPeersRequest\"r", + "\n\021ListPeersResponse\022J\n\006common\030\001 \002(\0132:.co" + + "m.baidu.hugegraph.backend.store.raft.rpc" + + ".CommonResponse\022\021\n\tendpoints\030\002 \003(\t\"$\n\020Se" + + 
"tLeaderRequest\022\020\n\010endpoint\030\001 \002(\t\"_\n\021SetL" + + "eaderResponse\022J\n\006common\030\001 \002(\0132:.com.baid" + + "u.hugegraph.backend.store.raft.rpc.Commo" + + "nResponse*7\n\tStoreType\022\n\n\006SCHEMA\020\000\022\t\n\005GR" + + "APH\020\001\022\n\n\006SYSTEM\020\002\022\007\n\003ALL\020\003*\237\001\n\013StoreActi" + + "on\022\010\n\004NONE\020\000\022\010\n\004INIT\020\001\022\t\n\005CLEAR\020\002\022\014\n\010TRU" + + "NCATE\020\003\022\014\n\010SNAPSHOT\020\004\022\014\n\010BEGIN_TX\020\n\022\r\n\tC", + "OMMIT_TX\020\013\022\017\n\013ROLLBACK_TX\020\014\022\n\n\006MUTATE\020\024\022" + + "\020\n\014INCR_COUNTER\020\025\022\t\n\005QUERY\020\036B:\n*com.baid" + + "u.hugegraph.backend.store.raft.rpcB\014Raft" + + "Requests" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandRequest_descriptor, + new java.lang.String[] { "Type", "Action", "Data", }); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_StoreCommandResponse_descriptor, + new java.lang.String[] { "Status", "Message", }); + 
internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_CommonResponse_descriptor, + new java.lang.String[] { "Status", "Message", }); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersRequest_descriptor, + new java.lang.String[] { }); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_ListPeersResponse_descriptor, + new java.lang.String[] { "Common", "Endpoints", }); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderRequest_descriptor, + new java.lang.String[] { "Endpoint", }); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_com_baidu_hugegraph_backend_store_raft_rpc_SetLeaderResponse_descriptor, + new java.lang.String[] { "Common", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/RpcForwarder.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/RpcForwarder.java new file mode 100644 index 0000000000..d1910fdcae --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/RpcForwarder.java @@ -0,0 +1,162 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package com.baidu.hugegraph.backend.store.raft.rpc; + +import static com.baidu.hugegraph.backend.store.raft.RaftSharedContext.WAIT_RPC_TIMEOUT; + +import java.util.concurrent.ExecutionException; + +import org.slf4j.Logger; + +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.core.NodeImpl; +import com.alipay.sofa.jraft.entity.PeerId; +import com.alipay.sofa.jraft.error.RaftError; +import com.alipay.sofa.jraft.rpc.RaftClientService; +import com.alipay.sofa.jraft.rpc.RpcResponseClosure; +import com.alipay.sofa.jraft.util.Endpoint; +import com.baidu.hugegraph.backend.BackendException; +import com.baidu.hugegraph.backend.store.raft.RaftClosure; +import com.baidu.hugegraph.backend.store.raft.RaftNode; +import com.baidu.hugegraph.backend.store.raft.StoreClosure; +import com.baidu.hugegraph.backend.store.raft.StoreCommand; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse; +import com.baidu.hugegraph.util.E; +import com.baidu.hugegraph.util.Log; +import com.google.protobuf.Descriptors.FieldDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.ZeroByteStringHelper; + +public class RpcForwarder { + + private static final Logger LOG = Log.logger(RpcForwarder.class); + + private final PeerId nodeId; + private final RaftClientService rpcClient; + + public RpcForwarder(RaftNode node) { + this.nodeId = node.node().getNodeId().getPeerId(); + this.rpcClient = ((NodeImpl) node.node()).getRpcService(); + E.checkNotNull(this.rpcClient, "rpc client"); + } + + public void forwardToLeader(PeerId leaderId, StoreCommand command, + StoreClosure closure) { + E.checkNotNull(leaderId, "leader id"); + E.checkState(!leaderId.equals(this.nodeId), + "Invalid state: current node is the leader, there is " + + "no need to forward the request"); + 
LOG.debug("The node {} forward request to leader {}", + this.nodeId, leaderId); + + StoreCommandRequest.Builder builder = StoreCommandRequest.newBuilder(); + builder.setType(command.type()); + builder.setAction(command.action()); + builder.setData(ZeroByteStringHelper.wrap(command.data())); + StoreCommandRequest request = builder.build(); + + RpcResponseClosure responseClosure; + responseClosure = new RpcResponseClosure() { + @Override + public void setResponse(StoreCommandResponse response) { + if (response.getStatus()) { + LOG.debug("StoreCommandResponse status ok"); + closure.complete(Status.OK(), () -> null); + } else { + LOG.debug("StoreCommandResponse status error"); + Status status = new Status(RaftError.UNKNOWN, + "fowared request failed"); + BackendException e = new BackendException( + "Current node isn't leader, leader " + + "is [%s], failed to forward request " + + "to leader: %s", + leaderId, response.getMessage()); + closure.failure(status, e); + } + } + + @Override + public void run(Status status) { + closure.run(status); + } + }; + this.waitRpc(leaderId.getEndpoint(), request, responseClosure); + } + + public RaftClosure forwardToLeader(PeerId leaderId, + Message request) { + E.checkNotNull(leaderId, "leader id"); + E.checkState(!leaderId.equals(this.nodeId), + "Invalid state: current node is the leader, there is " + + "no need to forward the request"); + LOG.debug("The node '{}' forward request to leader '{}'", + this.nodeId, leaderId); + + RaftClosure future = new RaftClosure<>(); + RpcResponseClosure responseClosure = new RpcResponseClosure() { + @Override + public void setResponse(T response) { + FieldDescriptor fd = response.getDescriptorForType() + .findFieldByName("common"); + Object object = response.getField(fd); + E.checkState(object instanceof CommonResponse, + "The common field must be instance of " + + "CommonResponse, actual is '%s'", + object != null ? 
object.getClass() : null); + CommonResponse commonResponse = (CommonResponse) object; + if (commonResponse.getStatus()) { + future.complete(Status.OK(), () -> response); + } else { + Status status = new Status(RaftError.UNKNOWN, + "fowared request failed"); + BackendException e = new BackendException( + "Current node isn't leader, leader " + + "is [%s], failed to forward request " + + "to leader: %s", + leaderId, commonResponse.getMessage()); + future.failure(status, e); + } + } + + @Override + public void run(Status status) { + future.run(status); + } + }; + this.waitRpc(leaderId.getEndpoint(), request, responseClosure); + return future; + } + + private void waitRpc(Endpoint endpoint, Message request, + RpcResponseClosure done) { + E.checkNotNull(endpoint, "leader endpoint"); + try { + this.rpcClient.invokeWithDone(endpoint, request, done, + WAIT_RPC_TIMEOUT).get(); + } catch (InterruptedException e) { + throw new BackendException("Invoke rpc request was interrupted, " + + "please try again later", e); + } catch (ExecutionException e) { + throw new BackendException("Failed to invoke rpc request", e); + } + } +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/SetLeaderProcessor.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/SetLeaderProcessor.java new file mode 100644 index 0000000000..f110537ffb --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/SetLeaderProcessor.java @@ -0,0 +1,71 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package com.baidu.hugegraph.backend.store.raft.rpc; + +import org.slf4j.Logger; + +import com.alipay.sofa.jraft.rpc.RpcRequestClosure; +import com.alipay.sofa.jraft.rpc.RpcRequestProcessor; +import com.baidu.hugegraph.backend.store.raft.RaftGroupManager; +import com.baidu.hugegraph.backend.store.raft.RaftSharedContext; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.CommonResponse; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderRequest; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.SetLeaderResponse; +import com.baidu.hugegraph.util.Log; +import com.google.protobuf.Message; + +public class SetLeaderProcessor + extends RpcRequestProcessor { + + private static final Logger LOG = Log.logger(SetLeaderProcessor.class); + + private final RaftSharedContext context; + + public SetLeaderProcessor(RaftSharedContext context) { + super(null, null); + this.context = context; + } + + @Override + public Message processRequest(SetLeaderRequest request, + RpcRequestClosure done) { + LOG.debug("Processing SetLeaderRequest {}", request.getClass()); + RaftGroupManager nodeManager = this.context.raftNodeManager( + RaftSharedContext.DEFAULT_GROUP); + try { + nodeManager.setLeader(request.getEndpoint()); + CommonResponse common = CommonResponse.newBuilder() + .setStatus(true) + .build(); + return SetLeaderResponse.newBuilder().setCommon(common).build(); + } catch (Throwable e) { + CommonResponse common = CommonResponse.newBuilder() + .setStatus(false) + .setMessage(e.toString()) + .build(); + return 
SetLeaderResponse.newBuilder().setCommon(common).build(); + } + } + + @Override + public String interest() { + return SetLeaderRequest.class.getName(); + } +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreCommandRequestProcessor.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/StoreCommandProcessor.java similarity index 72% rename from hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreCommandRequestProcessor.java rename to hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/StoreCommandProcessor.java index ad92476c53..50964d49b6 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/StoreCommandRequestProcessor.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/store/raft/rpc/StoreCommandProcessor.java @@ -17,28 +17,33 @@ * under the License. */ -package com.baidu.hugegraph.backend.store.raft; +package com.baidu.hugegraph.backend.store.raft.rpc; import org.slf4j.Logger; import com.alipay.sofa.jraft.rpc.RpcRequestClosure; import com.alipay.sofa.jraft.rpc.RpcRequestProcessor; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandRequest; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreCommandResponse; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType; +import com.baidu.hugegraph.backend.store.raft.RaftNode; +import com.baidu.hugegraph.backend.store.raft.RaftSharedContext; +import com.baidu.hugegraph.backend.store.raft.StoreClosure; +import com.baidu.hugegraph.backend.store.raft.StoreCommand; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandRequest; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreCommandResponse; +import 
com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType; import com.baidu.hugegraph.util.Log; import com.google.protobuf.Message; -public class StoreCommandRequestProcessor +public class StoreCommandProcessor extends RpcRequestProcessor { - private static final Logger LOG = Log.logger(StoreCommandRequestProcessor.class); + private static final Logger LOG = Log.logger( + StoreCommandProcessor.class); private final RaftSharedContext context; - public StoreCommandRequestProcessor(RaftSharedContext context) { - super(null, StoreCommandResponse.newBuilder().setStatus(true).build()); + public StoreCommandProcessor(RaftSharedContext context) { + super(null, null); this.context = context; } @@ -71,6 +76,6 @@ private StoreCommand parseStoreCommand(StoreCommandRequest request) { StoreType type = request.getType(); StoreAction action = request.getAction(); byte[] data = request.getData().toByteArray(); - return new StoreCommand(type, action, data); + return new StoreCommand(type, action, data, true); } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/tx/GraphTransaction.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/tx/GraphTransaction.java index 971e4efec9..c594f40acf 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/tx/GraphTransaction.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/backend/tx/GraphTransaction.java @@ -92,7 +92,6 @@ import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.type.define.HugeKeys; import com.baidu.hugegraph.type.define.IdStrategy; -import com.baidu.hugegraph.type.define.SchemaStatus; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.InsertionOrderUtil; import com.baidu.hugegraph.util.LockUtil; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/config/CoreOptions.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/config/CoreOptions.java index 0bb8ac9201..f1e94a900e 100644 --- 
a/hugegraph-core/src/main/java/com/baidu/hugegraph/config/CoreOptions.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/config/CoreOptions.java @@ -124,7 +124,7 @@ public static synchronized CoreOptions instance() { "raft.use_snapshot", "Whether to use snapshot.", disallowEmpty(), - false + true ); public static final ConfigOption RAFT_ENDPOINT = @@ -157,7 +157,7 @@ public static synchronized CoreOptions instance() { "Whether to use replicator line, when turned on it " + "multiple logs can be sent in parallel, and the next log " + "doesn't have to wait for the ack message of the current " + - "log to be sent", + "log to be sent.", disallowEmpty(), true ); @@ -207,7 +207,7 @@ public static synchronized CoreOptions instance() { new ConfigOption<>( "raft.queue_size", "The disruptor buffers size for jraft RaftNode, " + - "StateMachine and LogManager", + "StateMachine and LogManager.", positiveInt(), // jraft default value is 16384 16384 @@ -216,7 +216,7 @@ public static synchronized CoreOptions instance() { public static final ConfigOption RAFT_QUEUE_PUBLISH_TIMEOUT = new ConfigOption<>( "raft.queue_publish_timeout", - "The timeout in second when publish event into disruptor", + "The timeout in second when publish event into disruptor.", positiveInt(), // jraft default value is 10(sec) 60 @@ -234,7 +234,7 @@ public static synchronized CoreOptions instance() { public static final ConfigOption RAFT_RPC_CONNECT_TIMEOUT = new ConfigOption<>( "raft.rpc_connect_timeout", - "The rpc connect timeout for jraft rpc", + "The rpc connect timeout for jraft rpc.", positiveInt(), // jraft default value is 1000(ms) 5000 @@ -243,12 +243,36 @@ public static synchronized CoreOptions instance() { public static final ConfigOption RAFT_RPC_TIMEOUT = new ConfigOption<>( "raft.rpc_timeout", - "The rpc timeout for jraft rpc", + "The rpc timeout for jraft rpc.", positiveInt(), // jraft default value is 5000(ms) 60000 ); + public static final ConfigOption RAFT_RPC_BUF_LOW_WATER_MARK = 
+ new ConfigOption<>( + "raft.rpc_buf_low_water_mark", + "The ChannelOutboundBuffer's low water mark of netty, " + + "when buffer size less than this size, the method " + + "ChannelOutboundBuffer.isWritable() will return true, " + + "it means that low downstream pressure or good network.", + positiveInt(), + 10 * 1024 * 1024 + ); + + public static final ConfigOption RAFT_RPC_BUF_HIGH_WATER_MARK = + new ConfigOption<>( + "raft.rpc_buf_high_water_mark", + "The ChannelOutboundBuffer's high water mark of netty, " + + "only when buffer size exceed this size, the method " + + "ChannelOutboundBuffer.isWritable() will return false, " + + "it means that the downstream pressure is too great to " + + "process the request or network is very congestion, " + + "upstream needs to limit rate at this time.", + positiveInt(), + 20 * 1024 * 1024 + ); + public static final ConfigOption RATE_LIMIT_WRITE = new ConfigOption<>( "rate_limit.write", @@ -268,8 +292,9 @@ public static synchronized CoreOptions instance() { public static final ConfigOption TASK_WAIT_TIMEOUT = new ConfigOption<>( "task.wait_timeout", - "Timeout in seconds for waiting for the task to complete," + - "such as when truncating or clearing the backend.", + "Timeout in seconds for waiting for the task to " + + "complete, such as when truncating or clearing the " + + "backend.", rangeInt(0L, Long.MAX_VALUE), 10L ); @@ -290,6 +315,14 @@ public static synchronized CoreOptions instance() { 16 * Bytes.MB ); + public static final ConfigOption TASK_TTL_DELETE_BATCH = + new ConfigOption<>( + "task.ttl_delete_batch", + "The batch size used to delete expired data.", + rangeInt(1, 500), + 1 + ); + public static final ConfigOption CONNECTION_DETECT_INTERVAL = new ConfigOption<>( "store.connection_detect_interval", @@ -313,7 +346,7 @@ public static synchronized CoreOptions instance() { new ConfigOption<>( "vertex.check_customized_id_exist", "Whether to check the vertices exist for those using " + - "customized id strategy", + 
"customized id strategy.", disallowEmpty(), false ); @@ -321,7 +354,7 @@ public static synchronized CoreOptions instance() { public static final ConfigOption VERTEX_ADJACENT_VERTEX_EXIST = new ConfigOption<>( "vertex.check_adjacent_vertex_exist", - "Whether to check the adjacent vertices of edges exist", + "Whether to check the adjacent vertices of edges exist.", disallowEmpty(), false ); @@ -329,7 +362,7 @@ public static synchronized CoreOptions instance() { public static final ConfigOption VERTEX_ADJACENT_VERTEX_LAZY = new ConfigOption<>( "vertex.lazy_load_adjacent_vertex", - "Whether to lazy load adjacent vertices of edges", + "Whether to lazy load adjacent vertices of edges.", disallowEmpty(), true ); @@ -337,8 +370,8 @@ public static synchronized CoreOptions instance() { public static final ConfigOption VERTEX_PART_EDGE_COMMIT_SIZE = new ConfigOption<>( "vertex.part_edge_commit_size", - "Whether to enable the mode to commit part of edges of vertex, " + - "enabled if commit size > 0, 0 meas disabled.", + "Whether to enable the mode to commit part of edges of " + + "vertex, enabled if commit size > 0, 0 meas disabled.", rangeInt(0, (int) Query.DEFAULT_CAPACITY), 5000 ); @@ -370,8 +403,9 @@ public static synchronized CoreOptions instance() { public static final ConfigOption QUERY_INDEX_INTERSECT_THRESHOLD = new ConfigOption<>( "query.index_intersect_threshold", - "The maximum number of intermediate results to intersect " + - "indexes when querying by multiple single index properties.", + "The maximum number of intermediate results to " + + "intersect indexes when querying by multiple single " + + "index properties.", rangeInt(1, (int) Query.DEFAULT_CAPACITY), 1000 ); @@ -405,7 +439,8 @@ public static synchronized CoreOptions instance() { public static final ConfigOption VERTEX_TX_CAPACITY = new ConfigOption<>( "vertex.tx_capacity", - "The max size(items) of vertices(uncommitted) in transaction.", + "The max size(items) of vertices(uncommitted) in " + + 
"transaction.", rangeInt(COMMIT_BATCH, 1000000), 10000 ); @@ -413,7 +448,8 @@ public static synchronized CoreOptions instance() { public static final ConfigOption EDGE_TX_CAPACITY = new ConfigOption<>( "edge.tx_capacity", - "The max size(items) of edges(uncommitted) in transaction.", + "The max size(items) of edges(uncommitted) in " + + "transaction.", rangeInt(COMMIT_BATCH, 1000000), 10000 ); @@ -455,14 +491,6 @@ public static synchronized CoreOptions instance() { "l1" ); - public static final ConfigOption EXPIRED_DELETE_BATCH = - new ConfigOption<>( - "expired.delete_batch", - "The batch size used to delete expired data.", - rangeInt(1, 500), - 1 - ); - public static final ConfigOption VERTEX_CACHE_CAPACITY = new ConfigOption<>( "vertex.cache_capacity", @@ -533,7 +561,7 @@ public static synchronized CoreOptions instance() { "Choose a text analyzer for searching the " + "vertex/edge properties, available type are " + "[word, ansj, hanlp, smartcn, jieba, jcseg, " + - "mmseg4j, ikanalyzer]", + "mmseg4j, ikanalyzer].", disallowEmpty(), "ikanalyzer" ); @@ -558,7 +586,7 @@ public static synchronized CoreOptions instance() { "jcseg: [Simple, Complex], " + "mmseg4j: [Simple, Complex, MaxWord], " + "ikanalyzer: [smart, max_word]" + - "}", + "}.", disallowEmpty(), "smart" ); @@ -566,7 +594,7 @@ public static synchronized CoreOptions instance() { public static final ConfigOption COMPUTER_CONFIG = new ConfigOption<>( "computer.config", - "The config file path of computer job", + "The config file path of computer job.", disallowEmpty(), "./conf/computer.yaml" ); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/AbstractComputer.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/AbstractComputer.java index 63806a3dde..d318824133 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/AbstractComputer.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/AbstractComputer.java @@ -35,6 +35,8 @@ 
import com.baidu.hugegraph.HugeException; import com.baidu.hugegraph.job.ComputerJob; import com.baidu.hugegraph.job.Job; +import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; +import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.Log; import com.baidu.hugegraph.util.ParameterUtil; @@ -60,6 +62,11 @@ public abstract class AbstractComputer implements Computer { public static final int DEFAULT_MAX_STEPS = 5; public static final String PRECISION = "precision"; public static final double DEFAULT_PRECISION = 0.0001D; + public static final String TIMES = "times"; + public static final int DEFAULT_TIMES = 10; + public static final String DIRECTION = "direction"; + public static final String DEGREE = "degree"; + public static final long DEFAULT_DEGREE = 100L; protected static final String CATEGORY_RANK = "rank"; protected static final String CATEGORY_COMM = "community"; @@ -207,4 +214,46 @@ protected static double precision(Map parameters) { PRECISION, precision); return precision; } + + protected static int times(Map parameters) { + if (!parameters.containsKey(TIMES)) { + return DEFAULT_TIMES; + } + int times = ParameterUtil.parameterInt(parameters, TIMES); + E.checkArgument(times > 0, + "The value of %s must be > 0, but got %s", + TIMES, times); + return times; + } + + protected static Directions direction(Map parameters) { + if (!parameters.containsKey(DIRECTION)) { + return Directions.BOTH; + } + Object direction = ParameterUtil.parameter(parameters, DIRECTION); + return parseDirection(direction); + } + + protected static long degree(Map parameters) { + if (!parameters.containsKey(DEGREE)) { + return DEFAULT_DEGREE; + } + long degree = ParameterUtil.parameterLong(parameters, DEGREE); + HugeTraverser.checkDegree(degree); + return degree; + } + + protected static Directions parseDirection(Object direction) { + if (direction.equals(Directions.BOTH.toString())) { + return Directions.BOTH; + } else if 
(direction.equals(Directions.OUT.toString())) { + return Directions.OUT; + } else if (direction.equals(Directions.IN.toString())) { + return Directions.IN; + } else { + throw new IllegalArgumentException(String.format( + "The value of direction must be in [OUT, IN, BOTH], " + + "but got '%s'", direction)); + } + } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/ComputerPool.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/ComputerPool.java index 1f3ed64de2..699430398d 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/ComputerPool.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/ComputerPool.java @@ -29,6 +29,9 @@ public class ComputerPool { static { INSTANCE.register(new PageRankComputer()); INSTANCE.register(new WeakConnectedComponentComputer()); + INSTANCE.register(new LpaComputer()); + INSTANCE.register(new TriangleCountComputer()); + INSTANCE.register(new LouvainComputer()); } private final Map computers; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/LouvainComputer.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/LouvainComputer.java new file mode 100644 index 0000000000..7770a84f19 --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/LouvainComputer.java @@ -0,0 +1,128 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package com.baidu.hugegraph.job.computer; + +import java.util.Map; + +import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; +import com.baidu.hugegraph.util.E; +import com.baidu.hugegraph.util.ParameterUtil; +import com.google.common.collect.ImmutableMap; + +public class LouvainComputer extends AbstractComputer { + + public static final String LOUVAIN = "louvain"; + + public static final String KEY_STABLE_TIMES = "stable_times"; + public static final String KEY_PRECISION = "precision"; + public static final String KEY_SHOW_MOD= "show_modularity"; + public static final String KEY_SHOW_COMM = "show_community"; + public static final String KEY_EXPORT_COMM = "export_community"; + public static final String KEY_SKIP_ISOLATED = "skip_isolated"; + public static final String KEY_CLEAR = "clear"; + + public static final long DEFAULT_STABLE_TIMES= 3L; + private static final int MAX_TIMES = 2048; + + @Override + public String name() { + return LOUVAIN; + } + + @Override + public String category() { + return CATEGORY_COMM; + } + + @Override + public void checkParameters(Map parameters) { + times(parameters); + stableTimes(parameters); + precision(parameters); + degree(parameters); + showModularity(parameters); + showCommunity(parameters); + exportCommunity(parameters); + skipIsolated(parameters); + clearPass(parameters); + } + + @Override + protected Map checkAndCollectParameters( + Map parameters) { + return ImmutableMap.of(TIMES, times(parameters), + PRECISION, precision(parameters), + DIRECTION, direction(parameters), + DEGREE, 
degree(parameters)); + } + + protected static int stableTimes(Map parameters) { + if (!parameters.containsKey(KEY_STABLE_TIMES)) { + return (int) DEFAULT_STABLE_TIMES; + } + int times = ParameterUtil.parameterInt(parameters, KEY_STABLE_TIMES); + HugeTraverser.checkPositiveOrNoLimit(times, KEY_STABLE_TIMES); + E.checkArgument(times <= MAX_TIMES, + "The maximum number of stable iterations is %s, " + + "but got %s", MAX_TIMES, times); + return times; + } + + protected static Long showModularity(Map parameters) { + if (!parameters.containsKey(KEY_SHOW_MOD)) { + return null; + } + long pass = ParameterUtil.parameterLong(parameters, KEY_SHOW_MOD); + HugeTraverser.checkNonNegative(pass, KEY_SHOW_MOD); + return pass; + } + + protected static String showCommunity(Map parameters) { + if (!parameters.containsKey(KEY_SHOW_COMM)) { + return null; + } + return ParameterUtil.parameterString(parameters, KEY_SHOW_COMM); + } + + protected static Long exportCommunity(Map parameters) { + if (!parameters.containsKey(KEY_EXPORT_COMM)) { + return null; + } + long pass = ParameterUtil.parameterLong(parameters, KEY_EXPORT_COMM); + HugeTraverser.checkNonNegative(pass, KEY_EXPORT_COMM); + return pass; + } + + protected static boolean skipIsolated(Map parameters) { + if (!parameters.containsKey(KEY_SKIP_ISOLATED)) { + return true; + } + return ParameterUtil.parameterBoolean(parameters, KEY_SKIP_ISOLATED); + } + + protected static Long clearPass(Map parameters) { + if (!parameters.containsKey(KEY_CLEAR)) { + return null; + } + long pass = ParameterUtil.parameterLong(parameters, KEY_CLEAR); + HugeTraverser.checkNonNegativeOrNoLimit(pass, KEY_CLEAR); + return pass; + } +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/LpaComputer.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/LpaComputer.java new file mode 100644 index 0000000000..cb1fec943c --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/LpaComputer.java @@ -0,0 
+1,73 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package com.baidu.hugegraph.job.computer; + +import java.util.Map; + +import com.baidu.hugegraph.util.E; +import com.baidu.hugegraph.util.ParameterUtil; +import com.google.common.collect.ImmutableMap; + +public class LpaComputer extends AbstractComputer { + + public static final String LPA = "lpa"; + + public static final String PROPERTY = "property"; + public static final String DEFAULT_PROPERTY = "id"; + + @Override + public String name() { + return LPA; + } + + @Override + public String category() { + return CATEGORY_COMM; + } + + @Override + public void checkParameters(Map parameters) { + times(parameters); + property(parameters); + precision(parameters); + direction(parameters); + degree(parameters); + } + + @Override + protected Map checkAndCollectParameters( + Map parameters) { + return ImmutableMap.of(TIMES, times(parameters), + PROPERTY, property(parameters), + PRECISION, precision(parameters), + DIRECTION, direction(parameters), + DEGREE, degree(parameters)); + } + + private static String property(Map parameters) { + if (!parameters.containsKey(PROPERTY)) { + return DEFAULT_PROPERTY; + } + String property = 
ParameterUtil.parameterString(parameters, PROPERTY); + E.checkArgument(property != null && !property.isEmpty(), + "The value of %s can not be null or empty", PROPERTY); + return property; + } +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/TriangleCountComputer.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/TriangleCountComputer.java new file mode 100644 index 0000000000..d4a882b279 --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/computer/TriangleCountComputer.java @@ -0,0 +1,52 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package com.baidu.hugegraph.job.computer; + +import java.util.Map; + +import com.google.common.collect.ImmutableMap; + +public class TriangleCountComputer extends AbstractComputer { + + public static final String TRIANGLE_COUNT = "triangle_count"; + + @Override + public String name() { + return TRIANGLE_COUNT; + } + + @Override + public String category() { + return CATEGORY_COMM; + } + + @Override + public void checkParameters(Map parameters) { + direction(parameters); + degree(parameters); + } + + @Override + protected Map checkAndCollectParameters( + Map parameters) { + return ImmutableMap.of(DIRECTION, direction(parameters), + DEGREE, degree(parameters)); + } +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/job/system/JobCounters.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/system/JobCounters.java index 8a48d103d2..09a7a10e03 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/job/system/JobCounters.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/job/system/JobCounters.java @@ -34,7 +34,7 @@ public class JobCounters { new ConcurrentHashMap<>(); public JobCounter jobCounter(HugeGraphParams g) { - int batch = g.configuration().get(CoreOptions.EXPIRED_DELETE_BATCH); + int batch = g.configuration().get(CoreOptions.TASK_TTL_DELETE_BATCH); String graph = g.name(); if (!this.jobCounters.containsKey(graph)) { this.jobCounters.putIfAbsent(graph, new JobCounter(batch)); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/security/HugeSecurityManager.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/security/HugeSecurityManager.java index 7280cb2ff3..4ccf9e9eb4 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/security/HugeSecurityManager.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/security/HugeSecurityManager.java @@ -102,6 +102,12 @@ public class HugeSecurityManager extends SecurityManager { "com.baidu.hugegraph.backend.store.hbase.HbaseSessions$RowIterator" ); + 
private static final Set RAFT_CLASSES = ImmutableSet.of( + "com.baidu.hugegraph.backend.store.raft.RaftNode", + "com.baidu.hugegraph.backend.store.raft.StoreStateMachine", + "com.baidu.hugegraph.backend.store.raft.rpc.RpcForwarder" + ); + @Override public void checkPermission(Permission permission) { if (DENIED_PERMISSIONS.contains(permission.getName()) && @@ -142,7 +148,8 @@ public void checkLink(String lib) { public void checkAccess(Thread thread) { if (callFromGremlin() && !callFromCaffeine() && !callFromAsyncTasks() && !callFromEventHubNotify() && - !callFromBackendThread() && !callFromBackendHbase()) { + !callFromBackendThread() && !callFromBackendHbase() && + !callFromRaft()) { throw newSecurityException( "Not allowed to access thread via Gremlin"); } @@ -153,7 +160,8 @@ public void checkAccess(Thread thread) { public void checkAccess(ThreadGroup threadGroup) { if (callFromGremlin() && !callFromCaffeine() && !callFromAsyncTasks() && !callFromEventHubNotify() && - !callFromBackendThread() && !callFromBackendHbase()) { + !callFromBackendThread() && !callFromBackendHbase() && + !callFromRaft()) { throw newSecurityException( "Not allowed to access thread group via Gremlin"); } @@ -180,7 +188,8 @@ public void checkExec(String cmd) { @Override public void checkRead(FileDescriptor fd) { - if (callFromGremlin() && !callFromBackendSocket()) { + if (callFromGremlin() && !callFromBackendSocket() && + !callFromRaft()) { throw newSecurityException("Not allowed to read fd via Gremlin"); } super.checkRead(fd); @@ -189,7 +198,8 @@ public void checkRead(FileDescriptor fd) { @Override public void checkRead(String file) { if (callFromGremlin() && !callFromCaffeine() && - !readGroovyInCurrentDir(file) && !callFromBackendHbase()) { + !readGroovyInCurrentDir(file) && !callFromBackendHbase() && + !callFromRaft()) { throw newSecurityException( "Not allowed to read file via Gremlin: %s", file); } @@ -198,7 +208,7 @@ public void checkRead(String file) { @Override public void 
checkRead(String file, Object context) { - if (callFromGremlin()) { + if (callFromGremlin() && !callFromRaft()) { throw newSecurityException( "Not allowed to read file via Gremlin: %s", file); } @@ -207,7 +217,8 @@ public void checkRead(String file, Object context) { @Override public void checkWrite(FileDescriptor fd) { - if (callFromGremlin() && !callFromBackendSocket()) { + if (callFromGremlin() && !callFromBackendSocket() && + !callFromRaft()) { throw newSecurityException("Not allowed to write fd via Gremlin"); } super.checkWrite(fd); @@ -215,7 +226,7 @@ public void checkWrite(FileDescriptor fd) { @Override public void checkWrite(String file) { - if (callFromGremlin()) { + if (callFromGremlin() && !callFromRaft()) { throw newSecurityException("Not allowed to write file via Gremlin"); } super.checkWrite(file); @@ -251,7 +262,7 @@ public void checkAccept(String host, int port) { @Override public void checkConnect(String host, int port) { if (callFromGremlin() && !callFromBackendSocket() && - !callFromBackendHbase()) { + !callFromBackendHbase() && !callFromRaft()) { throw newSecurityException( "Not allowed to connect socket via Gremlin"); } @@ -305,7 +316,8 @@ public void checkPropertiesAccess() { @Override public void checkPropertyAccess(String key) { if (!callFromAcceptClassLoaders() && callFromGremlin() && - !WHITE_SYSTEM_PROPERTYS.contains(key) && !callFromBackendHbase()) { + !WHITE_SYSTEM_PROPERTYS.contains(key) && !callFromBackendHbase() && + !callFromRaft()) { throw newSecurityException( "Not allowed to access system property(%s) via Gremlin", key); } @@ -425,6 +437,10 @@ private static boolean callFromBackendHbase() { return callFromWorkerWithClass(HBASE_CLASSES); } + private static boolean callFromRaft() { + return callFromWorkerWithClass(RAFT_CLASSES); + } + private static boolean callFromWorkerWithClass(Set classes) { Thread curThread = Thread.currentThread(); if (curThread.getName().startsWith(GREMLIN_SERVER_WORKER) || diff --git 
a/hugegraph-core/src/main/java/com/baidu/hugegraph/task/ServerInfoManager.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/task/ServerInfoManager.java index a2dbd31179..17aa4fabb5 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/task/ServerInfoManager.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/task/ServerInfoManager.java @@ -361,10 +361,13 @@ public void updateServerInfos(Collection serverInfos) { } public Collection allServerInfos() { - @SuppressWarnings("resource") - ListIterator iter = new ListIterator<>(MAX_SERVERS, - this.serverInfos(NO_LIMIT, null)); - return iter.list(); + Iterator infos = this.serverInfos(NO_LIMIT, null); + try (ListIterator iter = new ListIterator<>( + MAX_SERVERS, infos)) { + return iter.list(); + } catch (Exception e) { + throw new HugeException("Failed to close server info iterator", e); + } } public Iterator serverInfos(String page) { diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/task/TaskManager.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/task/TaskManager.java index 6eea705423..84322941a2 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/task/TaskManager.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/task/TaskManager.java @@ -19,8 +19,6 @@ package com.baidu.hugegraph.task; -import java.util.ArrayList; -import java.util.List; import java.util.Map; import java.util.Queue; import java.util.concurrent.Callable; @@ -35,6 +33,7 @@ import com.baidu.hugegraph.HugeException; import com.baidu.hugegraph.HugeGraphParams; +import com.baidu.hugegraph.util.Consumers; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.ExecutorUtil; import com.baidu.hugegraph.util.LockUtil; @@ -89,7 +88,7 @@ public void addScheduler(HugeGraphParams graph) { E.checkArgumentNotNull(graph, "The graph can't be null"); TaskScheduler scheduler = new StandardTaskScheduler(graph, - this.taskExecutor,this.taskDbExecutor, + this.taskExecutor, this.taskDbExecutor, 
this.serverInfoDbExecutor); this.schedulers.put(graph, scheduler); } @@ -127,42 +126,14 @@ public void closeScheduler(HugeGraphParams graph) { private void closeTaskTx(HugeGraphParams graph) { final boolean selfIsTaskWorker = Thread.currentThread().getName() .startsWith(TASK_WORKER_PREFIX); - final Map threadsTimes = new ConcurrentHashMap<>(); - final List> tasks = new ArrayList<>(); final int totalThreads = selfIsTaskWorker ? THREADS - 1 : THREADS; - - final Callable closeTx = () -> { - Thread current = Thread.currentThread(); - threadsTimes.putIfAbsent(current, 0); - int times = threadsTimes.get(current); - if (times == 0) { - // Do close-tx for current thread - graph.closeTx(); - // Let other threads run - Thread.yield(); - } else { - assert times < totalThreads; - assert threadsTimes.size() < totalThreads; - E.checkState(tasks.size() == totalThreads, - "Bad tasks size: %s", tasks.size()); - // Let another thread run and wait for it - this.taskExecutor.submit(tasks.get(0)).get(); - } - threadsTimes.put(current, ++times); - return null; - }; - - // NOTE: expect each task thread to perform a close operation - for (int i = 0; i < totalThreads; i++) { - tasks.add(closeTx); - } - try { if (selfIsTaskWorker) { // Call closeTx directly if myself is task thread(ignore others) - closeTx.call(); + graph.closeTx(); } else { - this.taskExecutor.invokeAll(tasks); + Consumers.executeOncePerThread(this.taskExecutor, totalThreads, + graph::closeTx); } } catch (Exception e) { throw new HugeException("Exception when closing task tx", e); @@ -177,7 +148,6 @@ private void closeSchedulerTx(HugeGraphParams graph) { Thread.yield(); return null; }; - try { this.schedulerExecutor.submit(closeTx).get(); } catch (Exception e) { diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CollectionPathsTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CollectionPathsTraverser.java index 6f1989464b..da76202e5c 100644 --- 
a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CollectionPathsTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CollectionPathsTraverser.java @@ -26,20 +26,17 @@ import java.util.Map; import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.slf4j.Logger; import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; import com.baidu.hugegraph.structure.HugeVertex; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.strategy.TraverseStrategy; import com.baidu.hugegraph.util.E; -import com.baidu.hugegraph.util.Log; import com.google.common.collect.ImmutableList; public class CollectionPathsTraverser extends HugeTraverser { - private static final Logger LOG = Log.logger(CollectionPathsTraverser.class); - public CollectionPathsTraverser(HugeGraph graph) { super(graph); } @@ -275,6 +272,7 @@ public void addNewVerticesToAll(Map> targets) { } } + @Override protected int accessedNodes() { return this.sourcesAll.size() + this.targetsAll.size(); } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CountTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CountTraverser.java index b8ce2c258f..ee27b40c44 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CountTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CountTraverser.java @@ -22,7 +22,6 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Set; import org.apache.commons.lang.mutable.MutableLong; @@ -34,7 +33,7 @@ import com.baidu.hugegraph.iterator.FilterIterator; import com.baidu.hugegraph.iterator.FlatMapperIterator; import com.baidu.hugegraph.structure.HugeEdge; -import com.baidu.hugegraph.type.define.Directions; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; 
import com.baidu.hugegraph.util.E; public class CountTraverser extends HugeTraverser { @@ -48,7 +47,7 @@ public CountTraverser(HugeGraph graph) { super(graph); } - public long count(Id source, List steps, + public long count(Id source, List steps, boolean containsTraversed, long dedupSize) { E.checkNotNull(source, "source vertex id"); this.checkVertexExist(source, "source vertex"); @@ -63,45 +62,45 @@ public long count(Id source, List steps, } int stepNum = steps.size(); - Step firstStep = steps.get(0); + EdgeStep firstStep = steps.get(0); if (stepNum == 1) { // Just one step, query count and return - long edgesCount = this.edgesCount(source, firstStep.edgeStep); + long edgesCount = this.edgesCount(source, firstStep); this.count.add(edgesCount); return this.count.longValue(); } // Multiple steps, construct first step to iterator - Iterator edges = this.edgesOfVertex(source, firstStep); + Iterator edges = this.edgesOfVertexWithCount(source, firstStep); // Wrap steps to Iterator except last step for (int i = 1; i < stepNum - 1; i++) { - Step currentStep = steps.get(i); + EdgeStep currentStep = steps.get(i); edges = new FlatMapperIterator<>(edges, (edge) -> { Id target = ((HugeEdge) edge).id().otherVertexId(); - return this.edgesOfVertex(target, currentStep); + return this.edgesOfVertexWithCount(target, currentStep); }); } // The last step, just query count - Step lastStep = steps.get(stepNum - 1); + EdgeStep lastStep = steps.get(stepNum - 1); while (edges.hasNext()) { Id target = ((HugeEdge) edges.next()).id().otherVertexId(); if (this.dedup(target)) { continue; } // Count last layer vertices(without dedup size) - long edgesCount = this.edgesCount(target, lastStep.edgeStep); + long edgesCount = this.edgesCount(target, lastStep); this.count.add(edgesCount); } return this.count.longValue(); } - private Iterator edgesOfVertex(Id source, Step step) { + private Iterator edgesOfVertexWithCount(Id source, EdgeStep step) { if (this.dedup(source)) { return 
QueryResults.emptyIterator(); } - Iterator flatten = this.edgesOfVertex(source, step.edgeStep); + Iterator flatten = this.edgesOfVertex(source, step); return new FilterIterator<>(flatten, e -> { if (this.containsTraversed) { // Count intermediate vertices @@ -138,16 +137,4 @@ private boolean reachDedup() { return this.dedupSize != NO_LIMIT && this.dedupSet.size() >= this.dedupSize; } - - public static class Step { - - private final EdgeStep edgeStep; - - public Step(HugeGraph g, Directions direction, List labels, - Map properties, long degree, - long skipDegree) { - this.edgeStep = new EdgeStep(g, direction, labels, properties, - degree, skipDegree); - } - } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CustomizePathsTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CustomizePathsTraverser.java index c5edb3e5e7..4c1f73eb09 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CustomizePathsTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CustomizePathsTraverser.java @@ -32,10 +32,9 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; -import com.baidu.hugegraph.schema.PropertyKey; import com.baidu.hugegraph.structure.HugeEdge; import com.baidu.hugegraph.structure.HugeVertex; -import com.baidu.hugegraph.type.define.Directions; +import com.baidu.hugegraph.traversal.algorithm.steps.WeightedEdgeStep; import com.baidu.hugegraph.util.CollectionUtil; import com.baidu.hugegraph.util.E; import com.google.common.collect.ImmutableList; @@ -48,7 +47,7 @@ public CustomizePathsTraverser(HugeGraph graph) { } public List customizedPaths(Iterator vertices, - List steps, boolean sorted, + List steps, boolean sorted, long capacity, long limit) { E.checkArgument(vertices.hasNext(), "The source vertices can't be empty"); @@ -68,7 +67,7 @@ public List customizedPaths(Iterator vertices, int pathCount = 0; long access 
= 0; MultivaluedMap newVertices = null; - root : for (Step step : steps) { + root : for (WeightedEdgeStep step : steps) { stepNum--; newVertices = newMultivalueMap(); Iterator edges; @@ -76,7 +75,7 @@ public List customizedPaths(Iterator vertices, // Traversal vertices of previous level for (Map.Entry> entry : sources.entrySet()) { List adjacency = new ArrayList<>(); - edges = this.edgesOfVertex(entry.getKey(), step.edgeStep); + edges = this.edgesOfVertex(entry.getKey(), step.step()); while (edges.hasNext()) { HugeEdge edge = (HugeEdge) edges.next(); Id target = edge.id().otherVertexId(); @@ -87,9 +86,9 @@ public List customizedPaths(Iterator vertices, } Node newNode; if (sorted) { - double w = step.weightBy != null ? - edge.value(step.weightBy.name()) : - step.defaultWeight; + double w = step.weightBy() != null ? + edge.value(step.weightBy().name()) : + step.defaultWeight(); newNode = new WeightNode(target, n, w); } else { newNode = new Node(target, n); @@ -100,9 +99,9 @@ public List customizedPaths(Iterator vertices, } } - if (step.sample > 0) { + if (step.sample() > 0) { // Sample current node's adjacent nodes - adjacency = sample(adjacency, step.sample); + adjacency = sample(adjacency, step.sample()); } // Add current node's adjacent nodes @@ -238,35 +237,4 @@ private void calcTotalWeight() { this.totalWeight = sum; } } - - public static class Step { - - private final EdgeStep edgeStep; - private final PropertyKey weightBy; - private final double defaultWeight; - private final long sample; - - public Step(HugeGraph g, Directions direction, List labels, - Map properties, - long degree, long skipDegree, - String weightBy, double defaultWeight, long sample) { - E.checkArgument(sample > 0L || sample == NO_LIMIT, - "The sample must be > 0 or == -1, but got: %s", - sample); - E.checkArgument(degree == NO_LIMIT || degree >= sample, - "Degree must be greater than or equal to sample," + - " but got degree %s and sample %s", - degree, sample); - - this.edgeStep = new 
EdgeStep(g, direction, labels, properties, - degree, skipDegree); - if (weightBy != null) { - this.weightBy = g.propertyKey(weightBy); - } else { - this.weightBy = null; - } - this.defaultWeight = defaultWeight; - this.sample = sample; - } - } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CustomizedCrosspointsTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CustomizedCrosspointsTraverser.java index 23fdc499eb..4345646d55 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CustomizedCrosspointsTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/CustomizedCrosspointsTraverser.java @@ -37,6 +37,7 @@ import com.baidu.hugegraph.backend.id.Id; import com.baidu.hugegraph.structure.HugeEdge; import com.baidu.hugegraph.structure.HugeVertex; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.CollectionUtil; import com.baidu.hugegraph.util.E; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/FusiformSimilarityTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/FusiformSimilarityTraverser.java index 17a534ab0e..320833f1cd 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/FusiformSimilarityTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/FusiformSimilarityTraverser.java @@ -56,7 +56,7 @@ public FusiformSimilarityTraverser(HugeGraph graph) { } public SimilarsMap fusiformSimilarity(Iterator vertices, - Directions direction, EdgeLabel label, + Directions direction, String label, int minNeighbors, double alpha, int minSimilars, int top, String groupProperty, int minGroups, @@ -92,7 +92,7 @@ public SimilarsMap fusiformSimilarity(Iterator vertices, private Set fusiformSimilarityForVertex( HugeVertex vertex, 
Directions direction, - EdgeLabel label, int minNeighbors, double alpha, + String label, int minNeighbors, double alpha, int minSimilars, int top, String groupProperty, int minGroups, long degree, long capacity, boolean withIntermediary) { @@ -102,7 +102,7 @@ private Set fusiformSimilarityForVertex( // Ignore current vertex if its neighbors number is not enough return ImmutableSet.of(); } - Id labelId = label == null ? null : label.id(); + Id labelId = this.getEdgeLabelId(label); // Get similar nodes and counts Iterator edges = this.edgesOfVertex(vertex.id(), direction, labelId, degree); @@ -209,12 +209,17 @@ private static void checkGroupArgs(String groupProperty, int minGroups) { private boolean matchMinNeighborCount(HugeVertex vertex, Directions direction, - EdgeLabel edgeLabel, + String label, int minNeighbors, long degree) { Iterator edges; long neighborCount; - Id labelId = edgeLabel == null ? null : edgeLabel.id(); + EdgeLabel edgeLabel = null; + Id labelId = null; + if (label != null) { + edgeLabel = this.graph().edgeLabel(label); + labelId = edgeLabel.id(); + } if (edgeLabel != null && edgeLabel.frequency() == Frequency.SINGLE) { edges = this.edgesOfVertex(vertex.id(), direction, labelId, minNeighbors); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/HugeTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/HugeTraverser.java index 8f6102f9ae..4ac2e2ab3d 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/HugeTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/HugeTraverser.java @@ -52,6 +52,7 @@ import com.baidu.hugegraph.iterator.MapperIterator; import com.baidu.hugegraph.schema.SchemaLabel; import com.baidu.hugegraph.structure.HugeEdge; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.optimize.TraversalUtil; import com.baidu.hugegraph.type.HugeType; import 
com.baidu.hugegraph.type.define.Directions; @@ -156,7 +157,7 @@ protected Set adjacentVertices(Set vertices, EdgeStep step, continue; } neighbors.add(kNode); - if (--remaining <= 0L) { + if (remaining != NO_LIMIT && --remaining <= 0L) { return neighbors; } } @@ -193,10 +194,10 @@ protected Iterator edgesOfVertex(Id source, Directions dir, } protected Iterator edgesOfVertex(Id source, EdgeStep edgeStep) { - if (edgeStep.properties == null || edgeStep.properties.isEmpty()) { + if (edgeStep.properties() == null || edgeStep.properties().isEmpty()) { Iterator edges = this.edgesOfVertex(source, - edgeStep.direction, - edgeStep.labels, + edgeStep.direction(), + edgeStep.labels(), edgeStep.limit()); return edgeStep.skipSuperNodeIfNeeded(edges); } @@ -204,7 +205,7 @@ protected Iterator edgesOfVertex(Id source, EdgeStep edgeStep) { } protected Iterator edgesOfVertexWithSK(Id source, EdgeStep edgeStep) { - assert edgeStep.properties != null && !edgeStep.properties.isEmpty(); + assert edgeStep.properties() != null && !edgeStep.properties().isEmpty(); return this.edgesOfVertex(source, edgeStep, true); } @@ -212,14 +213,14 @@ private Iterator edgesOfVertex(Id source, EdgeStep edgeStep, boolean mustAllSK) { Id[] edgeLabels = edgeStep.edgeLabels(); Query query = GraphTransaction.constructEdgesQuery(source, - edgeStep.direction, + edgeStep.direction(), edgeLabels); ConditionQuery filter = null; if (mustAllSK) { - this.fillFilterBySortKeys(query, edgeLabels, edgeStep.properties); + this.fillFilterBySortKeys(query, edgeLabels, edgeStep.properties()); } else { filter = (ConditionQuery) query.copy(); - this.fillFilterByProperties(filter, edgeStep.properties); + this.fillFilterByProperties(filter, edgeStep.properties()); } query.capacity(Query.NO_CAPACITY); if (edgeStep.limit() != NO_LIMIT) { @@ -270,19 +271,20 @@ private void fillFilterByProperties(Query query, protected long edgesCount(Id source, EdgeStep edgeStep) { Id[] edgeLabels = edgeStep.edgeLabels(); Query query = 
GraphTransaction.constructEdgesQuery(source, - edgeStep.direction, + edgeStep.direction(), edgeLabels); - this.fillFilterBySortKeys(query, edgeLabels, edgeStep.properties); + this.fillFilterBySortKeys(query, edgeLabels, edgeStep.properties()); query.aggregate(Aggregate.AggregateFunc.COUNT, null); query.capacity(Query.NO_CAPACITY); query.limit(Query.NO_LIMIT); long count = graph().queryNumber(query).longValue(); - if (edgeStep.degree == NO_LIMIT || count < edgeStep.degree) { + if (edgeStep.degree() == NO_LIMIT || count < edgeStep.degree()) { return count; - } else if (edgeStep.skipDegree != 0L && count >= edgeStep.skipDegree) { + } else if (edgeStep.skipDegree() != 0L && + count >= edgeStep.skipDegree()) { return 0L; } else { - return edgeStep.degree; + return edgeStep.degree(); } } @@ -355,9 +357,10 @@ public static void checkCapacity(long capacity, long access, public static void checkSkipDegree(long skipDegree, long degree, long capacity) { - E.checkArgument(skipDegree >= 0L, - "The skipped degree must be >= 0, but got '%s'", - skipDegree); + E.checkArgument(skipDegree >= 0L && + skipDegree <= Query.DEFAULT_CAPACITY , + "The skipped degree must be in [0, %s], but got '%s'", + Query.DEFAULT_CAPACITY, skipDegree); if (capacity != NO_LIMIT) { E.checkArgument(degree != NO_LIMIT && degree < capacity, "The degree must be < capacity"); @@ -515,6 +518,11 @@ public boolean equals(Object object) { return Objects.equals(this.id, other.id) && Objects.equals(this.parent, other.parent); } + + @Override + public String toString() { + return this.id.toString(); + } } public static class KNode extends Node { diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/JaccardSimilarTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/JaccardSimilarTraverser.java index 82e679c250..ba62b24df5 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/JaccardSimilarTraverser.java +++ 
b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/JaccardSimilarTraverser.java @@ -31,6 +31,7 @@ import com.baidu.hugegraph.HugeException; import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.CollectionUtil; import com.baidu.hugegraph.util.E; @@ -75,7 +76,7 @@ public Map jaccardSimilars(Id source, EdgeStep step, Map results; if (3 >= this.concurrentDepth() && - step.direction == Directions.BOTH) { + step.direction() == Directions.BOTH) { results = this.jaccardSimilarsConcurrent(source, step, capacity); } else { results = this.jaccardSimilarsSingle(source, step, capacity); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/KneighborTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/KneighborTraverser.java index 3c68021b5e..1fadd50147 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/KneighborTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/KneighborTraverser.java @@ -24,6 +24,7 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; @@ -72,7 +73,7 @@ public Set customizedKneighbor(Id source, EdgeStep step, checkLimit(limit); boolean single = maxDepth < this.concurrentDepth() || - step.direction != Directions.BOTH; + step.direction() != Directions.BOTH; return this.customizedKneighbor(source, step, maxDepth, limit, single); } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/KoutTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/KoutTraverser.java index fcfddf6bbc..a6fa743dd6 100644 --- 
a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/KoutTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/KoutTraverser.java @@ -24,6 +24,7 @@ import com.baidu.hugegraph.HugeException; import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.CollectionUtil; import com.baidu.hugegraph.util.E; @@ -102,7 +103,7 @@ public Set customizedKout(Id source, EdgeStep step, int maxDepth, Set results; boolean single = maxDepth < this.concurrentDepth() || - step.direction != Directions.BOTH; + step.direction() != Directions.BOTH; results = this.customizedKout(source, step, maxDepth, nearest, capacity, single); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/MultiNodeShortestPathTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/MultiNodeShortestPathTraverser.java index 633b5bd79f..c4978f3c3c 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/MultiNodeShortestPathTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/MultiNodeShortestPathTraverser.java @@ -32,6 +32,7 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; import com.baidu.hugegraph.structure.HugeVertex; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; @@ -61,7 +62,7 @@ public List multiNodeShortestPath(Iterator vertices, }); if (maxDepth >= this.concurrentDepth() && - step.direction == Directions.BOTH || + step.direction() == Directions.BOTH || vertexCount > 10) { return this.multiNodeShortestPathConcurrent(pairs, step, maxDepth, capacity); diff --git 
a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/NeighborRankTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/NeighborRankTraverser.java index 2203064bf4..f9060cb692 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/NeighborRankTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/NeighborRankTraverser.java @@ -34,6 +34,7 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; import com.baidu.hugegraph.structure.HugeEdge; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.OrderLimitMap; @@ -244,7 +245,9 @@ public Step(HugeGraph g, Directions direction, List labels, long degree, long skipDegree, int top, int capacity) { E.checkArgument(top > 0 && top <= MAX_TOP, "The top of each layer can't exceed %s", MAX_TOP); - + E.checkArgument(capacity > 0, + "The capacity of each layer must be > 0, " + + "but got %s", capacity); this.edgeStep = new EdgeStep(g, direction, labels, null, degree, skipDegree); this.top = top; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/OltpTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/OltpTraverser.java index b35c1713d2..b8367b3794 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/OltpTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/OltpTraverser.java @@ -24,7 +24,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; @@ -40,6 +39,7 @@ import com.baidu.hugegraph.config.CoreOptions; import 
com.baidu.hugegraph.iterator.FilterIterator; import com.baidu.hugegraph.structure.HugeEdge; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.util.Consumers; import jersey.repackaged.com.google.common.base.Objects; @@ -48,20 +48,20 @@ public abstract class OltpTraverser extends HugeTraverser implements AutoCloseable { private static final String EXECUTOR_NAME = "oltp"; - private static ExecutorService executor; + private static Consumers.ExecutorPool executors; protected OltpTraverser(HugeGraph graph) { super(graph); - if (executor != null) { + if (executors != null) { return; } synchronized (OltpTraverser.class) { - if (executor != null) { + if (executors != null) { return; } int workers = this.config().get(CoreOptions.OLTP_CONCURRENT_THREADS); if (workers > 0) { - executor = Consumers.newThreadPool(EXECUTOR_NAME, workers); + executors = new Consumers.ExecutorPool(EXECUTOR_NAME, workers); } } } @@ -72,9 +72,11 @@ public void close() { } public static void destroy() { - if (executor != null) { - executor.shutdown(); - executor = null; + synchronized (OltpTraverser.class) { + if (executors != null) { + executors.destroy(); + executors = null; + } } } @@ -126,7 +128,12 @@ protected long traverseIds(Iterator ids, Consumer consumer) { protected long traverse(Iterator iterator, Consumer consumer, String name) { - Consumers consumers = new Consumers<>(executor, consumer, null); + if (!iterator.hasNext()) { + return 0L; + } + + Consumers consumers = new Consumers<>(executors.getExecutor(), + consumer, null); consumers.start(name); long total = 0L; try { @@ -143,8 +150,9 @@ protected long traverse(Iterator iterator, Consumer consumer, try { consumers.await(); } catch (Throwable e) { - Consumers.wrapException(e); + throw Consumers.wrapException(e); } finally { + executors.returnExecutor(consumers.executor()); CloseableIterator.closeIterator(iterator); } } @@ -169,6 +177,8 @@ protected boolean match(Element elem, String key, Object 
value) { public class ConcurrentMultiValuedMap extends ConcurrentHashMap> { + private static final long serialVersionUID = -7249946839643493614L; + public ConcurrentMultiValuedMap() { super(); } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/PathTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/PathTraverser.java index 9551009e5e..df9f017ccb 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/PathTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/PathTraverser.java @@ -30,6 +30,7 @@ import com.baidu.hugegraph.backend.id.Id; import com.baidu.hugegraph.structure.HugeEdge; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.strategy.TraverseStrategy; import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.NO_LIMIT; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/ShortestPathTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/ShortestPathTraverser.java index aaec88379b..17b757c6bf 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/ShortestPathTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/ShortestPathTraverser.java @@ -32,6 +32,7 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; import com.baidu.hugegraph.structure.HugeEdge; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; import com.google.common.collect.ImmutableList; @@ -89,9 +90,10 @@ public Path shortestPath(Id sourceV, Id targetV, Directions dir, public Path shortestPath(Id sourceV, Id targetV, EdgeStep step, int depth, long capacity) { - return this.shortestPath(sourceV, targetV, step.direction, - new 
ArrayList<>(step.labels.values()), - depth, step.degree, step.skipDegree, capacity); + return this.shortestPath(sourceV, targetV, step.direction(), + new ArrayList<>(step.labels().values()), + depth, step.degree(), step.skipDegree(), + capacity); } public PathSet allShortestPaths(Id sourceV, Id targetV, Directions dir, diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/SingleSourceShortestPathTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/SingleSourceShortestPathTraverser.java index 053767fef3..9b89be5b62 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/SingleSourceShortestPathTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/SingleSourceShortestPathTraverser.java @@ -23,6 +23,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -36,6 +37,7 @@ import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.CollectionUtil; import com.baidu.hugegraph.util.E; +import com.baidu.hugegraph.util.InsertionOrderUtil; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -168,7 +170,7 @@ public void forward() { Map sorted = CollectionUtil.sortByValue( this.findingNodes, true); double minWeight = 0; - Set newSources = new HashSet<>(); + Set newSources = InsertionOrderUtil.newSet(); for (Map.Entry entry : sorted.entrySet()) { Id id = entry.getKey(); NodeWithWeight wn = entry.getValue(); @@ -265,7 +267,7 @@ public int compareTo(NodeWithWeight other) { } } - public static class WeightedPaths extends HashMap { + public static class WeightedPaths extends LinkedHashMap { private static final long serialVersionUID = -313873642177730993L; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/SubGraphTraverser.java 
b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/SubGraphTraverser.java index f9d9bf35c3..7e7f02f75b 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/SubGraphTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/SubGraphTraverser.java @@ -155,7 +155,7 @@ public PathSet forward(Directions direction) { // Store rays paths.add(new Path(n.path())); this.pathCount++; - if (reachLimit()) { + if (this.reachLimit()) { return paths; } } @@ -190,7 +190,7 @@ public PathSet forward(Directions direction) { if (!this.rings && bothBack && uniqueEdge) { paths.add(new Path(node.path())); this.pathCount++; - if (reachLimit()) { + if (this.reachLimit()) { return paths; } } @@ -215,7 +215,7 @@ public PathSet forward(Directions direction) { path.add(target); paths.add(new RingPath(null, path)); this.pathCount++; - if (reachLimit()) { + if (this.reachLimit()) { return paths; } } @@ -230,6 +230,10 @@ public PathSet forward(Directions direction) { for (List list : newVertices.values()) { for (Node n : list) { paths.add(new Path(n.path())); + this.pathCount++; + if (this.reachLimit()) { + return paths; + } } } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/TemplatePathsTraverser.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/TemplatePathsTraverser.java index d22c01d32f..3e4bdc5b00 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/TemplatePathsTraverser.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/TemplatePathsTraverser.java @@ -27,20 +27,17 @@ import java.util.Set; import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.slf4j.Logger; import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; import com.baidu.hugegraph.structure.HugeVertex; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; +import 
com.baidu.hugegraph.traversal.algorithm.steps.RepeatEdgeStep; import com.baidu.hugegraph.traversal.algorithm.strategy.TraverseStrategy; -import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; -import com.baidu.hugegraph.util.Log; public class TemplatePathsTraverser extends HugeTraverser { - private static final Logger LOG = Log.logger(TemplatePathsTraverser.class); - public TemplatePathsTraverser(HugeGraph graph) { super(graph); } @@ -72,7 +69,7 @@ public Set templatePaths(Iterator sources, int totalSteps = 0; for (RepeatEdgeStep step : steps) { - totalSteps += step.maxTimes; + totalSteps += step.maxTimes(); } TraverseStrategy strategy = TraverseStrategy.create( totalSteps >= this.concurrentDepth(), @@ -116,7 +113,7 @@ public Traverser(HugeTraverser traverser, TraverseStrategy strategy, this.steps = steps; this.withRing = withRing; for (RepeatEdgeStep step : steps) { - this.totalSteps += step.maxTimes; + this.totalSteps += step.maxTimes(); } this.sourceIndex = 0; @@ -126,6 +123,7 @@ public Traverser(HugeTraverser traverser, TraverseStrategy strategy, this.targetFinishOneStep = false; } + @Override public RepeatEdgeStep nextStep(boolean forward) { return forward ? 
this.forwardStep() : this.backwardStep(); } @@ -255,7 +253,7 @@ public RepeatEdgeStep forwardStep() { RepeatEdgeStep step = this.steps.get(i); if (step.remainTimes() > 0) { currentStep = step; - this.targetIndex = i; + this.sourceIndex = i; break; } } @@ -281,25 +279,4 @@ public boolean lastSuperStep() { this.targetIndex == this.sourceIndex + 1; } } - - public static class RepeatEdgeStep extends EdgeStep { - - private int maxTimes = 1; - - public RepeatEdgeStep(HugeGraph g, Directions direction, - List labels, - Map properties, long degree, - long skipDegree, int maxTimes) { - super(g, direction, labels, properties, degree, skipDegree); - this.maxTimes = maxTimes; - } - - private int remainTimes() { - return this.maxTimes; - } - - private void decreaseTimes() { - this.maxTimes--; - } - } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/EdgeStep.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/steps/EdgeStep.java similarity index 71% rename from hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/EdgeStep.java rename to hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/steps/EdgeStep.java index f27e435b84..ed5ffc66d4 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/EdgeStep.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/steps/EdgeStep.java @@ -17,8 +17,9 @@ * under the License. 
*/ -package com.baidu.hugegraph.traversal.algorithm; +package com.baidu.hugegraph.traversal.algorithm.steps; +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_DEGREE; import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.NO_LIMIT; import java.util.HashMap; @@ -31,9 +32,12 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; import com.baidu.hugegraph.schema.EdgeLabel; +import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; import com.baidu.hugegraph.traversal.optimize.TraversalUtil; import com.baidu.hugegraph.type.define.Directions; import com.baidu.hugegraph.util.E; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; public class EdgeStep { @@ -43,6 +47,28 @@ public class EdgeStep { protected final long degree; protected final long skipDegree; + public EdgeStep(HugeGraph g, Directions direction) { + this(g, direction, ImmutableList.of()); + } + + public EdgeStep(HugeGraph g, List labels) { + this(g, Directions.BOTH, labels); + } + + public EdgeStep(HugeGraph g, Map properties) { + this(g, Directions.BOTH, ImmutableList.of(), properties); + } + + public EdgeStep(HugeGraph g, Directions direction, List labels) { + this(g, direction, labels, ImmutableMap.of()); + } + + public EdgeStep(HugeGraph g, Directions direction, List labels, + Map properties) { + this(g, direction, labels, properties, + Long.valueOf(DEFAULT_DEGREE), 0L); + } + public EdgeStep(HugeGraph g, Directions direction, List labels, Map properties, long degree, long skipDegree) { @@ -74,6 +100,26 @@ public EdgeStep(HugeGraph g, Directions direction, List labels, this.skipDegree = skipDegree; } + public Directions direction() { + return this.direction; + } + + public Map labels() { + return this.labels; + } + + public Map properties() { + return this.properties; + } + + public long degree() { + return this.degree; + } + + public long skipDegree() { + return this.skipDegree; + } + 
public Id[] edgeLabels() { int elsSize = this.labels.size(); Id[] edgeLabels = this.labels.keySet().toArray(new Id[elsSize]); diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/steps/RepeatEdgeStep.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/steps/RepeatEdgeStep.java new file mode 100644 index 0000000000..d264e81056 --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/steps/RepeatEdgeStep.java @@ -0,0 +1,108 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package com.baidu.hugegraph.traversal.algorithm.steps; + +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_DEGREE; + +import java.util.List; +import java.util.Map; + +import com.baidu.hugegraph.HugeGraph; +import com.baidu.hugegraph.type.define.Directions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +public class RepeatEdgeStep extends EdgeStep { + + private int maxTimes = 1; + + public RepeatEdgeStep(HugeGraph g, Directions direction) { + this(g, direction, ImmutableList.of()); + } + + public RepeatEdgeStep(HugeGraph g, Directions direction, int maxTimes) { + this(g, direction); + this.maxTimes = maxTimes; + } + + public RepeatEdgeStep(HugeGraph g, List labels) { + this(g, Directions.BOTH, labels); + } + + public RepeatEdgeStep(HugeGraph g, List labels, int maxTimes) { + this(g, labels); + this.maxTimes = maxTimes; + } + + public RepeatEdgeStep(HugeGraph g, Map properties) { + this(g, Directions.BOTH, ImmutableList.of(), properties); + } + + public RepeatEdgeStep(HugeGraph g, Map properties, + int maxTimes) { + this(g, properties); + this.maxTimes = maxTimes; + } + + public RepeatEdgeStep(HugeGraph g, Directions direction, + List labels) { + this(g, direction, labels, ImmutableMap.of()); + } + + public RepeatEdgeStep(HugeGraph g, Directions direction, + List labels, int maxTimes) { + this(g, direction, labels); + this.maxTimes = maxTimes; + } + + public RepeatEdgeStep(HugeGraph g, Directions direction, + List labels, + Map properties) { + this(g, direction, labels, properties, + Long.valueOf(DEFAULT_DEGREE), 0L, 1); + } + + public RepeatEdgeStep(HugeGraph g, Directions direction, + List labels, + Map properties, int maxTimes) { + this(g, direction, labels, properties); + this.maxTimes = maxTimes; + } + + public RepeatEdgeStep(HugeGraph g, Directions direction, + List labels, + Map properties, long degree, + long skipDegree, int maxTimes) { + super(g, direction, labels, 
properties, degree, skipDegree); + this.maxTimes = maxTimes; + } + + public int remainTimes() { + return this.maxTimes; + } + + public void decreaseTimes() { + this.maxTimes--; + } + + public int maxTimes() { + return this.maxTimes; + } +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/steps/WeightedEdgeStep.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/steps/WeightedEdgeStep.java new file mode 100644 index 0000000000..4928dfe71c --- /dev/null +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/steps/WeightedEdgeStep.java @@ -0,0 +1,104 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package com.baidu.hugegraph.traversal.algorithm.steps; + +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_DEGREE; +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.DEFAULT_SAMPLE; +import static com.baidu.hugegraph.traversal.algorithm.HugeTraverser.NO_LIMIT; + +import java.util.List; +import java.util.Map; + +import com.baidu.hugegraph.HugeGraph; +import com.baidu.hugegraph.schema.PropertyKey; +import com.baidu.hugegraph.type.define.Directions; +import com.baidu.hugegraph.util.E; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +public class WeightedEdgeStep { + + private final EdgeStep edgeStep; + private final PropertyKey weightBy; + private final double defaultWeight; + private final long sample; + + public WeightedEdgeStep(HugeGraph g, Directions direction) { + this(g, direction, ImmutableList.of()); + } + + public WeightedEdgeStep(HugeGraph g, List labels) { + this(g, Directions.BOTH, labels); + } + + public WeightedEdgeStep(HugeGraph g, Map properties) { + this(g, Directions.BOTH, ImmutableList.of(), properties); + } + + public WeightedEdgeStep(HugeGraph g, Directions direction, List labels) { + this(g, direction, labels, ImmutableMap.of()); + } + + public WeightedEdgeStep(HugeGraph g, Directions direction, List labels, + Map properties) { + this(g, direction, labels, properties, + Long.valueOf(DEFAULT_DEGREE), 0L, null, 0.0D, + Long.valueOf(DEFAULT_SAMPLE)); + } + + public WeightedEdgeStep(HugeGraph g, Directions direction, List labels, + Map properties, + long degree, long skipDegree, + String weightBy, double defaultWeight, long sample) { + E.checkArgument(sample > 0L || sample == NO_LIMIT, + "The sample must be > 0 or == -1, but got: %s", + sample); + E.checkArgument(degree == NO_LIMIT || degree >= sample, + "Degree must be greater than or equal to sample," + + " but got degree %s and sample %s", + degree, sample); + + this.edgeStep = new EdgeStep(g, 
direction, labels, properties, + degree, skipDegree); + if (weightBy != null) { + this.weightBy = g.propertyKey(weightBy); + } else { + this.weightBy = null; + } + this.defaultWeight = defaultWeight; + this.sample = sample; + } + + public EdgeStep step() { + return this.edgeStep; + } + + public PropertyKey weightBy() { + return this.weightBy; + } + + public double defaultWeight() { + return this.defaultWeight; + } + + public long sample() { + return this.sample; + } +} diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/ConcurrentTraverseStrategy.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/ConcurrentTraverseStrategy.java index f856e0a47c..69203433a8 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/ConcurrentTraverseStrategy.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/ConcurrentTraverseStrategy.java @@ -27,7 +27,7 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; -import com.baidu.hugegraph.traversal.algorithm.EdgeStep; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.OltpTraverser; public class ConcurrentTraverseStrategy extends OltpTraverser diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/SingleTraverseStrategy.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/SingleTraverseStrategy.java index 822949d185..817d188feb 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/SingleTraverseStrategy.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/SingleTraverseStrategy.java @@ -28,7 +28,7 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; -import com.baidu.hugegraph.traversal.algorithm.EdgeStep; +import 
com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; import com.baidu.hugegraph.traversal.algorithm.OltpTraverser; diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/TraverseStrategy.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/TraverseStrategy.java index cb81ad87f8..9bfc2759ba 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/TraverseStrategy.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/traversal/algorithm/strategy/TraverseStrategy.java @@ -26,7 +26,7 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.id.Id; -import com.baidu.hugegraph.traversal.algorithm.EdgeStep; +import com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep; import com.baidu.hugegraph.traversal.algorithm.HugeTraverser; public interface TraverseStrategy { diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/util/Consumers.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/util/Consumers.java index aedb547fbf..39d43a5627 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/util/Consumers.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/util/Consumers.java @@ -19,12 +19,19 @@ package com.baidu.hugegraph.util; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Queue; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import org.slf4j.Logger; @@ -32,11 +39,12 @@ import com.baidu.hugegraph.HugeException; import 
com.baidu.hugegraph.task.TaskManager.ContextCallable; -public class Consumers { +public final class Consumers { public static final int CPUS = Runtime.getRuntime().availableProcessors(); public static final int THREADS = 4 + CPUS / 4; public static final int QUEUE_WORKER_SIZE = 1000; + public static final long CONSUMER_WAKE_PERIOD = 1; private static final Logger LOG = Log.logger(Consumers.class); @@ -115,7 +123,7 @@ private void run() { private boolean consume() { V elem; try { - elem = this.queue.poll(1, TimeUnit.SECONDS); + elem = this.queue.poll(CONSUMER_WAKE_PERIOD, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { // ignore return true; @@ -176,7 +184,9 @@ public void await() throws Throwable { try { this.latch.await(); } catch (InterruptedException e) { - LOG.warn("Interrupted while waiting for consumers", e); + String error = "Interrupted while waiting for consumers"; + this.exception = new HugeException(error, e); + LOG.warn(error, e); } } @@ -185,6 +195,44 @@ public void await() throws Throwable { } } + public ExecutorService executor() { + return this.executor; + } + + public static void executeOncePerThread(ExecutorService executor, + int totalThreads, + Runnable callback) + throws InterruptedException { + // Ensure callback execute at least once for every thread + final Map threadsTimes = new ConcurrentHashMap<>(); + final List> tasks = new ArrayList<>(); + final Callable task = () -> { + Thread current = Thread.currentThread(); + threadsTimes.putIfAbsent(current, 0); + int times = threadsTimes.get(current); + if (times == 0) { + callback.run(); + // Let other threads run + Thread.yield(); + } else { + assert times < totalThreads; + assert threadsTimes.size() < totalThreads; + E.checkState(tasks.size() == totalThreads, + "Bad tasks size: %s", tasks.size()); + // Let another thread run and wait for it + executor.submit(tasks.get(0)).get(); + } + threadsTimes.put(current, ++times); + return null; + }; + + // NOTE: expect each task thread to 
perform a close operation + for (int i = 0; i < totalThreads; i++) { + tasks.add(task); + } + executor.invokeAll(tasks); + } + public static ExecutorService newThreadPool(String prefix, int workers) { if (workers == 0) { return null; @@ -200,6 +248,10 @@ public static ExecutorService newThreadPool(String prefix, int workers) { } } + public static ExecutorPool newExecutorPool(String prefix, int workers) { + return new ExecutorPool(prefix, workers); + } + public static RuntimeException wrapException(Throwable e) { if (e instanceof RuntimeException) { throw (RuntimeException) e; @@ -208,6 +260,48 @@ public static RuntimeException wrapException(Throwable e) { HugeException.rootCause(e).getMessage(), e); } + public static class ExecutorPool { + + private final static int POOL_CAPACITY = 2 * CPUS; + + private final String threadNamePrefix; + private final int executorWorkers; + private final AtomicInteger count; + + private final Queue executors; + + public ExecutorPool(String prefix, int workers) { + this.threadNamePrefix = prefix; + this.executorWorkers = workers; + this.count = new AtomicInteger(); + this.executors = new ArrayBlockingQueue<>(POOL_CAPACITY); + } + + public synchronized ExecutorService getExecutor() { + ExecutorService executor = this.executors.poll(); + if (executor == null) { + int count = this.count.incrementAndGet(); + String prefix = this.threadNamePrefix + "-" + count; + executor = newThreadPool(prefix, this.executorWorkers); + } + return executor; + } + + public synchronized void returnExecutor(ExecutorService executor) { + E.checkNotNull(executor, "executor"); + if (!this.executors.offer(executor)) { + executor.shutdown(); + } + } + + public synchronized void destroy() { + for (ExecutorService executor : this.executors) { + executor.shutdown(); + } + this.executors.clear(); + } + } + public static class StopExecution extends HugeException { private static final long serialVersionUID = -371829356182454517L; diff --git 
a/hugegraph-core/src/main/java/com/baidu/hugegraph/util/LZ4Util.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/util/LZ4Util.java index 15d1d09133..fac78c22f6 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/util/LZ4Util.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/util/LZ4Util.java @@ -54,6 +54,10 @@ public static BytesBuffer compress(byte[] bytes, int blockSize, } catch (IOException e) { throw new BackendException("Failed to compress", e); } + /* + * If need to perform reading outside the method, + * remember to call forReadWritten() + */ return buf; } @@ -82,6 +86,10 @@ public static BytesBuffer decompress(byte[] bytes, int blockSize, } catch (IOException e) { throw new BackendException("Failed to decompress", e); } + /* + * If need to perform reading outside the method, + * remember to call forReadWritten() + */ return buf; } } diff --git a/hugegraph-core/src/main/java/com/baidu/hugegraph/version/CoreVersion.java b/hugegraph-core/src/main/java/com/baidu/hugegraph/version/CoreVersion.java index 336e057888..e2812029f1 100644 --- a/hugegraph-core/src/main/java/com/baidu/hugegraph/version/CoreVersion.java +++ b/hugegraph-core/src/main/java/com/baidu/hugegraph/version/CoreVersion.java @@ -39,7 +39,7 @@ public class CoreVersion { public static void check() { // Check version of hugegraph-common - VersionUtil.check(CommonVersion.VERSION, "1.7.0", "1.8", + VersionUtil.check(CommonVersion.VERSION, "1.8.0", "1.9", CommonVersion.NAME); } } diff --git a/hugegraph-core/src/main/resources/raft.proto b/hugegraph-core/src/main/resources/raft.proto index bde9dc3bfc..baee0b75da 100644 --- a/hugegraph-core/src/main/resources/raft.proto +++ b/hugegraph-core/src/main/resources/raft.proto @@ -1,15 +1,15 @@ syntax="proto2"; -package com.baidu.hugegraph.backend.store.raft; +package com.baidu.hugegraph.backend.store.raft.rpc; -option java_package="com.baidu.hugegraph.backend.store.raft"; +option 
java_package="com.baidu.hugegraph.backend.store.raft.rpc"; option java_outer_classname = "RaftRequests"; enum StoreType { SCHEMA = 0; GRAPH = 1; SYSTEM = 2; - SIZE = 3; + ALL = 3; } enum StoreAction { @@ -17,6 +17,7 @@ enum StoreAction { INIT = 1; CLEAR = 2; TRUNCATE = 3; + SNAPSHOT = 4; BEGIN_TX = 10; COMMIT_TX = 11; @@ -38,3 +39,24 @@ message StoreCommandResponse { required bool status = 1; optional string message = 2; } + +message CommonResponse { + required bool status = 1; + optional string message = 2; +} + +message ListPeersRequest { +} + +message ListPeersResponse { + required CommonResponse common = 1; + repeated string endpoints = 2; +} + +message SetLeaderRequest { + required string endpoint = 1; +} + +message SetLeaderResponse { + required CommonResponse common = 1; +} diff --git a/hugegraph-dist/src/assembly/static/bin/raft-tools.sh b/hugegraph-dist/src/assembly/static/bin/raft-tools.sh new file mode 100755 index 0000000000..aa5faa2e59 --- /dev/null +++ b/hugegraph-dist/src/assembly/static/bin/raft-tools.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +export LANG=zh_CN.UTF-8 +set -e + +HOME_PATH=`dirname $0` +HOME_PATH=`cd ${HOME_PATH}/.. && pwd` +cd ${HOME_PATH} + +BIN_PATH=${HOME_PATH}/bin +CONF_PATH=${HOME_PATH}/conf +LIB_PATH=${HOME_PATH}/lib +LOG_PATH=${HOME_PATH}/logs + +. 
${BIN_PATH}/util.sh + +function print_usage() { + echo "usage: raft-tools.sh [options]" + echo "options: " + echo " -l,--list-peers \${graph} \${group} list all peers' endpoints for graph, can be used on leader or follower node" + echo " -g,--get-leader \${graph} \${group} get the leader endpoint for graph, can be used on leader or follower node" + echo " -s,--set-leader \${graph} \${group} \${endpoint} set the leader endpoint for graph, can be used on leader or follower node" + echo " -t,--transfer-leader \${graph} \${group} \${endpoint} transfer leader to specified endpoint for graph, can be used on leader node" + echo " -a,--add-peer \${graph} \${group} \${endpoint} add peer for graph, can be used on leader node" + echo " -r,--remove-peer \${graph} \${group} \${endpoint} remove peer for graph, can be used on leader node" + echo " -h,--help display help information" +} + +GRAPH="hugegraph" +ENDPOINT="" + +if [[ $# -lt 2 ]]; then + print_usage + exit 0 +fi + +function list_peers() { + local graph=$1 + local rest_server_url=`read_property ${CONF_PATH}/rest-server.properties restserver.url` + local url=${rest_server_url}/graphs/${graph}/raft/list-peers + + curl ${url} +} + +function get_leader() { + local graph=$1 + local rest_server_url=`read_property ${CONF_PATH}/rest-server.properties restserver.url` + local url=${rest_server_url}/graphs/${graph}/raft/get-leader + + curl ${url} +} + +function set_leader() { + local graph=$1 + local endpoint=$2 + local rest_server_url=`read_property ${CONF_PATH}/rest-server.properties restserver.url` + local url=${rest_server_url}/graphs/${graph}/raft/set-leader?endpoint=${endpoint} + + curl -X POST ${url} +} + +function transfer_leader() { + local graph=$1 + local endpoint=$2 + local rest_server_url=`read_property ${CONF_PATH}/rest-server.properties restserver.url` + local url=${rest_server_url}/graphs/${graph}/raft/transfer-leader?endpoint=${endpoint} + + curl -X POST ${url} +} + +function add_peer() { + local graph=$1 + local 
endpoint=$2 + local rest_server_url=`read_property ${CONF_PATH}/rest-server.properties restserver.url` + local url=${rest_server_url}/graphs/${graph}/raft/add-peer?endpoint=${endpoint} + + curl -X POST ${url} +} + +function remove_peer() { + local graph=$1 + local endpoint=$2 + local rest_server_url=`read_property ${CONF_PATH}/rest-server.properties restserver.url` + local url=${rest_server_url}/graphs/${graph}/raft/remove-peer?endpoint=${endpoint} + + curl -X POST ${url} +} + +while [[ $# -gt 0 ]]; do + case $1 in + # help + --help|-h) + print_usage + shift + ;; + # list-peers + --list-peers|-l) + list_peers $2 + shift 2 + ;; + # get-leader + --get-leader|-g) + get_leader $2 + shift 2 + ;; + # set-leader + --set-leader|-s) + set_leader $2 $3 + shift 3 + ;; + # transfer-leader + --transfer-leader|-t) + transfer_leader $2 $3 + shift 3 + ;; + # add-peer + --add-peer|-a) + add_peer $2 $3 + shift 3 + ;; + # remove-peer + --remove-peer|-r) + remove_peer $2 $3 + shift 3 + ;; + *) + print_usage + exit 0 + ;; + esac +done diff --git a/hugegraph-dist/src/assembly/static/conf/gremlin-server.yaml b/hugegraph-dist/src/assembly/static/conf/gremlin-server.yaml index 0068a04569..8787491fe9 100644 --- a/hugegraph-dist/src/assembly/static/conf/gremlin-server.yaml +++ b/hugegraph-dist/src/assembly/static/conf/gremlin-server.yaml @@ -20,15 +20,27 @@ scriptEngines: { com.baidu.hugegraph.backend.id.IdGenerator, com.baidu.hugegraph.type.define.Directions, com.baidu.hugegraph.type.define.NodeRole, - com.baidu.hugegraph.traversal.algorithm.CustomizePathsTraverser, + com.baidu.hugegraph.traversal.algorithm.CollectionPathsTraverser, + com.baidu.hugegraph.traversal.algorithm.CountTraverser, com.baidu.hugegraph.traversal.algorithm.CustomizedCrosspointsTraverser, + com.baidu.hugegraph.traversal.algorithm.CustomizePathsTraverser, com.baidu.hugegraph.traversal.algorithm.FusiformSimilarityTraverser, com.baidu.hugegraph.traversal.algorithm.HugeTraverser, + 
com.baidu.hugegraph.traversal.algorithm.JaccardSimilarTraverser, + com.baidu.hugegraph.traversal.algorithm.KneighborTraverser, + com.baidu.hugegraph.traversal.algorithm.KoutTraverser, + com.baidu.hugegraph.traversal.algorithm.MultiNodeShortestPathTraverser, com.baidu.hugegraph.traversal.algorithm.NeighborRankTraverser, com.baidu.hugegraph.traversal.algorithm.PathsTraverser, com.baidu.hugegraph.traversal.algorithm.PersonalRankTraverser, + com.baidu.hugegraph.traversal.algorithm.SameNeighborTraverser, com.baidu.hugegraph.traversal.algorithm.ShortestPathTraverser, + com.baidu.hugegraph.traversal.algorithm.SingleSourceShortestPathTraverser, com.baidu.hugegraph.traversal.algorithm.SubGraphTraverser, + com.baidu.hugegraph.traversal.algorithm.TemplatePathsTraverser, + com.baidu.hugegraph.traversal.algorithm.steps.EdgeStep, + com.baidu.hugegraph.traversal.algorithm.steps.RepeatEdgeStep, + com.baidu.hugegraph.traversal.algorithm.steps.WeightedEdgeStep, com.baidu.hugegraph.traversal.optimize.Text, com.baidu.hugegraph.traversal.optimize.TraversalUtil, com.baidu.hugegraph.util.DateUtil @@ -68,7 +80,7 @@ serializers: } metrics: { consoleReporter: {enabled: false, interval: 180000}, - csvReporter: {enabled: true, interval: 180000, fileName: /tmp/gremlin-server-metrics.csv}, + csvReporter: {enabled: false, interval: 180000, fileName: ./metrics/gremlin-server-metrics.csv}, jmxReporter: {enabled: false}, slf4jReporter: {enabled: false, interval: 180000}, gangliaReporter: {enabled: false, interval: 180000, addressingMode: MULTICAST}, diff --git a/hugegraph-dist/src/assembly/static/conf/hugegraph-server.keystore b/hugegraph-dist/src/assembly/static/conf/hugegraph-server.keystore new file mode 100644 index 0000000000..e60c6b4c4c Binary files /dev/null and b/hugegraph-dist/src/assembly/static/conf/hugegraph-server.keystore differ diff --git a/hugegraph-hbase/src/main/java/com/baidu/hugegraph/backend/store/hbase/HbaseOptions.java 
b/hugegraph-hbase/src/main/java/com/baidu/hugegraph/backend/store/hbase/HbaseOptions.java index 3a434c663e..ae74610fe0 100644 --- a/hugegraph-hbase/src/main/java/com/baidu/hugegraph/backend/store/hbase/HbaseOptions.java +++ b/hugegraph-hbase/src/main/java/com/baidu/hugegraph/backend/store/hbase/HbaseOptions.java @@ -97,4 +97,36 @@ public static synchronized HbaseOptions instance() { positiveInt(), 12 * 60 * 60L ); + + public static final ConfigOption HBASE_KERBEROS_ENABLE = + new ConfigOption<>( + "hbase.kerberos_enable", + "Is Kerberos authentication enabled for HBase.", + disallowEmpty(), + false + ); + + public static final ConfigOption HBASE_KRB5_CONF = + new ConfigOption<>( + "hbase.krb5_conf", + "Kerberos configuration file, including KDC IP, default realm, etc.", + null, + "/etc/krb5.conf" + ); + + public static final ConfigOption HBASE_KERBEROS_PRINCIPAL = + new ConfigOption<>( + "hbase.kerberos_principal", + "The HBase's principal for kerberos authentication.", + null, + "" + ); + + public static final ConfigOption HBASE_KERBEROS_KEYTAB = + new ConfigOption<>( + "hbase.kerberos_keytab", + "The HBase's key tab file for kerberos authentication.", + null, + "" + ); } diff --git a/hugegraph-hbase/src/main/java/com/baidu/hugegraph/backend/store/hbase/HbaseSessions.java b/hugegraph-hbase/src/main/java/com/baidu/hugegraph/backend/store/hbase/HbaseSessions.java index 74a6b7e20d..336a6640c4 100644 --- a/hugegraph-hbase/src/main/java/com/baidu/hugegraph/backend/store/hbase/HbaseSessions.java +++ b/hugegraph-hbase/src/main/java/com/baidu/hugegraph/backend/store/hbase/HbaseSessions.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.filter.PageFilter; import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.util.VersionInfo; +import org.apache.hadoop.security.UserGroupInformation; import com.baidu.hugegraph.backend.BackendException; import com.baidu.hugegraph.backend.store.BackendEntry.BackendColumn; @@ -121,7 +122,7 @@ public synchronized 
void open() throws IOException { String hosts = config.get(HbaseOptions.HBASE_HOSTS); int port = config.get(HbaseOptions.HBASE_PORT); String znodeParent = config.get(HbaseOptions.HBASE_ZNODE_PARENT); - + boolean isEnableKerberos = config.get(HbaseOptions.HBASE_KERBEROS_ENABLE); Configuration hConfig = HBaseConfiguration.create(); hConfig.set(HConstants.ZOOKEEPER_QUORUM, hosts); hConfig.set(HConstants.ZOOKEEPER_CLIENT_PORT, String.valueOf(port)); @@ -134,6 +135,18 @@ public synchronized void open() throws IOException { hConfig.setInt("hbase.hconnection.threads.max", config.get(HbaseOptions.HBASE_THREADS_MAX)); + if(isEnableKerberos) { + String krb5Conf = config.get(HbaseOptions.HBASE_KRB5_CONF); + System.setProperty("java.security.krb5.conf", krb5Conf); + String principal = config.get(HbaseOptions.HBASE_KERBEROS_PRINCIPAL); + String keyTab = config.get(HbaseOptions.HBASE_KERBEROS_KEYTAB); + hConfig.set("hadoop.security.authentication", "kerberos"); + hConfig.set("hbase.security.authentication", "kerberos"); + + // login/authenticate using keytab + UserGroupInformation.setConfiguration(hConfig); + UserGroupInformation.loginUserFromKeytab(principal, keyTab); + } this.hbase = ConnectionFactory.createConnection(hConfig); } diff --git a/hugegraph-palo/src/main/java/com/baidu/hugegraph/backend/store/palo/PaloSessions.java b/hugegraph-palo/src/main/java/com/baidu/hugegraph/backend/store/palo/PaloSessions.java index 7aa8b7e71d..d349bbb510 100644 --- a/hugegraph-palo/src/main/java/com/baidu/hugegraph/backend/store/palo/PaloSessions.java +++ b/hugegraph-palo/src/main/java/com/baidu/hugegraph/backend/store/palo/PaloSessions.java @@ -107,10 +107,11 @@ protected final Session newSession() { } @Override - public void close() { + public boolean close() { this.loadTask.join(); this.timer.cancel(); super.close(); + return true; } public final class Session extends MysqlSessions.Session { diff --git 
a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBOptions.java b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBOptions.java index 7702072617..ae1844d6fd 100644 --- a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBOptions.java +++ b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBOptions.java @@ -280,6 +280,20 @@ public static synchronized RocksDBOptions instance() { 0 ); + public static final ConfigOption DYNAMIC_LEVEL_BYTES = + new ConfigOption<>( + "rocksdb.level_compaction_dynamic_level_bytes", + "Whether to enable level_compaction_dynamic_level_bytes, " + + "if it's enabled we give max_bytes_for_level_multiplier a " + + "priority against max_bytes_for_level_base, the bytes of " + + "base level is dynamic for a more predictable LSM tree, " + + "it is useful to limit worse case space amplification. " + + "Turning this feature on/off for an existing DB can cause " + + "unexpected LSM tree structure so it's not recommended.", + disallowEmpty(), + false + ); + public static final ConfigOption MAX_LEVEL1_BYTES = new ConfigOption<>( "rocksdb.max_bytes_for_level_base", diff --git a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBSessions.java b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBSessions.java index a2408efb07..6a7245cbfe 100644 --- a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBSessions.java +++ b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBSessions.java @@ -47,6 +47,12 @@ public RocksDBSessions(HugeConfig config, String database, String store) { public abstract RocksDBSessions copy(HugeConfig config, String database, String store); + public abstract void createSnapshot(String parentPath); + + public abstract void reload() throws RocksDBException; + + public abstract void 
forceCloseRocksDB(); + @Override public abstract Session session(); @@ -87,8 +93,6 @@ public abstract BackendColumnIterator scan(String table, byte[] keyTo, int scanType); - public abstract void createSnapshot(String parentPath); - public BackendColumnIterator scan(String table, byte[] keyFrom, byte[] keyTo) { diff --git a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBStdSessions.java b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBStdSessions.java index a1e2ada2b4..f9201900f9 100644 --- a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBStdSessions.java +++ b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBStdSessions.java @@ -67,17 +67,18 @@ import com.baidu.hugegraph.backend.store.BackendEntryIterator; import com.baidu.hugegraph.config.HugeConfig; import com.baidu.hugegraph.util.Bytes; -import com.baidu.hugegraph.util.GZipUtil; import com.baidu.hugegraph.util.E; +import com.baidu.hugegraph.util.GZipUtil; import com.baidu.hugegraph.util.StringEncoding; import com.google.common.collect.ImmutableList; public class RocksDBStdSessions extends RocksDBSessions { + private final HugeConfig config; private final String dataPath; private final String walPath; - private final RocksDB rocksdb; + private volatile RocksDB rocksdb; private final SstFileManager sstFileManager; private final Map cfs; @@ -87,6 +88,7 @@ public RocksDBStdSessions(HugeConfig config, String database, String store, String dataPath, String walPath) throws RocksDBException { super(config, database, store); + this.config = config; this.dataPath = dataPath; this.walPath = walPath; // Init options @@ -112,6 +114,7 @@ public RocksDBStdSessions(HugeConfig config, String database, String store, String dataPath, String walPath, List cfNames) throws RocksDBException { super(config, database, store); + this.config = config; this.dataPath = dataPath; this.walPath = walPath; // Old 
CFs should always be opened @@ -156,6 +159,7 @@ public RocksDBStdSessions(HugeConfig config, String database, String store, private RocksDBStdSessions(HugeConfig config, String database, String store, RocksDBStdSessions origin) { super(config, database, store); + this.config = config; this.dataPath = origin.dataPath; this.walPath = origin.walPath; this.rocksdb = origin.rocksdb; @@ -247,6 +251,44 @@ public synchronized void dropTable(String... tables) } } + @Override + public void reload() throws RocksDBException { + if (this.rocksdb.isOwningHandle()) { + this.rocksdb.close(); + } + this.cfs.values().forEach(CFHandle::destroy); + // Init CFs options + Set mergedCFs = this.mergeOldCFs(this.dataPath, new ArrayList<>( + this.cfs.keySet())); + List cfNames = ImmutableList.copyOf(mergedCFs); + + List cfds = new ArrayList<>(cfNames.size()); + for (String cf : cfNames) { + ColumnFamilyDescriptor cfd = new ColumnFamilyDescriptor(encode(cf)); + ColumnFamilyOptions options = cfd.getOptions(); + RocksDBStdSessions.initOptions(this.config, null, null, + options, options); + cfds.add(cfd); + } + List cfhs = new ArrayList<>(); + + // Init DB options + DBOptions options = new DBOptions(); + RocksDBStdSessions.initOptions(this.config, options, options, + null, null); + options.setWalDir(this.walPath); + options.setSstFileManager(this.sstFileManager); + this.rocksdb = RocksDB.open(options, this.dataPath, cfds, cfhs); + for (int i = 0; i < cfNames.size(); i++) { + this.cfs.put(cfNames.get(i), new CFHandle(cfhs.get(i))); + } + } + + @Override + public void forceCloseRocksDB() { + this.rocksdb().close(); + } + @Override public boolean existsTable(String table) { return this.cfs.containsKey(table); @@ -277,6 +319,30 @@ public RocksDBSessions copy(HugeConfig config, return new RocksDBStdSessions(config, database, store, this); } + @Override + public void createSnapshot(String parentPath) { + String md5 = GZipUtil.md5(this.dataPath); + String snapshotPath = Paths.get(parentPath, 
md5).toString(); + // https://github.com/facebook/rocksdb/wiki/Checkpoints + try (Checkpoint checkpoint = Checkpoint.create(this.rocksdb)) { + String tempPath = snapshotPath + "_temp"; + File tempFile = new File(tempPath); + FileUtils.deleteDirectory(tempFile); + + FileUtils.forceMkdir(tempFile.getParentFile()); + checkpoint.createCheckpoint(tempPath); + File snapshotFile = new File(snapshotPath); + FileUtils.deleteDirectory(snapshotFile); + if (!tempFile.renameTo(snapshotFile)) { + throw new IOException(String.format("Failed to rename %s to %s", + tempFile, snapshotFile)); + } + } catch (Exception e) { + throw new BackendException("Failed to write snapshot at path %s", + e, snapshotPath); + } + } + @Override public final Session session() { return (Session) super.getOrNewSession(); @@ -464,6 +530,9 @@ public static void initOptions(HugeConfig conf, cf.setMaxWriteBufferNumberToMaintain( conf.get(RocksDBOptions.MAX_MEMTABLES_TO_MAINTAIN)); + cf.setLevelCompactionDynamicLevelBytes( + conf.get(RocksDBOptions.DYNAMIC_LEVEL_BYTES)); + // https://github.com/facebook/rocksdb/wiki/Block-Cache BlockBasedTableConfig tableConfig = new BlockBasedTableConfig(); long cacheCapacity = conf.get(RocksDBOptions.BLOCK_CACHE_CAPACITY); @@ -637,6 +706,11 @@ public boolean closed() { return !this.opened || !RocksDBStdSessions.this.opened(); } + @Override + public void reset() { + this.batch = new WriteBatch(); + } + /** * Any change in the session */ @@ -857,32 +931,6 @@ public BackendColumnIterator scan(String table, byte[] keyFrom, keyTo, scanType); } } - - @Override - public void createSnapshot(String parentPath) { - String md5 = GZipUtil.md5(this.dataPath()); - String snapshotPath = Paths.get(parentPath, md5).toString(); - // https://github.com/facebook/rocksdb/wiki/Checkpoints - try (Checkpoint checkpoint = Checkpoint.create(rocksdb)) { - String tempPath = snapshotPath + "_temp"; - File tempFile = new File(tempPath); - FileUtils.deleteDirectory(tempFile); - - 
FileUtils.forceMkdir(tempFile.getParentFile()); - checkpoint.createCheckpoint(tempPath); - File snapshotFile = new File(snapshotPath); - FileUtils.deleteDirectory(snapshotFile); - if (!tempFile.renameTo(snapshotFile)) { - throw new IOException(String.format( - "Failed to rename %s to %s", - tempFile, snapshotFile)); - } - } catch (Exception e) { - throw new BackendException( - "Failed to write snapshot at path %s", - e, snapshotPath); - } - } } /** diff --git a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBStore.java b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBStore.java index d10c45669f..635e33024a 100644 --- a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBStore.java +++ b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdb/RocksDBStore.java @@ -24,6 +24,7 @@ import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -36,6 +37,9 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; @@ -51,12 +55,14 @@ import com.baidu.hugegraph.backend.store.BackendEntry; import com.baidu.hugegraph.backend.store.BackendFeatures; import com.baidu.hugegraph.backend.store.BackendMutation; +import com.baidu.hugegraph.backend.store.BackendSessionPool; import com.baidu.hugegraph.backend.store.BackendStoreProvider; import com.baidu.hugegraph.backend.store.BackendTable; import com.baidu.hugegraph.backend.store.rocksdb.RocksDBSessions.Session; import com.baidu.hugegraph.config.HugeConfig; import 
com.baidu.hugegraph.exception.ConnectionException; import com.baidu.hugegraph.type.HugeType; +import com.baidu.hugegraph.util.Consumers; import com.baidu.hugegraph.util.E; import com.baidu.hugegraph.util.ExecutorUtil; import com.baidu.hugegraph.util.GZipUtil; @@ -78,6 +84,9 @@ public abstract class RocksDBStore extends AbstractBackendStore { private RocksDBSessions sessions; private final Map tableDiskMapping; + // DataPath:RocksDB mapping + private final ConcurrentMap dbs; + private final ReadWriteLock storeLock; private static final String DB_OPEN = "db-open-%s"; private static final long OPEN_TIMEOUT = 600L; @@ -88,13 +97,6 @@ public abstract class RocksDBStore extends AbstractBackendStore { */ private static final int OPEN_POOL_THREADS = 8; - // DataPath:RocksDB mapping - protected static final ConcurrentMap dbs; - - static { - dbs = new ConcurrentHashMap<>(); - } - public RocksDBStore(final BackendStoreProvider provider, final String database, final String store) { this.tables = new HashMap<>(); @@ -104,6 +106,8 @@ public RocksDBStore(final BackendStoreProvider provider, this.store = store; this.sessions = null; this.tableDiskMapping = new HashMap<>(); + this.dbs = new ConcurrentHashMap<>(); + this.storeLock = new ReentrantReadWriteLock(); this.registerMetaHandlers(); } @@ -166,7 +170,7 @@ public synchronized void open(HugeConfig config) { if (this.sessions != null && !this.sessions.closed()) { LOG.debug("Store {} has been opened before", this.store); - this.sessions.useSession(); + this.useSessions(); return; } @@ -199,8 +203,8 @@ public synchronized void open(HugeConfig config) { waitOpenFinish(futures, openPool); } - private static void waitOpenFinish(List> futures, - ExecutorService openPool) { + private void waitOpenFinish(List> futures, + ExecutorService openPool) { for (Future future : futures) { try { future.get(); @@ -211,6 +215,22 @@ private static void waitOpenFinish(List> futures, if (openPool.isShutdown()) { return; } + + /* + * Transfer the 
session holder from db-open thread to main thread, + * otherwise once the db-open thread pool is closed, we can no longer + * close the session created by it, which will cause the rocksdb + * instance fail to close + */ + this.useSessions(); + try { + Consumers.executeOncePerThread(openPool, OPEN_POOL_THREADS, + this::closeSessions); + } catch (InterruptedException e) { + throw new BackendException("Failed to close session opened by " + + "open-pool"); + } + boolean terminated = false; openPool.shutdown(); try { @@ -241,10 +261,17 @@ protected RocksDBSessions open(HugeConfig config, String dataPath, sessions = this.openSessionPool(config, dataPath, walPath, tableNames); } catch (RocksDBException e) { - RocksDBSessions origin = dbs.get(dataPath); + RocksDBSessions origin = this.dbs.get(dataPath); if (origin != null) { if (e.getMessage().contains("No locks available")) { - // Open twice, but we should support keyspace + /* + * Open twice, copy a RocksDBSessions reference, since from + * v0.11.2 release we don't support multi graphs share + * rocksdb instance (before v0.11.2 graphs with different + * CF-prefix share one rocksdb instance and data path), + * so each graph has its independent data paths, but multi + * CFs may share same optimized disk(or optimized disk path). 
+ */ sessions = origin.copy(config, this.database, this.store); } } @@ -286,7 +313,7 @@ protected RocksDBSessions open(HugeConfig config, String dataPath, if (sessions != null) { // May override the original session pool - dbs.put(dataPath, sessions); + this.dbs.put(dataPath, sessions); sessions.session(); LOG.debug("Store opened: {}", dataPath); } @@ -333,7 +360,7 @@ public void close() { LOG.debug("Store close: {}", this.store); this.checkOpened(); - this.sessions.close(); + this.closeSessions(); } @Override @@ -344,18 +371,24 @@ public boolean opened() { @Override public void mutate(BackendMutation mutation) { - this.checkOpened(); + Lock readLock = this.storeLock.readLock(); + readLock.lock(); + try { + this.checkOpened(); - if (LOG.isDebugEnabled()) { - LOG.debug("Store {} mutation: {}", this.store, mutation); - } + if (LOG.isDebugEnabled()) { + LOG.debug("Store {} mutation: {}", this.store, mutation); + } - for (HugeType type : mutation.types()) { - Session session = this.session(type); - for (Iterator it = mutation.mutation(type); - it.hasNext();) { - this.mutate(session, it.next()); + for (HugeType type : mutation.types()) { + Session session = this.session(type); + for (Iterator it = mutation.mutation(type); + it.hasNext();) { + this.mutate(session, it.next()); + } } + } finally { + readLock.unlock(); } } @@ -384,37 +417,55 @@ private void mutate(Session session, BackendAction item) { @Override public Iterator query(Query query) { - this.checkOpened(); + Lock readLock = this.storeLock.readLock(); + readLock.lock(); + try { + this.checkOpened(); - HugeType tableType = RocksDBTable.tableType(query); - RocksDBTable table = this.table(tableType); - return table.query(this.session(tableType), query); + HugeType tableType = RocksDBTable.tableType(query); + RocksDBTable table = this.table(tableType); + return table.query(this.session(tableType), query); + } finally { + readLock.unlock(); + } } @Override public Number queryNumber(Query query) { - 
this.checkOpened(); + Lock readLock = this.storeLock.readLock(); + readLock.lock(); + try { + this.checkOpened(); - HugeType tableType = RocksDBTable.tableType(query); - RocksDBTable table = this.table(tableType); - return table.queryNumber(this.session(tableType), query); + HugeType tableType = RocksDBTable.tableType(query); + RocksDBTable table = this.table(tableType); + return table.queryNumber(this.session(tableType), query); + } finally { + readLock.unlock(); + } } @Override public synchronized void init() { - this.checkDbOpened(); + Lock writeLock = this.storeLock.writeLock(); + writeLock.lock(); + try { + this.checkDbOpened(); - // Create tables with main disk - this.createTable(this.sessions, - this.tableNames().toArray(new String[0])); + // Create tables with main disk + this.createTable(this.sessions, + this.tableNames().toArray(new String[0])); - // Create table with optimized disk - Map tableDBMap = this.tableDBMapping(); - for (Map.Entry e : tableDBMap.entrySet()) { - this.createTable(e.getValue(), e.getKey()); - } + // Create table with optimized disk + Map tableDBMap = this.tableDBMapping(); + for (Map.Entry e : tableDBMap.entrySet()) { + this.createTable(e.getValue(), e.getKey()); + } - LOG.debug("Store initialized: {}", this.store); + LOG.debug("Store initialized: {}", this.store); + } finally { + writeLock.unlock(); + } } private void createTable(RocksDBSessions db, String... tables) { @@ -428,18 +479,25 @@ private void createTable(RocksDBSessions db, String... 
tables) { @Override public synchronized void clear(boolean clearSpace) { - this.checkDbOpened(); + Lock writeLock = this.storeLock.writeLock(); + writeLock.lock(); + try { + this.checkDbOpened(); - // Drop tables with main disk - this.dropTable(this.sessions, this.tableNames().toArray(new String[0])); + // Drop tables with main disk + this.dropTable(this.sessions, + this.tableNames().toArray(new String[0])); - // Drop tables with optimized disk - Map tableDBMap = this.tableDBMapping(); - for (Map.Entry e : tableDBMap.entrySet()) { - this.dropTable(e.getValue(), e.getKey()); - } + // Drop tables with optimized disk + Map tableDBMap = this.tableDBMapping(); + for (Map.Entry e : tableDBMap.entrySet()) { + this.dropTable(e.getValue(), e.getKey()); + } - LOG.debug("Store cleared: {}", this.store); + LOG.debug("Store cleared: {}", this.store); + } finally { + writeLock.unlock(); + } } private void dropTable(RocksDBSessions db, String... tables) { @@ -473,41 +531,66 @@ public boolean initialized() { @Override public synchronized void truncate() { - this.checkOpened(); - - this.clear(false); - this.init(); + Lock writeLock = this.storeLock.writeLock(); + writeLock.lock(); + try { + this.checkOpened(); - LOG.debug("Store truncated: {}", this.store); + this.clear(false); + this.init(); + // clear write batch + dbs.values().forEach(BackendSessionPool::forceResetSessions); + LOG.debug("Store truncated: {}", this.store); + } finally { + writeLock.unlock(); + } } @Override public void beginTx() { - this.checkOpened(); + Lock readLock = this.storeLock.readLock(); + readLock.lock(); + try { + this.checkOpened(); - for (Session session : this.session()) { - assert !session.hasChanges(); + for (Session session : this.session()) { + assert !session.hasChanges(); + } + } finally { + readLock.unlock(); } } @Override public void commitTx() { - this.checkOpened(); - // Unable to guarantee atomicity when committing multi sessions - for (Session session : this.session()) { - Object count = 
session.commit(); - if (LOG.isDebugEnabled()) { - LOG.debug("Store {} committed {} items", this.store, count); + Lock readLock = this.storeLock.readLock(); + readLock.lock(); + try { + this.checkOpened(); + // Unable to guarantee atomicity when committing multi sessions + for (Session session : this.session()) { + Object count = session.commit(); + if (LOG.isDebugEnabled()) { + LOG.debug("Store {} committed {} items", this.store, count); + } } + } finally { + readLock.unlock(); } } @Override public void rollbackTx() { - this.checkOpened(); + Lock readLock = this.storeLock.readLock(); + readLock.lock(); + try { + this.checkOpened(); - for (Session session : this.session()) { - session.rollback(); + for (Session session : this.session()) { + session.rollback(); + } + } finally { + readLock.unlock(); } } @@ -526,60 +609,99 @@ protected Session session(HugeType tableType) { @Override public void writeSnapshot(String parentPath) { - for (Session session : this.session()) { - session.createSnapshot(parentPath); + Lock writeLock = this.storeLock.writeLock(); + writeLock.lock(); + try { + // Every rocksdb instance should create an snapshot + for (RocksDBSessions sessions : this.sessions()) { + sessions.createSnapshot(parentPath); + } + } finally { + writeLock.unlock(); } } @Override public void readSnapshot(String parentPath) { - if (!this.opened()) { - return; - } + Lock writeLock = this.storeLock.writeLock(); + writeLock.lock(); + try { + if (!this.opened()) { + return; + } - File[] snapshotFiles = new File(parentPath).listFiles(); - E.checkNotNull(snapshotFiles, "snapshot files"); - List> fileRenamePairs = new ArrayList<>(); - for (File snapshotFile : snapshotFiles) { - Session session = this.findMatchedSession(snapshotFile); - File dataFile = new File(session.dataPath()); - fileRenamePairs.add(Pair.of(snapshotFile, dataFile)); + File[] snapshotFiles = new File(parentPath).listFiles(); + E.checkNotNull(snapshotFiles, "snapshot files"); + List> fileRenamePairs = new 
ArrayList<>(); + for (File snapshotFile : snapshotFiles) { + Session session = this.findMatchedSession(snapshotFile); + File dataFile = new File(session.dataPath()); + fileRenamePairs.add(Pair.of(snapshotFile, dataFile)); + } + /* + * NOTE: must close rocksdb instance before deleting file directory, + * if close after copying the snapshot directory to origin position, + * it may produce dirty data. + */ + for (RocksDBSessions sessions : this.sessions()) { + sessions.forceCloseRocksDB(); + } + // Copy snapshot file to dest file + for (Pair pair : fileRenamePairs) { + File snapshotFile = pair.getLeft(); + File dataFile = pair.getRight(); + try { + if (dataFile.exists()) { + LOG.warn("Delete origin data directory {}", dataFile); + FileUtils.deleteDirectory(dataFile); + } + FileUtils.moveDirectory(snapshotFile, dataFile); + } catch (IOException e) { + throw new BackendException("Failed to move %s to %s", + e, snapshotFile, dataFile); + } + } + // Reload rocksdb instance + for (RocksDBSessions sessions : this.sessions()) { + sessions.reload(); + } + LOG.info("The store {} load snapshot successfully", this.store); + } catch (RocksDBException e) { + throw new BackendException("Failed to reload rocksdb", e); + } finally { + writeLock.unlock(); } - // Close old sessions, is that right? 
- for (Session session : this.session()) { - session.close(); + } + + private final void useSessions() { + for (RocksDBSessions sessions : this.sessions()) { + sessions.useSession(); } - this.sessions.session().close(); + } - // Copy snapshot file to dest file - for (Pair pair : fileRenamePairs) { - File snapshotFile = pair.getLeft(); - File dataFile = pair.getRight(); - try { - if (dataFile.exists()) { - LOG.warn("Delete origin data directory {}", dataFile); - FileUtils.deleteDirectory(dataFile); - } - FileUtils.moveDirectory(snapshotFile, dataFile); - } catch (IOException e) { - throw new BackendException("Failed to move %s to %s", - e, snapshotFile, dataFile); + private final void closeSessions() { + Iterator> iter = this.dbs.entrySet() + .iterator(); + while (iter.hasNext()) { + Map.Entry entry = iter.next(); + RocksDBSessions sessions = entry.getValue(); + boolean closed = sessions.close(); + if (closed) { + iter.remove(); } } - - // Reopen the db - this.open(this.sessions.config()); } private Session findMatchedSession(File snapshotFile) { String fileName = snapshotFile.getName(); for (Session session : this.session()) { - if (fileName.equals(GZipUtil.md5(session.dataPath()))) { + String md5 = GZipUtil.md5(session.dataPath()); + if (fileName.equals(md5)) { return session; } } throw new BackendException("Can't find matched session for " + - "snapshot file {}", snapshotFile); + "snapshot file %s", snapshotFile); } private final List session() { @@ -598,6 +720,10 @@ private final List session() { return list; } + private final Collection sessions() { + return this.dbs.values(); + } + private final void parseTableDiskMapping(Map disks, String dataPath) { this.tableDiskMapping.clear(); @@ -628,8 +754,8 @@ private final void checkDbOpened() { "RocksDB has not been opened"); } - private static RocksDBSessions db(String disk) { - RocksDBSessions db = dbs.get(disk); + private RocksDBSessions db(String disk) { + RocksDBSessions db = this.dbs.get(disk); E.checkState(db 
!= null && !db.closed(), "RocksDB store has not been opened: %s", disk); return db; @@ -696,16 +822,28 @@ protected List tableNames() { @Override public void increaseCounter(HugeType type, long increment) { - super.checkOpened(); - Session session = super.sessions.session(); - this.counters.increaseCounter(session, type, increment); + Lock readLock = super.storeLock.readLock(); + readLock.lock(); + try { + super.checkOpened(); + Session session = super.sessions.session(); + this.counters.increaseCounter(session, type, increment); + } finally { + readLock.unlock(); + } } @Override public long getCounter(HugeType type) { - super.checkOpened(); - Session session = super.sessions.session(); - return this.counters.getCounter(session, type); + Lock readLock = super.storeLock.readLock(); + readLock.lock(); + try { + super.checkOpened(); + Session session = super.sessions.session(); + return this.counters.getCounter(session, type); + } finally { + readLock.unlock(); + } } @Override diff --git a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdbsst/RocksDBSstSessions.java b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdbsst/RocksDBSstSessions.java index 1b971e5529..d84d21ea68 100644 --- a/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdbsst/RocksDBSstSessions.java +++ b/hugegraph-rocksdb/src/main/java/com/baidu/hugegraph/backend/store/rocksdbsst/RocksDBSstSessions.java @@ -143,6 +143,20 @@ public RocksDBSessions copy(HugeConfig config, return new RocksDBSstSessions(config, database, store, this); } + @Override + public void createSnapshot(String snapshotPath) { + throw new UnsupportedOperationException("createSnapshot"); + } + + @Override + public void reload() throws RocksDBException { + throw new UnsupportedOperationException("reload"); + } + + @Override + public void forceCloseRocksDB() { + throw new UnsupportedOperationException("forceCloseRocksDB"); + } private SstFileWriter table(String table) { 
SstFileWriter sst = this.tables.get(table); @@ -368,11 +382,6 @@ public BackendColumnIterator scan(String table, assert !this.hasChanges(); return BackendColumnIterator.empty(); } - - @Override - public void createSnapshot(String snapshotPath) { - throw new UnsupportedOperationException("createSnapshot"); - } } private static class Changes extends ArrayList> { diff --git a/hugegraph-test/pom.xml b/hugegraph-test/pom.xml index 7d5b453061..983761d309 100644 --- a/hugegraph-test/pom.xml +++ b/hugegraph-test/pom.xml @@ -139,7 +139,7 @@ tinkerpop-process-test - + -Dbuild.dir=${project.build.directory} ${basedir}/src/main/java/ diff --git a/hugegraph-test/src/main/java/com/baidu/hugegraph/core/MultiGraphsTest.java b/hugegraph-test/src/main/java/com/baidu/hugegraph/core/MultiGraphsTest.java index 9f8f8b8d6c..d467c49bff 100644 --- a/hugegraph-test/src/main/java/com/baidu/hugegraph/core/MultiGraphsTest.java +++ b/hugegraph-test/src/main/java/com/baidu/hugegraph/core/MultiGraphsTest.java @@ -19,6 +19,7 @@ package com.baidu.hugegraph.core; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -36,6 +37,7 @@ import com.baidu.hugegraph.HugeGraph; import com.baidu.hugegraph.backend.BackendException; import com.baidu.hugegraph.backend.id.IdGenerator; +import com.baidu.hugegraph.backend.store.rocksdb.RocksDBOptions; import com.baidu.hugegraph.config.CoreOptions; import com.baidu.hugegraph.exception.ExistedException; import com.baidu.hugegraph.schema.EdgeLabel; @@ -306,8 +308,9 @@ public void testCreateGraphsWithMultiDisksForRocksDB() { HugeGraph g1 = openGraphWithBackend( "g1", "rocksdb", "binary", "rocksdb.data_disks", - "[g/secondary_index:rocksdb-index," + - " g/range_int_index:rocksdb-index]"); + "[g/secondary_index:rocksdb-index1," + + " g/range_int_index:rocksdb-index1," + + " g/range_long_index:rocksdb-index2]"); g1.initBackend(); g1.clearBackend(); @@ -315,36 +318,43 @@ public void 
testCreateGraphsWithMultiDisksForRocksDB() { Assert.assertThrows(BackendException.class, () -> { g2[0] = openGraphWithBackend("g2", "rocksdb", "binary", "rocksdb.data_disks", - "[g/secondary_index:/," + - " g/range_int_index:rocksdb-index]"); + "[g/range_int_index:rocksdb-index1]"); g2[0].initBackend(); }, e -> { Throwable root = HugeException.rootCause(e); Assert.assertInstanceOf(RocksDBException.class, root); - Assert.assertContains("While mkdir if missing: /g", + Assert.assertContains("lock hold by current process", + root.getMessage()); + Assert.assertContains("No locks available", + root.getMessage()); + }); + + final HugeGraph[] g3 = new HugeGraph[1]; + Assert.assertThrows(BackendException.class, () -> { + g3[0] = openGraphWithBackend("g3", "rocksdb", "binary", + "rocksdb.data_disks", + "[g/secondary_index:/]"); + g3[0].initBackend(); + }, e -> { + Throwable root = HugeException.rootCause(e); + Assert.assertInstanceOf(RocksDBException.class, root); + Assert.assertContains("While mkdir if missing", root.getMessage()); }); - destoryGraphs(ImmutableList.of(g1, g2[0])); + destoryGraphs(ImmutableList.of(g1)); } - public static List openGraphs(String... graphNames) { + private static List openGraphs(String... 
graphNames) { List graphs = new ArrayList<>(graphNames.length); - PropertiesConfiguration conf = Utils.getConf(); - Configuration config = new BaseConfiguration(); - for (Iterator keys = conf.getKeys(); keys.hasNext();) { - String key = keys.next(); - config.setProperty(key, conf.getProperty(key)); - } - ((BaseConfiguration) config).setDelimiterParsingDisabled(true); for (String graphName : graphNames) { - config.setProperty(CoreOptions.STORE.name(), graphName); + Configuration config = buildConfig(graphName); graphs.add((HugeGraph) GraphFactory.open(config)); } return graphs; } - public static void destoryGraphs(List graphs) { + private static void destoryGraphs(List graphs) { for (HugeGraph graph : graphs) { try { graph.close(); @@ -354,9 +364,19 @@ public static void destoryGraphs(List graphs) { } } - public static HugeGraph openGraphWithBackend(String graph, String backend, - String serializer, - String... configs) { + private static HugeGraph openGraphWithBackend(String graph, String backend, + String serializer, + String... 
configs) { + Configuration config = buildConfig(graph); + config.setProperty(CoreOptions.BACKEND.name(), backend); + config.setProperty(CoreOptions.SERIALIZER.name(), serializer); + for (int i = 0; i < configs.length;) { + config.setProperty(configs[i++], configs[i++]); + } + return ((HugeGraph) GraphFactory.open(config)); + } + + private static Configuration buildConfig(String graphName) { PropertiesConfiguration conf = Utils.getConf(); Configuration config = new BaseConfiguration(); for (Iterator keys = conf.getKeys(); keys.hasNext();) { @@ -364,12 +384,14 @@ public static HugeGraph openGraphWithBackend(String graph, String backend, config.setProperty(key, conf.getProperty(key)); } ((BaseConfiguration) config).setDelimiterParsingDisabled(true); - config.setProperty(CoreOptions.STORE.name(), graph); - config.setProperty(CoreOptions.BACKEND.name(), backend); - config.setProperty(CoreOptions.SERIALIZER.name(), serializer); - for (int i = 0; i < configs.length;) { - config.setProperty(configs[i++], configs[i++]); - } - return ((HugeGraph) GraphFactory.open(config)); + + config.setProperty(CoreOptions.STORE.name(), graphName); + String dataPath = config.getString(RocksDBOptions.DATA_PATH.name()); + config.setProperty(RocksDBOptions.DATA_PATH.name(), + Paths.get(dataPath, graphName).toString()); + String walPath = config.getString(RocksDBOptions.WAL_PATH.name()); + config.setProperty(RocksDBOptions.WAL_PATH.name(), + Paths.get(walPath, graphName).toString()); + return config; } } diff --git a/hugegraph-test/src/main/java/com/baidu/hugegraph/unit/serializer/StoreSerializerTest.java b/hugegraph-test/src/main/java/com/baidu/hugegraph/unit/serializer/StoreSerializerTest.java index 6a8511453d..21dd2ddf05 100644 --- a/hugegraph-test/src/main/java/com/baidu/hugegraph/unit/serializer/StoreSerializerTest.java +++ b/hugegraph-test/src/main/java/com/baidu/hugegraph/unit/serializer/StoreSerializerTest.java @@ -28,8 +28,8 @@ import com.baidu.hugegraph.backend.store.BackendAction; 
import com.baidu.hugegraph.backend.store.BackendEntry; import com.baidu.hugegraph.backend.store.BackendMutation; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreAction; -import com.baidu.hugegraph.backend.store.raft.RaftRequests.StoreType; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreAction; +import com.baidu.hugegraph.backend.store.raft.rpc.RaftRequests.StoreType; import com.baidu.hugegraph.backend.store.raft.StoreCommand; import com.baidu.hugegraph.backend.store.raft.StoreSerializer; import com.baidu.hugegraph.testutil.Assert;