Skip to content

Commit

Permalink
translate remaining files
Browse files Browse the repository at this point in the history
  • Loading branch information
Pengzna committed Aug 4, 2024
1 parent ad5f6fb commit bd0e4bb
Show file tree
Hide file tree
Showing 55 changed files with 283 additions and 275 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,8 @@
import lombok.extern.slf4j.Slf4j;

/**
* 使用pd,支持raft
* 读取文件并多线程进行入库
* Use PD, support Raft.
* Read the file and insert it into the database with multiple threads.
*/
@Slf4j
public class HgThread2DB {
Expand Down Expand Up @@ -204,7 +204,7 @@ public boolean testOrder(String input) {
}

/**
* 多线程读取文件入库
* Read files with multiple threads and insert them into the database
*
* @throws IOException
* @throws InterruptedException
Expand All @@ -223,7 +223,8 @@ public void startMultiprocessInsert(String filepath) throws IOException {

String strLine = null;
String tableName = HgCliUtil.TABLE_NAME;
// After accumulating how many rear execution threads are entered into the library, 100,000
// Number of records to accumulate before starting a thread to insert them
// into the database: 100,000
int maxlist = 100000;
List<String> keys = new ArrayList<>(maxlist);
metrics = MetricX.ofStart();
Expand All @@ -232,7 +233,8 @@ public void startMultiprocessInsert(String filepath) throws IOException {
keys.add(strLine);
dataCount++;

// Read 10,000 pieces of data in the file and enable a thread to enter the warehouse
// Read 10,000 records from the file, then start a thread to insert them
// into the database
if (dataCount % maxlist == 0) {
List<String> finalKeys = keys;
Runnable task = () -> {
Expand Down Expand Up @@ -310,7 +312,7 @@ public void startMultiprocessInsert(String filepath) throws IOException {
}

/**
* 多线程读取文件入库
* Read files with multiple threads and insert them into the database
*
* @throws IOException
* @throws InterruptedException
Expand Down Expand Up @@ -384,7 +386,8 @@ public String getLong() {
}

/**
* 执行查询,并将查询的结果做为下一次迭代的点放入队列
* Execute the query, and put the query results into the queue as the start
* points of the next iteration
*/
private void queryAnd2Queue() {
try {
Expand Down Expand Up @@ -444,7 +447,8 @@ private void queryAnd2Queue() {
}
}
}
// If you do not have enough 10,000 at a time, submit a separate query separately to ensure that all the results can be executed
// If fewer than 10,000 records are accumulated, submit them as a separate
// query to ensure all results are processed
if (!newQueryList.isEmpty() && listQueue.size() < 1000) {
listQueue.put(newQueryList);
}
Expand All @@ -459,10 +463,10 @@ private void queryAnd2Queue() {
}

/**
* 多线程查询
* Multi-threaded query
*
* @param point 起始查询点,后续根据这个点查询到的value做为下一次的查询条件进行迭代
* @param scanCount 允许启动的线程数量
* @param point starting query point; the values found from it are used as the next iteration's query condition
* @param scanCount the number of threads allowed to start
* @throws IOException
* @throws InterruptedException
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,8 @@ public interface HgKvStore {
boolean put(String table, HgOwnerKey ownerKey, byte[] value);

/**
* 该版本被store内部使用。向分区写入数据,
* partitionId与key.keyCode必须与pd存储的分区信息保持一致。
* This version is used internally by the store. It writes data to a partition;
* partitionId and key.keyCode must be consistent with the partition information stored in PD.
*/
boolean directPut(String table, int partitionId, HgOwnerKey key, byte[] value);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ public HgStoreNodePartitionerImpl(PDClient pdClient, HgStoreNodeManager nodeMana
}

/**
* 查询分区信息,结果通过HgNodePartitionerBuilder返回
* Query partition information; the result is returned via HgNodePartitionerBuilder
*/
@Override
public int partition(HgNodePartitionerBuilder builder, String graphName,
Expand Down Expand Up @@ -141,7 +141,7 @@ public int partition(HgNodePartitionerBuilder builder, String graphName,
}

/**
* 查询hgstore信息
* Query hgstore information
*
* @return hgstore
*/
Expand All @@ -157,7 +157,7 @@ public HgStoreNode apply(String graphName, Long nodeId) {
}

/**
* 通知更新缓存
* Notification to update the cache
*/
@Override
public int notice(String graphName, HgStoreNotice storeNotice) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@
import lombok.extern.slf4j.Slf4j;

/**
* 批量流式查询客户端实现类
* Batch streaming query client implementation class
* <p>
* created on 2022/07/23
*
Expand Down Expand Up @@ -86,8 +86,8 @@ public KvBatchScanner(
}

/**
* 构建流式查询迭代器
* scanQuery进行拆分,启动多个流式请求,提升store的并发性
* Construct a streaming query iterator.
* Split the scanQuery and start multiple streaming requests to improve store concurrency.
*
* @param scanQuery scanQuery
* @param handler task handler
Expand Down Expand Up @@ -115,7 +115,7 @@ public static void scan(
}

/**
* 发送查询请求
* Send query request
*
* @param query scan query
*/
Expand All @@ -133,7 +133,7 @@ public void sendQuery(HgScanQuery query) {
}

/**
* 发送应答
* Send response
*/
public void sendResponse() {
try {
Expand Down Expand Up @@ -164,7 +164,7 @@ public void dataArrived(Supplier<HgKvIterator<HgKvEntry>> supplier) throws
}

/**
* 数据接收结束
* Data reception is over
*/
public void dataComplete() {
close();
Expand Down Expand Up @@ -192,7 +192,7 @@ public void close() {
}

/**
* 任务拆分器
* Task splitter
*/
static class TaskSplitter {

Expand Down Expand Up @@ -229,7 +229,7 @@ public boolean isFinished() {
}

/**
* 评估最大任务数
* Evaluate the maximum number of tasks
*/
private void evaluateMaxTaskSize() {
if (maxTaskSize == 0) { // Based on the first batch of tasks, get the number of stores, and then calculate the maximum number of tasks
Expand All @@ -241,7 +241,7 @@ private void evaluateMaxTaskSize() {
maxBatchSize = this.notifier.getScannerCount() * maxBatchSize; // A maximum of 1,000 per machine

/*
* Limit少于10000时启动一个流,节省网络带宽
* When the limit is less than 10,000, start a single stream to save network bandwidth
*/
if (scanQuery.getLimit() < maxBatchSize * 30L) {
maxTaskSize = 1;
Expand All @@ -250,7 +250,7 @@ private void evaluateMaxTaskSize() {
}

/**
* 拆分任务,任务拆分为多个grpc请求
* Split the task into multiple gRPC requests
*/
public void splitTask() {
if (this.finished || this.splitting) {
Expand Down Expand Up @@ -292,7 +292,7 @@ public synchronized void close() {
}

/**
* 查询结果接收器
* Query results receiver
*/
static class KvBatchReceiver implements StreamObserver<KvStream> {

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,8 @@
import lombok.extern.slf4j.Slf4j;

/**
* 批量查询结果归并,阻塞队列工作模式
* 对请求任务的拆分,创建多个请求队列
* Merge batch query results, using a blocking-queue working mode.
* Split the request task and create multiple request queues.
*/
@Slf4j
public class KvBatchScannerMerger implements KvCloseableIterator<HgKvIterator<HgKvEntry>>,
Expand Down Expand Up @@ -125,7 +125,7 @@ public void registerScanner(KvBatchScanner closeable) {
}

/**
* 返回值<0表示任务结束
* A return value < 0 indicates the task is finished
*
* @param closeable
* @return
Expand All @@ -146,7 +146,7 @@ public int getScannerCount() {
}

/**
* 组装一个Scanner的多个有序迭代器为一个迭代器
* Assemble multiple ordered iterators of one Scanner into a single iterator
*/
static class ScannerDataQueue {

Expand All @@ -170,7 +170,7 @@ public void add(Supplier<HgKvIterator<HgKvEntry>> supplier) {
}

/**
* 迭代器是否有效,如果没有数据,等待数据到达
* Whether the iterator is valid; if there is no data, wait for data to arrive
*
* @return
*/
Expand Down Expand Up @@ -221,7 +221,7 @@ private void moveNext() {
}

/**
* 对多个Scanner返回结果进行归并排序
* Merge-sort the results returned by multiple Scanners
*/
static class SortedScannerMerger extends KvBatchScannerMerger {

Expand Down Expand Up @@ -286,8 +286,8 @@ public HgKvEntry next() {
}

/**
* 从多个Scanner中挑选一个sn最小的迭代器
* 如果Scanner没有数据,等待数据到达。
* Pick the iterator with the smallest sn from multiple Scanners.
* If a Scanner has no data, wait for data to arrive.
*
* @return
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
public class Bits {

/**
* 大头字节序写入short
* Write a short in big-endian byte order
*/
public static void putShort(byte[] buf, int offSet, int x) {
buf[offSet] = (byte) (x >> 8);
Expand All @@ -37,7 +37,7 @@ public static void putInt(byte[] buf, int offSet, int x) {
}

/**
* 大头字节序读取short
* Read a short in big-endian byte order
*/
public static int getShort(byte[] buf, int offSet) {
int x = buf[offSet] & 0xff;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -122,8 +122,8 @@ public boolean isClusterReady() {
}

/**
* 服务状态有四种
* 就绪,在线、离线、死亡(从集群排除)
* There are four types of service status:
* ready, online, offline, dead (excluded from the cluster)
*/
protected void doStoreHeartbeat() {
while (!terminated) {
Expand Down
Loading

0 comments on commit bd0e4bb

Please sign in to comment.