Adding subshard work items on lease expiry #1160

Merged: 21 commits, Dec 5, 2024

Changes shown are from 2 of the 21 commits.

Commits:
55a83ca: Checkpoint - code in place for subshard work items, need to test (chelma, Nov 22, 2024)
3429a5a: Improved cursor plumbing for RFS SubShard work items (chelma, Nov 25, 2024)
2aae632: Additional changes per PR comments (chelma, Nov 25, 2024)
2b33a84: Merge remote-tracking branch 'upstream/main' into MIGRATIONS-2128 (AndreKurait, Nov 25, 2024)
2c2a708: Modify LuceneDocumentsReader to read docs/segments sequentially (AndreKurait, Nov 25, 2024)
56839cd: Refactor of partial shard work items - added sequential doc reading, … (AndreKurait, Dec 2, 2024)
cf6ed86: Fix spotless issues (AndreKurait, Dec 2, 2024)
920be77: Working subshard (AndreKurait, Dec 3, 2024)
d8c4372: Rename numAttempts to leaseAcquisitionExponent and add max exponent b… (AndreKurait, Dec 4, 2024)
40eca92: Add worker cancellation on lease expiration (AndreKurait, Dec 4, 2024)
6211c33: Fix lucene starting doc id (AndreKurait, Dec 4, 2024)
e403228: Add lease duration decrease if shard setup is < 2.5% of lease time (AndreKurait, Dec 4, 2024)
e9ce08e: Fix WorkCoordinatorTest.java (AndreKurait, Dec 4, 2024)
5d82fbe: Add LeaseExpirationTest (AndreKurait, Dec 5, 2024)
e4be465: Fix scheduler dispose (AndreKurait, Dec 5, 2024)
b5640f5: Merge branch 'main' into MIGRATIONS-2128 (AndreKurait, Dec 5, 2024)
8494eec: Address spotless (AndreKurait, Dec 5, 2024)
9820fa1: Address comments for LeaseExpirationTest (AndreKurait, Dec 5, 2024)
2d3ed9c: Update messaging on deletedDocs (AndreKurait, Dec 5, 2024)
c4dcbc4: Update RFS Design doc with successor work items (AndreKurait, Dec 5, 2024)
178fe55: Fix WorkCoordinatorTest (AndreKurait, Dec 5, 2024)
File: DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java
@@ -30,8 +30,8 @@
import org.opensearch.migrations.bulkload.workcoordination.OpenSearchWorkCoordinator;
import org.opensearch.migrations.bulkload.workcoordination.ScopedWorkCoordinator;
import org.opensearch.migrations.bulkload.worker.DocumentsRunner;
import org.opensearch.migrations.bulkload.worker.IndexAndShardCursor;
import org.opensearch.migrations.bulkload.worker.ShardWorkPreparer;
import org.opensearch.migrations.bulkload.worker.WorkItemCursor;
import org.opensearch.migrations.cluster.ClusterProviderRegistry;
import org.opensearch.migrations.reindexer.tracing.RootDocumentMigrationContext;
import org.opensearch.migrations.tracing.ActiveContextTracker;
@@ -182,7 +182,7 @@
public String getTransformerConfigParameterArgPrefix() {
return DOC_CONFIG_PARAMETER_ARG_PREFIX;
}
final static String DOC_CONFIG_PARAMETER_ARG_PREFIX = "doc-";

Check failure (GitHub Actions / SonarQube, line 185 in DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java): java:S1124 - Reorder the modifiers to comply with the Java Language Specification.

@Parameter(
required = false,
@@ -276,13 +276,15 @@
}
IJsonTransformer docTransformer = new TransformationLoader().getTransformerFactoryLoader(docTransformerConfig);

AtomicReference<IndexAndShardCursor> progressCursor = new AtomicReference<>();
AtomicReference<IWorkCoordinator.WorkItemAndDuration> workItemRef = new AtomicReference<>();
AtomicReference<WorkItemCursor> progressCursor = new AtomicReference<>();
try (var workCoordinator = new OpenSearchWorkCoordinator(
new CoordinateWorkHttpClient(connectionContext),
TOLERABLE_CLIENT_SERVER_CLOCK_DIFFERENCE_SECONDS,
workerId);
var processManager = new LeaseExpireTrigger(
w -> exitOnLeaseTimeout(
workItemRef,
workCoordinator,
w,
progressCursor,
@@ -343,19 +345,21 @@

@SneakyThrows
private static void exitOnLeaseTimeout(
AtomicReference<IWorkCoordinator.WorkItemAndDuration> workItemRef,
IWorkCoordinator coordinator,
String workItemId,
AtomicReference<IndexAndShardCursor> progressCursorRef,
AtomicReference<WorkItemCursor> progressCursorRef,
Supplier<IWorkCoordinationContexts.ICreateSuccessorWorkItemsContext> contextSupplier
) {
log.error("Terminating RfsMigrateDocuments because the lease has expired for " + workItemId);
log.atWarn().setMessage("Terminating RfsMigrateDocuments because the lease has expired for {}")
.addArgument(workItemId)
.log();
var progressCursor = progressCursorRef.get();
if (progressCursor != null) {
log.error("Progress cursor: " + progressCursor.toString());
var successorWorkItem = progressCursor.toWorkItemString();
ArrayList<String> successorWorkItemIds = new ArrayList<>();
successorWorkItemIds.add(successorWorkItem);

log.atError().setMessage("Progress cursor: {}")
.addArgument(progressCursor).log();
var workItemAndDuration = workItemRef.get();
var successorWorkItemIds = getSuccessorWorkItemIds(workItemAndDuration, progressCursor);
coordinator.createSuccessorWorkItemsAndMarkComplete(
workItemId,
successorWorkItemIds,
@@ -369,6 +373,19 @@
System.exit(PROCESS_TIMED_OUT_EXIT_CODE);
}

private static ArrayList<String> getSuccessorWorkItemIds(IWorkCoordinator.WorkItemAndDuration workItemAndDuration, WorkItemCursor progressCursor) {
if (workItemAndDuration == null) {
throw new IllegalStateException("Unexpected worker coordination state. Expected workItem set when progressCursor not null.");
}
var workItem = workItemAndDuration.getWorkItem();
var successorWorkItem = new IWorkCoordinator.WorkItemAndDuration
.WorkItem(workItem.getIndexName(), workItem.getShardNumber(),
progressCursor.getDocId() + 1);
ArrayList<String> successorWorkItemIds = new ArrayList<>();
successorWorkItemIds.add(successorWorkItem.toString());
return successorWorkItemIds;
}

private static RootDocumentMigrationContext makeRootContext(Args arguments, String workerId) {
var compositeContextTracker = new CompositeContextTracker(
new ActiveContextTracker(),
@@ -384,7 +401,7 @@

public static DocumentsRunner.CompletionStatus run(Function<Path, LuceneDocumentsReader> readerFactory,
DocumentReindexer reindexer,
AtomicReference<IndexAndShardCursor> progressCursor,
AtomicReference<WorkItemCursor> progressCursor,
IWorkCoordinator workCoordinator,
Duration maxInitialLeaseDuration,
LeaseExpireTrigger leaseExpireTrigger,
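To make the lease-expiry flow above easier to follow, here is a minimal, self-contained sketch of the successor-work-item derivation. Cursor and WorkItem are simplified stand-ins invented for this example; the project's real types are WorkItemCursor and IWorkCoordinator.WorkItemAndDuration.WorkItem, and the actual serialization format and coordinator call are not reproduced here.

// Sketch only: stand-in types, not the project's work-coordination classes.
import java.util.List;
import java.util.Optional;

public class LeaseExpirySketch {
    // Hypothetical stand-in: the cursor only needs the last successfully sent Lucene doc number.
    record Cursor(int docId) {}
    // Hypothetical stand-in: a work item is an index, a shard, and the doc number to start from.
    record WorkItem(String indexName, int shardNumber, int startingDocId) {}

    // Mirrors getSuccessorWorkItemIds: the successor resumes at the doc after the cursor.
    static List<String> successorWorkItemIds(WorkItem current, Optional<Cursor> progress) {
        if (progress.isEmpty()) {
            return List.of(); // no progress recorded: no successor, the original item is retried as-is
        }
        var successor = new WorkItem(current.indexName(), current.shardNumber(),
                progress.get().docId() + 1);
        return List.of(successor.toString()); // the real code serializes via WorkItem.toString()
    }

    public static void main(String[] args) {
        var current = new WorkItem("my-index", 3, 0);
        System.out.println(successorWorkItemIds(current, Optional.of(new Cursor(41_999))));
        // one successor item for my-index / shard 3, starting at doc 42000
    }
}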
File: (test source; filename not captured in this view)
@@ -71,9 +71,9 @@ protected DirectoryReader getReader() {
}

@Override
protected RfsLuceneDocument getDocument(IndexReader reader, int luceneSegIndex, int luceneDocId, boolean isLive) {
protected RfsLuceneDocument getDocument(IndexReader reader, int luceneDocId, boolean isLive, int segmentDocBase) {
ingestedDocuments.incrementAndGet();
return super.getDocument(reader, luceneSegIndex, luceneDocId, isLive);
return super.getDocument(reader, luceneDocId, isLive, segmentDocBase);
}
};

@@ -107,7 +107,7 @@ protected RfsLuceneDocument getDocument(IndexReader reader, int luceneSegIndex,

// Start reindexing in a separate thread
Thread reindexThread = new Thread(() -> {
reindexer.reindex("test-index", 0, reader.readDocuments(), mockContext).then().block();
reindexer.reindex("test-index", reader.readDocuments(), mockContext).then().block();
});
reindexThread.start();

File: (test source; filename not captured in this view)
@@ -34,7 +34,7 @@
import org.opensearch.migrations.bulkload.workcoordination.LeaseExpireTrigger;
import org.opensearch.migrations.bulkload.workcoordination.OpenSearchWorkCoordinator;
import org.opensearch.migrations.bulkload.worker.DocumentsRunner;
import org.opensearch.migrations.bulkload.worker.IndexAndShardCursor;
import org.opensearch.migrations.bulkload.worker.WorkItemCursor;
import org.opensearch.migrations.cluster.ClusterProviderRegistry;
import org.opensearch.migrations.reindexer.tracing.DocumentMigrationTestContext;
import org.opensearch.migrations.transform.TransformationLoader;
@@ -143,8 +143,8 @@ public FilteredLuceneDocumentsReader(Path luceneFilesBasePath, boolean softDelet
}

@Override
public Flux<RfsLuceneDocument> readDocuments(int startSegmentIndex, int startDoc) {
return super.readDocuments(startSegmentIndex, startDoc).map(docTransformer::apply);
public Flux<RfsLuceneDocument> readDocuments(int startDoc) {
return super.readDocuments(startDoc).map(docTransformer);
}
}

@@ -193,7 +193,7 @@ public static DocumentsRunner.CompletionStatus migrateDocumentsWithOneWorker(

var defaultDocTransformer = new TransformationLoader().getTransformerFactoryLoader(RfsMigrateDocuments.DEFAULT_DOCUMENT_TRANSFORMATION_CONFIG);

AtomicReference<IndexAndShardCursor> progressCursor = new AtomicReference<>();
AtomicReference<WorkItemCursor> progressCursor = new AtomicReference<>();
try (var workCoordinator = new OpenSearchWorkCoordinator(
new CoordinateWorkHttpClient(ConnectionContextTestParams.builder()
.host(targetAddress)
File: DocumentReindexer.java
@@ -5,7 +5,7 @@
import java.util.function.Predicate;
import java.util.stream.Collectors;

import org.opensearch.migrations.bulkload.worker.IndexAndShardCursor;
import org.opensearch.migrations.bulkload.worker.WorkItemCursor;
import org.opensearch.migrations.reindexer.tracing.IDocumentMigrationContexts.IDocumentReindexContext;
import org.opensearch.migrations.transform.IJsonTransformer;

@@ -27,17 +27,17 @@ public class DocumentReindexer {
private final int maxConcurrentWorkItems;
private final IJsonTransformer transformer;

public Flux<IndexAndShardCursor> reindex(String indexName, int shardNumber, Flux<RfsLuceneDocument> documentStream, IDocumentReindexContext context) {
public Flux<WorkItemCursor> reindex(String indexName, Flux<RfsLuceneDocument> documentStream, IDocumentReindexContext context) {
var scheduler = Schedulers.newParallel("DocumentBulkAggregator");
var rfsDocs = documentStream
.publishOn(scheduler, 1)
.map(doc -> transformDocument(doc, indexName, shardNumber));
.map(doc -> transformDocument(doc, indexName));

return this.reindexDocsInParallelBatches(rfsDocs, indexName, shardNumber, context)
return this.reindexDocsInParallelBatches(rfsDocs, indexName, context)
.doOnTerminate(scheduler::dispose);
}

Flux<IndexAndShardCursor> reindexDocsInParallelBatches(Flux<RfsDocument> docs, String indexName, int shardNumber, IDocumentReindexContext context) {
Flux<WorkItemCursor> reindexDocsInParallelBatches(Flux<RfsDocument> docs, String indexName, IDocumentReindexContext context) {
// Use parallel scheduler for send subscription due on non-blocking io client
var scheduler = Schedulers.newParallel("DocumentBatchReindexer");
var bulkDocsBatches = batchDocsBySizeOrCount(docs);
@@ -52,8 +52,8 @@ Flux<IndexAndShardCursor> reindexDocsInParallelBatches(Flux<RfsDocument> docs, S
}

@SneakyThrows
RfsDocument transformDocument(RfsLuceneDocument doc, String indexName, int shardNumber) {
var finalDocument = RfsDocument.fromLuceneDocument(doc, indexName, shardNumber);
RfsDocument transformDocument(RfsLuceneDocument doc, String indexName) {
var finalDocument = RfsDocument.fromLuceneDocument(doc, indexName);
if (transformer != null) {
finalDocument = RfsDocument.transform(transformer::transformJson, finalDocument);
}
@@ -64,9 +64,9 @@ RfsDocument transformDocument(RfsLuceneDocument doc, String indexName, int shard
* TODO: Update the reindexing code to rely on _index field embedded in each doc section rather than requiring it in the
* REST path. See: https://opensearch.atlassian.net/browse/MIGRATIONS-2232
*/
Mono<IndexAndShardCursor> sendBulkRequest(UUID batchId, List<RfsDocument> docsBatch, String indexName, IDocumentReindexContext context, Scheduler scheduler) {
Mono<WorkItemCursor> sendBulkRequest(UUID batchId, List<RfsDocument> docsBatch, String indexName, IDocumentReindexContext context, Scheduler scheduler) {
var lastDoc = docsBatch.get(docsBatch.size() - 1);
log.atInfo().setMessage("Last doc is: Index " + lastDoc.indexName + "Shard " + lastDoc.shardNumber + " Seg Id " + lastDoc.luceneSegId + " Lucene ID " + lastDoc.luceneDocId).log();
log.atInfo().setMessage("Last doc is: Source Index " + indexName + "Shard " + " Lucene Doc Number " + lastDoc.luceneDocNumber).log();

List<BulkDocSection> bulkDocSections = docsBatch.stream()
.map(rfsDocument -> rfsDocument.document)
Expand All @@ -84,7 +84,7 @@ Mono<IndexAndShardCursor> sendBulkRequest(UUID batchId, List<RfsDocument> docsBa
.log())
// Prevent the error from stopping the entire stream, retries occurring within sendBulkRequest
.onErrorResume(e -> Mono.empty())
.then(Mono.just(new IndexAndShardCursor(indexName, lastDoc.shardNumber, lastDoc.luceneSegId, lastDoc.luceneDocId))
.then(Mono.just(new WorkItemCursor(lastDoc.luceneDocNumber))
.subscribeOn(scheduler));
}

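The cursor-per-batch idea in sendBulkRequest can be illustrated with a short Reactor sketch (assuming reactor-core on the classpath). Cursor and sendBulk() are stand-ins invented for this example, and the sequential concatMap here simplifies the real code, which issues async bulk HTTP requests with bounded parallelism while preserving order; the shape is the same: every completed batch resolves to a cursor carrying the last Lucene doc number it contained.

// Sketch only: simulated bulk sends, not the project's OpenSearch client.
import java.util.List;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class CursorPerBatchSketch {
    record Cursor(int lastDocNumber) {}

    // Pretend bulk call: in the real code this is an asynchronous HTTP bulk request.
    static Mono<Void> sendBulk(List<Integer> batch) {
        return Mono.fromRunnable(() -> System.out.println("sent batch of " + batch.size()));
    }

    static Flux<Cursor> reindex(Flux<Integer> luceneDocNumbers) {
        return luceneDocNumbers
            .buffer(3) // the real code batches by doc count and byte size
            .concatMap(batch -> sendBulk(batch)
                .then(Mono.just(new Cursor(batch.get(batch.size() - 1)))));
    }

    public static void main(String[] args) {
        reindex(Flux.range(0, 10)).doOnNext(System.out::println).blockLast();
        // emits a Cursor after each simulated bulk request: 2, 5, 8, 9
    }
}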
File: RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java
@@ -92,13 +92,13 @@

*/
public Flux<RfsLuceneDocument> readDocuments() {
return readDocuments(0, 0);
return readDocuments(0);
}

public Flux<RfsLuceneDocument> readDocuments(int startSegmentIndex, int startDoc) {
public Flux<RfsLuceneDocument> readDocuments(int startDoc) {
return Flux.using(
() -> wrapReader(getReader(), softDeletesPossible, softDeletesField),
reader -> readDocsByLeavesFromStartingPosition(reader, startSegmentIndex, startDoc),
reader -> readDocsByLeavesFromStartingPosition(reader, startDoc),
reader -> {
try {
reader.close();
@@ -128,7 +128,7 @@
}
// Otherwise, shift the SegmentReaders to the front
else if (leafReader1 instanceof SegmentReader && !(leafReader2 instanceof SegmentReader)) {
log.info("Found non-SegmentReader of type {} in the DirectoryReader", leafReader2.getClass().getName());

Check failure (GitHub Actions / SonarQube, line 131 in RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java): java:S1192 - Define a constant instead of duplicating this literal "Found non-SegmentReader of type {} in the DirectoryReader" 4 times.
return -1;
} else if (!(leafReader1 instanceof SegmentReader) && leafReader2 instanceof SegmentReader) {
log.info("Found non-SegmentReader of type {} in the DirectoryReader", leafReader1.getClass().getName());
@@ -158,7 +158,7 @@
If the startSegmentIndex is 0, it will start from the first segment.
If the startDocId is 0, it will start from the first document in the segment.
*/
Publisher<RfsLuceneDocument> readDocsByLeavesFromStartingPosition(DirectoryReader reader, int startSegmentIndex, int startDocId) {
Publisher<RfsLuceneDocument> readDocsByLeavesFromStartingPosition(DirectoryReader reader, int startDocId) {

Check failure (GitHub Actions / SonarQube, line 161 in RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java): java:S1172 - Remove this unused method parameter "startDocId".
var maxDocumentsToReadAtOnce = 100; // Arbitrary value
log.atInfo().setMessage("{} documents in {} leaves found in the current Lucene index")
.addArgument(reader::maxDoc)
@@ -167,32 +167,32 @@

// Create shared scheduler for i/o bound document reading
var sharedSegmentReaderScheduler = Schedulers.newBoundedElastic(maxDocumentsToReadAtOnce, Integer.MAX_VALUE, "sharedSegmentReader");

int startDocIdInt = 2;
return Flux.fromIterable(reader.leaves())
.skip(startSegmentIndex)
.concatMapDelayError(c -> readDocsFromSegment(c,
Review comment (Collaborator): Can you explain the parallelization here? Are we running multiple readDocsFromSegment in parallel? Why would we need/want to do those reads asynchronously, especially if they're sequential?

// Only use startDocId for the first segment we process
c.ord == startSegmentIndex ? startDocId : 0,
startDocIdInt,
sharedSegmentReaderScheduler,
maxDocumentsToReadAtOnce)
)
.subscribeOn(sharedSegmentReaderScheduler) // Scheduler to read documents on
.doOnTerminate(sharedSegmentReaderScheduler::dispose);
}

Flux<RfsLuceneDocument> readDocsFromSegment(LeafReaderContext leafReaderContext, int startDocId, Scheduler scheduler,
Flux<RfsLuceneDocument> readDocsFromSegment(LeafReaderContext leafReaderContext, int docCommitId, Scheduler scheduler,
int concurrency) {
var segmentReader = leafReaderContext.reader();
var liveDocs = segmentReader.getLiveDocs();

int segmentIndex = leafReaderContext.ord;

Check failure (GitHub Actions / SonarQube, line 186 in RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java): java:S1854 - Remove this useless assignment to local variable "segmentIndex".
Check failure (GitHub Actions / SonarQube, line 186 in RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java): java:S1481 - Remove this unused "segmentIndex" local variable.
int segmentDocBase = leafReaderContext.docBase;

return Flux.range(startDocId, segmentReader.maxDoc() - startDocId)
return Flux.range(0, segmentReader.maxDoc())
.skipWhile(id -> id + segmentDocBase <= docCommitId)
.flatMapSequentialDelayError(docIdx -> Mono.defer(() -> {
try {
if (liveDocs == null || liveDocs.get(docIdx)) {
// Get document, returns null to skip malformed docs
RfsLuceneDocument document = getDocument(segmentReader, segmentIndex, docIdx, true);
RfsLuceneDocument document = getDocument(segmentReader, docIdx, true, segmentDocBase);
return Mono.justOrEmpty(document); // Emit only non-null documents
} else {
return Mono.empty(); // Skip non-live documents
@@ -212,7 +212,7 @@
return reader;
}

protected RfsLuceneDocument getDocument(IndexReader reader, int luceneSegIndex, int luceneDocId, boolean isLive) {
protected RfsLuceneDocument getDocument(IndexReader reader, int luceneDocId, boolean isLive, int segmentDocBase) {
Document document;
try {
document = reader.document(luceneDocId);
@@ -284,6 +284,6 @@
}

log.atDebug().setMessage("Document {} read successfully").addArgument(openSearchDocId).log();
return new RfsLuceneDocument(luceneSegIndex, luceneDocId, openSearchDocId, type, sourceBytes.utf8ToString(), routing);
return new RfsLuceneDocument(segmentDocBase + luceneDocId, openSearchDocId, type, sourceBytes.utf8ToString(), routing);
}
}
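A minimal sketch of the doc-numbering scheme the reader now relies on, with plain Java records standing in for Lucene's LeafReaderContext (no Lucene dependency here, and the segment sizes are invented): docBase plus the segment-local doc id yields a doc number that is unique across the whole shard's Lucene index, which is what WorkItemCursor records and what the skipWhile above compares against when resuming after a lease expiry.

// Sketch only: arrays of sizes stand in for Lucene segments.
import java.util.List;

public class GlobalDocNumberSketch {
    record Segment(int docBase, int maxDoc) {} // stand-in for LeafReaderContext.docBase / reader.maxDoc()

    static void readFrom(List<Segment> segments, int docCommitId) {
        for (Segment segment : segments) {
            for (int localId = 0; localId < segment.maxDoc(); localId++) {
                int globalDocNumber = segment.docBase() + localId;
                if (globalDocNumber <= docCommitId) {
                    continue; // already sent before the lease expired; mirrors the skipWhile above
                }
                System.out.println("read global doc " + globalDocNumber
                        + " (segment base " + segment.docBase() + ", local id " + localId + ")");
            }
        }
    }

    public static void main(String[] args) {
        // Three segments of sizes 5, 3, 4: doc bases are 0, 5, 8; global numbers run 0..11.
        var segments = List.of(new Segment(0, 5), new Segment(5, 3), new Segment(8, 4));
        readFrom(segments, 6); // resume after a cursor at global doc 6: reading restarts at 7
    }
}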
File: RfsDocument.java
@@ -13,27 +13,15 @@
*/
@AllArgsConstructor
public class RfsDocument {
// The Lucene segment identifier of the document
public final int luceneSegId;

// The Lucene document identifier of the document
public final int luceneDocId;

// The original ElasticSearch/OpenSearch Index the document was in
public final String indexName;

// The original ElasticSearch/OpenSearch shard the document was in
public final int shardNumber;
// The Lucene index doc number of the document (global over shard / lucene-index)
public final int luceneDocNumber;

// The Elasticsearch/OpenSearch document to be reindexed
public final BulkDocSection document;

public static RfsDocument fromLuceneDocument(RfsLuceneDocument doc, String indexName, int shardNumber) {
public static RfsDocument fromLuceneDocument(RfsLuceneDocument doc, String indexName) {
return new RfsDocument(
doc.luceneSegId,
doc.luceneDocId,
indexName,
shardNumber,
doc.luceneDocNumber,
new BulkDocSection(
doc.id,
indexName,
@@ -46,10 +34,7 @@ public static RfsDocument fromLuceneDocument(RfsLuceneDocument doc, String index

public static RfsDocument transform(Function<Map<String, Object>, Map<String, Object>> transformer, RfsDocument doc) {
return new RfsDocument(
doc.luceneSegId,
doc.luceneDocId,
doc.indexName,
doc.shardNumber,
doc.luceneDocNumber,
BulkDocSection.fromMap(transformer.apply(doc.document.toMap()))
);
}
File: RfsLuceneDocument.java
@@ -8,11 +8,8 @@
*/
@RequiredArgsConstructor
public class RfsLuceneDocument {
// The Lucene segment identifier of the document
public final int luceneSegId;

// The Lucene document identifier of the document
public final int luceneDocId;
// The Lucene document number of the document
public final int luceneDocNumber;

// The Elasticsearch/OpenSearch document identifier (_id) of the document
public final String id;