
Commit

Grammar fixes in comments (#14100)
viliam-durina authored Jan 16, 2025
1 parent 16da44a commit cad76cc
Showing 23 changed files with 45 additions and 48 deletions.
@@ -223,7 +223,7 @@ public FlatFieldVectorsWriter<?> addField(FieldInfo fieldInfo) throws IOException
public void mergeOneField(FieldInfo fieldInfo, MergeState mergeState) throws IOException {
rawVectorDelegate.mergeOneField(fieldInfo, mergeState);
// Since we know we will not be searching for additional indexing, we can just write the
- // the vectors directly to the new segment.
+ // vectors directly to the new segment.
// No need to use temporary file as we don't have to re-open for reading
if (fieldInfo.getVectorEncoding().equals(VectorEncoding.FLOAT32)) {
ScalarQuantizer mergedQuantizationState =
@@ -32,8 +32,8 @@ static CompositeReaderContext create(CompositeReader reader) {
}

/**
- * Creates a {@link CompositeReaderContext} for intermediate readers that aren't not top-level
- * readers in the current context
+ * Creates a {@link CompositeReaderContext} for intermediate readers that aren't top-level readers
+ * in the current context
*/
CompositeReaderContext(
CompositeReaderContext parent,
@@ -124,7 +124,7 @@ public static DirectoryReader open(final IndexCommit commit) throws IOException

/**
* Expert: returns an IndexReader reading the index on the given {@link IndexCommit}. This method
- * allows to open indices that were created wih a Lucene version older than N-1 provided that all
+ * allows to open indices that were created with a Lucene version older than N-1 provided that all
* codecs for this index are available in the classpath and the segment file format used was
* created with Lucene 7 or newer. Users of this API must be aware that Lucene doesn't guarantee
* semantic compatibility for indices created with versions older than N-1. All backwards
@@ -150,8 +150,7 @@ public static DirectoryReader open(
/**
* If the index has changed since the provided reader was opened, open and return a new reader;
* else, return null. The new reader, if not null, will be the same type of reader as the previous
- * one, ie an NRT reader will open a new NRT reader, a MultiReader will open a new MultiReader,
- * etc.
+ * one, ie an NRT reader will open a new NRT reader etc.
*
* <p>This method is typically far less costly than opening a fully new <code>DirectoryReader
* </code> as it shares resources (for example sub-readers) with the provided <code>
@@ -192,7 +191,7 @@ public static DirectoryReader openIfChanged(DirectoryReader oldReader, IndexCommit
* never returns null).
*
* <p>This provides "near real-time" searching, in that changes made during an {@link IndexWriter}
- * session can be quickly made available for searching without closing the writer nor calling
+ * session can be quickly made available for searching without closing the writer or calling
* {@link IndexWriter#commit}.
*
* <p>It's <i>near</i> real-time because there is no hard guarantee on how quickly you can get a
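
A minimal sketch of the reopen pattern this javadoc describes, assuming an existing IndexWriter writer (names illustrative):

    DirectoryReader reader = DirectoryReader.open(writer); // NRT reader
    // ... writer indexes or deletes more documents ...
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    if (newReader != null) { // null means the index did not change
      reader.close();
      reader = newReader; // same type of reader as before, here an NRT reader
    }
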
@@ -486,7 +486,7 @@ private void doFlush(DocumentsWriterPerThread flushingDWPT) throws IOException {
* flush 'B' starts and freezes all deletes occurred since 'A' has
* started. if 'B' finishes before 'A' we need to wait until 'A' is done
* otherwise the deletes frozen by 'B' are not applied to 'A' and we
- * might miss to deletes documents in 'A'.
+ * might miss to delete documents in 'A'.
*/
try {
assert assertTicketQueueModification(flushingDWPT.deleteQueue);
@@ -216,7 +216,7 @@ && delta < ramBufferGranularity()) {
// we need to commit this under lock but calculate it outside of the lock to minimize the time
// this lock is held
// per document. The reason we update this under lock is that we mark DWPTs as pending without
- // acquiring it's
+ // acquiring its
// lock in #setFlushPending and this also reads the committed bytes and modifies the
// flush/activeBytes.
// In the future we can clean this up to be more intuitive.
@@ -736,7 +736,7 @@ long getLastCommittedBytesUsed() {
}

/**
- * Commits the current {@link #ramBytesUsed()} and stores it's value for later reuse. The last
+ * Commits the current {@link #ramBytesUsed()} and stores its value for later reuse. The last
* committed bytes used can be retrieved via {@link #getLastCommittedBytesUsed()}
*/
void commitLastBytesUsed(long delta) {
@@ -19,7 +19,7 @@
import java.util.List;

/**
- * A struct like class that represents a hierarchical relationship between {@link IndexReader}
+ * A struct-like class that represents a hierarchical relationship between {@link IndexReader}
* instances.
*/
public abstract sealed class IndexReaderContext permits CompositeReaderContext, LeafReaderContext {
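
As a hedged illustration of this hierarchy, the context of any IndexReader exposes its leaf contexts (reader is an assumed existing reader):

    IndexReaderContext top = reader.getContext();
    for (LeafReaderContext leaf : top.leaves()) { // leaves of the reader tree
      System.out.println(leaf.ord + ": " + leaf.reader().maxDoc() + " docs");
    }
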
4 changes: 2 additions & 2 deletions lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -2979,7 +2979,7 @@ private List<Lock> acquireWriteLocks(Directory... dirs) throws IOException {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @throws IllegalArgumentException if addIndexes would cause the index to exceed {@link
- *     #MAX_DOCS}, or if the indoming index sort does not match this index's index sort
+ *     #MAX_DOCS}, or if the incoming index sort does not match this index's index sort
*/
public long addIndexes(Directory... dirs) throws IOException {
ensureOpen();
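
A minimal sketch of the call documented above; targetDir, srcDir1, srcDir2 (Directory) and analyzer are assumed to already exist:

    try (IndexWriter writer = new IndexWriter(targetDir, new IndexWriterConfig(analyzer))) {
      writer.addIndexes(srcDir1, srcDir2); // may throw IllegalArgumentException, see above
      writer.commit();
    }
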
@@ -6029,7 +6029,7 @@ private void processEvents(boolean triggerMerge) throws IOException {
/**
* Interface for internal atomic events. See {@link DocumentsWriter} for details. Events are
* executed concurrently and no order is guaranteed. Each event should only rely on the
- * serializeability within its process method. All actions that must happen before or after a
+ * serializability within its process method. All actions that must happen before or after a
* certain action must be encoded inside the {@link #process(IndexWriter)} method.
*/
@FunctionalInterface
4 changes: 2 additions & 2 deletions lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
@@ -756,7 +756,7 @@ public boolean useCompoundFile(

/**
* Return the byte size of the provided {@link SegmentCommitInfo}, prorated by percentage of
- * non-deleted documents is set.
+ * non-deleted documents.
*/
protected long size(SegmentCommitInfo info, MergeContext mergeContext) throws IOException {
long byteSize = info.sizeInBytes();
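
A hedged sketch of the prorating described above (not the verbatim method body): scale the on-disk size by the fraction of live documents.

    int delCount = mergeContext.numDeletesToMerge(info);
    double delRatio =
        info.info.maxDoc() <= 0 ? 0.0 : (double) delCount / info.info.maxDoc();
    return (long) (byteSize * (1.0 - delRatio)); // prorated by non-deleted documents
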
@@ -838,7 +838,7 @@ public void setMaxCFSSegmentSizeMB(double v) {
}

/**
- * Returns true if the segment represented by the given CodecReader should be keep even if it's
+ * Returns true if the segment represented by the given CodecReader should be kept even if it's
* fully deleted. This is useful for testing of for instance if the merge policy implements
* retention policies for soft deletes.
*/
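
An illustrative override of this hook in a custom MergePolicy (signature per recent Lucene; treat as a sketch): keep every fully deleted segment, e.g. while testing soft-deletes retention.

    @Override
    public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier)
        throws IOException {
      return true; // never drop fully deleted segments
    }
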
@@ -273,7 +273,7 @@ boolean writeDocValuesUpdatesForMerge(List<SegmentCommitInfo> infos) throws IOException
}

/**
- * Returns a list of all currently maintained ReadersAndUpdates sorted by it's ram consumption
+ * Returns a list of all currently maintained ReadersAndUpdates sorted by their ram consumption
* largest to smallest. This list can also contain readers that don't consume any ram at this
* point i.e. don't have any updates buffered.
*/
@@ -48,7 +48,7 @@ public final class SegmentReader extends CodecReader {

private final SegmentCommitInfo si;
// this is the original SI that IW uses internally but it's mutated behind the scenes
- // and we don't want this SI to be used for anything. Yet, IW needs this to do maintainance
+ // and we don't want this SI to be used for anything. Yet, IW needs this to do maintenance
// and lookup pooled readers etc.
private final SegmentCommitInfo originalSi;
private final LeafMetaData metaData;
@@ -32,9 +32,9 @@
import org.apache.lucene.util.FixedBitSet;

/**
- * This reader filters out documents that have a doc values value in the given field and treat these
- * documents as soft deleted. Hard deleted documents will also be filtered out in the life docs of
- * this reader.
+ * This reader filters out documents that have a doc-values value in the given field and treats
+ * these documents as soft-deleted. Hard deleted documents will also be filtered out in the live
+ * docs of this reader.
*
* @see IndexWriterConfig#setSoftDeletesField(String)
* @see IndexWriter#softUpdateDocument(Term, Iterable, Field...)
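
A minimal usage sketch; the field name "soft_deletes" is illustrative and must match the one configured via IndexWriterConfig#setSoftDeletesField:

    DirectoryReader reader =
        new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(dir), "soft_deletes");
    // documents with a value in "soft_deletes" are now hidden via the live docs
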
@@ -68,7 +68,7 @@ private SoftDeletesDirectoryReaderWrapper(DirectoryReader in, SoftDeletesSubRead
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
Map<CacheKey, LeafReader> readerCache = new HashMap<>();
for (LeafReader reader : getSequentialSubReaders()) {
- // we try to reuse the life docs instances here if the reader cache key didn't change
+ // we try to reuse the live docs instances here if the reader cache key didn't change
if (reader instanceof SoftDeletesFilterLeafReader && reader.getReaderCacheHelper() != null) {
readerCache.put(
((SoftDeletesFilterLeafReader) reader).reader.getReaderCacheHelper().getKey(), reader);
@@ -63,7 +63,7 @@ public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException {}
/** Process a string field. */
public void stringField(FieldInfo fieldInfo, String value) throws IOException {}

- /** Process a int numeric field. */
+ /** Process an int numeric field. */
public void intField(FieldInfo fieldInfo, int value) throws IOException {}

/** Process a long numeric field. */
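
An illustrative visitor built on the callbacks above: accept every field, but only act on string values.

    StoredFieldVisitor visitor =
        new StoredFieldVisitor() {
          @Override
          public Status needsField(FieldInfo fieldInfo) {
            return Status.YES; // visit every field
          }

          @Override
          public void stringField(FieldInfo fieldInfo, String value) {
            System.out.println(fieldInfo.name + " = " + value);
          }
        };
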
@@ -25,7 +25,7 @@
/**
* A {@link Collector} which allows running a search with several {@link Collector}s. It offers a
* static {@link #wrap} method which accepts a list of collectors and wraps them with {@link
- * MultiCollector}, while filtering out the <code>null</code> null ones.
+ * MultiCollector}, while filtering out the <code>null</code> ones.
*
* <p><b>NOTE:</b>When mixing collectors that want to skip low-scoring hits ({@link
* ScoreMode#TOP_SCORES}) with ones that require to see all hits, such as mixing {@link
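
A sketch of the wrap call described above, assuming two existing collectors; the null entry is simply filtered out:

    Collector combined = MultiCollector.wrap(firstCollector, null, secondCollector);
    searcher.search(query, combined);
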
@@ -22,8 +22,8 @@
import java.util.List;

/**
- * A {@link CollectorManager} implements which wrap a set of {@link CollectorManager} as {@link
- * MultiCollector} acts for {@link Collector}.
+ * A composite {@link CollectorManager} which wraps a set of {@link CollectorManager} instances,
+ * akin to how {@link MultiCollector} wraps {@link Collector} instances.
*/
public class MultiCollectorManager implements CollectorManager<Collector, Object[]> {
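
A hedged usage sketch, assuming countManager and topDocsManager are existing CollectorManager instances:

    MultiCollectorManager mcm = new MultiCollectorManager(countManager, topDocsManager);
    Object[] results = searcher.search(query, mcm);
    // results[i] is the reduced result of the i-th wrapped manager
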

@@ -56,21 +56,21 @@ public Collector newCollector() throws IOException {
}

@Override
- public Object[] reduce(Collection<Collector> reducableCollectors) throws IOException {
-   final int size = reducableCollectors.size();
+ public Object[] reduce(Collection<Collector> reducibleCollectors) throws IOException {
+   final int size = reducibleCollectors.size();
final Object[] results = new Object[collectorManagers.length];
for (int i = 0; i < collectorManagers.length; i++) {
-   final List<Collector> reducableCollector = new ArrayList<>(size);
-   for (Collector collector : reducableCollectors) {
+   final List<Collector> reducibleCollector = new ArrayList<>(size);
+   for (Collector collector : reducibleCollectors) {
// MultiCollector will not actually wrap the collector if only one is provided, so we
// check the instance type here:
if (collector instanceof MultiCollector) {
-     reducableCollector.add(((MultiCollector) collector).getCollectors()[i]);
+     reducibleCollector.add(((MultiCollector) collector).getCollectors()[i]);
} else {
-     reducableCollector.add(collector);
+     reducibleCollector.add(collector);
}
}
-   results[i] = collectorManagers[i].reduce(reducableCollector);
+   results[i] = collectorManagers[i].reduce(reducibleCollector);
}
return results;
}
@@ -101,8 +101,8 @@ public final G acquire() throws IOException {
if (getRefCount(ref) == 0 && current == ref) {
assert ref != null;
/* if we can't increment the reader but we are
-   still the current reference the RM is in a
-   illegal states since we can't make any progress
+   still the current reference the RM is in an
+   illegal state since we can't make any progress
anymore. The reference is closed but the RM still
holds on to it as the actual instance.
This can only happen if somebody outside of the RM
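
The acquire/release contract that this invariant protects, in its canonical form (sketch, shown with an assumed SearcherManager):

    IndexSearcher searcher = searcherManager.acquire();
    try {
      // search with the acquired searcher
    } finally {
      searcherManager.release(searcher); // never close the searcher directly
    }
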
@@ -349,7 +349,7 @@ private void privateDeleteFile(String name, boolean isPendingDelete) throws IOException
// a WindowsFSDirectory ...
// LUCENE-6684: we suppress this check for Windows, since a file could be in a confusing
// "pending delete" state, failing the first
- // delete attempt with access denied and then apparently falsely failing here when we try ot
+ // delete attempt with access denied and then apparently falsely failing here when we try to
// delete it again, with NSFE/FNFE
} else {
throw e;
@@ -21,7 +21,7 @@
import org.apache.lucene.internal.tests.TestSecrets;

/**
- * IndexInput implementation that delegates calls to another directory. This class can be used to
+ * IndexInput implementation that delegates calls to another IndexInput. This class can be used to
* add limitations on top of an existing {@link IndexInput} implementation or to add additional
* sanity checks for tests. However, if you plan to write your own {@link IndexInput}
* implementation, you should consider extending directly {@link IndexInput} or {@link DataInput}
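
An illustrative subclass of the delegation described above; a production version must also handle clone() and slice() carefully:

    class CountingIndexInput extends FilterIndexInput {
      long bytesRead;

      CountingIndexInput(IndexInput in) {
        super("counting(" + in + ")", in);
      }

      @Override
      public byte readByte() throws IOException {
        bytesRead++; // count each byte served to the caller
        return in.readByte();
      }
    }
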
4 changes: 1 addition & 3 deletions lucene/core/src/java/org/apache/lucene/store/IOContext.java
@@ -34,9 +34,7 @@
public record IOContext(
Context context, MergeInfo mergeInfo, FlushInfo flushInfo, ReadAdvice readAdvice) {

- /**
-  * Context is a enumerator which specifies the context in which the Directory is being used for.
-  */
+ /** Context is an enumerator which specifies the context in which the Directory is being used. */
public enum Context {
/** Context for reads and writes that are associated with a merge. */
MERGE,
@@ -56,8 +56,8 @@
* </pre>
*
* <p>This will cache all newly flushed segments, all merges whose expected segment size is {@code
- * <= 5 MB}, unless the net cached bytes exceeds 60 MB at which point all writes will not be cached
- * (until the net bytes falls below 60 MB).
+ * <= 5 MB}, unless the net cached bytes exceed 60 MB at which point all writes will not be cached
+ * (until the net bytes fall below 60 MB).
*
* @lucene.experimental
*/
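
A configuration sketch matching the numbers in this javadoc (5.0 MB max merge size, 60.0 MB max cached bytes); path is an assumed index path:

    Directory dir = new NRTCachingDirectory(FSDirectory.open(path), 5.0, 60.0);
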
@@ -31,7 +31,7 @@ public final class RateLimitedIndexOutput extends FilterIndexOutput {
private long bytesSinceLastPause;

/**
- * Cached here not not always have to call RateLimiter#getMinPauseCheckBytes() which does volatile
+ * Cached here to not always have to call RateLimiter#getMinPauseCheckBytes() which does volatile
* read.
*/
private long currentMinPauseCheckBytes;
@@ -98,7 +98,7 @@ public float dotProduct(float[] a, float[] b) {
int i = 0;
float res = 0;

- // if the array size is large (> 2x platform vector size), its worth the overhead to vectorize
+ // if the array size is large (> 2x platform vector size), it's worth the overhead to vectorize
if (a.length > 2 * FLOAT_SPECIES.length()) {
i += FLOAT_SPECIES.loopBound(a.length);
res += dotProductBody(a, b, i);
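
A hedged sketch of the threshold-plus-scalar-tail pattern this comment describes, using the incubating jdk.incubator.vector API (standalone illustration compiled with --add-modules jdk.incubator.vector, not Lucene's implementation):

    import jdk.incubator.vector.FloatVector;
    import jdk.incubator.vector.VectorOperators;
    import jdk.incubator.vector.VectorSpecies;

    class DotProductSketch {
      static final VectorSpecies<Float> SPECIES = FloatVector.SPECIES_PREFERRED;

      static float dot(float[] a, float[] b) {
        int i = 0;
        float res = 0;
        if (a.length > 2 * SPECIES.length()) { // vectorize only when worthwhile
          FloatVector acc = FloatVector.zero(SPECIES);
          int bound = SPECIES.loopBound(a.length);
          for (; i < bound; i += SPECIES.length()) {
            FloatVector va = FloatVector.fromArray(SPECIES, a, i);
            FloatVector vb = FloatVector.fromArray(SPECIES, b, i);
            acc = acc.add(va.mul(vb));
          }
          res = acc.reduceLanes(VectorOperators.ADD); // horizontal sum
        }
        for (; i < a.length; i++) { // scalar tail for the remainder
          res += a[i] * b[i];
        }
        return res;
      }
    }
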
@@ -161,7 +161,7 @@ public float cosine(float[] a, float[] b) {
float norm1 = 0;
float norm2 = 0;

- // if the array size is large (> 2x platform vector size), its worth the overhead to vectorize
+ // if the array size is large (> 2x platform vector size), it's worth the overhead to vectorize
if (a.length > 2 * FLOAT_SPECIES.length()) {
i += FLOAT_SPECIES.loopBound(a.length);
float[] ret = cosineBody(a, b, i);
@@ -226,7 +226,7 @@ public float squareDistance(float[] a, float[] b) {
int i = 0;
float res = 0;

- // if the array size is large (> 2x platform vector size), its worth the overhead to vectorize
+ // if the array size is large (> 2x platform vector size), it's worth the overhead to vectorize
if (a.length > 2 * FLOAT_SPECIES.length()) {
i += FLOAT_SPECIES.loopBound(a.length);
res += squareDistanceBody(a, b, i);
@@ -148,7 +148,7 @@ protected void ensureOpen() throws AlreadyClosedException {

/**
* Determines if direct IO should be used for a file. By default this tests if it is a merge
- * context and if the merge or file length extends the minimum size (see {@link
+ * context and if the merge or file length exceeds the minimum size (see {@link
* #DEFAULT_MIN_BYTES_DIRECT}). Subclasses may override method to enforce direct IO for specific
* file types.
*
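
An illustrative override of the hook described here (signature per recent Lucene; sketch only): use direct IO for every merge, regardless of file size.

    @Override
    protected boolean useDirectIO(String name, IOContext context, OptionalLong fileLength) {
      return context.context() == IOContext.Context.MERGE; // all merges, any size
    }
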
@@ -213,8 +213,8 @@ private static final class DirectIOIndexOutput extends IndexOutput {
* bypassing OS buffer
*
* @throws UnsupportedOperationException if the JDK does not support Direct I/O
- * @throws IOException if the operating system or filesystem does not support support Direct I/O
- *     or a sufficient equivalent.
+ * @throws IOException if the operating system or filesystem does not support Direct I/O or a
+ *     sufficient equivalent.
*/
public DirectIOIndexOutput(Path path, String name, int blockSize, int bufferSize)
throws IOException {
