-
Notifications
You must be signed in to change notification settings - Fork 141
/
MiniSearch.ts
1954 lines (1713 loc) · 65.6 KB
/
MiniSearch.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import SearchableMap from './SearchableMap/SearchableMap'
// Combination operators used to merge per-term partial results
// (see the `combineWith` search option)
const OR = 'or'
const AND = 'and'
const AND_NOT = 'and_not'
/**
 * Search options to customize the search behavior.
 */
export type SearchOptions = {
  /**
   * Names of the fields to search in. If omitted, all fields are searched.
   */
  fields?: string[],

  /**
   * Function used to filter search results, for example on the basis of stored
   * fields. It takes as argument each search result and should return a boolean
   * to indicate if the result should be kept or not.
   */
  filter?: (result: SearchResult) => boolean,

  /**
   * Key-value object of field names to boosting values. By default, fields are
   * assigned a boosting factor of 1. If one assigns to a field a boosting value
   * of 2, a result that matches the query in that field is assigned a score
   * twice as high as a result matching the query in another field, all else
   * being equal.
   */
  boost?: { [fieldName: string]: number },

  /**
   * Relative weights to assign to prefix search results and fuzzy search
   * results. Exact matches are assigned a weight of 1.
   */
  weights?: { fuzzy: number, prefix: number },

  /**
   * Function to calculate a boost factor for documents. It takes as arguments
   * the document ID, and a term that matches the search in that document, and
   * the value of the stored fields for the document (if any). It should return
   * a boosting factor: a number higher than 1 increases the computed score, a
   * number lower than 1 decreases the score, and a falsy value skips the search
   * result completely.
   */
  boostDocument?: (documentId: any, term: string, storedFields?: Record<string, unknown>) => number,

  /**
   * Controls whether to perform prefix search. It can be a simple boolean, or a
   * function.
   *
   * If a boolean is passed, prefix search is performed if true.
   *
   * If a function is passed, it is called upon search with a search term, the
   * positional index of that search term in the tokenized search query, and the
   * tokenized search query. The function should return a boolean to indicate
   * whether to perform prefix search for that search term.
   */
  prefix?: boolean | ((term: string, index: number, terms: string[]) => boolean),

  /**
   * Controls whether to perform fuzzy search. It can be a simple boolean, or a
   * number, or a function.
   *
   * If a boolean is given, fuzzy search with a default fuzziness parameter is
   * performed if true.
   *
   * If a number higher or equal to 1 is given, fuzzy search is performed, with
   * a maximum edit distance (Levenshtein) equal to the number.
   *
   * If a number between 0 and 1 is given, fuzzy search is performed within a
   * maximum edit distance corresponding to that fraction of the term length,
   * approximated to the nearest integer. For example, 0.2 would mean an edit
   * distance of 20% of the term length, so 1 character in a 5-characters term.
   * The calculated fuzziness value is limited by the `maxFuzzy` option, to
   * prevent slowdown for very long queries.
   *
   * If a function is passed, the function is called upon search with a search
   * term, a positional index of that term in the tokenized search query, and
   * the tokenized search query. It should return a boolean or a number, with
   * the meaning documented above.
   */
  fuzzy?: boolean | number | ((term: string, index: number, terms: string[]) => boolean | number),

  /**
   * Controls the maximum fuzziness when using a fractional fuzzy value. This is
   * set to 6 by default. Very high edit distances usually don't produce
   * meaningful results, but can excessively impact search performance.
   */
  maxFuzzy?: number,

  /**
   * The operand to combine partial results for each term. By default it is
   * "OR", so results matching _any_ of the search terms are returned by a
   * search. If "AND" is given, only results matching _all_ the search terms are
   * returned by a search.
   */
  // NOTE(review): typed as plain `string`, but the expected values appear to
  // be the OR / AND / AND_NOT constants defined at the top of this file —
  // confirm against the search implementation before narrowing the type.
  combineWith?: string,

  /**
   * Function to tokenize the search query. By default, the same tokenizer used
   * for indexing is used also for search.
   */
  tokenize?: (text: string) => string[],

  /**
   * Function to process or normalize terms in the search query. By default, the
   * same term processor used for indexing is used also for search.
   */
  processTerm?: (term: string) => string | string[] | null | undefined | false

  /**
   * BM25+ algorithm parameters. Customizing these is almost never necessary,
   * and finetuning them requires an understanding of the BM25 scoring model. In
   * most cases, it is best to omit this option to use defaults, and instead use
   * boosting to tweak scoring for specific use cases.
   */
  bm25?: BM25Params
}
/**
 * [[SearchOptions]] with the defaulted members made required, as used
 * internally after user-provided options are merged with the defaults.
 */
type SearchOptionsWithDefaults = SearchOptions & {
  boost: { [fieldName: string]: number },
  weights: { fuzzy: number, prefix: number },
  prefix: boolean | ((term: string, index: number, terms: string[]) => boolean),
  fuzzy: boolean | number | ((term: string, index: number, terms: string[]) => boolean | number),
  maxFuzzy: number,
  combineWith: string
  bm25: BM25Params
}
/**
 * Configuration options passed to the [[MiniSearch]] constructor
 *
 * @typeParam T The type of documents being indexed.
 */
export type Options<T = any> = {
  /**
   * Names of the document fields to be indexed.
   */
  fields: string[],

  /**
   * Name of the ID field, uniquely identifying a document.
   */
  idField?: string,

  /**
   * Names of fields to store, so that search results would include them. By
   * default none, so results would only contain the id field.
   */
  storeFields?: string[],

  /**
   * Function used to extract the value of each field in documents. By default,
   * the documents are assumed to be plain objects with field names as keys,
   * but by specifying a custom `extractField` function one can completely
   * customize how the fields are extracted.
   *
   * The function takes as arguments the document, and the name of the field to
   * extract from it. It should return the field value as a string.
   */
  extractField?: (document: T, fieldName: string) => string,

  /**
   * Function used to split a field value into individual terms to be indexed.
   * The default tokenizer separates terms by space or punctuation, but a
   * custom tokenizer can be provided for custom logic.
   *
   * The function takes as arguments string to tokenize, and the name of the
   * field it comes from. It should return the terms as an array of strings.
   * When used for tokenizing a search query instead of a document field, the
   * `fieldName` is undefined.
   */
  tokenize?: (text: string, fieldName?: string) => string[],

  /**
   * Function used to process a term before indexing or search. This can be
   * used for normalization (such as stemming). By default, terms are
   * downcased, and otherwise no other normalization is performed.
   *
   * The function takes as arguments a term to process, and the name of the
   * field it comes from. It should return the processed term as a string, or a
   * falsy value to reject the term entirely.
   *
   * It can also return an array of strings, in which case each string in the
   * returned array is indexed as a separate term.
   */
  processTerm?: (term: string, fieldName?: string) => string | string[] | null | undefined | false,

  /**
   * Function called to log messages. Arguments are a log level ('debug',
   * 'info', 'warn', or 'error'), a log message, and an optional string code
   * that identifies the reason for the log.
   *
   * The default implementation uses `console`, if defined.
   */
  logger?: (level: LogLevel, message: string, code?: string) => void

  /**
   * If `true` (the default), vacuuming is performed automatically as soon as
   * [[MiniSearch.discard]] is called a certain number of times, cleaning up
   * obsolete references from the index. If `false`, no automatic vacuuming is
   * performed. Custom settings controlling auto vacuuming thresholds, as well
   * as batching behavior, can be passed as an object (see the
   * [[AutoVacuumOptions]] type).
   */
  autoVacuum?: boolean | AutoVacuumOptions

  /**
   * Default search options (see the [[SearchOptions]] type and the
   * [[MiniSearch.search]] method for details)
   */
  searchOptions?: SearchOptions,

  /**
   * Default auto suggest options (see the [[SearchOptions]] type and the
   * [[MiniSearch.autoSuggest]] method for details)
   */
  autoSuggestOptions?: SearchOptions
}
/**
 * [[Options]] with the defaulted members made required, as stored in
 * `_options` after the constructor merges user options with the defaults.
 */
type OptionsWithDefaults<T = any> = Options<T> & {
  storeFields: string[]
  idField: string
  extractField: (document: T, fieldName: string) => string
  tokenize: (text: string, fieldName: string) => string[]
  processTerm: (term: string, fieldName: string) => string | string[] | null | undefined | false
  logger: (level: LogLevel, message: string, code?: string) => void
  autoVacuum: false | AutoVacuumOptions
  searchOptions: SearchOptionsWithDefaults
  autoSuggestOptions: SearchOptions
}
// Severity levels accepted by the `logger` option
type LogLevel = 'debug' | 'info' | 'warn' | 'error'

/**
 * The type of auto-suggestions
 */
export type Suggestion = {
  /**
   * The suggestion
   */
  suggestion: string,

  /**
   * Suggestion as an array of terms
   */
  terms: string[],

  /**
   * Score for the suggestion
   */
  score: number
}

/**
 * Match information for a search result. It is a key-value object where keys
 * are terms that matched, and values are the list of fields that the term was
 * found in.
 *
 * For example: `{ zen: ['title', 'text'], art: ['title'] }`
 */
export type MatchInfo = {
  [term: string]: string[]
}

/**
 * Type of the search results. Each search result indicates the document ID, the
 * terms that matched, the match information, the score, and all the stored
 * fields.
 */
export type SearchResult = {
  /**
   * The document ID
   */
  id: any,

  /**
   * List of terms that matched
   */
  terms: string[],

  /**
   * Score of the search results
   */
  score: number,

  /**
   * Match information, see [[MatchInfo]]
   */
  match: MatchInfo,

  /**
   * Stored fields (one property per field listed in the `storeFields` option)
   */
  [key: string]: any
}

/**
 * Plain-object representation of the index internals; the field names mirror
 * the corresponding private fields of [[MiniSearch]].
 *
 * @ignore
 */
export type AsPlainObject = {
  documentCount: number,
  nextId: number,
  documentIds: { [shortId: string]: any }
  fieldIds: { [fieldName: string]: number }
  fieldLength: { [shortId: string]: number[] }
  averageFieldLength: number[],
  storedFields: { [shortId: string]: any }
  dirtCount?: number,
  index: [string, { [fieldId: string]: SerializedIndexEntry }][]
  serializationVersion: number
}
/**
 * A combination of sub-queries, merged according to the given search options
 * (in particular `combineWith`)
 */
export type QueryCombination = SearchOptions & { queries: Query[] }

/**
 * Search query expression, either a query string or an expression tree
 * combining several queries with a combination of AND or OR.
 */
export type Query = QueryCombination | string
/**
 * Options to control vacuuming behavior.
 *
 * Vacuuming cleans up document references made obsolete by
 * [[MiniSearch.discard]] from the index. On large indexes, vacuuming is
 * potentially costly, because it has to traverse the whole inverted index.
 * Therefore, in order to dilute this cost so it does not negatively affects the
 * application, vacuuming is performed in batches, with a delay between each
 * batch. These options are used to configure the batch size and the delay
 * between batches.
 */
export type VacuumOptions = {
  /**
   * Size of each vacuuming batch (the number of terms in the index that will be
   * traversed in each batch). Defaults to 1000.
   */
  batchSize?: number,

  /**
   * Wait time between each vacuuming batch in milliseconds. Defaults to 10.
   */
  batchWait?: number
}

/**
 * Sets minimum thresholds for `dirtCount` and `dirtFactor` that trigger an
 * automatic vacuuming.
 */
export type VacuumConditions = {
  /**
   * Minimum `dirtCount` (number of discarded documents since the last vacuuming)
   * under which auto vacuum is not triggered. It defaults to 20.
   */
  minDirtCount?: number

  /**
   * Minimum `dirtFactor` (proportion of discarded documents over the total)
   * under which auto vacuum is not triggered. It defaults to 0.1.
   */
  minDirtFactor?: number,
}

/**
 * Options to control auto vacuum behavior. When discarding a document with
 * [[MiniSearch.discard]], a vacuuming operation is automatically started if the
 * `dirtCount` and `dirtFactor` are above the `minDirtCount` and `minDirtFactor`
 * thresholds defined by this configuration. See [[VacuumConditions]] for
 * details on these.
 *
 * Also, `batchSize` and `batchWait` can be specified, controlling batching
 * behavior (see [[VacuumOptions]]).
 */
export type AutoVacuumOptions = VacuumOptions & VacuumConditions
// A single parsed query term, with its per-term search settings
type QuerySpec = {
  prefix: boolean,
  fuzzy: number | boolean,
  term: string
}

// Postings for one term in one field: short document ID -> term frequency
type DocumentTermFreqs = Map<number, number>

// Inverted index entry for a term: field ID -> postings for that field
type FieldTermData = Map<number, DocumentTermFreqs>

// Intermediate per-document result accumulated while searching
interface RawResultValue {
  // Intermediate score, before applying the final score based on number of
  // matched terms.
  score: number,

  // Set of all query terms that were matched. They may not be present in the
  // text exactly in the case of prefix/fuzzy matches. We must check for
  // uniqueness before adding a new term. This is much faster than using a set,
  // because the number of elements is relatively small.
  terms: string[],

  // All terms that were found in the content, including the fields in which
  // they were present. This object will be provided as part of the final search
  // results.
  match: MatchInfo,
}

// Raw search results, keyed by short (internal) document ID
type RawResult = Map<number, RawResultValue>
/**
* [[MiniSearch]] is the main entrypoint class, implementing a full-text search
* engine in memory.
*
* @typeParam T The type of the documents being indexed.
*
* ### Basic example:
*
* ```javascript
* const documents = [
* {
* id: 1,
* title: 'Moby Dick',
* text: 'Call me Ishmael. Some years ago...',
* category: 'fiction'
* },
* {
* id: 2,
* title: 'Zen and the Art of Motorcycle Maintenance',
* text: 'I can see by my watch...',
* category: 'fiction'
* },
* {
* id: 3,
* title: 'Neuromancer',
* text: 'The sky above the port was...',
* category: 'fiction'
* },
* {
* id: 4,
* title: 'Zen and the Art of Archery',
* text: 'At first sight it must seem...',
* category: 'non-fiction'
* },
* // ...and more
* ]
*
* // Create a search engine that indexes the 'title' and 'text' fields for
* // full-text search. Search results will include 'title' and 'category' (plus the
* // id field, that is always stored and returned)
* const miniSearch = new MiniSearch({
* fields: ['title', 'text'],
* storeFields: ['title', 'category']
* })
*
* // Add documents to the index
* miniSearch.addAll(documents)
*
* // Search for documents:
* let results = miniSearch.search('zen art motorcycle')
* // => [
* // { id: 2, title: 'Zen and the Art of Motorcycle Maintenance', category: 'fiction', score: 2.77258 },
* // { id: 4, title: 'Zen and the Art of Archery', category: 'non-fiction', score: 1.38629 }
* // ]
* ```
*/
export default class MiniSearch<T = any> {
  // Fully-defaulted configuration, built by the constructor
  protected _options: OptionsWithDefaults<T>

  // Inverted index: term -> field ID -> postings (see FieldTermData)
  protected _index: SearchableMap<FieldTermData>

  // Number of documents currently in the index
  protected _documentCount: number

  // Short (internal, numeric) document ID -> original document ID
  protected _documentIds: Map<number, any>

  // Original document ID -> short internal ID (inverse of _documentIds)
  protected _idToShortId: Map<any, number>

  // Field name -> numeric field ID (an object, not a Map: see constructor)
  protected _fieldIds: { [key: string]: number }

  // Short document ID -> field lengths, indexed by field ID (may be sparse)
  protected _fieldLength: Map<number, number[]>

  // Average field length, indexed by field ID
  protected _avgFieldLength: number[]

  // Next short document ID to be assigned
  protected _nextId: number

  // Short document ID -> stored fields returned with search results
  protected _storedFields: Map<number, Record<string, unknown>>

  // Number of documents discarded since the last vacuuming
  protected _dirtCount: number

  // Vacuuming currently in progress, if any
  private _currentVacuum: Promise<void> | null

  // At most one additional vacuuming enqueued behind the current one
  private _enqueuedVacuum: Promise<void> | null
  private _enqueuedVacuumConditions: VacuumConditions | undefined
/**
* @param options Configuration options
*
* ### Examples:
*
* ```javascript
* // Create a search engine that indexes the 'title' and 'text' fields of your
* // documents:
* const miniSearch = new MiniSearch({ fields: ['title', 'text'] })
* ```
*
* ### ID Field:
*
* ```javascript
* // Your documents are assumed to include a unique 'id' field, but if you want
* // to use a different field for document identification, you can set the
* // 'idField' option:
* const miniSearch = new MiniSearch({ idField: 'key', fields: ['title', 'text'] })
* ```
*
* ### Options and defaults:
*
* ```javascript
* // The full set of options (here with their default value) is:
* const miniSearch = new MiniSearch({
* // idField: field that uniquely identifies a document
* idField: 'id',
*
* // extractField: function used to get the value of a field in a document.
* // By default, it assumes the document is a flat object with field names as
* // property keys and field values as string property values, but custom logic
* // can be implemented by setting this option to a custom extractor function.
* extractField: (document, fieldName) => document[fieldName],
*
* // tokenize: function used to split fields into individual terms. By
* // default, it is also used to tokenize search queries, unless a specific
* // `tokenize` search option is supplied. When tokenizing an indexed field,
* // the field name is passed as the second argument.
* tokenize: (string, _fieldName) => string.split(SPACE_OR_PUNCTUATION),
*
* // processTerm: function used to process each tokenized term before
* // indexing. It can be used for stemming and normalization. Return a falsy
* // value in order to discard a term. By default, it is also used to process
* // search queries, unless a specific `processTerm` option is supplied as a
* // search option. When processing a term from a indexed field, the field
* // name is passed as the second argument.
* processTerm: (term, _fieldName) => term.toLowerCase(),
*
* // searchOptions: default search options, see the `search` method for
* // details
* searchOptions: undefined,
*
* // fields: document fields to be indexed. Mandatory, but not set by default
* fields: undefined
*
* // storeFields: document fields to be stored and returned as part of the
* // search results.
* storeFields: []
* })
* ```
*/
constructor (options: Options<T>) {
  if (options?.fields == null) {
    throw new Error('MiniSearch: option "fields" must be provided')
  }

  // Both `undefined` and `true` select the default auto-vacuum settings;
  // `false` or a custom object is passed through unchanged
  const autoVacuum = (options.autoVacuum == null || options.autoVacuum === true) ? defaultAutoVacuumOptions : options.autoVacuum

  // Spread order matters: user options override top-level defaults, while the
  // nested search/suggest options are shallow-merged with their own defaults
  this._options = {
    ...defaultOptions,
    ...options,
    autoVacuum,
    searchOptions: { ...defaultSearchOptions, ...(options.searchOptions || {}) },
    autoSuggestOptions: { ...defaultAutoSuggestOptions, ...(options.autoSuggestOptions || {}) }
  }

  this._index = new SearchableMap()
  this._documentCount = 0
  this._documentIds = new Map()
  this._idToShortId = new Map()

  // Fields are defined during initialization, don't change, are few in
  // number, rarely need iterating over, and have string keys. Therefore in
  // this case an object is a better candidate than a Map to store the mapping
  // from field key to ID.
  this._fieldIds = {}

  this._fieldLength = new Map()
  this._avgFieldLength = []
  this._nextId = 0
  this._storedFields = new Map()
  this._dirtCount = 0
  this._currentVacuum = null
  this._enqueuedVacuum = null
  this._enqueuedVacuumConditions = defaultVacuumConditions

  // Register field IDs for all configured fields
  this.addFields(this._options.fields)
}
/**
* Adds a document to the index
*
* @param document The document to be indexed
*/
add (document: T): void {
  const { extractField, tokenize, processTerm, fields, idField } = this._options
  const documentId = extractField(document, idField)

  if (documentId == null) {
    throw new Error(`MiniSearch: document does not have ID field "${idField}"`)
  }
  if (this._idToShortId.has(documentId)) {
    throw new Error(`MiniSearch: duplicate ID ${documentId}`)
  }

  const shortId = this.addDocumentId(documentId)
  this.saveStoredFields(shortId, document)

  for (const field of fields) {
    const value = extractField(document, field)
    if (value == null) continue

    const tokens = tokenize(value.toString(), field)
    const fieldId = this._fieldIds[field]

    // Field length is measured in unique tokens
    this.addFieldLength(shortId, fieldId, this._documentCount - 1, new Set(tokens).size)

    for (const token of tokens) {
      const processed = processTerm(token, field)
      // The term processor may expand one token into several terms, or reject
      // it entirely by returning a falsy value
      if (Array.isArray(processed)) {
        processed.forEach((term) => this.addTerm(fieldId, shortId, term))
      } else if (processed) {
        this.addTerm(fieldId, shortId, processed)
      }
    }
  }
}
/**
* Adds all the given documents to the index
*
* @param documents An array of documents to be indexed
*/
addAll (documents: readonly T[]): void {
  // Index each document in turn, synchronously
  documents.forEach((document) => this.add(document))
}
/**
* Adds all the given documents to the index asynchronously.
*
* Returns a promise that resolves (to `undefined`) when the indexing is done.
* This method is useful when index many documents, to avoid blocking the main
* thread. The indexing is performed asynchronously and in chunks.
*
* @param documents An array of documents to be indexed
* @param options Configuration options
* @return A promise resolving to `undefined` when the indexing is done
*/
addAllAsync (documents: readonly T[], options: { chunkSize?: number } = {}): Promise<void> {
  const { chunkSize = 10 } = options

  let chunk: T[] = []
  let promise: Promise<void> = Promise.resolve()

  documents.forEach((document, i) => {
    chunk.push(document)
    if ((i + 1) % chunkSize === 0) {
      // Hand the filled chunk to the promise chain — yield to the event loop
      // before indexing it — then start accumulating a fresh chunk
      const filledChunk = chunk
      chunk = []
      promise = promise
        .then(() => new Promise(resolve => setTimeout(resolve, 0)))
        .then(() => this.addAll(filledChunk))
    }
  })

  // Index whatever remains in the last, possibly partial, chunk
  return promise.then(() => this.addAll(chunk))
}
/**
* Removes the given document from the index.
*
* The document to remove must NOT have changed between indexing and removal,
* otherwise the index will be corrupted.
*
* This method requires passing the full document to be removed (not just the
* ID), and immediately removes the document from the inverted index, allowing
* memory to be released. A convenient alternative is [[MiniSearch.discard]],
* which needs only the document ID, and has the same visible effect, but
* delays cleaning up the index until the next vacuuming.
*
* @param document The document to be removed
*/
remove (document: T): void {
  const { tokenize, processTerm, extractField, fields, idField } = this._options
  const documentId = extractField(document, idField)

  if (documentId == null) {
    throw new Error(`MiniSearch: document does not have ID field "${idField}"`)
  }

  const shortId = this._idToShortId.get(documentId)
  if (shortId == null) {
    throw new Error(`MiniSearch: cannot remove document with ID ${documentId}: it is not in the index`)
  }

  // Re-tokenize the document exactly as `add` did, and undo each term. This is
  // why the document must not have changed since indexing.
  for (const field of fields) {
    const value = extractField(document, field)
    if (value == null) continue

    const tokens = tokenize(value.toString(), field)
    const fieldId = this._fieldIds[field]
    this.removeFieldLength(shortId, fieldId, this._documentCount, new Set(tokens).size)

    for (const token of tokens) {
      const processed = processTerm(token, field)
      if (Array.isArray(processed)) {
        processed.forEach((term) => this.removeTerm(fieldId, shortId, term))
      } else if (processed) {
        this.removeTerm(fieldId, shortId, processed)
      }
    }
  }

  // Drop all per-document bookkeeping
  this._storedFields.delete(shortId)
  this._documentIds.delete(shortId)
  this._idToShortId.delete(documentId)
  this._fieldLength.delete(shortId)
  this._documentCount -= 1
}
/**
* Removes all the given documents from the index. If called with no arguments,
* it removes _all_ documents from the index.
*
* @param documents The documents to be removed. If this argument is omitted,
* all documents are removed. Note that, for removing all documents, it is
* more efficient to call this method with no arguments than to pass all
* documents.
*/
removeAll (documents?: readonly T[]): void {
  if (documents) {
    documents.forEach((document) => this.remove(document))
    return
  }

  // An explicit nullish argument is almost certainly a caller mistake
  if (arguments.length > 0) {
    throw new Error('Expected documents to be present. Omit the argument to remove all documents.')
  }

  // No argument at all: reset every internal structure to its pristine state
  this._index = new SearchableMap()
  this._documentCount = 0
  this._documentIds = new Map()
  this._idToShortId = new Map()
  this._fieldLength = new Map()
  this._avgFieldLength = []
  this._storedFields = new Map()
  this._nextId = 0
}
/**
* Discards the document with the given ID, so it won't appear in search results
*
* It has the same visible effect of [[MiniSearch.remove]] (both cause the
* document to stop appearing in searches), but a different effect on the
* internal data structures:
*
* - [[MiniSearch.remove]] requires passing the full document to be removed
* as argument, and removes it from the inverted index immediately.
*
* - [[MiniSearch.discard]] instead only needs the document ID, and works by
* marking the current version of the document as discarded, so it is
* immediately ignored by searches. This is faster and more convenient than
* `remove`, but the index is not immediately modified. To take care of
* that, vacuuming is performed after a certain number of documents are
* discarded, cleaning up the index and allowing memory to be released.
*
* After discarding a document, it is possible to re-add a new version, and
* only the new version will appear in searches. In other words, discarding
* and re-adding a document works exactly like removing and re-adding it. The
* [[MiniSearch.replace]] method can also be used to replace a document with a
* new version.
*
* #### Details about vacuuming
*
* Repetite calls to this method would leave obsolete document references in
* the index, invisible to searches. Two mechanisms take care of cleaning up:
* clean up during search, and vacuuming.
*
* - Upon search, whenever a discarded ID is found (and ignored for the
* results), references to the discarded document are removed from the
* inverted index entries for the search terms. This ensures that subsequent
* searches for the same terms do not need to skip these obsolete references
* again.
*
* - In addition, vacuuming is performed automatically by default (see the
* `autoVacuum` field in [[Options]]) after a certain number of documents
* are discarded. Vacuuming traverses all terms in the index, cleaning up
* all references to discarded documents. Vacuuming can also be triggered
* manually by calling [[MiniSearch.vacuum]].
*
* @param id The ID of the document to be discarded
*/
discard (id: any): void {
  const shortId = this._idToShortId.get(id)

  if (shortId == null) {
    throw new Error(`MiniSearch: cannot discard document with ID ${id}: it is not in the index`)
  }

  // Forget the document, but leave its references in the inverted index: those
  // are cleaned up lazily on search, or by vacuuming
  this._idToShortId.delete(id)
  this._documentIds.delete(shortId)
  this._storedFields.delete(shortId)

  // Note: Array.prototype.forEach skips holes, so sparse entries in the field
  // length array (fields with no recorded length) are left untouched. The
  // leading `;` guards against ASI joining this line with the previous one.
  ;(this._fieldLength.get(shortId) || []).forEach((fieldLength, fieldId) => {
    this.removeFieldLength(shortId, fieldId, this._documentCount, fieldLength)
  })

  this._fieldLength.delete(shortId)
  this._documentCount -= 1
  this._dirtCount += 1

  this.maybeAutoVacuum()
}
private maybeAutoVacuum (): void {
if (this._options.autoVacuum === false) { return }
const { minDirtFactor, minDirtCount, batchSize, batchWait } = this._options.autoVacuum
this.conditionalVacuum({ batchSize, batchWait }, { minDirtCount, minDirtFactor })
}
/**
* Discards the documents with the given IDs, so they won't appear in search
* results
*
* It is equivalent to calling [[MiniSearch.discard]] for all the given IDs,
* but with the optimization of triggering at most one automatic vacuuming at
* the end.
*
* Note: to remove all documents from the index, it is faster and more
* convenient to call [[MiniSearch.removeAll]] with no argument, instead of
* passing all IDs to this method.
*/
discardAll (ids: readonly any[]): void {
  const savedAutoVacuum = this._options.autoVacuum

  try {
    // Temporarily disable auto vacuum so that at most one vacuuming is
    // triggered, at the end, rather than once per discarded ID
    this._options.autoVacuum = false
    ids.forEach((id) => { this.discard(id) })
  } finally {
    this._options.autoVacuum = savedAutoVacuum
  }

  this.maybeAutoVacuum()
}
/**
* It replaces an existing document with the given updated version
*
* It works by discarding the current version and adding the updated one, so
* it is functionally equivalent to calling [[MiniSearch.discard]] followed by
* [[MiniSearch.add]]. The ID of the updated document should be the same as
* the original one.
*
* Since it uses [[MiniSearch.discard]] internally, this method relies on
* vacuuming to clean up obsolete document references from the index, allowing
* memory to be released (see [[MiniSearch.discard]]).
*
* @param updatedDocument The updated document to replace the old version
* with
*/
replace (updatedDocument: T): void {
  const { idField, extractField } = this._options

  // Discard the old version by ID, then index the updated one
  this.discard(extractField(updatedDocument, idField))
  this.add(updatedDocument)
}
/**
* Triggers a manual vacuuming, cleaning up references to discarded documents
* from the inverted index
*
* Vacuuming is only useful for applications that use the
* [[MiniSearch.discard]] or [[MiniSearch.replace]] methods.
*
* By default, vacuuming is performed automatically when needed (controlled by
* the `autoVacuum` field in [[Options]]), so there is usually no need to call
* this method, unless one wants to make sure to perform vacuuming at a
* specific moment.
*
* Vacuuming traverses all terms in the inverted index in batches, and cleans
* up references to discarded documents from the posting list, allowing memory
* to be released.
*
* The method takes an optional object as argument with the following keys:
*
* - `batchSize`: the size of each batch (1000 by default)
*
* - `batchWait`: the number of milliseconds to wait between batches (10 by
* default)
*
* On large indexes, vacuuming could have a non-negligible cost: batching
* avoids blocking the thread for long, diluting this cost so that it is not
* negatively affecting the application. Nonetheless, this method should only
* be called when necessary, and relying on automatic vacuuming is usually
* better.
*
* It returns a promise that resolves (to undefined) when the clean up is
* completed. If vacuuming is already ongoing at the time this method is
* called, a new one is enqueued immediately after the ongoing one, and a
* corresponding promise is returned. However, no more than one vacuuming is
* enqueued on top of the ongoing one, even if this method is called more
* times (enqueuing multiple ones would be useless).
*
* @param options Configuration options for the batch size and delay. See
* [[VacuumOptions]].
*/
vacuum (options: VacuumOptions = {}): Promise<void> {
  // Passing no conditions makes the vacuuming unconditional
  return this.conditionalVacuum(options)
}
private conditionalVacuum (options: VacuumOptions, conditions?: VacuumConditions): Promise<void> {
  // If a vacuum is already ongoing, schedule another as soon as it finishes,
  // unless there's already one enqueued. If one was already enqueued, do not
  // enqueue another on top, but make sure that the conditions are the
  // broadest.
  if (this._currentVacuum) {
    // `undefined` conditions mean "unconditional", so combining with `&&`
    // keeps the broadest request: an unconditional one wins over any
    // conditional one
    this._enqueuedVacuumConditions = this._enqueuedVacuumConditions && conditions
    if (this._enqueuedVacuum != null) { return this._enqueuedVacuum }

    this._enqueuedVacuum = this._currentVacuum.then(() => {
      // Read the (possibly broadened) conditions at the time the enqueued
      // vacuuming actually starts, then reset them for future requests
      const conditions = this._enqueuedVacuumConditions
      this._enqueuedVacuumConditions = defaultVacuumConditions
      return this.performVacuuming(options, conditions)
    })
    return this._enqueuedVacuum
  }

  // No vacuum is ongoing: start one now, unless the conditions are not met
  if (this.vacuumConditionsMet(conditions) === false) { return Promise.resolve() }

  this._currentVacuum = this.performVacuuming(options)
  return this._currentVacuum
}
private async performVacuuming (options: VacuumOptions, conditions?: VacuumConditions): Promise<void> {
  // Snapshot the dirt count: more documents may be discarded while this
  // (asynchronous) vacuuming runs, and those must stay counted as dirt
  const initialDirtCount = this._dirtCount

  if (this.vacuumConditionsMet(conditions)) {
    const batchSize = options.batchSize || defaultVacuumOptions.batchSize
    const batchWait = options.batchWait || defaultVacuumOptions.batchWait
    let i = 1

    for (const [term, fieldsData] of this._index) {
      for (const [fieldId, fieldIndex] of fieldsData) {
        for (const [shortId] of fieldIndex) {
          // Documents still present in the index are kept
          if (this._documentIds.has(shortId)) { continue }

          // Deleting entries from a Map while iterating it is safe in JS
          if (fieldIndex.size <= 1) {
            fieldsData.delete(fieldId)
          } else {
            fieldIndex.delete(shortId)
          }
        }
      }

      // Drop the term entirely if no field data remains for it
      if (this._index.get(term)!.size === 0) {
        this._index.delete(term)
      }

      // Pause between batches, so vacuuming does not block the thread for long
      if (i % batchSize === 0) {
        await new Promise((resolve) => setTimeout(resolve, batchWait))
      }

      i += 1
    }

    // Subtract only the dirt present when we started; dirt accumulated during
    // the vacuuming is left for the next run
    this._dirtCount -= initialDirtCount
  }

  // Make the next lines always async, so they execute after this function returns
  await null

  // Promote the enqueued vacuuming (if any) to current
  this._currentVacuum = this._enqueuedVacuum
  this._enqueuedVacuum = null
}
private vacuumConditionsMet (conditions?: VacuumConditions) {
if (conditions == null) { return true }
let { minDirtCount, minDirtFactor } = conditions
minDirtCount = minDirtCount || defaultAutoVacuumOptions.minDirtCount
minDirtFactor = minDirtFactor || defaultAutoVacuumOptions.minDirtFactor