
Commit

Rename AnalyzeIndex -> AnalyzeText (Azure#315)
CodeRunRepeat authored and navalev committed Dec 4, 2019
1 parent af1b1e3 commit b859a8b
Showing 4 changed files with 33 additions and 33 deletions.
@@ -857,8 +857,8 @@ Mono<Response<Void>> deleteIndexWithResponse(String indexName,
      * @param analyzeRequest the text and analyzer or analysis components to test
      * @return analyze result.
      */
-    public PagedFlux<TokenInfo> analyzeIndex(String indexName, AnalyzeRequest analyzeRequest) {
-        return this.analyzeIndex(indexName, analyzeRequest, null);
+    public PagedFlux<TokenInfo> analyzeText(String indexName, AnalyzeRequest analyzeRequest) {
+        return this.analyzeText(indexName, analyzeRequest, null);
     }

     /**
@@ -870,27 +870,27 @@ public PagedFlux<TokenInfo> analyzeIndex(String indexName, AnalyzeRequest analyz
      * Contains the tracking ID sent with the request to help with debugging
      * @return a response containing analyze result.
      */
-    public PagedFlux<TokenInfo> analyzeIndex(String indexName,
-        AnalyzeRequest analyzeRequest, RequestOptions requestOptions) {
+    public PagedFlux<TokenInfo> analyzeText(String indexName,
+        AnalyzeRequest analyzeRequest, RequestOptions requestOptions) {
         return new PagedFlux<>(
-            () -> withContext(context -> this.analyzeIndexWithResponse(indexName,
+            () -> withContext(context -> this.analyzeTextWithResponse(indexName,
                 analyzeRequest, requestOptions, context)),
             nextLink -> Mono.empty());
     }

-    PagedFlux<TokenInfo> analyzeIndex(String indexName,
-                                      AnalyzeRequest analyzeRequest,
-                                      RequestOptions requestOptions,
-                                      Context context) {
+    PagedFlux<TokenInfo> analyzeText(String indexName,
+                                     AnalyzeRequest analyzeRequest,
+                                     RequestOptions requestOptions,
+                                     Context context) {
         return new PagedFlux<>(
-            () -> this.analyzeIndexWithResponse(indexName, analyzeRequest, requestOptions, context),
+            () -> this.analyzeTextWithResponse(indexName, analyzeRequest, requestOptions, context),
             nextLink -> Mono.empty());
     }

-    private Mono<PagedResponse<TokenInfo>> analyzeIndexWithResponse(String indexName,
-                                                                    AnalyzeRequest analyzeRequest,
-                                                                    RequestOptions requestOptions,
-                                                                    Context context) {
+    private Mono<PagedResponse<TokenInfo>> analyzeTextWithResponse(String indexName,
+                                                                   AnalyzeRequest analyzeRequest,
+                                                                   RequestOptions requestOptions,
+                                                                   Context context) {
         return restClient.indexes()
             .analyzeWithRestResponseAsync(indexName, analyzeRequest, requestOptions, context)
             .map(response -> new PagedResponseBase<>(
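
For orientation, a minimal caller-side sketch of the renamed async API. It uses only signatures visible in the diff above (analyzeText, AnalyzeRequest, AnalyzerName, PagedFlux<TokenInfo>); the client variable, index name, and TokenInfo getter names are assumptions inferred from the test changes below, not confirmed by this commit.

    // Minimal sketch, not from this commit: "asyncClient" is an assumed
    // SearchServiceAsyncClient instance and "hotels" a hypothetical index name.
    AnalyzeRequest request = new AnalyzeRequest()
        .setText("One two")
        .setAnalyzer(AnalyzerName.WHITESPACE);

    // analyzeText returns a PagedFlux<TokenInfo>; subscribing drives the request.
    // Getter names are assumed from the assertTokenInfoEqual(token, startOffset,
    // endOffset, position, info) call pattern in the tests.
    asyncClient.analyzeText("hotels", request)
        .subscribe(info -> System.out.printf("%s [%d..%d] pos=%d%n",
            info.getToken(), info.getStartOffset(), info.getEndOffset(), info.getPosition()));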
@@ -606,8 +606,8 @@ public Response<Void> deleteIndexWithResponse(String indexName,
      * @param analyzeRequest the text and analyzer or analysis components to test
      * @return analyze result.
      */
-    public PagedIterable<TokenInfo> analyzeIndex(String indexName, AnalyzeRequest analyzeRequest) {
-        return new PagedIterable<>(asyncClient.analyzeIndex(indexName, analyzeRequest));
+    public PagedIterable<TokenInfo> analyzeText(String indexName, AnalyzeRequest analyzeRequest) {
+        return new PagedIterable<>(asyncClient.analyzeText(indexName, analyzeRequest));
     }

     /**
@@ -620,11 +620,11 @@ public PagedIterable<TokenInfo> analyzeIndex(String indexName, AnalyzeRequest an
      * @param context additional context that is passed through the HTTP pipeline during the service call
      * @return analyze result.
      */
-    public PagedIterable<TokenInfo> analyzeIndex(String indexName,
-                                                 AnalyzeRequest analyzeRequest,
-                                                 RequestOptions requestOptions,
-                                                 Context context) {
-        return new PagedIterable<>(asyncClient.analyzeIndex(indexName, analyzeRequest, requestOptions, context));
+    public PagedIterable<TokenInfo> analyzeText(String indexName,
+                                                AnalyzeRequest analyzeRequest,
+                                                RequestOptions requestOptions,
+                                                Context context) {
+        return new PagedIterable<>(asyncClient.analyzeText(indexName, analyzeRequest, requestOptions, context));
     }

     /**
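
A corresponding sketch for the synchronous client. Only the analyzeText signatures shown above are taken from the diff; the client instance, index name, and the no-argument RequestOptions constructor are assumptions.

    // Minimal sketch, not from this commit: "client" is an assumed
    // SearchServiceClient instance and "hotels" a hypothetical index name.
    AnalyzeRequest request = new AnalyzeRequest()
        .setText("One two")
        .setTokenizer(TokenizerName.WHITESPACE)
        .setTokenFilters(Arrays.asList(TokenFilterName.APOSTROPHE))
        .setCharFilters(Arrays.asList(CharFilterName.HTML_STRIP));

    // PagedIterable<TokenInfo> can be iterated directly; as the implementation
    // above shows, the sync client simply wraps the async client's PagedFlux.
    for (TokenInfo info : client.analyzeText("hotels", request)) {
        System.out.println(info.getToken());
    }

    // The expanded overload threads RequestOptions (assumed default-constructible)
    // and an explicit pipeline Context through the call.
    PagedIterable<TokenInfo> results =
        client.analyzeText("hotels", request, new RequestOptions(), Context.NONE);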
@@ -218,7 +218,7 @@ public void canAnalyze() {
             .setText("One two")
             .setAnalyzer(AnalyzerName.WHITESPACE);
         StepVerifier
-            .create(searchServiceClient.analyzeIndex(index.getName(), request))
+            .create(searchServiceClient.analyzeText(index.getName(), request))
             .assertNext(firstTokenInfo -> assertTokenInfoEqual("One", 0, 3, 0, firstTokenInfo))
             .assertNext(secondTokenInfo -> assertTokenInfoEqual("two", 4, 7, 1, secondTokenInfo))
             .expectNextCount(0L)
@@ -230,15 +230,15 @@ public void canAnalyze() {
             .setTokenFilters(Arrays.asList(TokenFilterName.APOSTROPHE))
             .setCharFilters(Arrays.asList(CharFilterName.HTML_STRIP));
         StepVerifier
-            .create(searchServiceClient.analyzeIndex(index.getName(), request))
+            .create(searchServiceClient.analyzeText(index.getName(), request))
             .assertNext(onlyTokenInfo -> {
                 // End offset is based on the original token, not the one emitted by the filters.
                 assertTokenInfoEqual("One", 0, 5, 0, onlyTokenInfo);
             })
             .verifyComplete();

         StepVerifier
-            .create(searchServiceClient.analyzeIndex(index.getName(), request, generateRequestOptions()))
+            .create(searchServiceClient.analyzeText(index.getName(), request, generateRequestOptions()))
             .assertNext(onlyTokenInfo -> {
                 // End offset is based on the original token, not the one emitted by the filters.
                 assertTokenInfoEqual("One", 0, 5, 0, onlyTokenInfo);
@@ -256,23 +256,23 @@ public void canAnalyzeWithAllPossibleNames() {
                 .setText("One two")
                 .setAnalyzer(an))
             .forEach(r -> {
-                searchServiceClient.analyzeIndex(index.getName(), r);
+                searchServiceClient.analyzeText(index.getName(), r);
             });

         Arrays.stream(TokenizerName.values())
             .map(tn -> new AnalyzeRequest()
                 .setText("One two")
                 .setTokenizer(tn))
             .forEach(r -> {
-                searchServiceClient.analyzeIndex(index.getName(), r);
+                searchServiceClient.analyzeText(index.getName(), r);
             });

         AnalyzeRequest request = new AnalyzeRequest()
             .setText("One two")
             .setTokenizer(TokenizerName.WHITESPACE)
             .setTokenFilters(Arrays.asList(TokenFilterName.values()))
             .setCharFilters(Arrays.asList(CharFilterName.values()));
-        searchServiceClient.analyzeIndex(index.getName(), request);
+        searchServiceClient.analyzeText(index.getName(), request);
     }

     @Override
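
The tests rely on an assertTokenInfoEqual helper defined outside this diff. A plausible reconstruction, assuming conventional TokenInfo getters; all names here are inferred from the call sites, not confirmed by the commit.

    // Hypothetical sketch of the helper used above; not part of this commit.
    // Argument order follows the call sites: token, start offset, end offset, position.
    private static void assertTokenInfoEqual(String expectedToken, int expectedStartOffset,
        int expectedEndOffset, int expectedPosition, TokenInfo actual) {
        Assert.assertEquals(expectedToken, actual.getToken());
        Assert.assertEquals(expectedStartOffset, (int) actual.getStartOffset());
        Assert.assertEquals(expectedEndOffset, (int) actual.getEndOffset());
        Assert.assertEquals(expectedPosition, (int) actual.getPosition());
    }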
@@ -212,7 +212,7 @@ public void canAnalyze() {
         AnalyzeRequest request = new AnalyzeRequest()
             .setText("One two")
             .setAnalyzer(AnalyzerName.WHITESPACE);
-        PagedIterable<TokenInfo> results = searchServiceClient.analyzeIndex(index.getName(), request);
+        PagedIterable<TokenInfo> results = searchServiceClient.analyzeText(index.getName(), request);
         Iterator<TokenInfo> iterator = results.stream().iterator();
         assertTokenInfoEqual("One", 0, 3, 0, iterator.next());
         assertTokenInfoEqual("two", 4, 7, 1, iterator.next());
@@ -223,13 +223,13 @@ public void canAnalyze() {
             .setTokenizer(TokenizerName.WHITESPACE)
             .setTokenFilters(Arrays.asList(TokenFilterName.APOSTROPHE))
             .setCharFilters(Arrays.asList(CharFilterName.HTML_STRIP));
-        results = searchServiceClient.analyzeIndex(index.getName(), request);
+        results = searchServiceClient.analyzeText(index.getName(), request);
         // End offset is based on the original token, not the one emitted by the filters.
         iterator = results.stream().iterator();
         assertTokenInfoEqual("One", 0, 5, 0, iterator.next());
         Assert.assertFalse(iterator.hasNext());

-        results = searchServiceClient.analyzeIndex(index.getName(), request, generateRequestOptions(), Context.NONE);
+        results = searchServiceClient.analyzeText(index.getName(), request, generateRequestOptions(), Context.NONE);
         // End offset is based on the original token, not the one emitted by the filters.
         iterator = results.stream().iterator();
         assertTokenInfoEqual("One", 0, 5, 0, iterator.next());
@@ -246,23 +246,23 @@ public void canAnalyzeWithAllPossibleNames() {
                 .setText("One two")
                 .setAnalyzer(an))
             .forEach(r -> {
-                searchServiceClient.analyzeIndex(index.getName(), r);
+                searchServiceClient.analyzeText(index.getName(), r);
             });

         Arrays.stream(TokenizerName.values())
             .map(tn -> new AnalyzeRequest()
                 .setText("One two")
                 .setTokenizer(tn))
             .forEach(r -> {
-                searchServiceClient.analyzeIndex(index.getName(), r);
+                searchServiceClient.analyzeText(index.getName(), r);
             });

         AnalyzeRequest request = new AnalyzeRequest()
             .setText("One two")
             .setTokenizer(TokenizerName.WHITESPACE)
             .setTokenFilters(Arrays.asList(TokenFilterName.values()))
             .setCharFilters(Arrays.asList(CharFilterName.values()));
-        searchServiceClient.analyzeIndex(index.getName(), request);
+        searchServiceClient.analyzeText(index.getName(), request);
     }

     @Override
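
For code written against the previous release, the migration is a mechanical rename: the parameters, overloads, and return types are unchanged across all four files. A before/after sketch; the client variable and index name are hypothetical.

    // Before this commit:
    // PagedIterable<TokenInfo> tokens = client.analyzeIndex("hotels", request);

    // After this commit; same arguments, same PagedIterable<TokenInfo> result:
    PagedIterable<TokenInfo> tokens = client.analyzeText("hotels", request);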
