Skip to content

Commit

Permalink
AnalyzerTextOptions ctors changes and name suffix on properties (#11810)
Browse files Browse the repository at this point in the history
  • Loading branch information
sima-zhu authored Jun 5, 2020
1 parent 388270f commit 65ae7e7
Show file tree
Hide file tree
Showing 8 changed files with 61 additions and 76 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -24,32 +24,32 @@ public static AnalyzeTextOptions map(com.azure.search.documents.indexes.implemen
if (obj == null) {
return null;
}
AnalyzeTextOptions analyzeTextOptions = new AnalyzeTextOptions();
AnalyzeTextOptions analyzeTextOptions = null;

if (obj.getTokenizer() != null) {
LexicalTokenizerName tokenizer = LexicalTokenizerNameConverter.map(obj.getTokenizer());
analyzeTextOptions = new AnalyzeTextOptions(obj.getText(), tokenizer);
analyzeTextOptions.setTokenizerName(tokenizer);
} else {
LexicalAnalyzerName analyzer = LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
analyzeTextOptions = new AnalyzeTextOptions(obj.getText(), analyzer);
analyzeTextOptions.setAnalyzerName(analyzer);
}

if (obj.getCharFilters() != null) {
List<CharFilterName> charFilters =
obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
analyzeTextOptions.setCharFilters(charFilters);
}

if (obj.getAnalyzer() != null) {
LexicalAnalyzerName analyzer = LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
analyzeTextOptions.setAnalyzer(analyzer);
}

if (obj.getTokenFilters() != null) {
List<TokenFilterName> tokenFilters =
obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
analyzeTextOptions.setTokenFilters(tokenFilters);
}

String text = obj.getText();
analyzeTextOptions.setText(text);

if (obj.getTokenizer() != null) {
LexicalTokenizerName tokenizer = LexicalTokenizerNameConverter.map(obj.getTokenizer());
analyzeTextOptions.setTokenizer(tokenizer);
}
return analyzeTextOptions;
}

Expand All @@ -69,9 +69,9 @@ public static com.azure.search.documents.indexes.implementation.models.AnalyzeRe
analyzeRequest.setCharFilters(charFilters);
}

if (obj.getAnalyzer() != null) {
if (obj.getAnalyzerName() != null) {
com.azure.search.documents.indexes.implementation.models.LexicalAnalyzerName analyzer =
LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
LexicalAnalyzerNameConverter.map(obj.getAnalyzerName());
analyzeRequest.setAnalyzer(analyzer);
}

Expand All @@ -84,9 +84,9 @@ public static com.azure.search.documents.indexes.implementation.models.AnalyzeRe
String text = obj.getText();
analyzeRequest.setText(text);

if (obj.getTokenizer() != null) {
if (obj.getTokenizerName() != null) {
com.azure.search.documents.indexes.implementation.models.LexicalTokenizerName tokenizer =
LexicalTokenizerNameConverter.map(obj.getTokenizer());
LexicalTokenizerNameConverter.map(obj.getTokenizerName());
analyzeRequest.setTokenizer(tokenizer);
}
return analyzeRequest;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
* A converter between {@link com.azure.search.documents.indexes.implementation.models.Suggester} and {@link SearchSuggester}.
*/
public final class SuggesterConverter {
private static final String SEARCH_MODE = "analyzingInfixMatching";
/**
* Maps from {@link com.azure.search.documents.indexes.implementation.models.Suggester} to {@link SearchSuggester}.
*/
Expand Down Expand Up @@ -51,7 +50,6 @@ public static com.azure.search.documents.indexes.implementation.models.Suggester
String name = obj.getName();
suggester.setName(name);

suggester.setSearchMode(SEARCH_MODE);
return suggester;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
* A converter between {@link com.azure.search.documents.indexes.implementation.models.SynonymMap} and {@link SynonymMap}.
*/
public final class SynonymMapConverter {
private static final String FORMAT = "solr";

/**
* Maps from {@link com.azure.search.documents.indexes.implementation.models.SynonymMap} to {@link SynonymMap}.
*/
Expand Down Expand Up @@ -53,8 +53,6 @@ public static com.azure.search.documents.indexes.implementation.models.SynonymMa
String name = obj.getName();
synonymMap.setName(name);

synonymMap.setFormat(FORMAT);

String eTag = obj.getETag();
synonymMap.setETag(eTag);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ public final class AnalyzeTextOptions {
* The text to break into tokens.
*/
@JsonProperty(value = "text", required = true)
private String text;
private final String text;

/*
* The name of the analyzer to use to break the given text. If this
Expand Down Expand Up @@ -45,7 +45,7 @@ public final class AnalyzeTextOptions {
* 'Simple', 'Stop', 'Whitespace'
*/
@JsonProperty(value = "analyzer")
private LexicalAnalyzerName analyzer;
private LexicalAnalyzerName analyzerName;

/*
* The name of the tokenizer to use to break the given text. If this
Expand All @@ -57,7 +57,7 @@ public final class AnalyzeTextOptions {
* 'Pattern', 'Standard', 'UaxUrlEmail', 'Whitespace'
*/
@JsonProperty(value = "tokenizer")
private LexicalTokenizerName tokenizer;
private LexicalTokenizerName tokenizerName;

/*
* An optional list of token filters to use when breaking the given text.
Expand All @@ -74,23 +74,34 @@ public final class AnalyzeTextOptions {
private List<CharFilterName> charFilters;

/**
* Get the text property: The text to break into tokens.
* Constructor of {@link AnalyzeTextOptions} which takes an analyzer name.
*
* @return the text value.
* @param text The text to break into tokens.
* @param analyzerName The name of the analyzer to use.
*/
public String getText() {
return this.text;
public AnalyzeTextOptions(String text, LexicalAnalyzerName analyzerName) {
this.text = text;
this.analyzerName = analyzerName;
}

/**
* Set the text property: The text to break into tokens.
* Constructor of {@link AnalyzeTextOptions} which takes a tokenizer name.
*
* @param text the text value to set.
* @return the AnalyzeRequest object itself.
* @param text The text to break into tokens.
* @param tokenizerName The name of the tokenizer to use.
*/
public AnalyzeTextOptions setText(String text) {
public AnalyzeTextOptions(String text, LexicalTokenizerName tokenizerName) {
this.text = text;
return this;
this.tokenizerName = tokenizerName;
}

/**
* Get the text property: The text to break into tokens.
*
* @return the text value.
*/
public String getText() {
return this.text;
}

/**
Expand Down Expand Up @@ -121,8 +132,8 @@ public AnalyzeTextOptions setText(String text) {
*
* @return the analyzer value.
*/
public LexicalAnalyzerName getAnalyzer() {
return this.analyzer;
public LexicalAnalyzerName getAnalyzerName() {
return this.analyzerName;
}

/**
Expand Down Expand Up @@ -151,11 +162,12 @@ public LexicalAnalyzerName getAnalyzer() {
* 'StandardAsciiFoldingLucene', 'Keyword', 'Pattern', 'Simple', 'Stop',
* 'Whitespace'.
*
* @param analyzer the analyzer value to set.
* @param analyzerName the analyzer value to set.
* @return the AnalyzeRequest object itself.
*/
public AnalyzeTextOptions setAnalyzer(LexicalAnalyzerName analyzer) {
this.analyzer = analyzer;
public AnalyzeTextOptions setAnalyzerName(LexicalAnalyzerName analyzerName) {
this.analyzerName = analyzerName;
this.tokenizerName = null;
return this;
}

Expand All @@ -170,8 +182,8 @@ public AnalyzeTextOptions setAnalyzer(LexicalAnalyzerName analyzer) {
*
* @return the tokenizer value.
*/
public LexicalTokenizerName getTokenizer() {
return this.tokenizer;
public LexicalTokenizerName getTokenizerName() {
return this.tokenizerName;
}

/**
Expand All @@ -183,11 +195,12 @@ public LexicalTokenizerName getTokenizer() {
* 'MicrosoftLanguageStemmingTokenizer', 'NGram', 'PathHierarchy',
* 'Pattern', 'Standard', 'UaxUrlEmail', 'Whitespace'.
*
* @param tokenizer the tokenizer value to set.
* @param tokenizerName the tokenizer value to set.
* @return the AnalyzeRequest object itself.
*/
public AnalyzeTextOptions setTokenizer(LexicalTokenizerName tokenizer) {
this.tokenizer = tokenizer;
public AnalyzeTextOptions setTokenizerName(LexicalTokenizerName tokenizerName) {
this.tokenizerName = tokenizerName;
this.analyzerName = null;
return this;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ public final class CustomAnalyzer extends LexicalAnalyzer {
* 'Pattern', 'Standard', 'UaxUrlEmail', 'Whitespace'
*/
@JsonProperty(value = "tokenizer", required = true)
private LexicalTokenizerName tokenizer;
private LexicalTokenizerName tokenizerName;

/*
* A list of token filters used to filter out or modify the tokens
Expand Down Expand Up @@ -60,7 +60,7 @@ public final class CustomAnalyzer extends LexicalAnalyzer {
* @return the tokenizer value.
*/
public LexicalTokenizerName getTokenizer() {
return this.tokenizer;
return this.tokenizerName;
}

/**
Expand All @@ -71,11 +71,11 @@ public LexicalTokenizerName getTokenizer() {
* 'MicrosoftLanguageStemmingTokenizer', 'NGram', 'PathHierarchy',
* 'Pattern', 'Standard', 'UaxUrlEmail', 'Whitespace'.
*
* @param tokenizer the tokenizer value to set.
* @param tokenizerName the tokenizer value to set.
* @return the CustomAnalyzer object itself.
*/
public CustomAnalyzer setTokenizer(LexicalTokenizerName tokenizer) {
this.tokenizer = tokenizer;
public CustomAnalyzer setTokenizer(LexicalTokenizerName tokenizerName) {
this.tokenizerName = tokenizerName;
return this;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,6 @@
*/
@Fluent
public final class SearchSuggester {
/*
* A value indicating the capabilities of the suggester.
*/
@JsonProperty(value = "searchMode", required = true)
private static final String SEARCH_MODE = "analyzingInfixMatching";

/*
* The name of the suggester.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,14 +11,6 @@
*/
@Fluent
public final class SynonymMap {

/*
* The format of the synonym map. Only the 'solr' format is currently
* supported.
*/
@JsonProperty(value = "format", required = true)
private static final String FORMAT = "solr";

/*
* The name of the synonym map.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -204,18 +204,14 @@ public void canAnalyze() {
searchIndexClient.createIndex(index);
indexesToCleanup.add(index.getName());

AnalyzeTextOptions request = new AnalyzeTextOptions()
.setText("One two")
.setAnalyzer(LexicalAnalyzerName.WHITESPACE);
AnalyzeTextOptions request = new AnalyzeTextOptions("One two", LexicalAnalyzerName.WHITESPACE);
PagedIterable<AnalyzedTokenInfo> results = searchIndexClient.analyzeText(index.getName(), request);
Iterator<AnalyzedTokenInfo> iterator = results.iterator();
assertTokenInfoEqual("One", 0, 3, 0, iterator.next());
assertTokenInfoEqual("two", 4, 7, 1, iterator.next());
assertFalse(iterator.hasNext());

request = new AnalyzeTextOptions()
.setText("One's <two/>")
.setTokenizer(LexicalTokenizerName.WHITESPACE)
request = new AnalyzeTextOptions("One's <two/>", LexicalTokenizerName.WHITESPACE)
.setTokenFilters(Collections.singletonList(TokenFilterName.APOSTROPHE))
.setCharFilters(Collections.singletonList(CharFilterName.HTML_STRIP));
results = searchIndexClient.analyzeText(index.getName(), request);
Expand All @@ -239,21 +235,15 @@ public void canAnalyzeWithAllPossibleNames() {

LexicalAnalyzerName.values()
.stream()
.map(an -> new AnalyzeTextOptions()
.setText("One two")
.setAnalyzer(an))
.map(an -> new AnalyzeTextOptions("One two", an))
.forEach(r -> searchIndexClient.analyzeText(index.getName(), r));

LexicalTokenizerName.values()
.stream()
.map(tn -> new AnalyzeTextOptions()
.setText("One two")
.setTokenizer(tn))
.map(tn -> new AnalyzeTextOptions("One two", tn))
.forEach(r -> searchIndexClient.analyzeText(index.getName(), r));

AnalyzeTextOptions request = new AnalyzeTextOptions()
.setText("One two")
.setTokenizer(LexicalTokenizerName.WHITESPACE)
AnalyzeTextOptions request = new AnalyzeTextOptions("One two", LexicalTokenizerName.WHITESPACE)
.setTokenFilters(new ArrayList<>(TokenFilterName.values()))
.setCharFilters(new ArrayList<>(CharFilterName.values()));
searchIndexClient.analyzeText(index.getName(), request);
Expand Down

0 comments on commit 65ae7e7

Please sign in to comment.