diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_models.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_models.py
index bfdbc1701aa8..7da916d21bdb 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_models.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_models.py
@@ -119,7 +119,7 @@ class CustomAnalyzer(LexicalAnalyzer):
     _validation = {
         'odata_type': {'required': True},
         'name': {'required': True},
-        'tokenizer': {'required': True},
+        'tokenizer_name': {'required': True},
     }
 
     _attribute_map = {
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py
index fe3722a8e75d..9ad43085cd07 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py
@@ -78,6 +78,7 @@ def prep_if_none_match(etag, match_condition):
 def pack_custom_analyzer(custom_analyzer):
     # type: (CustomAnalyzer) -> _CustomAnalyzer
     return _CustomAnalyzer(
+        name=custom_analyzer.name,
         odata_type=custom_analyzer.odata_type,
         tokenizer=custom_analyzer.tokenizer_name,
         token_filters=custom_analyzer.token_filters,
@@ -87,7 +88,8 @@ def pack_custom_analyzer(custom_analyzer):
 
 def unpack_custom_analyzer(custom_analyzer):
     # type: (_CustomAnalyzer) -> CustomAnalyzer
-    return _CustomAnalyzer(
+    return CustomAnalyzer(
+        name=custom_analyzer.name,
         odata_type=custom_analyzer.odata_type,
         tokenizer_name=custom_analyzer.tokenizer,
         token_filters=custom_analyzer.token_filters,