From cd4ad88f8509b63da78d9e34b5e1962773d8ad9e Mon Sep 17 00:00:00 2001
From: Sarangan Rajamanickam
Date: Fri, 22 Jan 2021 21:02:03 -0800
Subject: [PATCH] Search documents migration (#12829)

* Modify the Configuration Markup Files
* Regenerated Changes
* Custom Code Changes
* Test Code and API Changes
* Updated Recordings Files
* Update Test Code
---
 ...returns_the_correct_autocomplete_result.js | 12 +-
 ..._returns_zero_results_for_invalid_query.js | 12 +-
 ...ount_returns_the_correct_document_count.js | 12 +-
 ...ocuments_delete_a_document_by_documents.js | 36 +-
 ...uments_delete_a_document_by_keykeynames.js | 20 +-
 ...ent_returns_the_correct_document_result.js | 12 +-
 ...ows_error_for_invalid_getdocument_value.js | 12 +-
 ...dexdocuments_deletes_existing_documents.js | 24 +-
 ...exdocuments_merges_an_existing_document.js | 24 +-
 ...ng_indexdocuments_mergeupload_documents.js | 36 +-
 ...ng_indexdocuments_upload_a_new_document.js | 24 +-
 ...ents_modify__merge_an_existing_document.js | 36 +-
 ...eoruploaddocuments_merge_a_new_document.js | 24 +-
 ...ents_modify__merge_an_existing_document.js | 34 +-
 ...earch_returns_the_correct_search_result.js | 12 +-
 ..._returns_zero_results_for_invalid_query.js | 12 +-
 ...suggest_returns_the_correct_suggestions.js | 12 +-
 ...urns_zero_suggestions_for_invalid_input.js | 12 +-
 ...loaddocuments_upload_a_set_of_documents.js | 24 +-
 ..._index_object_using_createorupdateindex.js | 40 +-
 ...recording_gets_the_correct_index_object.js | 12 +-
 .../recording_gets_the_list_of_indexes.js | 12 +-
 ...ecording_gets_the_list_of_indexes_names.js | 12 +-
 ...ing_modify_and_updates_the_index_object.js | 44 +-
 ...g_throws_error_for_invalid_index_object.js | 12 +-
 ...p_object_using_createorupdatesynonymmap.js | 40 +-
 ...ding_gets_the_correct_synonymmap_object.js | 14 +-
 .../recording_gets_the_list_of_synonymmaps.js | 12 +-
 ...ding_gets_the_list_of_synonymmaps_names.js | 10 +-
 ...odify_and_updates_the_synonymmap_object.js | 44 +-
 ...ows_error_for_invalid_synonymmap_object.js | 12 +-
 ...sing_createorupdatedatasourceconnection.js | 40 +-
 ...the_correct_datasourceconnection_object.js | 14 +-
 ..._the_list_of_datasourceconnection_names.js | 12 +-
 ..._gets_the_list_of_datasourceconnections.js | 12 +-
 ...updates_the_datasourceconnection_object.js | 44 +-
 ...for_invalid_datasourceconnection_object.js | 12 +-
 ...exer_object_using_createorupdateindexer.js | 40 +-
 ...cording_gets_the_correct_indexer_object.js | 14 +-
 ...ecording_gets_the_list_of_indexer_names.js | 12 +-
 .../recording_gets_the_list_of_indexers.js | 12 +-
 ...ecording_gets_the_status_of_the_indexer.js | 12 +-
 ...g_modify_and_updates_the_indexer_object.js | 42 +-
 ...throws_error_for_invalid_indexer_object.js | 12 +-
 ...set_object_using_createorupdateskillset.js | 42 +-
 ...ording_gets_the_correct_skillset_object.js | 14 +-
 ...cording_gets_the_list_of_skillset_names.js | 12 +-
 .../recording_gets_the_list_of_skillsets.js | 12 +-
 ...modify_and_updates_the_skillsets_object.js | 44 +-
 ...hrows_error_for_invalid_skillset_object.js | 12 +-
 .../review/search-documents.api.md | 1079 ++-
 .../typescript/src/indexes/analyzeText.ts | 5 +-
 .../typescript/src/indexes/getIndex.ts | 8 +-
 .../samples/typescript/src/utils/setup.ts | 3 +-
 .../src/generated/data/index.ts | 11 +
 .../src/generated/data/models/index.ts | 1163 +--
 .../src/generated/data/models/mappers.ts | 712 +-
 .../src/generated/data/models/parameters.ts | 694 +-
 .../generated/data/operations/documents.ts | 606 +-
 .../src/generated/data/operations/index.ts | 8 +-
.../src/generated/data/searchClient.ts | 47 +- .../src/generated/data/searchClientContext.ts | 43 +- .../src/generated/service/index.ts | 11 + .../src/generated/service/models/index.ts | 7775 ++++++++--------- .../src/generated/service/models/mappers.ts | 5744 ++++++------ .../generated/service/models/parameters.ts | 191 +- .../service/operations/dataSources.ts | 320 +- .../src/generated/service/operations/index.ts | 8 +- .../generated/service/operations/indexers.ts | 450 +- .../generated/service/operations/indexes.ts | 428 +- .../generated/service/operations/skillsets.ts | 321 +- .../service/operations/synonymMaps.ts | 320 +- .../generated/service/searchServiceClient.ts | 114 +- .../service/searchServiceClientContext.ts | 41 +- sdk/search/search-documents/src/index.ts | 35 +- .../search-documents/src/searchClient.ts | 2 +- .../search-documents/src/searchIndexClient.ts | 6 +- .../src/searchIndexerClient.ts | 8 +- .../search-documents/src/serviceModels.ts | 30 +- .../search-documents/src/serviceUtils.ts | 110 +- sdk/search/search-documents/swagger/Data.md | 37 +- .../search-documents/swagger/Service.md | 17 +- .../test/public/utils/setup.ts | 1 + 83 files changed, 10095 insertions(+), 11321 deletions(-) create mode 100644 sdk/search/search-documents/src/generated/data/index.ts create mode 100644 sdk/search/search-documents/src/generated/service/index.ts diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_autocomplete_returns_the_correct_autocomplete_result.js b/sdk/search/search-documents/recordings/node/searchclient/recording_autocomplete_returns_the_correct_autocomplete_result.js index dd8beecac78b..db154068ba56 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_autocomplete_returns_the_correct_autocomplete_result.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_autocomplete_returns_the_correct_autocomplete_result.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.post.autocomplete', {"search":"sec","suggesterName":"sg"}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffc519bbf6b3f7af451934febbcfd68f4d12f5ae7f5f5cb72ddbc09bef825dfff25ff0f9aa508c636000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffc519bbf6b3f7af451934febbcfd68f4d12f5ae7f5f5cb72ddbc09bef825dfff25ff0f9aa508c636000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'c01cbb5b-4d88-4e4f-bfb4-587a30fc5db9', + 'd86ab2ac-f3db-47d5-a6f9-0bb3814654b9', 'elapsed-time', - '133', + '110', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:13:58 GMT', + 'Fri, 22 Jan 2021 00:05:45 GMT', 'Content-Length', - '164' ]); + '164' +]); diff --git 
a/sdk/search/search-documents/recordings/node/searchclient/recording_autocomplete_returns_zero_results_for_invalid_query.js b/sdk/search/search-documents/recordings/node/searchclient/recording_autocomplete_returns_zero_results_for_invalid_query.js index 7ef061df7f82..7edb73ad0868 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_autocomplete_returns_zero_results_for_invalid_query.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_autocomplete_returns_zero_results_for_invalid_query.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.post.autocomplete', {"search":"garbxyz","suggesterName":"sg"}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bdefff92ff0742ea40440c000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bdefff92ff0742ea40440c000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '116f5a67-9fe1-4219-97ab-48954881e3a7', + '2b52bd90-b5f9-466b-af71-aa633e893085', 'elapsed-time', - '22', + '23', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:14:15 GMT', + 'Fri, 22 Jan 2021 00:06:03 GMT', 'Content-Length', - '133' ]); + '133' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_count_returns_the_correct_document_count.js b/sdk/search/search-documents/recordings/node/searchclient/recording_count_returns_the_correct_document_count.js index 4bd67c9d30cf..04e423a2d941 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_count_returns_the_correct_document_count.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_count_returns_the_correct_document_count.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs/$count') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddf97f00bbe0538805000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddf97f00bbe0538805000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'fd27e3d0-84d6-4558-a0f8-67ebf5a19006', + 'c31d26d0-e728-4a87-9227-ddd7247804bc', 'elapsed-time', - '7', + '12', 
'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:13:40 GMT', + 'Fri, 22 Jan 2021 00:05:28 GMT', 'Content-Length', - '127' ]); + '127' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_deletedocuments_delete_a_document_by_documents.js b/sdk/search/search-documents/recordings/node/searchclient/recording_deletedocuments_delete_a_document_by_documents.js index 74c24929026d..98ff112b0b03 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_deletedocuments_delete_a_document_by_documents.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_deletedocuments_delete_a_document_by_documents.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs(%278%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3e3af868247fbdc816f9478f96ebb21c7d34cb9b695dacdaa25a52936f674dda548b3cadab6c962eaba2c9d36c394b8b265de6efdab4add2769ea797797d9daeaab298e669d36678799c3ec9da795d558b269dd3bb8baacecb745a51d36279d18ca96fafab6735757656a6d76996ced6e9a45e176d3acbd332a39ed76d9ee66d4a5db7f4c7659efebeeb9d9d7c279de2e7f97e8b1ff9436daedde32f41689c3ecf69105959d20ffa749215cb34bbcc8a7c892e9ab4ce2f1942d6e60bfa8c1b11b6055e009a53fae2a2aaaf0d85daeca2f9e8d1f7be3ffa6895d56f693067cb69b99ee5445269d02c2a7c7c5c96d595fbb4cc9af655beac2e19bfa704d47c53d327cb0bf357595187d4c2fc9dcd6675de508ff2275314ddff92ff07355dccd3cb010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3e3af868247fbdc816f9478f96ebb21c7d34cb9b695dacdaa25a52936f674dda548b3cadab6c962eaba2c9d36c394b8b265de6efdab4add2769ea797797d9daeaab298e669d36678799c3ec9da795d558b269dd3bb8baacecb745a51d36279d18ca96fafab6735757656a6d76996ced6e9a45e176d3acbd332a39ed76d9ee66d4a5db7f4c7659efebeeb9d9d7c279de2e7f97e8b1ff9436daedde32f41689c3ecf69105959d20ffa749215cb34bbcc8a7c892e9ab4ce2f1942d6e60bfa8c1b11b6055e009a53fae2a2aaaf0d85daeca2f9e8d1f7be3ffa6895d56f693067cb69b99ee5445269d02c2a7c7c5c96d595fbb4cc9af655beac2e19bfa704d47c53d327cb0bf357595187d4c2fc9dcd6675de508ff2275314ddff92ff07355dccd3cb010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'e2d11466-9233-4485-8b8f-38370db96717', + '0d658f12-993a-4a1c-9e4c-56fb168c1987', 'elapsed-time', - '22', + '13', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:16:21 GMT', + 'Fri, 22 Jan 2021 00:08:03 GMT', 'Content-Length', - '411' ]); + '411' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', {"value":[{"@search.action":"delete","hotelId":"8","hotelName":null,"description":"Has 
some road noise and is next to the very police station. Bathrooms had morel coverings.","descriptionFr":"Il y a du bruit de la route et se trouve à côté de la station de police. Les salles de bain avaient des revêtements de morilles.","category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":null,"location":null,"address":null,"rooms":[]}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f1d7c34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab85c97a5697152cde893bd9d9d5ff2fd5ff2ff0038a311724a000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f1d7c34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab85c97a5697152cde893bd9d9d5ff2fd5ff2ff0038a311724a000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,9 +53,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '46164676-d72a-44f0-8c10-9f76412af3b4', + 'd82da534-4506-4df7-bd7a-d548d05e3f31', 'elapsed-time', - '37', + '27', 'OData-Version', '4.0', 'Preference-Applied', @@ -60,14 +63,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:16:21 GMT', + 'Fri, 22 Jan 2021 00:08:03 GMT', 'Content-Length', - '184' ]); + '184' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs/$count') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71f7cf8ff00fca50b5a04000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71f7cf8ff00fca50b5a04000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -80,9 +85,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '234d46b4-9003-4b97-8089-06e5d605d80c', + 'e5c9c220-83e6-4dc8-94c6-99e0fec82b68', 'elapsed-time', - '8', + '4', 'OData-Version', '4.0', 'Preference-Applied', @@ -90,6 +95,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:16:25 GMT', + 'Fri, 22 Jan 2021 00:08:07 GMT', 'Content-Length', - '126' ]); + '126' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_deletedocuments_delete_a_document_by_keykeynames.js b/sdk/search/search-documents/recordings/node/searchclient/recording_deletedocuments_delete_a_document_by_keykeynames.js index ddfa409a6be7..e166f6fcae39 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_deletedocuments_delete_a_document_by_keykeynames.js +++ 
b/sdk/search/search-documents/recordings/node/searchclient/recording_deletedocuments_delete_a_document_by_keykeynames.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', {"value":[{"@search.action":"delete","hotelId":"9"},{"@search.action":"delete","hotelId":"10"}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f3dfc68f451d366edbaf9e8515baff3d147795d57f51779d36417d470b92e4bd3e2a49ad1277b3b3bbf64645edfddf95aef7fff97fc3fafab5d608a000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f3dfc68f451d366edbaf9e8515baff3d147795d57f51779d36417d470b92e4bd3e2a49ad1277b3b3bbf64645edfddf95aef7fff97fc3fafab5d608a000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,7 +21,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '2c0b90d3-8a38-46fa-a94b-54a15b518255', + 'f40e90d1-430b-4d64-817b-04fca4b5e3df', 'elapsed-time', '36', 'OData-Version', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:16:42 GMT', + 'Fri, 22 Jan 2021 00:08:23 GMT', 'Content-Length', - '191' ]); + '191' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs/$count') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71f3cf87f006a950c2d04000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71f3cf87f006a950c2d04000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,7 +53,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '9f1778e6-1a86-4527-b28a-d4e5e703621a', + '63de6f03-c589-46c6-b7be-24ba9ff7688e', 'elapsed-time', '5', 'OData-Version', @@ -60,6 +63,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:16:46 GMT', + 'Fri, 22 Jan 2021 00:08:28 GMT', 'Content-Length', - '126' ]); + '126' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_getdocument_returns_the_correct_document_result.js b/sdk/search/search-documents/recordings/node/searchclient/recording_getdocument_returns_the_correct_document_result.js index c18bc0339a44..04d044bc8529 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_getdocument_returns_the_correct_document_result.js +++ 
b/sdk/search/search-documents/recordings/node/searchclient/recording_getdocument_returns_the_correct_document_result.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs(%278%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3e3af868247fbdc816f9478f96ebb21c7d34cb9b695dacdaa25a52936f674dda548b3cadab6c962eaba2c9d36c394b8b265de6efdab4add2769ea797797d9daeaab298e669d36678799c3ec9da795d558b269dd3bb8baacecb745a51d36279d18ca96fafab6735757656a6d76996ced6e9a45e176d3acbd332a39ed76d9ee66d4a5db7f4c7659efebeeb9d9d7c279de2e7f97e8b1ff9436daedde32f41689c3ecf69105959d20ffa749215cb34bbcc8a7c892e9ab4ce2f1942d6e60bfa8c1b11b6055e009a53fae2a2aaaf0d85daeca2f9e8d1f7be3ffa6895d56f693067cb69b99ee5445269d02c2a7c7c5c96d595fbb4cc9af655beac2e19bfa704d47c53d327cb0bf357595187d4c2fc9dcd6675de508ff2275314ddff92ff07355dccd3cb010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3e3af868247fbdc816f9478f96ebb21c7d34cb9b695dacdaa25a52936f674dda548b3cadab6c962eaba2c9d36c394b8b265de6efdab4add2769ea797797d9daeaab298e669d36678799c3ec9da795d558b269dd3bb8baacecb745a51d36279d18ca96fafab6735757656a6d76996ced6e9a45e176d3acbd332a39ed76d9ee66d4a5db7f4c7659efebeeb9d9d7c279de2e7f97e8b1ff9436daedde32f41689c3ecf69105959d20ffa749215cb34bbcc8a7c892e9ab4ce2f1942d6e60bfa8c1b11b6055e009a53fae2a2aaaf0d85daeca2f9e8d1f7be3ffa6895d56f693067cb69b99ee5445269d02c2a7c7c5c96d595fbb4cc9af655beac2e19bfa704d47c53d327cb0bf357595187d4c2fc9dcd6675de508ff2275314ddff92ff07355dccd3cb010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'a53d9d89-bd6c-4f35-848a-5b1e151145f6', + '6a038d16-aaff-4506-b9ee-ecbd497183e0', 'elapsed-time', - '37', + '36', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:15:45 GMT', + 'Fri, 22 Jan 2021 00:07:28 GMT', 'Content-Length', - '411' ]); + '411' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_getdocument_throws_error_for_invalid_getdocument_value.js b/sdk/search/search-documents/recordings/node/searchclient/recording_getdocument_throws_error_for_invalid_getdocument_value.js index 6a4e5c27f30f..b4c05b99dbc4 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_getdocument_throws_error_for_invalid_getdocument_value.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_getdocument_throws_error_for_invalid_getdocument_value.js @@ -7,19 +7,21 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs(%27garbxyz%27)') .query(true) - .reply(404, "", [ 'Cache-Control', + .reply(404, "", [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', 'Expires', '-1', 'request-id', - 
'f088a8e1-14ac-40f8-b627-1d6fa4f134e1', + 'afe19624-6fce-49f3-a38a-0415696deeee', 'elapsed-time', - '7', + '12', 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:16:03 GMT', + 'Fri, 22 Jan 2021 00:07:45 GMT', 'Content-Length', - '0' ]); + '0' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_deletes_existing_documents.js b/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_deletes_existing_documents.js index 5fc6cbd79890..ae343791bdbf 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_deletes_existing_documents.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_deletes_existing_documents.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', {"value":[{"@search.action":"delete","hotelId":"9"},{"@search.action":"delete","hotelId":"10"}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f3dfc68f451d366edbaf9e8515baff3d147795d57f51779d36417d470b92e4bd3e2a49ad1277b3b3bbf64645edfddf95aef7fff97fc3fafab5d608a000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f3dfc68f451d366edbaf9e8515baff3d147795d57f51779d36417d470b92e4bd3e2a49ad1277b3b3bbf64645edfddf95aef7fff97fc3fafab5d608a000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'f2e24d14-ec1b-47e5-a1cc-d176b2a9b6c1', + '7dae618c-ba38-4a13-a8d5-817ac127aeb4', 'elapsed-time', - '39', + '32', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:18:58 GMT', + 'Fri, 22 Jan 2021 00:10:32 GMT', 'Content-Length', - '191' ]); + '191' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs/$count') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71f3cf87f006a950c2d04000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71f3cf87f006a950c2d04000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,9 +53,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'ecb25bfc-bb01-47c9-a22a-6588d267e321', + 'b7d42c80-cad8-486f-916a-85f5c7dc4b28', 'elapsed-time', - '4', + '8', 'OData-Version', '4.0', 
'Preference-Applied', @@ -60,6 +63,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:19:02 GMT', + 'Fri, 22 Jan 2021 00:10:36 GMT', 'Content-Length', - '126' ]); + '126' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_merges_an_existing_document.js b/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_merges_an_existing_document.js index 7ef1ee1c4309..9e52074c98ff 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_merges_an_existing_document.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_merges_an_existing_document.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', {"value":[{"@search.action":"merge","hotelId":"8","description":"Modified Description"}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f1d7c34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab85c97a5697152cde893bd9d9d5ff2fd5ff2ff0038a311724a000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f1d7c34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab85c97a5697152cde893bd9d9d5ff2fd5ff2ff0038a311724a000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '0d78b989-a9f4-48b7-bf00-79ca66f21f32', + '7d6f4e9d-537a-4f43-b771-89c68746ef00', 'elapsed-time', - '33', + '31', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:19:20 GMT', + 'Fri, 22 Jan 2021 00:10:53 GMT', 'Content-Length', - '184' ]); + '184' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs(%278%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3e3af868247fbdc816f9478f96ebb21c7d34cb9b695dacdaa25a52932faa59715ee4b3f4a9f771d0e8594dcdcecaf43acdd2d93a9dd4eba24d67795a66695daddb3ccddbb4c9d396feb8ccd3df77bdb393efa453fc3cdf6ff1237fa8cd9b360340fcb5aaca629a8fd3e77993365959d20ffa749215cb34bbcc8a7c892e9ab4ce2f1942d6e60bfa8c1b2daabac00b6342734a5f5c54f5b5195b9b5d341f3dfadef7471fadb2fa6db1bc385b4ecbf52c276248836651e1e3e3b2acaedca765d6b4aff26575c9f83d25a0e69b9a3e595e98bfca8a3aa416e6ef6c36abf3867a943febaa5ad01fdffbfe2ff97f0015ab79db85010000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3e3af868247fbdc816f9478f96ebb21c7d34cb9b695dacdaa25a52932faa59715ee4b3f4a9f771d0e8594dcdcecaf43acdd2d93a9dd4eba24d67795a66695daddb3ccddbb4c9d396feb8ccd3df77bdb393efa453fc3cdf6ff1237fa8cd9b360340fcb5aaca629a8fd3e77993365959d20ffa749215cb34bbcc8a7c892e9ab4ce2f1942d6e60bfa8c1b2daabac00b6342734a5f5c54f5b5195b9b5d341f3dfadef7471fadb2fa6db1bc385b4ecbf52c276248836651e1e3e3b2acaedca765d6b4aff26575c9f83d25a0e69b9a3e595e98bfca8a3aa416e6ef6c36abf3867a943febaa5ad01fdffbfe2ff97f0015ab79db85010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,9 +53,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'd2f51a50-9e26-4769-a108-1a970bfbb172', + '24b60a4a-1a9a-48d6-bdc0-c6bf17180e7f', 'elapsed-time', - '8', + '11', 'OData-Version', '4.0', 'Preference-Applied', @@ -60,6 +63,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:19:23 GMT', + 'Fri, 22 Jan 2021 00:10:56 GMT', 'Content-Length', - '364' ]); + '364' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_mergeupload_documents.js b/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_mergeupload_documents.js index 3770b34e0af9..be2fc48768e1 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_mergeupload_documents.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_mergeupload_documents.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', {"value":[{"@search.action":"mergeOrUpload","hotelId":"8","description":"Modified Description"},{"@search.action":"mergeOrUpload","hotelId":"11","description":"New Hotel Description","lastRenovationDate":null}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f1d7c34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab85c97a5697152cde893bd9d9d5f3232afefee7e8df7777fc9f77fc9ff03dc317a548a000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f1d7c34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab85c97a5697152cde893bd9d9d5f3232afefee7e8df7777fc9f77fc9ff03dc317a548a000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'eb5ebc39-e3f3-4352-a7c3-f7973c9f5ad4', + '7321f046-fc52-42c4-b09b-3f7e4b27d997', 'elapsed-time', - '55', + '43', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; 
includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:19:41 GMT', + 'Fri, 22 Jan 2021 00:11:13 GMT', 'Content-Length', - '194' ]); + '194' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs(%278%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3e3af868247fbdc816f9478f96ebb21c7d34cb9b695dacdaa25a52932faa59715ee4b3f4a9f771d0e8594dcdcecaf43acdd2d93a9dd4eba24d67795a66695daddb3ccddbb4c9d396feb8ccd3df77bdb393efa453fc3cdf6ff1237fa8cd9b360340fcb5aaca629a8fd3e77993365959d20ffa749215cb34bbcc8a7c892e9ab4ce2f1942d6e60bfa8c1b2daabac00b6342734a5f5c54f5b5195b9b5d341f3dfadef7471fadb2fa6db1bc385b4ecbf52c276248836651e1e3e3b2acaedca765d6b4aff26575c9f83d25a0e69b9a3e595e98bfca8a3aa416e6ef6c36abf3867a943febaa5ad01fdffbfe2ff97f0015ab79db85010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3e3af868247fbdc816f9478f96ebb21c7d34cb9b695dacdaa25a52932faa59715ee4b3f4a9f771d0e8594dcdcecaf43acdd2d93a9dd4eba24d67795a66695daddb3ccddbb4c9d396feb8ccd3df77bdb393efa453fc3cdf6ff1237fa8cd9b360340fcb5aaca629a8fd3e77993365959d20ffa749215cb34bbcc8a7c892e9ab4ce2f1942d6e60bfa8c1b2daabac00b6342734a5f5c54f5b5195b9b5d341f3dfadef7471fadb2fa6db1bc385b4ecbf52c276248836651e1e3e3b2acaedca765d6b4aff26575c9f83d25a0e69b9a3e595e98bfca8a3aa416e6ef6c36abf3867a943febaa5ad01fdffbfe2ff97f0015ab79db85010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,9 +53,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '7d841694-a639-4e1a-9efb-7788d175aea4', + '83f3d63e-f7ee-479e-9595-6d2fb90014b9', 'elapsed-time', - '11', + '9', 'OData-Version', '4.0', 'Preference-Applied', @@ -60,14 +63,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:19:45 GMT', + 'Fri, 22 Jan 2021 00:11:18 GMT', 'Content-Length', - '364' ]); + '364' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs/$count') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddfd7f002dd054ff05000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddfd7f002dd054ff05000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -80,9 +85,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '998e405c-7ed1-4014-ac20-4ddb70df191f', + 'b3262c73-5c8a-464b-83cd-0f2b03ffcc38', 'elapsed-time', - '4', + '5', 'OData-Version', '4.0', 'Preference-Applied', @@ -90,6 +95,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 
'Date', - 'Wed, 04 Nov 2020 08:19:45 GMT', + 'Fri, 22 Jan 2021 00:11:18 GMT', 'Content-Length', - '127' ]); + '127' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_upload_a_new_document.js b/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_upload_a_new_document.js index f59bb06a836c..2bbfbcfd24d7 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_upload_a_new_document.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_indexdocuments_upload_a_new_document.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', {"value":[{"@search.action":"upload","hotelId":"11","description":"New Hotel Description","lastRenovationDate":null}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471fedee7e34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab95c97a5697152cde893bd9ddd5ff2fd5ff2ff00bc77d14f4b000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471fedee7e34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab95c97a5697152cde893bd9ddd5ff2fd5ff2ff00bc77d14f4b000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '029ae0eb-b85b-414b-b2d4-7d03491dc153', + '83d7134c-7634-44b1-8e47-13cff8f2b8e7', 'elapsed-time', - '29', + '32', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:18:36 GMT', + 'Fri, 22 Jan 2021 00:10:10 GMT', 'Content-Length', - '185' ]); + '185' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs/$count') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddfd7f002dd054ff05000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddfd7f002dd054ff05000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,9 +53,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'c9f583d5-3f36-4861-9b1f-8ac446aa37cc', + 'ca2d3515-72cd-45be-a3a5-3451beebf225', 'elapsed-time', - '5', + '4', 'OData-Version', '4.0', 'Preference-Applied', @@ -60,6 +63,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:18:40 GMT', + 
'Fri, 22 Jan 2021 00:10:15 GMT', 'Content-Length', - '127' ]); + '127' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_mergedocuments_modify__merge_an_existing_document.js b/sdk/search/search-documents/recordings/node/searchclient/recording_mergedocuments_modify__merge_an_existing_document.js index fc9c5db8dde7..96d8014f123f 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_mergedocuments_modify__merge_an_existing_document.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_mergedocuments_modify__merge_an_existing_document.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs(%276%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3efaf4a391fcf5225be41f3d5aaecb72f4d12c6fa675b16a8b6a494d5eafeb555d34c5f2a2bc4ef377ab7cd91497f938fda29ae565daac8b366fd2797699a7d932ada679b6dcbe2cf2ab3141f6003dab0df469d6e617557d6dfe6eb38be6a347dffbfee8a35556bfa56ece96d3723dcb093d69d02c2a7c7c5c96d595fbb4cc9af655beac2e33807f4a40cd37357db2bc307f951575482dccdfd96c56e70df5287fd655b5a03fbef7fd5ff2ff00978a208417010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3efaf4a391fcf5225be41f3d5aaecb72f4d12c6fa675b16a8b6a494d5eafeb555d34c5f2a2bc4ef377ab7cd91497f938fda29ae565daac8b366fd2797699a7d932ada679b6dcbe2cf2ab3141f6003dab0df469d6e617557d6dfe6eb38be6a347dffbfee8a35556bfa56ece96d3723dcb093d69d02c2a7c7c5c96d595fbb4cc9af655beac2e33807f4a40cd37357db2bc307f951575482dccdfd96c56e70df5287fd655b5a03fbef7fd5ff2ff00978a208417010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '660c080d-c536-4d24-bf59-a65c654508c8', + '38bdee25-9c71-4808-8994-dba17e279909', 'elapsed-time', - '13', + '19', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:17:48 GMT', + 'Fri, 22 Jan 2021 00:09:28 GMT', 'Content-Length', - '301' ]); + '301' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', {"value":[{"@search.action":"merge","hotelId":"6","hotelName":null,"description":"Modified Description","descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":null,"location":null,"address":null,"rooms":[]}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f7dfad1e8a3a6cdda75f3d1a3b65ee7a38ff2baaeea2ff2a6c92ea8e1725d96a6c54935a34ff676767ec9f77fc9ff037255c7d34a000000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f7dfad1e8a3a6cdda75f3d1a3b65ee7a38ff2baaeea2ff2a6c92ea8e1725d96a6c54935a34ff676767ec9f77fc9ff037255c7d34a000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,9 +53,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '935a4df1-d4cb-4f09-a93c-f72b08b05051', + 'e2fa228d-adc0-4fa2-a3a3-112eb2623fe6', 'elapsed-time', - '29', + '27', 'OData-Version', '4.0', 'Preference-Applied', @@ -60,14 +63,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:17:48 GMT', + 'Fri, 22 Jan 2021 00:09:28 GMT', 'Content-Length', - '184' ]); + '184' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs(%276%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3efaf4a391fcf5225be41f3d5aaecb72f4d12c6fa675b16a8b6a494dbea866c57991cfd2a7dec741a367b579739ab5f945555f9bbfdbeca2f9e8d1f7be3ffa6895d56f8be5c5d9725aae6739752d0d9a45858f8fcbb2ba729f9659d3beca97d56506f04f09a8f9a6a64f9617e6afb2a20ea985f93b9bcdeabca11ee5cfbaaa16f4c7f7beff4bfe1fcc8e7273f3000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3efaf4a391fcf5225be41f3d5aaecb72f4d12c6fa675b16a8b6a494dbea866c57991cfd2a7dec741a367b579739ab5f945555f9bbfdbeca2f9e8d1f7be3ffa6895d56f8be5c5d9725aae6739752d0d9a45858f8fcbb2ba729f9659d3beca97d56506f04f09a8f9a6a64f9617e6afb2a20ea985f93b9bcdeabca11ee5cfbaaa16f4c7f7beff4bfe1fcc8e7273f3000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -80,9 +85,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '12ca1c59-ba37-415c-b34d-bd04f38c0b2d', + '5cd33b5b-e229-4930-8b11-7a609f44a0ac', 'elapsed-time', - '11', + '8', 'OData-Version', '4.0', 'Preference-Applied', @@ -90,6 +95,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:17:52 GMT', + 'Fri, 22 Jan 2021 00:09:32 GMT', 'Content-Length', - '264' ]); + '264' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_mergeoruploaddocuments_merge_a_new_document.js b/sdk/search/search-documents/recordings/node/searchclient/recording_mergeoruploaddocuments_merge_a_new_document.js index cf8969383713..26aa61aa4a94 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_mergeoruploaddocuments_merge_a_new_document.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_mergeoruploaddocuments_merge_a_new_document.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', 
{"value":[{"@search.action":"mergeOrUpload","hotelId":"11","description":"New Hotel Description","lastRenovationDate":null}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471fedee7e34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab95c97a5697152cde893bd9ddd5ff2fd5ff2ff00bc77d14f4b000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471fedee7e34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab95c97a5697152cde893bd9ddd5ff2fd5ff2ff00bc77d14f4b000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '172dc265-05ec-4fe6-80fa-51ac558b9158', + 'bbae313e-3069-43a2-884e-23c089d177b4', 'elapsed-time', - '32', + '31', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:17:26 GMT', + 'Fri, 22 Jan 2021 00:09:07 GMT', 'Content-Length', - '185' ]); + '185' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs/$count') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddfd7f002dd054ff05000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddfd7f002dd054ff05000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,9 +53,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '44126448-3ad5-4cc9-80a6-1927fe55ddbc', + '127256d0-6cd5-4889-9344-bd9eb27e8379', 'elapsed-time', - '7', + '4', 'OData-Version', '4.0', 'Preference-Applied', @@ -60,6 +63,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:17:30 GMT', + 'Fri, 22 Jan 2021 00:09:11 GMT', 'Content-Length', - '127' ]); + '127' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_mergeoruploaddocuments_modify__merge_an_existing_document.js b/sdk/search/search-documents/recordings/node/searchclient/recording_mergeoruploaddocuments_modify__merge_an_existing_document.js index 1d04d3e784e3..8f14fcf92976 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_mergeoruploaddocuments_modify__merge_an_existing_document.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_mergeoruploaddocuments_modify__merge_an_existing_document.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) 
.get('/indexes(%27hotel-live-test1%27)/docs(%276%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3efaf4a391fcf5225be41f3d5aaecb72f4d12c6fa675b16a8b6a494d5eafeb555d34c5f2a2bc4ef377ab7cd91497f938fda29ae565daac8b366fd2797699a7d932ada679b6dcbe2cf2ab3141f6003dab0df469d6e617557d6dfe6eb38be6a347dffbfee8a35556bfa56ece96d3723dcb093d69d02c2a7c7c5c96d595fbb4cc9af655beac2e33807f4a40cd37357db2bc307f951575482dccdfd96c56e70df5287fd655b5a03fbef7fd5ff2ff00978a208417010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3efaf4a391fcf5225be41f3d5aaecb72f4d12c6fa675b16a8b6a494d5eafeb555d34c5f2a2bc4ef377ab7cd91497f938fda29ae565daac8b366fd2797699a7d932ada679b6dcbe2cf2ab3141f6003dab0df469d6e617557d6dfe6eb38be6a347dffbfee8a35556bfa56ece96d3723dcb093d69d02c2a7c7c5c96d595fbb4cc9af655beac2e33807f4a40cd37357db2bc307f951575482dccdfd96c56e70df5287fd655b5a03fbef7fd5ff2ff00978a208417010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,7 +21,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '1d6a41ae-5184-4f80-89c1-7fa1d0906a2d', + '400bd9fd-3d50-4104-8d8d-e9ae908628a2', 'elapsed-time', '11', 'OData-Version', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:17:04 GMT', + 'Fri, 22 Jan 2021 00:08:45 GMT', 'Content-Length', - '301' ]); + '301' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', {"value":[{"@search.action":"mergeOrUpload","hotelId":"6","hotelName":null,"description":"Modified Description","descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":null,"location":null,"address":null,"rooms":[]}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f7dfad1e8a3a6cdda75f3d1a3b65ee7a38ff2baaeea2ff2a6c92ea8e1725d96a6c54935a34ff676767ec9f77fc9ff037255c7d34a000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471f7dfad1e8a3a6cdda75f3d1a3b65ee7a38ff2baaeea2ff2a6c92ea8e1725d96a6c54935a34ff676767ec9f77fc9ff037255c7d34a000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,9 +53,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '0af996b0-dced-4aa8-9f0e-e680f30d9576', + '926c8083-94cd-464b-bd55-4da9d6b656bd', 'elapsed-time', - '45', + '30', 'OData-Version', '4.0', 'Preference-Applied', @@ -60,14 +63,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 
'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:17:04 GMT', + 'Fri, 22 Jan 2021 00:08:45 GMT', 'Content-Length', - '184' ]); + '184' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs(%276%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3efaf4a391fcf5225be41f3d5aaecb72f4d12c6fa675b16a8b6a494dbea866c57991cfd2a7dec741a367b579739ab5f945555f9bbfdbeca2f9e8d1f7be3ffa6895d56f8be5c5d9725aae6739752d0d9a45858f8fcbb2ba729f9659d3beca97d56506f04f09a8f9a6a64f9617e6afb2a20ea985f93b9bcdeabca11ee5cfbaaa16f4c7f7beff4bfe1fcc8e7273f3000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147f3aacdcbb3d9478f3efaf4a391fcf5225be41f3d5aaecb72f4d12c6fa675b16a8b6a494dbea866c57991cfd2a7dec741a367b579739ab5f945555f9bbfdbeca2f9e8d1f7be3ffa6895d56f8be5c5d9725aae6739752d0d9a45858f8fcbb2ba729f9659d3beca97d56506f04f09a8f9a6a64f9617e6afb2a20ea985f93b9bcdeabca11ee5cfbaaa16f4c7f7beff4bfe1fcc8e7273f3000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -80,9 +85,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '8aa7d716-f13b-4869-9f2f-59edf49e13bb', + '7f561a83-75ca-4f5f-873b-e980afd9d656', 'elapsed-time', - '11', + '10', 'OData-Version', '4.0', 'Preference-Applied', @@ -90,6 +95,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:17:08 GMT', + 'Fri, 22 Jan 2021 00:08:50 GMT', 'Content-Length', - '264' ]); + '264' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_search_returns_the_correct_search_result.js b/sdk/search/search-documents/recordings/node/searchclient/recording_search_returns_the_correct_search_result.js index e56bbae33a47..1c4d45b166bf 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_search_returns_the_correct_search_result.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_search_returns_the_correct_search_result.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.post.search', {"count":true,"search":"budget","skip":0,"top":5}) .query(true) - .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cadd6cbf6a3479f8e3ebaccca75fed1a3efd1574d9ed5d3f9b89956357db23bdebfb773ffde8307a38fe6559b9767b38f1e7d74ef23fdeb45b6a0361f9d4eab65f5bacdaee9f359de4ceb62d516d592bef9c9bcbe4e57d56a5d6675caafa4c5326dabab65d8f4594d8dbffdfbae7776cef7d1a8ccd355b96ef4d5a2ced37c995e166599d37bd3accd2faafa9a5e79b29e5de42d7dd666170de1ffd155715ed09f13f9fcfba38f5659fdb6585e9c2da7e57a9613f66dbdce471f358b0a1f1f976575854fcfb3b2a18fcbac695fe5cbea3203564fa923ea64f7e1c3fbdb3b0fb67776dfec3c78b4b343fffb29eaa3a636cb8b8f1eed8f77e8c58ab0a2573e7af48b3f6aaf5778ed6555107509ddaaaa67c5926001c3edddbdbdf1eebdddfb44d2fd4fc79f3e38b87fb04b784e6bfad6bebb046109f9ba5ae5755be0cd5f2c1f12b55fbefefcd1febdbd4f3ffa25bfe4978c3eca66b33a6fa8c1725d968455552de88fef7dff978c2293b97b40cf1e4d38cf054fe6ee0e75c47fea6c9e802beaeba698e5e9b7f1397def4d15b5789d5de6e97a45f398dedff9ddd3eafc3c6deb6c56e0ebac94796ec669faaccef3f4bbc5b362945ed479d6a6864ae992f04a67c4066085517a4ea8a76f8b763acfe9afabac99e775fa0bd3597d9dd7a3746fffee83b459af5655dd8ed249755512ddd38c98e19a5e2dda250d3f9de6cb965eca96b37441831d8748337f81bda60fc1ab8ba2c97f90fef4baf945eb8ff169bec30369d635715e93cef1193362e306b6a4bf684c180e8d266bd70521d3d6689a1f34e9a4200e6d8a76cd1f3c4c57f69bd99a91abf36d66e0513a5d174db1ccd369b55895d2aaa58f4ba2eabac969dc0d7ff890a8417f8f082d1e3ad3c18e9fc030d09466c9d0206f53969a7c89f9060988dc83a2b2b7bf3daf68c4e775b56c094cf3366df2fab29882f3a634a9347924ac65359940b089c9da6c5d67c4d3c4ae3db15201eaca9548db90583ddcdea1ff7d1a15ab7bef27560f0ec60ff777f60fee8deedd1f3fdcd9dffd94b0fc70a1fac51f3544e4bc3d361f7cf4e9c3dd9df459769db76dce139abe9a11d469d182ca4fd7f53c5bd0df44ac367f595797c59208fae8a31727f4212b5c9e8daf5e1fd3dfab8a9a9527d50c2df61e3cd8bdf711756e44f817fb1c4c0d5e13cb113beca6bf1711287d92cfd2ade345be24eecc9b3b04cd6bcdfc6edb3b2e25c62501298b36dd7a2d73cd6f2a81f805fa739235f92b42ffa3477be3fd7dfa3b9f7dc96009ab8f5cf7d4b229f37cd5b0c6a0c6f47774fa0dc729532db2b779fd112b280f65022d2c9abea2f103ed9f58e72453d4d10de33c218a4f4812304823dfbf88fa25109bc7ebf5471fba513f187ffa90fe0e476db1a1a6370d5b856160dc3cf28e6ade193ff8f4c1ce2efa65f5c9aaf92175c57fa9667e9d4f6bc2972520fd025f50038f1ad4e4cd3c17fd9b164d4a1a9cb4e435893049119191146f4bdf2f32126b523e8bbc9e16a4afb39a34e735a971fe168c0cb9c7ef73c2b1c5172ff2abf4f7a9eab7e3f4383da7df17c5724d8297665719356ed237c522ffb8495fffa2754613019af3eb45d356753135aa8a00e163f4304ab326bdca49edd3cf8a3ead497565343d6844c3cb49da5a6a4d460364b308d018180e310401cea8cf05c9509ab5a4a7a76d4176099d4f2b9a915545f39ed1489971a00d3d5231e33cffd869fa14fd1533fc9d3fccca9c18aef5f539db868cd43ac973b1a206201b7f77402373d464854c0d453510c7acca9ca839a5a6bbf7ef1110fada0c06d424462de9ff340e25295980b6aed6341282c234810569f3c58a143b9950ea8c50a33e95baf432605263ee92085b2c9b0270d3d9c7d99a5a362400f9fa1dfd4984253c6838a21088b8bf685da4e7a2ff2d5a69f9f19aa84cd41290f43efd9f2d8ba33323e3e84c7f1384f2e3e305437ec87881e6e03c6381aa758b8fe9532319abaa020f938747b09662684990e823fa735ae4f545fe8d599b073be4c16def1ebcd939205343fff3adcd7b3a710fee8d1f3eb8bfbf736fb40fc9ddb97ff0b3666e1e3c48efb7f3f4f812c0889741493353f4091990d0cefc3ef4e1463bb3bbb3b3b7b7c9ce786a113ad42abe74eb84ba875f081deabdc3c2743b2d0c3980c0e1074915f317a029cdbcaee943a7911f7ea84616ce306c7739adefce2e67a48b49137b0321b01e0640ddd8ba74eb0bc026d94a7fb2c8af80b2f7de4d04881ae01ec01b687030dee9d3c0e0472dbf1e09461ffd74365dffe00745daae2744102649cf38edddbf77efdec37b04958d0b5ba73d7a97ff5
2ebf4aacaa6f301b374429664050dcb2f9082a7d8e16a394ecf96e7a44e4867911aa72fa030bc1799ac6154b8207924671ff682e69ed9c741592fe97b6a083024c956ef3041e93333766e447fbf7794289f0ee89783bded9dfded3dd22f116f76f7fdf4cbae172412efff3083c4f864ef5347fc974ef6e9bb1500329736f49d376bf4ed4b7253c88bb8a8aa99cc78d882e7f5b8410c36219fc419616a46141a9cb71f05f74c6dccdbf77fc9ff03a337a51ac6110000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cadd6cbf6a3479f8e3ebaccca75fed1a3efd1574d9ed5d3f9b89956357db23bdebfb773ffde8307a38fe6559b9767b38f1e7d74ef23fdeb45b6a0361f9d4eab65f5bacdaee9f359de4ceb62d516d592bef9c9bcbe4e57d56a5d6675caafa4c5326dabab65d8f4594d8dbffdfbae7776cef7d1a8ccd355b96ef4d5a2ced37c995e166599d37bd3accd2faafa9a5e79b29e5de42d7dd666170de1ffd155715ed09f13f9fcfba38f5659fdb6585e9c2da7e57a9613f66dbdce471f358b0a1f1f976575854fcfb3b2a18fcbac695fe5cbea3203564fa923ea64f7e1c3fbdb3b0fb67776dfec3c78b4b343fffb29eaa3a636cb8b8f1eed8f77e8c58ab0a2573e7af48b3f6aaf5778ed6555107509ddaaaa67c5926001c3edddbdbdf1eebdddfb44d2fd4fc79f3e38b87fb04b784e6bfad6bebb046109f9ba5ae5755be0cd5f2c1f12b55fbefefcd1febdbd4f3ffa25bfe4978c3eca66b33a6fa8c1725d968455552de88fef7dff978c2293b97b40cf1e4d38cf054fe6ee0e75c47fea6c9e802beaeba698e5e9b7f1397def4d15b5789d5de6e97a45f398dedff9ddd3eafc3c6deb6c56e0ebac94796ec669faaccef3f4bbc5b362945ed479d6a6864ae992f04a67c4066085517a4ea8a76f8b763acfe9afabac99e775fa0bd3597d9dd7a3746fffee83b459af5655dd8ed249755512ddd38c98e19a5e2dda250d3f9de6cb965eca96b37441831d8748337f81bda60fc1ab8ba2c97f90fef4baf945eb8ff169bec30369d635715e93cef1193362e306b6a4bf684c180e8d266bd70521d3d6689a1f34e9a4200e6d8a76cd1f3c4c57f69bd99a91abf36d66e0513a5d174db1ccd369b55895d2aaa58f4ba2eabac969dc0d7ff890a8417f8f082d1e3ad3c18e9fc030d09466c9d0206f53969a7c89f9060988dc83a2b2b7bf3daf68c4e775b56c094cf3366df2fab29882f3a634a9347924ac65359940b089c9da6c5d67c4d3c4ae3db15201eaca9548db90583ddcdea1ff7d1a15ab7bef27560f0ec60ff777f60fee8deedd1f3fdcd9dffd94b0fc70a1fac51f3544e4bc3d361f7cf4e9c3dd9df459769db76dce139abe9a11d469d182ca4fd7f53c5bd0df44ac367f595797c59208fae8a31727f4212b5c9e8daf5e1fd3dfab8a9a9527d50c2df61e3cd8bdf711756e44f817fb1c4c0d5e13cb113beca6bf1711287d92cfd2ade345be24eecc9b3b04cd6bcdfc6edb3b2e25c62501298b36dd7a2d73cd6f2a81f805fa739235f92b42ffa3477be3fd7dfa3b9f7dc96009ab8f5cf7d4b229f37cd5b0c6a0c6f47774fa0dc729532db2b779fd112b280f65022d2c9abea2f103ed9f58e72453d4d10de33c218a4f4812304823dfbf88fa25109bc7ebf5471fba513f187ffa90fe0e476db1a1a6370d5b856160dc3cf28e6ade193ff8f4c1ce2efa65f5c9aaf92175c57fa9667e9d4f6bc2972520fd025f50038f1ad4e4cd3c17fd9b164d4a1a9cb4e435893049119191146f4bdf2f32126b523e8bbc9e16a4afb39a34e735a971fe168c0cb9c7ef73c2b1c5172ff2abf4f7a9eab7e3f4383da7df17c5724d8297665719356ed237c522ffb8495fffa2754613019af3eb45d356753135aa8a00e163f4304ab326bdca49edd3cf8a3ead497565343d6844c3cb49da5a6a4d460364b308d018180e310401cea8cf05c9509ab5a4a7a76d4176099d4f2b9a915545f39ed1489971a00d3d5231e33cffd869fa14fd1533fc9d3fccca9c18aef5f539db868cd43ac973b1a206201b7f77402373d464854c0d453510c7acca9ca839a5a6bbf7ef1110fada0c06d424462de9ff340e25295980b6aed6341282c234810569f3c58a143b9950ea8c50a33e95baf432605263ee92085b2c9b0270d3d9c7d99a5a362400f9fa1dfd4984253c6838a21088b8bf685da4e7a2ff2d5a69f9f19aa84cd41290f43efd9f2d8ba33323e3e84c7f1384f2e3e305437ec87881e6e03c6381aa758b8fe9532319abaa020f938747b09662684990e823fa735ae4f545fe8d599b073be4c16def1ebcd939205343fff3adcd7b3a7
10fee8d1f3eb8bfbf736fb40fc9ddb97ff0b3666e1e3c48efb7f3f4f812c0889741493353f4091990d0cefc3ef4e1463bb3bbb3b3b7b7c9ce786a113ad42abe74eb84ba875f081deabdc3c2743b2d0c3980c0e1074915f317a029cdbcaee943a7911f7ea84616ce306c7739adefce2e67a48b49137b0321b01e0640ddd8ba74eb0bc026d94a7fb2c8af80b2f7de4d04881ae01ec01b687030dee9d3c0e0472dbf1e09461ffd74365dffe00745daae2744102649cf38edddbf77efdec37b04958d0b5ba73d7a97ff52ebf4aacaa6f301b374429664050dcb2f9082a7d8e16a394ecf96e7a44e4867911aa72fa030bc1799ac6154b8207924671ff682e69ed9c741592fe97b6a083024c956ef3041e93333766e447fbf7794289f0ee89783bded9dfded3dd22f116f76f7fdf4cbae172412efff3083c4f864ef5347fc974ef6e9bb1500329736f49d376bf4ed4b7253c88bb8a8aa99cc78d882e7f5b8410c36219fc419616a46141a9cb71f05f74c6dccdbf77fc9ff03a337a51ac6110000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '65f2fd63-f001-4d66-a118-ae440e55b2e3', + '7c6f248d-955e-4bbf-9436-64ec72b0a113', 'elapsed-time', - '123', + '129', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:14:33 GMT', + 'Fri, 22 Jan 2021 00:06:20 GMT', 'Content-Length', - '1958' ]); + '1958' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_search_returns_zero_results_for_invalid_query.js b/sdk/search/search-documents/recordings/node/searchclient/recording_search_returns_zero_results_for_invalid_query.js index 6dc5c7f02e59..99cc1a346295 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_search_returns_zero_results_for_invalid_query.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_search_returns_zero_results_for_invalid_query.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.post.search', {"count":true,"search":"garbxyz","skip":0,"top":5}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cadd6cbf6a3473ba38f2eb3729d7ff4e87bdfff25ff0f36f0f25c1d000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cadd6cbf6a3473ba38f2eb3729d7ff4e87bdfff25ff0f36f0f25c1d000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'd761e77d-c6fa-4b93-811f-87d0dd222110', + '6175427f-80b7-4eed-ac66-656264f9eac4', 'elapsed-time', - '37', + '34', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:14:52 GMT', + 'Fri, 22 Jan 2021 00:06:37 GMT', 'Content-Length', - '149' ]); + '149' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_suggest_returns_the_correct_suggestions.js 
b/sdk/search/search-documents/recordings/node/searchclient/recording_suggest_returns_the_correct_suggestions.js index 6cb6a0720e06..5a57db2581f7 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_suggest_returns_the_correct_suggestions.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_suggest_returns_the_correct_suggestions.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.post.suggest', {"search":"WiFi","suggesterName":"sg"}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1efd9e4593d9d8fdbfc5dfbd1a38f5e679779ba5ea56d95dedff9ddd3eafc3c6deb6c56b445b5ccca745eb579d98cd3f4599de7e9778b67c528bda8f3ac4dcb6a9aa151ba2478e9acba5ab6f4ff517abe2ecbf46dd14ee739fd759535f3bc4e7f613aabaff37a94eeeddf7d9036ebd5aaaadb513aa9aeca6279916665995fd3ab45bbcc9b269de6cb965eca96b37451d5f9f8a3d1478cc7d98c10deddf9e8977cff97fc3f4bd5b195d2000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1efd9e4593d9d8fdbfc5dfbd1a38f5e679779ba5ea56d95dedff9ddd3eafc3c6deb6c56b445b5ccca745eb579d98cd3f4599de7e9778b67c528bda8f3ac4dcb6a9aa151ba2478e9acba5ab6f4ff517abe2ecbf46dd14ee739fd759535f3bc4e7f613aabaff37a94eeeddf7d9036ebd5aaaadb513aa9aeca6279916665995fd3ab45bbcc9b269de6cb965eca96b37451d5f9f8a3d1478cc7d98c10deddf9e8977cff97fc3f4bd5b195d2000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'eb23c788-6d59-4b93-b447-a2327171a32e', + '1fdac2f5-997b-4f02-8b76-71119e59d4b9', 'elapsed-time', - '197', + '104', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:15:10 GMT', + 'Fri, 22 Jan 2021 00:06:53 GMT', 'Content-Length', - '295' ]); + '295' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_suggest_returns_zero_suggestions_for_invalid_input.js b/sdk/search/search-documents/recordings/node/searchclient/recording_suggest_returns_zero_suggestions_for_invalid_input.js index 1825930718f8..13e25720995f 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_suggest_returns_zero_suggestions_for_invalid_input.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_suggest_returns_zero_suggestions_for_invalid_input.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.post.suggest', {"search":"garbxyz","suggesterName":"sg"}) .query(true) - .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bdefff92ff0742ea40440c000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bdefff92ff0742ea40440c000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '35d89143-625e-40c2-acfc-a4e489fbfaa2', + 'fc8de0d5-9d32-4342-8c87-d85158f86a42', 'elapsed-time', - '56', + '63', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:15:28 GMT', + 'Fri, 22 Jan 2021 00:07:11 GMT', 'Content-Length', - '133' ]); + '133' +]); diff --git a/sdk/search/search-documents/recordings/node/searchclient/recording_uploaddocuments_upload_a_set_of_documents.js b/sdk/search/search-documents/recordings/node/searchclient/recording_uploaddocuments_upload_a_set_of_documents.js index a428e06e81ad..b35c3491e78a 100644 --- a/sdk/search/search-documents/recordings/node/searchclient/recording_uploaddocuments_upload_a_set_of_documents.js +++ b/sdk/search/search-documents/recordings/node/searchclient/recording_uploaddocuments_upload_a_set_of_documents.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .post('/indexes(%27hotel-live-test1%27)/docs/search.index', {"value":[{"@search.action":"upload","hotelId":"11","description":"New Hotel Description","lastRenovationDate":null},{"@search.action":"upload","hotelId":"12","description":"New Hotel II Description","lastRenovationDate":null}]}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471fedee7e34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab95c97a5697152cde893bd9ddd5f32b2efef7dadf7bfff4bfe1fabe085b98b000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef1479759b9ce3f7af4bd5ffcd1dbfcfaa3471fedee7e34faa869b376dd7cf4a8add7f9e8a3bcaeabfa8bbc69b20b6ab95c97a5697152cde893bd9ddd5f32b2efef7dadf7bfff4bfe1fabe085b98b000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'a037e00a-fe36-4756-a237-2508e56918db', + '24002421-abe3-45ed-93f9-9e23f0b49b30', 'elapsed-time', - '50', + '33', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:18:09 GMT', + 'Fri, 22 Jan 2021 00:09:49 GMT', 'Content-Length', - '191' ]); + '191' +]); 
nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test1%27)/docs/$count') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddfb7f0097815d6605000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f22fec7bff71fdcddfb7f0097815d6605000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,9 +53,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '7ac1e328-bc39-4db8-9548-0703a0938160', + 'd426ef37-13d2-4cb0-8f31-fde7117dd132', 'elapsed-time', - '6', + '8', 'OData-Version', '4.0', 'Preference-Applied', @@ -60,6 +63,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:18:13 GMT', + 'Fri, 22 Jan 2021 00:09:53 GMT', 'Content-Length', - '127' ]); + '127' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_creates_the_index_object_using_createorupdateindex.js b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_creates_the_index_object_using_createorupdateindex.js index b75e0134626d..ff2dde169bca 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_creates_the_index_object_using_createorupdateindex.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_creates_the_index_object_using_createorupdateindex.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .put('/indexes(%27hotel-live-test4%27)', {"name":"hotel-live-test4","fields":[{"name":"id","type":"Edm.String","key":true,"searchable":false,"filterable":false,"sortable":false,"facetable":false},{"name":"awesomenessLevel","type":"Edm.Double","searchable":false,"filterable":true,"sortable":true,"facetable":true},{"name":"description","type":"Edm.String","searchable":true,"filterable":false,"sortable":false,"facetable":false},{"name":"details","type":"Edm.ComplexType","fields":[{"name":"tags","type":"Collection(Edm.String)","searchable":true}]},{"name":"hiddenWeight","type":"Edm.Int32","retrievable":false,"searchable":false,"filterable":false,"sortable":false,"facetable":false}]}) .query(true) - .reply(201, 
{"@odata.context":"https://endpoint/$metadata#indexes/$entity","@odata.etag":"\"0x8D8809AA1562A9F\"","name":"hotel-live-test4","defaultScoringProfile":null,"fields":[{"name":"id","type":"Edm.String","searchable":false,"filterable":false,"retrievable":true,"sortable":false,"facetable":false,"key":true,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]},{"name":"awesomenessLevel","type":"Edm.Double","searchable":false,"filterable":true,"retrievable":true,"sortable":true,"facetable":true,"key":false,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]},{"name":"description","type":"Edm.String","searchable":true,"filterable":false,"retrievable":true,"sortable":false,"facetable":false,"key":false,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]},{"name":"details","type":"Edm.ComplexType","fields":[{"name":"tags","type":"Collection(Edm.String)","searchable":true,"filterable":true,"retrievable":true,"sortable":false,"facetable":true,"key":false,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]}]},{"name":"hiddenWeight","type":"Edm.Int32","searchable":false,"filterable":false,"retrievable":false,"sortable":false,"facetable":false,"key":false,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]}],"scoringProfiles":[],"corsOptions":null,"suggesters":[],"analyzers":[],"tokenizers":[],"tokenFilters":[],"charFilters":[],"encryptionKey":null,"similarity":{"@odata.type":"#Microsoft.Azure.Search.BM25Similarity","k1":null,"b":null}}, [ 'Cache-Control', + .reply(201, {"@odata.context":"https://endpoint/$metadata#indexes/$entity","@odata.etag":"\"0x8D8BE6A79A726A9\"","name":"hotel-live-test4","defaultScoringProfile":null,"fields":[{"name":"id","type":"Edm.String","searchable":false,"filterable":false,"retrievable":true,"sortable":false,"facetable":false,"key":true,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]},{"name":"awesomenessLevel","type":"Edm.Double","searchable":false,"filterable":true,"retrievable":true,"sortable":true,"facetable":true,"key":false,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]},{"name":"description","type":"Edm.String","searchable":true,"filterable":false,"retrievable":true,"sortable":false,"facetable":false,"key":false,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]},{"name":"details","type":"Edm.ComplexType","fields":[{"name":"tags","type":"Collection(Edm.String)","searchable":true,"filterable":true,"retrievable":true,"sortable":false,"facetable":true,"key":false,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]}]},{"name":"hiddenWeight","type":"Edm.Int32","searchable":false,"filterable":false,"retrievable":false,"sortable":false,"facetable":false,"key":false,"indexAnalyzer":null,"searchAnalyzer":null,"analyzer":null,"synonymMaps":[]}],"scoringProfiles":[],"corsOptions":null,"suggesters":[],"analyzers":[],"tokenizers":[],"tokenFilters":[],"charFilters":[],"encryptionKey":null,"similarity":{"@odata.type":"#Microsoft.Azure.Search.BM25Similarity","k1":null,"b":null}}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -16,13 +17,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AA1562A9F"', + 'W/"0x8D8BE6A79A726A9"', 'Location', - 'https://endpoint/indexes(\'hotel-live-test4\')?api-version=2020-06-30', + "https://endpoint/indexes('hotel-live-test4')?api-version=2020-06-30", 'request-id', - 
'98388f31-8c76-4584-bd97-ef196eace968', + 'cc057090-afd9-40b6-942e-04946040c48b', 'elapsed-time', - '610', + '537', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:21:28 GMT', + 'Fri, 22 Jan 2021 00:12:58 GMT', 'Content-Length', - '1589' ]); + '1589' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test4%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f0e761e1e1fefdeff74eff8e1b3dff7236ab5cc16397d3dafdabcdc2e8bcb7c1bddeed337b3fc3c5b97edeb695517cb8b9775755e94d474b92ecbd147e7455ece9a8f1e7def171b08c58cde69af57f8fd74b618bf6ef11a7d26686713bc7c9e954d8eb7cb36af838fea9cdae797f2595bafe9a3a6aa5bf95bdb9c67531a8dffc9dbfcdab466021c2fb3f2fa07796dd094bebb9f669dbf9beb65b5bc5e7c91ad30a2efff92911d54769537d5225fe64df33cbfcc4b1a8e37c4a7d51ac81000ee26c0cc1fa220b87184f2b73740f980c7a7107f560638cb9b695dacdaa25ad238bcb145a74f90f287a6b86d1c9bb6f106a79ffc1046d76645d9d028bc919d548b5599bf7b834f08ab1e2393bc786f9c5465994f419f2dbc2c64b943df0b6e321e19b14f17f9e43dc9222d7e96a8e2d3655ecc66f9f2bb7971316f69283a548cef6cd9dedba38fa40f414b71f187a71f05e3d3cf360d503ff9d91a217d14282bfe78f4117dd67cc92c4e1fe8abeb8b0b5273794d9fa08901ad7fb6d5db7c5974fe7ec6e3d74f883675f041be9cd6d7dcc9ef85d16937c5a228b31afaf89155f44aed1fffa298d655539db7e3e31faceb7cfc9a073d7ef2c5defdd7ee3da2d6ae0137915f7ec92ff97f00baedc17735060000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f9e9c7e7afce0e1f183bd4f8f1ffebe1f51ab65b6c8e9eb79d5e6e576595ce6dbe8769fbe99e5e7d9ba6c5f4fabba585ebcacabf3a2a4a6cb75598e3e3a2ff272d67cf4e87bbfd8402866f44e7bbdc2efa7b3c5f8758bd7e833413b9be0e5f3ac6c72bc5db6791d7c54e7d43ebf94cfda7a4d1f3555ddcadfdae63c9bd268fc4fdee6d7a63513e0789995d73fc86b83a6f4ddfd34ebfcdd5c2fabe5f5e28b6c85117dff978ceca0b2abbca916f9326f9ae7f9655ed270bc213eadd640860070370166fe1005c18d2394bfbd01ca073c3e85f8b332c059de4ceb62d516d592c6e18d2d3a7d82943f34c56de3d8b48d3738fde48730ba362bca8646e18deca45aaccafcdd1b7c4258f51899e4c57be3a42acb7c0afa6ce16521cb1dfa5e7093f1c8887dbac827ef491669f1b344159f2ef36236cb97dfcd8b8b794b43d1a1627c67cbf6de1e7d247d085a8a8b3f3cfd28189f7eb66980fac9cfd608e9a34059f1c7a38fe8b3e64b6671fa405f5d5f5c909acb6bfa044d0c68fdb3addee6cba2f3f7331ebf7e42b4a9830ff2e5b4bee64e7e2f8c4ebb29164599d5d0c78faca2576afff817c5b4ae9aeabc1d1fff605de7e3d73ce8f1932ff6eebf76ef11b5760db889fcf24b7ec9ff03f09f854035060000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -48,13 +51,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AA1562A9F"', + 'W/"0x8D8BE6A79A726A9"', 'Vary', 'Accept-Encoding', 'request-id', - 'a7c16f6b-fe61-4efa-b6d6-d10d39f26216', + 
'43c18e09-236b-4dc2-b6da-c46b71959ada', 'elapsed-time', - '17', + '21', 'OData-Version', '4.0', 'Preference-Applied', @@ -62,24 +65,27 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:21:28 GMT', + 'Fri, 22 Jan 2021 00:12:58 GMT', 'Content-Length', - '662' ]); + '662' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .delete('/indexes(%27hotel-live-test4%27)') .query(true) - .reply(204, "", [ 'Cache-Control', + .reply(204, "", [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', 'Expires', '-1', 'request-id', - '8a6ab71f-f89b-4f43-98b5-e35cb05bff45', + '3ad3a4d7-5121-451c-8fd9-98c1d607216b', 'elapsed-time', - '131', + '118', 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:21:28 GMT' ]); + 'Fri, 22 Jan 2021 00:12:58 GMT' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_correct_index_object.js b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_correct_index_object.js index 0406f8983ad2..baefe45ffb9e 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_correct_index_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_correct_index_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test3%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f0e761e1e3fbc77ffe4e9fd83e3dff7236ab5cc16397d3dafdabcdc2e8bcb7c1bdddea36f66f979b62edbd7d3aa2e96172febeabc28a9e9725d96a38fce8bbc9c351f3dfade2f36108a19bdd35eaff0fbe96c317edde235fa4cd0ce2678f93c2b9b1c6f976d5e071fd539b5cf2fe5b3b65ed3474d55b7f2b7b639cfa6341aff93b7f9b569cd04385e66e5f50ff2daa0297d773fcd3a7f37d7cb6a79bdf8225b6144dfff25233ba8ec2a6faa45becc9be6797e9997341c6f884fab35902100dc4d80993f444170e308e56f6f80f2018f4f21feac0c709637d3ba58b545b5a47178638b4e9f20e50f4d71db38366de30d4e3ff9218caecd8ab2a15178233ba916ab327ff7069f10563d462679f1de38a9ca329f823e5b7859c87287be17dc643c32629f2ef2c97b92455afc2c51c5a7cbbc98cdf2e577f3e262ded25074a818dfd9b2bdb7471f491f8296e2e20f4f3f0ac6a79f6d1aa07ef2b33542fa285056fcf1e823faacf992599c3ed057d71717a4e6f29a3e4113035aff6cabb7f9b2e8fcfd8cc7af9f106deae0837c39adafb993df0ba3d36e8a45516635f4f123abe895da3ffe4531adaba63a6fc7c73f58d7f9f8350f7afce48bbdfbafdd7b44ad5d036e22bffc925ff2ff0064407a1635060000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f9e9c7e7afce9c9dec1d3d37bc7bfef47d46a992d72fa7a5eb579b95d1697f936babd47dfccf2f36c5db6afa7555d2c2f5ed6d5795152d3e5ba2c471f9d1779396b3e7af4bd5f6c2014337aa7bd5ee1f7d3d962fcbac56bf499a09d4df0f279563639de2edbbc0e3eaa736a9f5fca676dbda68f9aaa6ee56f6d739e4d6934fe276ff36bd39a0970bccccaeb1fe4b54153faee7e9a75fe6eae97d5f27af145b6c288beff4b467650d955de548b7c9937cdf3fc322f6938de109f566b204300b89b00337f8882e0c611cadfde00e5031e9f42fc5919e02c6fa675b16a8b6a49e3f0c6169d3e41ca1f9ae2b6716cdac61b9c7ef243185d9b156543a3f04676522d5665feee0d3e21ac7a8c4cf2e2bd715295653e057db6f0b290e50e7d2fb8c97864c43e5de493f7248bb4f859a28a4f9779319be5cbefe6c5c5bca5a1e85031beb3657b6f8f3e923e042dc5c51f9e7e148c4f3fdb3440fde4676b84f451a0acf8e3d147f459f325b3387da0afae2f2e48cde5357d822606b4fed9566ff365d1f9fb198f5f3f21dad4c107f9725a5f7327bf1746a7dd148ba2cc6ae8e34756d12bb57ffc8b625a574d75de8e8f7fb0aef3f16b1ef4f8c9177bf75fbbf7885abb06dc447ef925bfe4ff018fd4194e35060000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,11 +19,11 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809A935CD58A"', + 'W/"0x8D8BE6A6C28DE3A"', 'Vary', 'Accept-Encoding', 'request-id', - 'a34b0c6c-66e4-4f81-a956-8e5f7fc9f3d8', + 'f46fccb6-3b29-4a59-af9f-a20e8d58f6b6', 'elapsed-time', '21', 'OData-Version', @@ -32,6 +33,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:21:09 GMT', + 'Fri, 22 Jan 2021 00:12:40 GMT', 'Content-Length', - '662' ]); + '662' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_list_of_indexes.js b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_list_of_indexes.js index f074e97a8ddd..820d023335f8 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_list_of_indexes.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_list_of_indexes.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes') .query(true) - .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97371f8d3ebaccca75fed1a3ef59c8d4e482c0febe1fedbc3b787a70b0f3f0f8e0607f6ff7e9f1deeffb11b55f660b6afed1bc6af372bb2c2ef36d747c8fbe99e5e7d9ba6c5f4fabba585ebcacabf3a2a4a6cb75598e3e3a2ff272d6703f0aa198d13bedf50abf9fce16e3d72d5ea3cf04f16c8297cfb3b2c9f176d9e675f0519d53fbfc523e6beb357dd454752b7f6b9bf36c4aa3f13f799b5f9bd64c82e365565eff20af0d9ad277f7d3acf37773bdac96d78b2fb21546f4fd5f32b283caaef2a65ae4cbbc699ee7977949c3f186f8b45a031902c0dd0498f9431404378e50fef606281ff0f814e2cfca00677933ad8b555b544b1a8737b6e8f40952fed014b78d63d336dee0f4931fc2e8daac282116dec84eaac5aaccdfbdc12784558f91495ebc374eaab2cca7a0cf165e16b2dca1ef0537198f8cd8a78b7cf29e6491163f4b54f1e9322f66b37cf9ddbcb898b734141d2ac677b66cefedd147d287a0a5b8f8c3d38f82f1e9679b06a89ffc6c8d903e0a94157f3cfa883e6bbe6416a70ff4d5f5c505a9b9bca64fd0c480d63fdbea6dbe2c3a7f3fe3f1eb27449b3af8205f4eeb6beee4f7c2e8b49b625194595db4f48955c84aed1fffa298d655539db7e3e31faceb7cfc9a073d7ef2c5defdd7ee3da2d6ae0137915f7ec92ff9fe2ff97f00c5157c0239060000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97371f8d3ebaccca75fed1a3ef59c8d4e482c0febe1fedbc3b787af0e4f4d3e34f77ef3fdc3d3e39f97d3fa2f6cb6c41cd3f9a576d5e6e97c565be8d8eefd137b3fc3c5b97edeb695517cb8b9775755e94d474b92ecbd147e7455ece1aee472114337aa7bd5ee1f7d3d962fcbac56bf499209e4df0f279563639de2edbbc0e3eaa736a9f5fca676dbda68f9aaa6ee56f6d739e4d6934fe276ff36bd39a4970bccccaeb1fe4b54153faee7e9a75fe6eae97d5f27af145b6c288beff4b467650d955de548b7c9937cdf3fc322f6938de109f566b204300b89b00337f8882e0c611cadfde00e5031e9f42fc5919e02c6fa675b16a8b6a49e3f0c6169d3e41ca1f9ae2b6716cdac61b9c7ef243185d9b1525c4c21bd949b55895f9bb37f884b0ea3132c98bf7c6495596f914f4d9c2cb42963bf4bde026e39111fb74914fde932cd2e267892a3e5de6c56c962fbf9b1717f39686a243c5f8ce96edbd3dfa48fa10b414177f78fa51303efd6cd300f5939fad11d24781b2e28f471fd167cd97cce2f481bebabeb8203597d7f4099a18d0fa675bbdcd9745e7ef673c7efd846853071fe4cb697dcd9dfc5e189d76532c8a32ab8b963eb10a59a9fde35f14d3ba6aaaf3767cfc83759d8f5ff3a0c74fbed8bbffdabd47d4da35e026f2cb2fc14c2a30a2aca7dd1f9eec1c9fec3dbdffe0c9c126ed7e701f53fc9efa9d619c7d4d252f9c1db08b7ce4b845fe1e6016f9f21be415a2a03fae17f8fde691091adfe4c0f4f79f9591fd7f5bb793448dcbf5946c2f10bdcd209fd5d4f2ff73c33caf6f33cc69d6e617550d7df0fe23944f360e50fef6c6271f7cf0f0e4efe17191fafaffa3b175035c65f55b1ac4d9725aae6779477d3ea9aa32cf209dd28d60a6e8bce708e5ef1ffe009b4585011e97657595ffff707c65d6b4aff2657599812f9f921cd270bc31e29337c522fff2fcbcc9e14d498782a322f6ff91a1d634445627fef0febf1fe39415694f9a3b1a8437b0cff3eaa2ce56f3eb9755b1fc599a3705f3b33ec26c36ab2944a5417803bc29ce69da3acfdbe3d8aba278e933414c06a3a3fb4092c8073feb14998aeffcffab31352da91af2d32f8be51453faffabc14dabf5b2fdff9f7fb3aa68d6ca936af6ff9309f3c75657d5c2531c1de7cd533ff0e0fafac7f3dce97b058237bf39dae8f87f58c471a4f1c6f635a312f9e4ff9da3e3d1fcff6d5013cab2bf22054b83f006f60dba3fe6ad1ffec8f299497f0563fbffc1a435659eaf9a13d80e1a8737b65be63105cdff978eed87145899b77ee8
03fcff69ec4fffd1874156155fd8612f092a25616964e7eb250ff8f8e2a2ce2f2446a1795f2fe83b590d941e4c3b01c396941396416843414c5eafaa52fea6af0a74445f287d67057922e2384e2af24a3e7ab437de21d884cb1ceb5ba6b34576b12c5aca55980fec8b94c2aef3f3bca65477ce41d3cbac2634a85b02bfb8265c0c6c1af953fbd6eecece78e797101ec85403a4508808e4b2f2bff8a34cd8fccbbab828f0c9f73efad647d46a91bd3bbec8cf96af735a2185ffc00088be7e02df92b671daec0bb85c1419f164113e67cbf3e2dd17593b9dd31f6855adeb69fe0cb40408df62d3b72e29ccb80a949c3ba33f7939e0ffc5cb03dfff25ff0f9dec1338581e0000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'e1e02747-a9b4-454b-b754-a8786ff4d657', + 'f3aa966b-3b6c-4299-b10c-631337f61de2', 'elapsed-time', - '51', + '69', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:51 GMT', + 'Fri, 22 Jan 2021 00:12:22 GMT', 'Content-Length', - '662' ]); + '1356' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_list_of_indexes_names.js b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_list_of_indexes_names.js index b2d0178cb67c..72d3540d3024 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_list_of_indexes_names.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_gets_the_list_of_indexes_names.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97375bcb6c91dff968f4d16556aef38f1e7def177f844f00b56af372bb2c2ef36d00bef7d12ff9fe2ff97f000791cf707d000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97375bcb6c91dff968f4d16556aef38f1e7def177f844f00b56af372bb2c2ef36d00bef7d12f190d7d77707fefa35ff2fd5ff2ff0003b5259b9b000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '51a723af-f34f-4d9a-89b2-4a7ad31e7d0f', + '2196e96b-a69d-4235-8c16-8c4f84386979', 'elapsed-time', - '24', + '20', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:21:00 GMT', + 'Fri, 22 Jan 2021 00:12:31 GMT', 'Content-Length', - '224' ]); + '233' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_modify_and_updates_the_index_object.js 
b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_modify_and_updates_the_index_object.js index 16c89f9417e9..313d64d73104 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_modify_and_updates_the_index_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_modify_and_updates_the_index_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test3%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f0e761e1e1fef1fecdddbbbfff0f7fd885a2db3454e5fcfab362fb7cbe232df46b7f7e89b597e9eadcbf6f5b4aa8be5c5cbba3a2f4a6aba5c97e5e8a3f3222f67cd478fbef78b0d846246efb4d72bfc7e3a5b8c5fb7788d3e13b4b3095e3ecfca26c7db659bd7c147754eedf34bf9acadd7f45153d5adfcad6dceb3298dc6ffe46d7e6d5a33018e975979fd83bc36684adfdd4fb3cedfcdf5b25a5e2fbec85618d1f77fc9c80e2abbca9b6a912ff3a6799e5fe6250dc71be2d36a0d640800771360e60f5110dc3842f9db1ba07cc0e353883f2b039ce5cdb42e566d512d691cded8a2d32748f94353dc368e4ddb7883d34f7e08a36bb3a26c6814dec84eaac5aaccdfbdc12784558f91495ebc374eaab2cca7a0cf165e16b2dca1ef0537198f8cd8a78b7cf29e6491163f4b54f1e9322f66b37cf9ddbcb898b734141d2ac677b66cefedd147d287a0a5b8f8c3d38f82f1e9679b06a89ffc6c8d903e0a94157f3cfa883e6bbe6416a70ff4d5f5c505a9b9bca64fd0c480d63fdbea6dbe2c3a7f3fe3f1eb27449b3af8205f4eeb6beee4f7c2e8b49b625194590d7dfcc82a7aa5f68f7f514cebaaa9cedbf1f10fd6753e7ecd831e3ff962effe6bf71e516bd7809bc82fbfe497fc3f7ffa616c35060000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f9e9c7e7afce0e4c9b39d07cf8e7fdf8fa8d5325be4f4f5bc6af372bb2c2ef36d747b8fbe99e5e7d9ba6c5f4fabba585ebcacabf3a2a4a6cb75598e3e3a2ff272d67cf4e87bbfd8402866f44e7bbdc2efa7b3c5f8758bd7e833413b9be0e5f3ac6c72bc5db6791d7c54e7d43ebf94cfda7a4d1f3555ddcadfdae63c9bd268fc4fdee6d7a63513e0789995d73fc86b83a6f4ddfd34ebfcdd5c2fabe5f5e28b6c85117dff978ceca0b2abbca916f9326f9ae7f9655ed270bc213eadd640860070370166fe1005c18d2394bfbd01ca073c3e85f8b332c059de4ceb62d516d592c6e18d2d3a7d82943f34c56de3d8b48d3738fde48730ba362bca8646e18deca45aaccafcdd1b7c4258f51899e4c57be3a42acb7c0afa6ce16521cb1dfa5e7093f1c8887dbac827ef491669f1b344159f2ef36236cb97dfcd8b8b794b43d1a1627c67cbf6de1e7d247d085a8a8b3f3cfd28189f7eb66980fac9cfd608e9a34059f1c7a38fe8b3e64b6671fa405f5d5f5c909acb6bfa044d0c68fdb3addee6cba2f3f7331ebf7e42b4a9830ff2e5b4bee64e7e2f8c4ebb29164599d5d0c78faca2576afff817c5b4ae9aeabc1d1fff605de7e3d73ce8f1932ff6eebf76ef11b5760db889fcf24b7ec9ff0347f983d235060000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,13 +19,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AA4823259"', + 'W/"0x8D8BE6A7CBF07FA"', 'Vary', 'Accept-Encoding', 'request-id', 
- '66ab0844-a336-49bd-abb6-d4ca4d7b4199', + 'f8ef32a6-3d86-4fa9-9257-6b555a05abca', 'elapsed-time', - '19', + '26', 'OData-Version', '4.0', 'Preference-Applied', @@ -32,14 +33,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:21:38 GMT', + 'Fri, 22 Jan 2021 00:13:08 GMT', 'Content-Length', - '661' ]); + '662' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) - .put('/indexes(%27hotel-live-test3%27)', {"name":"hotel-live-test3","fields":[{"name":"id","type":"Edm.String","key":true,"retrievable":true,"searchable":false,"filterable":false,"sortable":false,"facetable":false,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]},{"name":"awesomenessLevel","type":"Edm.Double","key":false,"retrievable":true,"searchable":false,"filterable":true,"sortable":true,"facetable":true,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]},{"name":"description","type":"Edm.String","key":false,"retrievable":true,"searchable":true,"filterable":false,"sortable":false,"facetable":false,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]},{"name":"details","type":"Edm.ComplexType","fields":[{"name":"tags","type":"Collection(Edm.String)","key":false,"retrievable":true,"searchable":true,"filterable":true,"sortable":false,"facetable":true,"analyzer":null,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]}]},{"name":"hiddenWeight","type":"Edm.Int32","key":false,"retrievable":false,"searchable":false,"filterable":false,"sortable":false,"facetable":false,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]},{"name":"lastUpdatedOn","type":"Edm.DateTimeOffset","searchable":false,"filterable":true,"sortable":false,"facetable":false}],"scoringProfiles":[],"defaultScoringProfile":null,"corsOptions":null,"suggesters":[],"analyzers":[],"tokenizers":[],"tokenFilters":[],"charFilters":[],"encryptionKey":null,"similarity":{"@odata.type":"#Microsoft.Azure.Search.BM25Similarity","k1":null,"b":null},"@odata.etag":"\"0x8D8809AA4823259\""}) + .put('/indexes(%27hotel-live-test3%27)', 
{"name":"hotel-live-test3","fields":[{"name":"id","type":"Edm.String","key":true,"retrievable":true,"searchable":false,"filterable":false,"sortable":false,"facetable":false,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]},{"name":"awesomenessLevel","type":"Edm.Double","key":false,"retrievable":true,"searchable":false,"filterable":true,"sortable":true,"facetable":true,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]},{"name":"description","type":"Edm.String","key":false,"retrievable":true,"searchable":true,"filterable":false,"sortable":false,"facetable":false,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]},{"name":"details","type":"Edm.ComplexType","fields":[{"name":"tags","type":"Collection(Edm.String)","key":false,"retrievable":true,"searchable":true,"filterable":true,"sortable":false,"facetable":true,"analyzer":null,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]}]},{"name":"hiddenWeight","type":"Edm.Int32","key":false,"retrievable":false,"searchable":false,"filterable":false,"sortable":false,"facetable":false,"searchAnalyzer":null,"indexAnalyzer":null,"synonymMaps":[]},{"name":"lastUpdatedOn","type":"Edm.DateTimeOffset","searchable":false,"filterable":true,"sortable":false,"facetable":false}],"scoringProfiles":[],"defaultScoringProfile":null,"corsOptions":null,"suggesters":[],"analyzers":[],"tokenizers":[],"tokenFilters":[],"charFilters":[],"encryptionKey":null,"similarity":{"@odata.type":"#Microsoft.Azure.Search.BM25Similarity"},"@odata.etag":"\"0x8D8BE6A7CBF07FA\""}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f0e761e1e1f3fd8dd3b7df66cfff7fd885a2db3454e5fcfab362fb7cbe232df46b7f7e89b597e9eadcbf6f5b4aa8be5c5cbba3a2f4a6aba5c97e5e8a3f3222f67cd478fbef78b0d846246efb4d72bfc7e3a5b8c5fb7788d3e13b4b3095e3ecfca26c7db659bd7c147754eedf34bf9acadd7f45153d5adfcad6dceb3298dc6ffe46d7e6d5a33018e975979fd83bc36684adfdd4fb3cedfcdf5b25a5e2fbec85618d1f77fc9c80e2abbca9b6a912ff3a6799e5fe6250dc71be2d36a0d640800771360e60f5110dc3842f9db1ba07cc0e353883f2b039ce5cdb42e566d512d691cded8a2d32748f94353dc368e4ddb7883d34f7e08a36bb3a26c6814dec84eaac5aaccdfbdc12784558f91495ebc374eaab2cca7a0cf165e16b2dca1ef0537198f8cd8a78b7cf29e6491163f4b54f1e9322f66b37cf9ddbcb898b734141d2ac677b66cefedd147d287a0a5b8f8c3d38f82f1e9679b06a89ffc2c8dd08dafcc9af6ab15a9bf7cf66587af9fd2876f8a45fee5f9799363f0d257809f3f52999360a0f2d1cfdd38bf4f1f054a993f1e7d449f355fb228d307faeafae282d4795ed327686240eb9f6df5365f169dbf9ff1e8f513a24c1d7c902fa7f53577f27b6174da4db128caac86dd79640d9a12fdc7bf28a675d554e7edf8f807eb3a1fbfe6418f9f7cb177ffb57b8fa8b56bc04de4975ff24bfe1fc947db3c1d070000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f9e9c7e7afce0d9fed3fbf7f7f67edf8fa8d5325be4f4f5bc6af372bb2c2ef36d747b8fbe99e5e7d9ba6c5f4fabba585ebcacabf3a2a4a6cb75598e3e3a2ff272d67cf4e87bbfd8402866f44e7bbdc2efa7b3c5f8758bd7e833413b9be0e5f3ac6c72bc5db6791d7c54e7d43ebf94cfda7a4d1f3555ddcadfdae63c9bd268fc4fdee6d7a63513e0789995d73fc86b83a6f4ddfd34ebfcdd5c2fabe5f5e28b6c85117dff978ceca0b2abbca916f9326f9ae7f9655ed270bc213eadd640860070370166fe1005c18d2394bfbd01ca073c3e85f8b332c059de4ceb62d516d592c6e18d2d3a7d82943f34c56de3d8b48d3738fde48730ba362bca8646e18deca45aaccafcdd1b7c4258f51899e4c57be3a42acb7c0afa6ce16521cb1dfa5e7093f1c8887dbac827ef491669f1b344159f2ef36236cb97dfcd8b8b794b43d1a1627c67cbf6de1e7d247d085a8a8b3f3cfd28189f7eb66980fac9cfd208ddf8caac69bf5a91facb675f76f8fa297df8a658e45f9e9f3739062f7d05f8f923953909062a1ffddc8df3fbf451a094f9e3d147f459f3258b327da0afae2f2e489de7357d822606b4fed9566ff365d1f9fb198f5e3f21cad4c107f9725a5f7327bf1746a7dd148ba2cc6ad89d47d6a029d17ffc8b625a574d75de8e8f7fb0aef3f16b1ef4f8c9177bf75fbbf7885abb06dc447ef925bfe4ff0117a691d01d070000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,13 +53,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AA712EFF4"', + 'W/"0x8D8BE6A7F4D5522"', 'Vary', 'Accept-Encoding', 'request-id', - '21adfd83-f829-4277-a091-a92a3d81b555', + '50dc273a-f47d-42fd-92d8-e060a880c7cf', 'elapsed-time', - '157', + '162', 'OData-Version', '4.0', 'Preference-Applied', @@ -64,14 +67,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:21:38 GMT', + 'Fri, 22 Jan 2021 00:13:08 GMT', 'Content-Length', - '705' ]); + '706' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27hotel-live-test3%27)') .query(true) - .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f0e761e1e1f3fd8dd3b7df66cfff7fd885a2db3454e5fcfab362fb7cbe232df46b7f7e89b597e9eadcbf6f5b4aa8be5c5cbba3a2f4a6aba5c97e5e8a3f3222f67cd478fbef78b0d846246efb4d72bfc7e3a5b8c5fb7788d3e13b4b3095e3ecfca26c7db659bd7c147754eedf34bf9acadd7f45153d5adfcad6dceb3298dc6ffe46d7e6d5a33018e975979fd83bc36684adfdd4fb3cedfcdf5b25a5e2fbec85618d1f77fc9c80e2abbca9b6a912ff3a6799e5fe6250dc71be2d36a0d640800771360e60f5110dc3842f9db1ba07cc0e353883f2b039ce5cdb42e566d512d691cded8a2d32748f94353dc368e4ddb7883d34f7e08a36bb3a26c6814dec84eaac5aaccdfbdc12784558f91495ebc374eaab2cca7a0cf165e16b2dca1ef0537198f8cd8a78b7cf29e6491163f4b54f1e9322f66b37cf9ddbcb898b734141d2ac677b66cefedd147d287a0a5b8f8c3d38f82f1e9679b06a89ffc2c8dd08dafcc9af6ab15a9bf7cf66587af9fd2876f8a45fee5f9799363f0d257809f3f52999360a0f2d1cfdd38bf4f1f054a993f1e7d449f355fb228d307faeafae282d4795ed327686240eb9f6df5365f169dbf9ff1e8f513a24c1d7c902fa7f53577f27b6174da4db128caac86dd79640d9a12fdc7bf28a675d554e7edf8f807eb3a1fbfe6418f9f7cb177ffb57b8fa8b56bc04de4975ff24bfe1fc947db3c1d070000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f9737777fb77cd916edf5472303971a5c10d0dff7a39d77074f0f9e9c7e7afce0d9fed3fbf7f7f67edf8fa8d5325be4f4f5bc6af372bb2c2ef36d747b8fbe99e5e7d9ba6c5f4fabba585ebcacabf3a2a4a6cb75598e3e3a2ff272d67cf4e87bbfd8402866f44e7bbdc2efa7b3c5f8758bd7e833413b9be0e5f3ac6c72bc5db6791d7c54e7d43ebf94cfda7a4d1f3555ddcadfdae63c9bd268fc4fdee6d7a63513e0789995d73fc86b83a6f4ddfd34ebfcdd5c2fabe5f5e28b6c85117dff978ceca0b2abbca916f9326f9ae7f9655ed270bc213eadd640860070370166fe1005c18d2394bfbd01ca073c3e85f8b332c059de4ceb62d516d592c6e18d2d3a7d82943f34c56de3d8b48d3738fde48730ba362bca8646e18deca45aaccafcdd1b7c4258f51899e4c57be3a42acb7c0afa6ce16521cb1dfa5e7093f1c8887dbac827ef491669f1b344159f2ef36236cb97dfcd8b8b794b43d1a1627c67cbf6de1e7d247d085a8a8b3f3cfd28189f7eb66980fac9cfd208ddf8caac69bf5a91facb675f76f8fa297df8a658e45f9e9f3739062f7d05f8f923953909062a1ffddc8df3fbf451a094f9e3d147f459f3258b327da0afae2f2e489de7357d822606b4fed9566ff365d1f9fb198f5e3f21cad4c107f9725a5f7327bf1746a7dd148ba2cc6ad89d47d6a029d17ffc8b625a574d75de8e8f7fb0aef3f16b1ef4f8c9177bf75fbbf7885abb06dc447ef925bfe4ff0117a691d01d070000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -82,13 +87,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AA712EFF4"', + 'W/"0x8D8BE6A7F4D5522"', 'Vary', 'Accept-Encoding', 'request-id', - '6b340c8d-491f-4b57-b8a2-908f12e03655', + 'fff6ebf4-de5f-42b6-8b47-2ded2ee0c8de', 'elapsed-time', - '18', + '17', 'OData-Version', '4.0', 'Preference-Applied', @@ -96,6 +101,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:21:38 GMT', + 'Fri, 22 Jan 2021 00:13:08 GMT', 'Content-Length', - '705' ]); + '706' +]); diff 
--git a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_throws_error_for_invalid_index_object.js b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_throws_error_for_invalid_index_object.js index 16660b2e235b..7455ce26327a 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_throws_error_for_invalid_index_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_indexes/recording_throws_error_for_invalid_index_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexes(%27garbxyz%27)') .query(true) - .reply(404, {"error":{"code":"","message":"No index with the name 'garbxyz' was found in the service 'testsearchcases'."}}, [ 'Cache-Control', + .reply(404, {"error":{"code":"","message":"No index with the name 'garbxyz' was found in the service 'testsearchcases'."}}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,9 +19,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'request-id', - '07858434-6737-4d70-a3a7-c3ce18bc6968', + 'fa711e82-e29e-49d8-98d1-7542619b8d25', 'elapsed-time', - '207', + '15', 'OData-Version', '4.0', 'Preference-Applied', @@ -28,6 +29,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:21:19 GMT', + 'Fri, 22 Jan 2021 00:12:48 GMT', 'Content-Length', - '110' ]); + '110' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_creates_the_synonymmap_object_using_createorupdatesynonymmap.js b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_creates_the_synonymmap_object_using_createorupdatesynonymmap.js index 56c8c16537c8..9f2d1d631e89 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_creates_the_synonymmap_object_using_createorupdatesynonymmap.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_creates_the_synonymmap_object_using_createorupdatesynonymmap.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .put('/synonymmaps(%27my-azure-synonymmap-3%27)', {"name":"my-azure-synonymmap-3","format":"solr","synonyms":"United States, United States of America => USA\nWashington, Wash. => WA"}) .query(true) - .reply(201, {"@odata.context":"https://endpoint/$metadata#synonymmaps/$entity","@odata.etag":"\"0x8D8809A7F9727ED\"","name":"my-azure-synonymmap-3","format":"solr","synonyms":"United States, United States of America => USA\nWashington, Wash. => WA","encryptionKey":null}, [ 'Cache-Control', + .reply(201, {"@odata.context":"https://endpoint/$metadata#synonymmaps/$entity","@odata.etag":"\"0x8D8BE6A58EEDF81\"","name":"my-azure-synonymmap-3","format":"solr","synonyms":"United States, United States of America => USA\nWashington, Wash. 
=> WA","encryptionKey":null}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -16,13 +17,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809A7F9727ED"', + 'W/"0x8D8BE6A58EEDF81"', 'Location', - 'https://endpoint/synonymmaps(\'my-azure-synonymmap-3\')?api-version=2020-06-30', + "https://endpoint/synonymmaps('my-azure-synonymmap-3')?api-version=2020-06-30", 'request-id', - '1abfd528-49de-42c8-89e7-e4cc759abdd5', + '2f1b5424-a099-4cbe-8618-97484d04c844', 'elapsed-time', - '28', + '33', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:32 GMT', + 'Fri, 22 Jan 2021 00:12:03 GMT', 'Content-Length', - '284' ]); + '284' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/synonymmaps(%27my-azure-synonymmap-3%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d38d87978fce0d9c3077b0f4e9ffebe1f51ab65b6c8e9ebc5f576f683759d6f3b58dbf7e8ebf3aa5e6440aca9ca9afed6af1bfae4ab65d1e6b3f4759b11b2a334f833adced3e3455e17d32cfdec28fdeaf5f1efbbfc6ed6cc8be5455b2d47297e1fe39bef1e13d07c39adaf576d512d7faffcfaa347cb7559fe92ff07c15651ab1c010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d7872fae9f1fd83d3d3a7cf0e767fdf8fa8d5325be4f4f5e27a3bfbc1baceb71daced7bf4f579552f3220d654654d7febd70d7df2d5b268f359fabacd08d9511afc9956e7e9f122af8b69967e76947ef5faf8f75d7e376be6c5f2a2ad96a314bf8ff1cd778f0968be9cd6d7abb6a896bf577efdd1a3e5ba2c7fc9ff03c29423261c010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -48,13 +51,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809A7F9727ED"', + 'W/"0x8D8BE6A58EEDF81"', 'Vary', 'Accept-Encoding', 'request-id', - '4b344103-cf6d-495b-80c5-6e850c18d2fe', + '813b9d71-445f-4f3c-8300-24d8a82dbfd0', 'elapsed-time', - '5', + '7', 'OData-Version', '4.0', 'Preference-Applied', @@ -62,24 +65,27 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:32 GMT', + 'Fri, 22 Jan 2021 00:12:03 GMT', 'Content-Length', - '334' ]); + '334' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .delete('/synonymmaps(%27my-azure-synonymmap-3%27)') .query(true) - .reply(204, "", [ 'Cache-Control', + .reply(204, "", [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', 'Expires', '-1', 'request-id', - 'bfcadeb3-4ae5-4222-b819-6213345a703a', + '5d59020f-d4d0-44db-b42a-761528cba861', 'elapsed-time', - '13', + '11', 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:32 GMT' ]); 
+ 'Fri, 22 Jan 2021 00:12:03 GMT' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_correct_synonymmap_object.js b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_correct_synonymmap_object.js index 5e65d8700882..44df4ffe4e73 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_correct_synonymmap_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_correct_synonymmap_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/synonymmaps(%27my-azure-synonymmap-1%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d38d87978fc60f7e1fdd3bd9d93dff7236ab5cc16397dbdb8dece7eb0aef36d076b7b97be3eafea4506c49aaaace96ffdbaa14fbe5a166d3e4b5fb719213b4a833fd3ea3c3d5ee47531cdd2cf8ed2af5e1fffbecbef66cdbc585eb4d57294e2f731bef9ee3101cd97d3fa7ad516d5f2f7caaf3f7ab45c97e52ff97f00a8c3ea221c010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d7872fae9f1fe934fef9dec3d7cf0fb7e44ad96d922a7af17d7dbd90fd675beed606defd2d7e755bdc88058539535fdad5f37f4c957cba2cd67e9eb3623644769f0675a9da7c78bbc2ea659fad951fad5ebe3df77f9ddac9917cb8bb65a8e52fc3ec637df3d26a0f9725a5fafdaa25afe5ef9f5478f96ebb2fc25ff0ff3fe4f621c010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,13 +19,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809A7195E20C"', + 'W/"0x8D8BE6A4B63C297"', 'Vary', 'Accept-Encoding', 'request-id', - 'a651dd98-de86-4cf7-9ad0-87498eaf9d66', + '7e9adbee-e677-4cd6-ad6e-32041199a320', 'elapsed-time', - '7', + '10', 'OData-Version', '4.0', 'Preference-Applied', @@ -32,6 +33,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:13 GMT', + 'Fri, 22 Jan 2021 00:11:45 GMT', 'Content-Length', - '334' ]); + '334' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_list_of_synonymmaps.js b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_list_of_synonymmaps.js index 87bed14d37b4..d45ed1119d91 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_list_of_synonymmaps.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_list_of_synonymmaps.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/synonymmaps') 
.query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd57c34fae8322bd7f9478fbe67a153b30b02fdfb7eb4f3eee0e9c1c1cec3e34f3f3d38d8db7f76eff7fd88da2fb30535ff6871bd9dfd605de7db0edaf62e7d7d5ed58b0ca8355559d3dffa75439f7cb52cda7c96be6e33427794067fa6d5797abcc8eb629aa59f1da55fbd3efe7d97dfcd9a79b1bc68abe528c5ef637cf3dd63029a2fa7f5f5aa2daae5ef955f7ff468b92ecb5f32da3c80673bf79e3db969007bf4f5cfd900beff4bfe1f0031ff3be1010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd57c34fae8322bd7f9478fbe67a153b30b02fdfb7eb4f3eee0e9c193d34f8ff7778eef7d7ab0b7fffb7e44ed97d9829a7fb4b8dece7eb0aef36d076d7b97be3eafea4506d49aaaace96ffdbaa14fbe5a166d3e4b5fb719a13b4a833fd3ea3c3d5ee47531cdd2cf8ed2af5e1fffbecbef66cdbc585eb4d57294e2f731bef9ee3101cd97d3fa7ad516d5f2f7caaf3f7ab45c97e52f196d1ec09307cf9eeddc34803dfafae76c00dfff25ff0f6bcc504ce1010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '5abbb118-66be-4865-a2ad-42ecebcf6c0a', + 'c78417ed-937d-4738-a69e-faa5c2f2aac7', 'elapsed-time', - '28', + '26', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:19:55 GMT', + 'Fri, 22 Jan 2021 00:11:28 GMT', 'Content-Length', - '356' ]); + '356' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_list_of_synonymmaps_names.js b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_list_of_synonymmaps_names.js index bd5e0648adf2..e30da7be7556 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_list_of_synonymmaps_names.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_gets_the_list_of_synonymmaps_names.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/synonymmaps') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd56c2db3457ee7a3d1479759b9ce3f7af4bd5ffc113e21c88bebedec07eb3adf76adb7773ffa25a38d0df63efa25dfff25ff0fc3457414a7000000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd56c2db3457ee7a3d1479759b9ce3f7af4bd5ffc113e21c88bebedec07eb3adf76adb7773ffa25a38d0df63efa25dfff25ff0fc3457414a7000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,7 +21,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '9e049600-565f-4d21-b1f5-62fb0249a793', + 'f483b373-2fbf-4b06-9f1c-e22afafb08bd', 'elapsed-time', '7', 'OData-Version', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:04 GMT', + 'Fri, 22 Jan 2021 00:11:37 GMT', 'Content-Length', - '236' ]); + '236' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_modify_and_updates_the_synonymmap_object.js b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_modify_and_updates_the_synonymmap_object.js index 66e003884104..e4cf45e5c945 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_modify_and_updates_the_synonymmap_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_modify_and_updates_the_synonymmap_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/synonymmaps(%27my-azure-synonymmap-1%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d38d879787cb0b7ff64efc9eeeeeffb11b55a668b9cbe5e5c6f673f58d7f9b683b5bd4b5f9f57f52203624d55d6f4b77eddd0275f2d8b369fa5afdb8c901da5c19f69759e1e2ff2ba9866e96747e957af8f7fdfe577b3665e2c2fda6a394af1fb18df7cf79880e6cb697dbd6a8b6af97be5d71f3d5aaecbf297fc3ffae250851c010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d7872fae9f1fd2707a74f9e3c7bf6fb7e44ad96d922a7af17d7dbd90fd675beed606defd2d7e755bdc88058539535fdad5f37f4c957cba2cd67e9eb3623644769f0675a9da7c78bbc2ea659fad951fad5ebe3df77f9ddac9917cb8bb65a8e52fc3ec637df3d26a0f9725a5fafdaa25afe5ef9f5478f96ebb2fc25ff0f717c51661c010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,13 +19,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809A824B2B11"', + 'W/"0x8D8BE6A5B8EBBFF"', 'Vary', 'Accept-Encoding', 'request-id', - '0d05c64c-0d0f-434d-a298-d97a74703916', + '38efcd22-f9db-4288-82d2-9bdaf74e2d10', 
'elapsed-time', - '8', + '7', 'OData-Version', '4.0', 'Preference-Applied', @@ -32,14 +33,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:42 GMT', + 'Fri, 22 Jan 2021 00:12:13 GMT', 'Content-Length', - '333' ]); + '334' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) - .put('/synonymmaps(%27my-azure-synonymmap-1%27)', {"name":"my-azure-synonymmap-1","format":"solr","synonyms":"United States, United States of America => USA\nWashington, Wash. => WA\nCalifornia, Clif. => CA","encryptionKey":null,"@odata.etag":"\"0x8D8809A824B2B11\""}) + .put('/synonymmaps(%27my-azure-synonymmap-1%27)', {"name":"my-azure-synonymmap-1","format":"solr","synonyms":"United States, United States of America => USA\nWashington, Wash. => WA\nCalifornia, Clif. => CA","encryptionKey":null,"@odata.etag":"\"0x8D8BE6A5B8EBBFF\""}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d38d879787c70ffdededee9fedeeffb11b55a668b9cbe5e5c6f673f58d7f9b683b5bd4b5f9f57f52203624d55d6f4b77eddd0275f2d8b369fa5afdb8c901da5c19f69759e1e2ff2ba9866e96747e957af8f7fdfe577b3665e2c2fda6a394af1fb18df7c97be38c9ca82fa5916d9283da15ff98b9363ea2d5f4eebeb555b54cbdf2bbffee8d1725d96bfe4ff011a270f7735010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d7872fae9f1fdd34f0ff61e3e38f97d3fa256cb6c91d3d78bebedec07eb3adf76b0b677e9ebf3aa5e6440aca9ca9afed6af1bfae4ab65d1e6b3f4759b11b2a334f833adced3e3455e17d32cfdec28fdeaf5f1efbbfc6ed6cc8be5455b2d47297e1fe39befd217275959503fcb221ba527f42b7f71724cbde5cb697dbd6a8b6af97be5d71f3d5aaecbf297fc3fc0c7776435010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,13 +53,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809A85322E42"', + 'W/"0x8D8BE6A5E68297C"', 'Vary', 'Accept-Encoding', 'request-id', - '8c2da308-2aaa-4965-9692-c2f852eca544', + 'c83bd515-f805-4889-a17b-078a25f4a0fa', 'elapsed-time', - '26', + '20', 'OData-Version', '4.0', 'Preference-Applied', @@ -64,14 +67,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:42 GMT', + 'Fri, 22 Jan 2021 00:12:13 GMT', 'Content-Length', - '350' ]); + '350' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/synonymmaps(%27my-azure-synonymmap-1%27)') .query(true) - .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d38d879787c70ffdededee9fedeeffb11b55a668b9cbe5e5c6f673f58d7f9b683b5bd4b5f9f57f52203624d55d6f4b77eddd0275f2d8b369fa5afdb8c901da5c19f69759e1e2ff2ba9866e96747e957af8f7fdfe577b3665e2c2fda6a394af1fb18df7c97be38c9ca82fa5916d9283da15ff98b9363ea2d5f4eebeb555b54cbdf2bbffee8d1725d96bfe4ff011a270f7735010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e67a592daf178b6cd5dcfdddf2655bb4d71f8d0c6c6a7441807fdf8f76de1d3c3d7872fae9f1fdd34f0ff61e3e38f97d3fa256cb6c91d3d78bebedec07eb3adf76b0b677e9ebf3aa5e6440aca9ca9afed6af1bfae4ab65d1e6b3f4759b11b2a334f833adced3e3455e17d32cfdec28fdeaf5f1efbbfc6ed6cc8be5455b2d47297e1fe39befd217275959503fcb221ba527f42b7f71724cbde5cb697dbd6a8b6af97be5d71f3d5aaecbf297fc3fc0c7776435010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -82,13 +87,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809A85322E42"', + 'W/"0x8D8BE6A5E68297C"', 'Vary', 'Accept-Encoding', 'request-id', - '53476a7f-52ee-4c6d-b97e-8c1d5590f68d', + 'eff2f68c-ae21-4a90-b8a6-db99ece4d61c', 'elapsed-time', - '7', + '5', 'OData-Version', '4.0', 'Preference-Applied', @@ -96,6 +101,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:42 GMT', + 'Fri, 22 Jan 2021 00:12:13 GMT', 'Content-Length', - '350' ]); + '350' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_throws_error_for_invalid_synonymmap_object.js b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_throws_error_for_invalid_synonymmap_object.js index 5675ccdf6951..c0bf42809433 100644 --- a/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_throws_error_for_invalid_synonymmap_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexclient_synonymmaps/recording_throws_error_for_invalid_synonymmap_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/synonymmaps(%27garbxyz%27)') .query(true) - .reply(404, {"error":{"code":"","message":"No synonym map with the name 'garbxyz' was found in service 'testsearchcases'."}}, [ 'Cache-Control', + .reply(404, {"error":{"code":"","message":"No synonym map with the name 'garbxyz' was found in service 'testsearchcases'."}}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,9 +19,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'request-id', - 'be28c516-6cc9-492f-8418-86e48ea61f19', + 'cd7533c9-1440-450e-827c-4a5adf21c343', 'elapsed-time', - '13', + '12', 'OData-Version', '4.0', 'Preference-Applied', @@ -28,6 +29,7 @@ nock('https://endpoint:443', 
{"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:20:23 GMT', + 'Fri, 22 Jan 2021 00:11:55 GMT', 'Content-Length', - '112' ]); + '112' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_creates_the_datasourceconnection_object_using_createorupdatedatasourceconnection.js b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_creates_the_datasourceconnection_object_using_createorupdatedatasourceconnection.js index b018a247f50c..6d4cde6ed6c3 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_creates_the_datasourceconnection_object_using_createorupdatedatasourceconnection.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_creates_the_datasourceconnection_object_using_createorupdatedatasourceconnection.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .put('/datasources(%27my-data-source-3%27)', {"name":"my-data-source-3","type":"cosmosdb","credentials":{"connectionString":"AccountEndpoint=https://hotels-docbb.documents.azure.com:443/;AccountKey=4UPsNZyFAjgZ1tzHPGZaxS09XcwLrIawbXBWk6IixcxJoSePTcjBn0mi53XiKWu8MaUgowUhIovOv7kjksqAug==;Database=SampleData"},"container":{"name":"hotels"}}) .query(true) - .reply(201, {"@odata.context":"https://endpoint/$metadata#datasources/$entity","@odata.etag":"\"0x8D8809B18C4963A\"","name":"my-data-source-3","description":null,"type":"cosmosdb","subtype":null,"credentials":{"connectionString":null},"container":{"name":"hotels","query":null},"dataChangeDetectionPolicy":null,"dataDeletionDetectionPolicy":null,"encryptionKey":null}, [ 'Cache-Control', + .reply(201, {"@odata.context":"https://endpoint/$metadata#datasources/$entity","@odata.etag":"\"0x8D8BE6ADB3FD718\"","name":"my-data-source-3","description":null,"type":"cosmosdb","subtype":null,"credentials":{"connectionString":null},"container":{"name":"hotels","query":null},"dataChangeDetectionPolicy":null,"dataDeletionDetectionPolicy":null,"encryptionKey":null}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -16,13 +17,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B18C4963A"', + 'W/"0x8D8BE6ADB3FD718"', 'Location', - 'https://endpoint/datasources(\'my-data-source-3\')?api-version=2020-06-30', + "https://endpoint/datasources('my-data-source-3')?api-version=2020-06-30", 'request-id', - 'b05d7edc-1828-432d-a50f-4cc39da99cf2', + 'b2d2300e-4df5-434b-9c7e-9a43b8353f29', 'elapsed-time', - '49', + '40', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:24:48 GMT', + 'Fri, 22 Jan 2021 00:15:42 GMT', 'Content-Length', - '381' ]); + '381' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/datasources(%27my-data-source-3%27)') .query(true) - .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f0e761e3ed93d38d97ff8e9bde3dff7236ab5cc16397dbdb8de46eb6d01b37d8fbe99e5cdb42e566d512d3f7ab45c97e5e8a3f67a85c6d3aa5954cd6c428d9af5443e9406d33a9f0181ac6c3e7af48ba9e172994f01e1755b174bc203cd7e09b5a30167c532afd14a5198576d4eaf8d3efa45ebbcbeb64d81d6c93c5b5ee44ff35680bdacca62aa2da4c1d3bcccf1cd40937c39adaf7924bf57ae1ffe92ff07412bef847d010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f9e9c7e7afcf4c9bd674f1fec1efcbe1f51ab65b6c8e9ebc5f5365a6f0b98ed7bf4cd2c6fa675b16a8b6af9d1a3e5ba2c471fb5d72b349e56cda26a66136ad4ac27f2a13498d6f90c086465f3d1a35f4c0d97cb7c0a08afdbba58121e68f64ba81d0d382b96798d568ac2bc6a737a6df4d12f5ae7f5b56d0ab44ee6d9f2227f9ab702ec655516536d210d9ee6658e6f069ae4cb697dcd23f9bd72fdf097fc3f6eb7484b7d010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -48,13 +51,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B18C4963A"', + 'W/"0x8D8BE6ADB3FD718"', 'Vary', 'Accept-Encoding', 'request-id', - '6d3baa9c-5d22-4c25-81e3-0c6e8f5a6026', + 'f9236f6d-2977-4b22-9867-e8255ab30584', 'elapsed-time', - '8', + '7', 'OData-Version', '4.0', 'Preference-Applied', @@ -62,24 +65,27 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:24:48 GMT', + 'Fri, 22 Jan 2021 00:15:42 GMT', 'Content-Length', - '367' ]); + '367' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .delete('/datasources(%27my-data-source-3%27)') .query(true) - .reply(204, "", [ 'Cache-Control', + .reply(204, "", [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', 'Expires', '-1', 'request-id', - '5993c931-9275-4906-97e0-f6ecbde8fbc0', + 'c90cbe4b-c424-4244-8f61-b7105100ac30', 'elapsed-time', - '39', + '19', 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:24:48 GMT' ]); + 'Fri, 22 Jan 2021 00:15:42 GMT' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_correct_datasourceconnection_object.js b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_correct_datasourceconnection_object.js index 8d1218570bb9..aacda8515318 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_correct_datasourceconnection_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_correct_datasourceconnection_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/datasources(%27my-data-source-1%27)') .query(true) 
- .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f0e761e3ed9d9ff74efd3277b0f7edf8fa8d5325be4f4f5e27a1badb705ccf62e7d33cb9b695dacdaa25a7ef468b92ecbd147edf50a8da755b3a89ad9841a35eb897c280da6753e030259d97cf4e81753c3e5329f02c2ebb62e9684079afd126a4703ce8a655ea395a230afda9c5e1b7df48bd6797d6d9b02ad9379b6bcc89fe6ad007b5995c5545b4883a77999e39b8126f9725a5ff3487eaf5c3ffc25ff0fc374533b7d010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f9e9c7e7a7cf2e0c9c1a74f9f3cf97d3fa256cb6c91d3d78beb6db4de1630dbbbf4cd2c6fa675b16a8b6af9d1a3e5ba2c471fb5d72b349e56cda26a66136ad4ac27f2a13498d6f90c086465f3d1a35f4c0d97cb7c0a08afdbba58121e68f64ba81d0d382b96798d568ac2bc6a737a6df4d12f5ae7f5b56d0ab44ee6d9f2227f9ab702ec655516536d210d9ee6658e6f069ae4cb697dcd23f9bd72fdf097fc3fac167b167d010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,13 +19,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B04626B27"', + 'W/"0x8D8BE6AC7B86DBB"', 'Vary', 'Accept-Encoding', 'request-id', - '5b216c71-ac59-4684-a005-9ab63ebcc858', + '919d19f6-dd10-4434-bbc2-6e33145c96bb', 'elapsed-time', - '12', + '8', 'OData-Version', '4.0', 'Preference-Applied', @@ -32,6 +33,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:24:23 GMT', + 'Fri, 22 Jan 2021 00:15:17 GMT', 'Content-Length', - '367' ]); + '367' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_list_of_datasourceconnection_names.js b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_list_of_datasourceconnection_names.js index 60b802fa6411..5f27685a1695 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_list_of_datasourceconnection_names.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_list_of_datasourceconnection_names.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/datasources') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a375bcb6c91dff968f4d16556aef38f1e7def177f844f08f2e27a1bedb6a5e1f6ee47bf6434f4ddde47bfe4fbbfe4ff01704ecf6c9d000000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a375bcb6c91dff968f4d16556aef38f1e7def177f844f08f2e27a1bedb6a5e1f6ee47bf6434f4ddde47bfe4fbbfe4ff01704ecf6c9d000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'db264fc3-518d-4ab4-b382-374fd8504fa4', + '8f88294b-f0fb-408a-9342-809016736dbd', 'elapsed-time', - '13', + '11', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:24:10 GMT', + 'Fri, 22 Jan 2021 00:15:05 GMT', 'Content-Length', - '231' ]); + '231' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_list_of_datasourceconnections.js b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_list_of_datasourceconnections.js index 9e5d625ae7d8..c7194baca2ea 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_list_of_datasourceconnections.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_gets_the_list_of_datasourceconnections.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/datasources') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a371f8d3ebaccca75fed1a3ef59e8d4ec8240ffbe1fedbc3b787a70b0f3f0f8d9fdbdbddd87f79efcbe1f51fb65b6a0e61f2daeb7d17a5b006defd237b3bc99d6c5aa2daae5478f96ebb21c7dd45eafd0785a358baa994da851b39ec887d2605ae7b37cd91659d97cf4e81753c3e5329f02c2ebb62e9684079afd126a4743ce8a655ea395a230afda9c5e1b7df48bd6797d6d9b02ad9379b6bcc89fe6ad007b5995c5545b4883a77999e39b8126f9725a5ff3487eaf5c3ffc25a3cd147af2e9c3274f3750688fbef9ff3985beff4bfe1ffffe3494a3020000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a371f8d3ebaccca75fed1a3ef59e8d4ec8240ffbe1fedbc3b787af0e4f4d3e32707a7cf0e9eedddff7d3fa2f6cb6c41cd3f5a5c6fa3f5b600dadea56f667933ad8b555b54cb8f1e2dd76539faa8bd5ea1f1b46a1655339b50a3663d910fa5c1b4ce67f9b22db2b2f9e8d12fa686cb653e0584d76d5d2c090f34fb25d48e869c15cbbc462b45615eb539bd36fae817adf3fada36055a27f36c79913fcd5b01f6b22a8ba9b690064ff332c737034df2e5b4bee691fc5eb97ef84b461b29f4ece9c9dec1c30d14daa36ffe7f4ea1efff92ff078e5fcc65a3020000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ 
nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'b9637160-6062-46cf-9984-bdade75fea67', + '4f6d3e44-3f3a-47d5-9bd4-0fd1cbebe7f6', 'elapsed-time', - '25', + '27', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:23:57 GMT', + 'Fri, 22 Jan 2021 00:14:52 GMT', 'Content-Length', - '391' ]); + '392' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_modify_and_updates_the_datasourceconnection_object.js b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_modify_and_updates_the_datasourceconnection_object.js index b2d4878994fe..a05cf5f1679e 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_modify_and_updates_the_datasourceconnection_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_modify_and_updates_the_datasourceconnection_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/datasources(%27my-data-source-1%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f0e761e3ed97d72f26ce7fecebddff7236ab5cc16397dbdb8de46eb6d01b3bd4bdfccf2665a17abb6a8961f3d5aaecb72f4517bbd42e369d52caa6636a146cd7a221f4a83699dcf804056361f3dfac5d470b9cca780f0baad8b25e18166bf84dad180b36299d768a528ccab36a7d7461ffda2755e5fdba640eb649e2d2ff2a7792bc05e566531d516d2e0695ee6f866a049be9cd6d73c92df2bd70f7fc9ff031a3cd54f7d010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f9e9c7e7afcf474f7c193a70f9ffebe1f51ab65b6c8e9ebc5f5365a6f0b98ed5dfa669637d3ba58b545b5fce8d1725d96a38fdaeb151a4fab665135b309356ad613f9501a4ceb7c0604b2b2f9e8d12fa686cb653e0584d76d5d2c090f34fb25d48e069c15cbbc462b45615eb539bd36fae817adf3fada36055a27f36c79913fcd5b01f6b22a8ba9b690064ff332c737034df2e5b4bee691fc5eb97ef84bfe1fcb9c21397d010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,13 +19,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B1BCF0503"', + 'W/"0x8D8BE6ADE17BD9D"', 'Vary', 'Accept-Encoding', 'request-id', - 'e3d11984-6018-41b3-bc99-0ec54696c4b7', + '3e995a84-551c-4eaa-9be4-10163fa8783d', 'elapsed-time', - '11', + '7', 'OData-Version', '4.0', 'Preference-Applied', @@ -32,14 +33,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:25:02 GMT', + 'Fri, 22 Jan 2021 00:15:55 GMT', 
'Content-Length', - '367' ]); + '367' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) - .put('/datasources(%27my-data-source-1%27)', {"name":"my-data-source-1","description":"my-data-source-1","type":"cosmosdb","credentials":{"connectionString":null},"container":{"name":"my-container-2","query":null},"dataChangeDetectionPolicy":null,"dataDeletionDetectionPolicy":null,"@odata.etag":"\"0x8D8809B1BCF0503\"","encryptionKey":null}) + .put('/datasources(%27my-data-source-1%27)', {"name":"my-data-source-1","description":"my-data-source-1","type":"cosmosdb","credentials":{"connectionString":null},"container":{"name":"my-container-2","query":null},"dataChangeDetectionPolicy":null,"dataDeletionDetectionPolicy":null,"@odata.etag":"\"0x8D8BE6ADE17BD9D\"","encryptionKey":null}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f0e761e3ed9db39dedb3b3e7df2fb7e44ad96d922a7af17d7db68bd2d60b677e99b59de4ceb62d516d532dea0bd5ee1d569d52caa6636a14f9af5443e5caecb72f4d1b4ce6740272b9b8f1efd626ab85ce653c07bddd6c592b042b35f42ed68f859b1cc6bb47208d98fb7f708f62f5ae7f5b57d05b89cccb3e545fe346f05e8cbaa2ca6da421a3ccdcb1cdf0c34c997d3fa9ac7f77be5fae12ff97f0074fec97693010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f9e9c7e7a7cba7772fcecc993fbbfef47d46a992d72fa7a71bd8dd6db02667b97be99e5cdb42e566d512de30ddaeb155e9d56cda26a6613faa4594fe4c3e5ba2c471f4deb7c0674b2b2f9e8d12fa686cb653e05bcd76d5d2c092b34fb25d48e869f15cbbc462b8790fd787b8f60ffa2755e5fdb5780cbc93c5b5ee44ff35680beacca62aa2da4c1d3bcccf1cd40937c39adaf797cbf57ae1ffe92ff0777a8b0d793010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,13 +53,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B20A22AEB"', + 'W/"0x8D8BE6AE2CAFBB5"', 'Vary', 'Accept-Encoding', 'request-id', - '1cb0759f-3e0c-4428-9f40-e54ab1b993a4', + '06c2bb2a-e707-43b0-a4a7-9dacaf3dd6b3', 'elapsed-time', - '112', + '67', 'OData-Version', '4.0', 'Preference-Applied', @@ -64,14 +67,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:25:02 GMT', + 'Fri, 22 Jan 2021 00:15:55 GMT', 'Content-Length', - '364' ]); + '364' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/datasources(%27my-data-source-1%27)') .query(true) - .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f0e761e3ed9db39dedb3b3e7df2fb7e44ad96d922a7af17d7db68bd2d60b677e99b59de4ceb62d516d532dea0bd5ee1d569d52caa6636a14f9af5443e5caecb72f4d1b4ce6740272b9b8f1efd626ab85ce653c07bddd6c592b042b35f42ed68f859b1cc6bb47208d98fb7f708f62f5ae7f5b57d05b89cccb3e545fe346f05e8cbaa2ca6da421a3ccdcb1cdf0c34c997d3fa9ac7f77be5fae12ff97f0074fec97693010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fcd354eb7a9a37777fb77cd916edf54723039b1a5d10e0dff7a39d77074f0f9e9c7e7a7cba7772fcecc993fbbfef47d46a992d72fa7a71bd8dd6db02667b97be99e5cdb42e566d512de30ddaeb155e9d56cda26a6613faa4594fe4c3e5ba2c471f4deb7c0674b2b2f9e8d12fa686cb653e05bcd76d5d2c092b34fb25d48e869f15cbbc462b8790fd787b8f60ffa2755e5fdb5780cbc93c5b5ee44ff35680beacca62aa2da4c1d3bcccf1cd40937c39adaf797cbf57ae1ffe92ff0777a8b0d793010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -82,13 +87,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B20A22AEB"', + 'W/"0x8D8BE6AE2CAFBB5"', 'Vary', 'Accept-Encoding', 'request-id', - '6da12f2e-a094-4faa-8685-b7e526b5f92f', + '23f00aea-6c05-4d2f-b7cd-bc5945c6f192', 'elapsed-time', - '35', + '7', 'OData-Version', '4.0', 'Preference-Applied', @@ -96,6 +101,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:25:02 GMT', + 'Fri, 22 Jan 2021 00:15:55 GMT', 'Content-Length', - '364' ]); + '364' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_throws_error_for_invalid_datasourceconnection_object.js b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_throws_error_for_invalid_datasourceconnection_object.js index b2d0645bc3b1..e0164cc40d06 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_throws_error_for_invalid_datasourceconnection_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_datasourceconnections/recording_throws_error_for_invalid_datasourceconnection_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/datasources(%27garbxyz%27)') .query(true) - .reply(404, {"error":{"code":"","message":"No data source with the name 'garbxyz' was found in service 'testsearchcases'."}}, [ 'Cache-Control', + .reply(404, {"error":{"code":"","message":"No data source with the name 'garbxyz' was found in service 'testsearchcases'."}}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,9 +19,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'request-id', - 'ccb64a38-11e6-4496-8160-26289291e647', + 
'fc9036c5-427d-43be-a7b8-38c601b6820e', 'elapsed-time', - '7', + '4', 'OData-Version', '4.0', 'Preference-Applied', @@ -28,6 +29,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:24:36 GMT', + 'Fri, 22 Jan 2021 00:15:29 GMT', 'Content-Length', - '112' ]); + '112' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_creates_the_indexer_object_using_createorupdateindexer.js b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_creates_the_indexer_object_using_createorupdateindexer.js index 36b13f339264..18c80ae5e0e3 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_creates_the_indexer_object_using_createorupdateindexer.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_creates_the_indexer_object_using_createorupdateindexer.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .put('/indexers(%27my-azure-indexer-3%27)', {"name":"my-azure-indexer-3","description":"Description for Sample Indexer","dataSourceName":"my-data-source-1","targetIndexName":"hotel-live-test2","disabled":false}) .query(true) - .reply(201, {"@odata.context":"https://endpoint/$metadata#indexers/$entity","@odata.etag":"\"0x8D8809AE2BFE4D1\"","name":"my-azure-indexer-3","description":"Description for Sample Indexer","dataSourceName":"my-data-source-1","skillsetName":null,"targetIndexName":"hotel-live-test2","disabled":false,"schedule":null,"parameters":null,"fieldMappings":[],"outputFieldMappings":[],"encryptionKey":null}, [ 'Cache-Control', + .reply(201, {"@odata.context":"https://endpoint/$metadata#indexers/$entity","@odata.etag":"\"0x8D8BE6AA7122B3E\"","name":"my-azure-indexer-3","description":"Description for Sample Indexer","dataSourceName":"my-data-source-1","skillsetName":null,"targetIndexName":"hotel-live-test2","disabled":false,"schedule":null,"parameters":null,"fieldMappings":[],"outputFieldMappings":[],"encryptionKey":null}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -16,13 +17,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AE2BFE4D1"', + 'W/"0x8D8BE6AA7122B3E"', 'Location', - 'https://endpoint/indexers(\'my-azure-indexer-3\')?api-version=2020-06-30', + "https://endpoint/indexers('my-azure-indexer-3')?api-version=2020-06-30", 'request-id', - '0d466916-3b0f-422f-ae35-5438b4458821', + '4d319a00-1503-499f-a476-bfa9de1e629e', 'elapsed-time', - '726', + '890', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:23:18 GMT', + 'Fri, 22 Jan 2021 00:14:15 GMT', 'Content-Length', - '412' ]); + '412' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexers(%27my-azure-indexer-3%27)') .query(true) - .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d383839d87c7a77b4f9e9dee3fddfd7d3fa256cb6c91d3d78bebedec07eb3adf5640dbf7e8bb59de4ceb62d516d5929a3c757fa5e7559dbece16ab324fcfe40534a7ee5e57eb7a9abfb040f1d976c31f6eef529be66d51964dde4a8be5ba2c471fb5597d91b70c473efe685eb579b95d1697f936a8b0472fce8a269b94f9eca347e719012048d3793e5b97d45ca0acb29a5e6ef3ba319f9c177939fb225bad8ae5057df8bdef8f3eaad6ed6add3eeb7f912fa7f5358fedf7caaf05c02ff97f00966488cf9c010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d38327a79f1e1f3fd8dddb7b72eff4f7fd885a2db3454e5f2faeb7b31faceb7c5b016ddfa3ef667933ad8b555b544b6af2d4fd959e5775fa3a5bacca3c3d9317d09cba7b5dadeb69fec202c567db0d7fb8bd4b6d9ab7455936792b2d96ebb21c7dd466f545de321cf9f8a379d5e6e576595ce6dba0c21ebd382b9a6c52e6b38f1e9d670480204de7f96c5d527381b2ca6a7ab9cdebc67c725ee4e5ec8b6cb52a9617f4e1f7be3ffaa85ab7ab75fbacff45be9cd6d73cb6df2bbf1600bfe4ff01125a3d199c010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -48,13 +51,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AE2BFE4D1"', + 'W/"0x8D8BE6AA7122B3E"', 'Vary', 'Accept-Encoding', 'request-id', - '1181ba4e-26bf-4390-8551-a27eb7ec816c', + 'fb7fa401-6978-4e62-841a-151627a71dee', 'elapsed-time', - '9', + '5', 'OData-Version', '4.0', 'Preference-Applied', @@ -62,24 +65,27 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:23:18 GMT', + 'Fri, 22 Jan 2021 00:14:15 GMT', 'Content-Length', - '397' ]); + '397' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .delete('/indexers(%27my-azure-indexer-3%27)') .query(true) - .reply(204, "", [ 'Cache-Control', + .reply(204, "", [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', 'Expires', '-1', 'request-id', - '346e62a9-0ada-471a-8af0-f925a6a862a5', + '371b281d-447f-4733-95e6-a1d2ac1ce0e7', 'elapsed-time', - '45', + '35', 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:23:18 GMT' ]); + 'Fri, 22 Jan 2021 00:14:15 GMT' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_correct_indexer_object.js b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_correct_indexer_object.js index cada7f006e01..5525aeec3870 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_correct_indexer_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_correct_indexer_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexers(%27my-azure-indexer-1%27)') 
.query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d383839d87c74f779f3edd3d3ebdfffb7e44ad96d922a7af17d7dbd90fd675bead80b677e9bb59de4ceb62d516d5929a3c757fa5e7559dbece16ab324fcfe40534a7ee5e57eb7a9abfb040f1d976c31f32c8e66d51964dde4a8be5ba2c471fb5597d91b70c473efe685eb579b95d1697f936a8b0472fce8a269b94f9eca347e719012048d3793e5b97d45ca0acb29a5e6ef3ba319f9c177939fb225bad8ae5057df8bdef8f3eaad6ed6add3eeb7f912fa7f5358fedf7caaf05c02ff97f00223972f49c010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d38327a79f1e3ffcf4e4c1a7cf4e9ffcbe1f51ab65b6c8e9ebc5f576f683759d6f2ba0ed5dfa6e9637d3ba58b545b5a4264fdd5fe97955a7afb3c5aaccd3337901cda9bbd7d5ba9ee62f2c507cb6ddf0870cb2795b946593b7d262b92ecbd1476d565fe42dc3918f3f9a576d5e6e97c565be0d2aecd18bb3a2c926653efbe8d179460008d2749ecfd625351728abaca697dbbc6ecc27e7455ecebec856ab6279411f7eeffba38faa75bb5ab7cffa5fe4cb697dcd63fbbdf26b01f04bfe1f917301579c010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,13 +19,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AD1DD1AE5"', + 'W/"0x8D8BE6A96C76FEB"', 'Vary', 'Accept-Encoding', 'request-id', - '8624ae93-9506-4dd4-b883-f2a1ab988590', + 'ba402a56-0121-4586-a199-9dd76f76b3a6', 'elapsed-time', - '15', + '9', 'OData-Version', '4.0', 'Preference-Applied', @@ -32,6 +33,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:22:51 GMT', + 'Fri, 22 Jan 2021 00:13:48 GMT', 'Content-Length', - '395' ]); + '395' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_list_of_indexer_names.js b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_list_of_indexer_names.js index e8f32f726286..71a21d7b0248 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_list_of_indexer_names.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_list_of_indexer_names.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexers') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cdd6325be4773e1a7d749995ebfca347dffbc51fe11302bbb8dece7eb0aef36d6dbabdfbd12f190d7fbbf7d12ff9fe2ff97f00a86bf33d9e000000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cdd6325be4773e1a7d749995ebfca347dffbc51fe11302bbb8dece7eb0aef36d6dbabdfbd12f190d7fbbf7d12ff9fe2ff97f00a86bf33d9e000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'dd4075ff-f06a-45a7-9c2d-98e4f6885c83', + '0cee0c68-93b5-4f49-a339-38ab38b81749', 'elapsed-time', - '7', + '8', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:22:38 GMT', + 'Fri, 22 Jan 2021 00:13:35 GMT', 'Content-Length', - '233' ]); + '233' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_list_of_indexers.js b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_list_of_indexers.js index 9059c2e7944f..94be7367b124 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_list_of_indexers.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_list_of_indexers.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexers') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cd47a38f2eb3729d7ff4e87b1634b5b920b8bfef473bef0e9e1e1cec3c3c3ed97d7ab2f7606fe7f7fd88da2fb30535ff6871bd9dfd605de7db0a6a7b97be9be5cdb42e566d512da9c953f7577a5ed5e9eb6cb12af3f44c5e4073eaee75b5aea7f90b0b149f6d37fc21836cde1665d9e4adb458aecb72f4519bd51779cb70e4e38fe6559b97db6571996f830e7bf4e2ac68b24999cf3e7a749e110082349de7b37549cd05ca2aabe9e536272ae827e7455ecebec856ab6279411f7eeffba38faa75bb5ab7cffa5fe4cb697dcd63fbbdf26b01f04b469b6848047c7a72f260230d19efdc528d9afcbca3e1f77fc9ff030353f7a4e4020000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cd47a38f2eb3729d7ff4e87b1634b5b920b8bfef473bef0e9e1e3c39fdf4f8e0c1e9d393fb9fdefb7d3fa2f6cb6c41cd3f5a5c6f673f58d7f9b682dadea5ef667933ad8b555b544b6af2d4fd959e5775fa3a5bacca3c3d9317d09cba7b5dadeb69fec202c567db0d7fc8209bb7455936792b2d96ebb21c7dd466f545de321cf9f8a379d5e6e576595ce6dba0c31ebd382b9a6c52e6b38f1e9d670480204de7f96c5d527381b2ca6a7ab9cd890afac9799197b32fb2d5aa585ed087dffbfee8a36addaed6edb3fe17f9725a5ff3d87eaffc5a00fc92d1261a1e3c38797af0f474230d19efdc528d9afcbca3e1f77fc9ff03d859b385e4020000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ 
-20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '7fbbad32-f099-4454-9cc8-a67bb16c52a8', + '003bdf82-062f-45cb-ab97-876ec5a1ef2d', 'elapsed-time', - '38', + '18', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:22:25 GMT', + 'Fri, 22 Jan 2021 00:13:23 GMT', 'Content-Length', - '421' ]); + '423' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_status_of_the_indexer.js b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_status_of_the_indexer.js index 1d7dedc955e2..5d20ae8301df 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_status_of_the_indexer.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_gets_the_status_of_the_indexer.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexers(%27my-azure-indexer-1%27)/search.status') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff12f8a695d35d5793b3efec1bacec7afa5f14feeedecedfcfe3b9ffefef776c667cb59fe2eaf4fdfe5d3755b54cbb3e579f5d1e8a365b6c8a9d7c5f5768637b7093c9a6defd2774d9bb5eb86beadd7cb65b1bca08fcaac695fe5cdba245497ebb21c7d44ad05e0b78ba6adeaeb8f1e7deffbd4ae58142dbdfa8b3f5a64ef5ead976f0aeee6e59b9dd704853e7b5a4dd78b7cd99ebe6beb6c8af75f173fa0263bc1b727200bfd98676894d7cd9b4a5fa096bfe497fc3f2e6e44303f010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff12f8a695d35d5793b3efec1bacec7afa5f14feeedecedfcfe3b9ffefef776c667cb59fe2eaf4fdfe5d3755b54cbb3e579f5d1e8a365b6c8a9d7c5f5768637b7093c9a6defd2774d9bb5eb86beadd7cb65b1bca08fcaac695fe5cdba245497ebb21c7d44ad05e0b78ba6adeaeb8f1e7deffbd4ae58142dbdfa8b3f5a64ef5ead976f0aeee6e59b9dd704853e7b5a4dd78b7cd99ebe6beb6c8af75f173fa0263bc1b727200bfd98676894d7cd9b4a5fa096bfe497fc3f2e6e44303f010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'e4097b7a-f88f-4d8c-bde3-6576b5d9bdfd', + 'd717e07d-babe-4717-9c7a-30145faba076', 'elapsed-time', - '24', + '27', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:23:44 GMT', + 'Fri, 22 Jan 2021 00:14:40 GMT', 'Content-Length', - '358' ]); + '358' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_modify_and_updates_the_indexer_object.js 
b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_modify_and_updates_the_indexer_object.js index f5415a5ad3c0..287403148d9e 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_modify_and_updates_the_indexer_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_modify_and_updates_the_indexer_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexers(%27my-azure-indexer-1%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d383839d87c7a70f9fde7bf0e9c9c9effb11b55a668b9cbe5e5c6f673f58d7f9b602dadea5ef667933ad8b555b544b6af2d4fd959e5775fa3a5bacca3c3d9317d09cba7b5dadeb69fec202c567db0d7fc8209bb7455936792b2d96ebb21c7dd466f545de321cf9f8a379d5e6e576595ce6dba0c21ebd382b9a6c52e6b38f1e9d670480204de7f96c5d527381b2ca6a7ab9cdebc67c725ee4e5ec8b6cb52a9617f4e1f7be3ffaa85ab7ab75fbacff45be9cd6d73cb6df2bbf1600bfe4ff01d877cdaa9c010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d38327a79f1e1f9feede3bdd7bf2f4f7fd885a2db3454e5f2faeb7b31faceb7c5b016defd277b3bc99d6c5aa2daa253579eafe4acfab3a7d9d2d56659e9ec90b684eddbdaed6f5347f6181e2b3ed863f6490cddba22c9bbc9516cb75598e3e6ab3fa226f198e7cfcd1bc6af372bb2c2ef36d50618f5e9c154d3629f3d9478fce33024090a6f37cb62ea9b940596535bddce675633e392ff272f645b65a15cb0bfaf07bdf1f7d54addbd5ba7dd6ff225f4eeb6b1edbef955f0b805ff2ff001c0ea9a99c010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,13 +19,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AE9D376CC"', + 'W/"0x8D8BE6AAE13E2BD"', 'Vary', 'Accept-Encoding', 'request-id', - '779ebe88-e6a8-4d35-8899-8059ea195b59', + '5570d2a6-3986-4732-a730-bb8b832933a9', 'elapsed-time', - '7', + '5', 'OData-Version', '4.0', 'Preference-Applied', @@ -32,14 +33,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:23:32 GMT', + 'Fri, 22 Jan 2021 00:14:27 GMT', 'Content-Length', - '395' ]); + '395' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) - .put('/indexers(%27my-azure-indexer-1%27)', {"name":"my-azure-indexer-1","description":"Description for Sample Indexer","dataSourceName":"my-data-source-1","skillsetName":null,"targetIndexName":"hotel-live-test2","schedule":null,"parameters":null,"fieldMappings":[],"outputFieldMappings":[],"disabled":true,"@odata.etag":"\"0x8D8809AE9D376CC\"","encryptionKey":null}) + .put('/indexers(%27my-azure-indexer-1%27)', {"name":"my-azure-indexer-1","description":"Description for Sample 
Indexer","dataSourceName":"my-data-source-1","skillsetName":null,"targetIndexName":"hotel-live-test2","schedule":null,"parameters":null,"fieldMappings":[],"outputFieldMappings":[],"disabled":true,"@odata.etag":"\"0x8D8BE6AAE13E2BD\"","encryptionKey":null}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d383839d87c7a7c7c74f0feeedeeffbe1f51ab65b6c8e9ebc5f576f683759d6f2ba0ed5dfa6e9637d3ba58b545b5a4264fdd5fe97955a7afb3c5aaccd3337901cda9bbd7d5ba9ee62f2c507cb6ddf0870cb2795b946593b7d262b92ecbd1476d565fe42dc3918f3f9a576d5e6e97c565be0d2aecd18bb3a2c926653efbe8515baf7302349de7b37549ad05c82aabe9dd36af1bf3c9799197b32fb2d5aa585ed087dffbfee8a36addaed6edb3fe17f9725a5ff3d07eaffc5a00fc92ff07e7b5399d9b010000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d38327a79f1e1f9f9edc3f7d7af2ecf7fd885a2db3454e5f2faeb7b31faceb7c5b016defd277b3bc99d6c5aa2daa253579eafe4acfab3a7d9d2d56659e9ec90b684eddbdaed6f5347f6181e2b3ed863f6490cddba22c9bbc9516cb75598e3e6ab3fa226f198e7cfcd1bc6af372bb2c2ef36d50618f5e9c154d3629f3d9478fda7a9d13a0e93c9fad4b6a2d4056594defb679dd984fce8bbc9c7d91ad56c5f2823efcdef7471f55eb76b56e9ff5bfc897d3fa9a87f67be5d702e097fc3f7320b9639b010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,13 +53,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AEAAD8314"', + 'W/"0x8D8BE6AAEC5EDCF"', 'Vary', 'Accept-Encoding', 'request-id', - '0822a7a9-0b1a-4325-a16c-fc3b5117e221', + '79e78103-b2e9-48b8-925d-951600f8d0ac', 'elapsed-time', - '65', + '67', 'OData-Version', '4.0', 'Preference-Applied', @@ -64,14 +67,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:23:32 GMT', + 'Fri, 22 Jan 2021 00:14:27 GMT', 'Content-Length', - '394' ]); + '394' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexers(%27my-azure-indexer-1%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d383839d87c7a7c7c74f0feeedeeffbe1f51ab65b6c8e9ebc5f576f683759d6f2ba0ed5dfa6e9637d3ba58b545b5a4264fdd5fe97955a7afb3c5aaccd3337901cda9bbd7d5ba9ee62f2c507cb6ddf0870cb2795b946593b7d262b92ecbd1476d565fe42dc3918f3f9a576d5e6e97c565be0d2aecd18bb3a2c926653efbe8515baf7302349de7b37549ad05c82aabe9dd36af1bf3c9799197b32fb2d5aa585ed087dffbfee8a36addaed6edb3fe17f9725a5ff3d07eaffc5a00fc92ff07e7b5399d9b010000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195eff71fa307f97d7cddddf2d5fb6457bfdd1c800a6161704f5f7fd68e7ddc1d38327a79f1e1f9f9edc3f7d7af2ecf7fd885a2db3454e5f2faeb7b31faceb7c5b016defd277b3bc99d6c5aa2daa253579eafe4acfab3a7d9d2d56659e9ec90b684eddbdaed6f5347f6181e2b3ed863f6490cddba22c9bbc9516cb75598e3e6ab3fa226f198e7cfcd1bc6af372bb2c2ef36d50618f5e9c154d3629f3d9478fda7a9d13a0e93c9fad4b6a2d4056594defb679dd984fce8bbc9c7d91ad56c5f2823efcdef7471f55eb76b56e9ff5bfc897d3fa9a87f67be5d702e097fc3f7320b9639b010000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -82,11 +87,11 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809AEAAD8314"', + 'W/"0x8D8BE6AAEC5EDCF"', 'Vary', 'Accept-Encoding', 'request-id', - '33064fff-d1da-4ffd-badc-63d33655285b', + 'c60ad66c-b8c3-4d0b-af7c-06759bb02a8c', 'elapsed-time', '6', 'OData-Version', @@ -96,6 +101,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:23:32 GMT', + 'Fri, 22 Jan 2021 00:14:27 GMT', 'Content-Length', - '394' ]); + '394' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_throws_error_for_invalid_indexer_object.js b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_throws_error_for_invalid_indexer_object.js index 1c2ec7d65208..312e6ad35526 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_throws_error_for_invalid_indexer_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_indexers/recording_throws_error_for_invalid_indexer_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/indexers(%27garbxyz%27)') .query(true) - .reply(404, {"error":{"code":"","message":"Indexer 'garbxyz' was not found in service 'testsearchcases'."}}, [ 'Cache-Control', + .reply(404, {"error":{"code":"","message":"Indexer 'garbxyz' was not found in service 'testsearchcases'."}}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,9 +19,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'request-id', - 'a358e2ca-b10a-4f3d-b818-c4414c33b7f9', + '378c285a-3155-48a8-9fa7-683de5432fc1', 'elapsed-time', - '9', + '5', 'OData-Version', '4.0', 'Preference-Applied', @@ -28,6 +29,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:23:04 GMT', + 'Fri, 22 Jan 2021 00:14:01 GMT', 'Content-Length', - '95' ]); + '95' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_creates_the_skillset_object_using_createorupdateskillset.js b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_creates_the_skillset_object_using_createorupdateskillset.js index 2845ed1a0748..cd34d56353f5 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_creates_the_skillset_object_using_createorupdateskillset.js +++ 
b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_creates_the_skillset_object_using_createorupdateskillset.js @@ -5,9 +5,10 @@ module.exports.hash = "1838563fc0e9712c8a9969e1999c326d"; module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) - .put('/skillsets(%27my-azureblob-skillset-3%27)', {"name":"my-azureblob-skillset-3","description":"Skillset description","skills":[{"inputs":[{"name":"text","source":"/document/merged_content"},{"name":"languageCode","source":"/document/language"}],"outputs":[{"name":"persons","targetName":"people"},{"name":"organizations","targetName":"organizations"},{"name":"locations","targetName":"locations"}],"@odata.type":"#Microsoft.Skills.Text.EntityRecognitionSkill"}]}) + .put('/skillsets(%27my-azureblob-skillset-3%27)', {"name":"my-azureblob-skillset-3","description":"Skillset description","skills":[{"@odata.type":"#Microsoft.Skills.Text.EntityRecognitionSkill","inputs":[{"name":"text","source":"/document/merged_content"},{"name":"languageCode","source":"/document/language"}],"outputs":[{"name":"persons","targetName":"people"},{"name":"organizations","targetName":"organizations"},{"name":"locations","targetName":"locations"}]}]}) .query(true) - .reply(201, {"@odata.context":"https://endpoint/$metadata#skillsets/$entity","@odata.etag":"\"0x8D8809B47B47264\"","name":"my-azureblob-skillset-3","description":"Skillset description","skills":[{"@odata.type":"#Microsoft.Skills.Text.EntityRecognitionSkill","name":null,"description":null,"context":null,"categories":[],"defaultLanguageCode":null,"minimumPrecision":null,"includeTypelessEntities":null,"inputs":[{"name":"text","source":"/document/merged_content","sourceContext":null,"inputs":[]},{"name":"languageCode","source":"/document/language","sourceContext":null,"inputs":[]}],"outputs":[{"name":"persons","targetName":"people"},{"name":"organizations","targetName":"organizations"},{"name":"locations","targetName":"locations"}]}],"cognitiveServices":null,"knowledgeStore":null,"encryptionKey":null}, [ 'Cache-Control', + .reply(201, {"@odata.context":"https://endpoint/$metadata#skillsets/$entity","@odata.etag":"\"0x8D8BE6B0876A55A\"","name":"my-azureblob-skillset-3","description":"Skillset description","skills":[{"@odata.type":"#Microsoft.Skills.Text.EntityRecognitionSkill","name":null,"description":null,"context":null,"categories":[],"defaultLanguageCode":null,"minimumPrecision":null,"includeTypelessEntities":null,"inputs":[{"name":"text","source":"/document/merged_content","sourceContext":null,"inputs":[]},{"name":"languageCode","source":"/document/language","sourceContext":null,"inputs":[]}],"outputs":[{"name":"persons","targetName":"people"},{"name":"organizations","targetName":"organizations"},{"name":"locations","targetName":"locations"}]}],"cognitiveServices":null,"knowledgeStore":null,"encryptionKey":null}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -16,13 +17,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B47B47264"', + 'W/"0x8D8BE6B0876A55A"', 'Location', - 'https://endpoint/skillsets(\'my-azureblob-skillset-3\')?api-version=2020-06-30', + "https://endpoint/skillsets('my-azureblob-skillset-3')?api-version=2020-06-30", 'request-id', - 'ac01dfab-b3b3-416f-bb81-8fc74c5a2245', + '9eb8bfdd-4010-460b-979c-2c29e72f4d17', 'elapsed-time', - '134', + '58', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,14 +31,16 @@ nock('https://endpoint:443', 
{"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:26:07 GMT', + 'Fri, 22 Jan 2021 00:16:58 GMT', 'Content-Length', - '822' ]); + '822' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/skillsets(%27my-azureblob-skillset-3%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f0e761e3ed97f40ffdbfb74fff7fd885a2db3454e5f2faeb7b31faceb7c5256936d036dfb1e3598e5cdb42e566d512da9dd6bfd2af53f1e7d246f7cf4e87b7640edf50a707ffc8b625a574d75de8ee5d5f11b1ae6f894917c954fab8b650118fc250152747e7c977ef7bb78b45c97e5e82347a6bbb36aba5ed060a9e1346bf38baa2e7260f0d1cbbc6ee88dd1473fb1ceb81bfaf5cbfa225b163fc818d6e8a3af5e3da77f4f1759813e9f570440be784a90de1484c2f7d1fd79b62edbe7d9f2629d5de427d50c98e568b62896c562bd7859e7d3a2c19b8a5eb19c96eb59fe86c65ee64dc3a364acccd7ab754b7f1195749c3c18225fb5aea7f8db8eeaee22af2ff2d9efcf03e6414a9b13fc4def74217eff978c2cd0d2c7d8bc489f3be0a685fd76182c11a25ab7faa7ed62c5346ee8fd36233cdb17e6e36a55e61f79b8541ee17bcdc32fbdb74a9d91de1bee8b5fc2a829ff5ce6aff3fab2983a52bf5d5657653ebbc85fb7554d6fcaa7f9725a5f3343fd5ef9b57cf84bfe1f74921a8783030000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f9e9c7efa64e7e0c1a7c7f7ef1fffbe1f51ab65b6c8e9ebc5f576f683759d4fca6ab26da06ddfa306b3bc99d6c5aa2daa25b57bad5fa5fec7a38fe48d8f1e7dcf0ea8bd5e01ee8f7f514cebaaa9cedbb1bc3a7e43c31c9f3292aff26975b12c0083bf24408ace8fefd2ef7e178f96ebb21c7de4c87477564dd70b1a2c359c666d7e51d5450e0c3e7a99d70dbd31fae827d6197743bf7e595f64cbe20719c31a7df4d5abe7f4efe9222bd0e7f38a00c8174f09d29b8250f83eba3fcfd665fb3c5b5eacb38bfca49a01b31ccd16c5b258ac172feb7c5a347853d12b96d3723dcbdfd0d8cbbc6978948c95f97ab56ee92fa2928e930743e4abd6f5147fdb51dd5de4f5453efbfd79c03c48697382bfe99d2ec4efff9291055afa189b17e97307dcb4b0df0e83254254eb56ffb45dac98c60dbddf668467fbc27c5cadcafc230f97ca237caf79f8a5f756a933d27bc37df14b1835e59fcbfc755e5f165347eab7cbeaaacc6717f9ebb6aae94df9345f4eeb6b66a8df2bbf960f7fc9ff03a724009983030000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -48,13 +51,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B47B47264"', + 'W/"0x8D8BE6B0876A55A"', 'Vary', 'Accept-Encoding', 'request-id', - '46f471ef-f415-43cc-a8fe-d635325ff3c6', + 'ba367bb7-4f56-437c-bde2-fee8b01df288', 'elapsed-time', - '68', + '28', 'OData-Version', '4.0', 'Preference-Applied', @@ -62,24 +65,27 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:26:07 GMT', + 'Fri, 22 Jan 2021 00:16:58 GMT', 'Content-Length', - '585' ]); + '587' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .delete('/skillsets(%27my-azureblob-skillset-3%27)') .query(true) - 
.reply(204, "", [ 'Cache-Control', + .reply(204, "", [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', 'Expires', '-1', 'request-id', - '8896d7a9-8209-407d-bb27-7b4ae996cfb9', + 'd8e8346c-18fc-4e80-889f-c9ea45176aad', 'elapsed-time', - '39', + '42', 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:26:07 GMT' ]); + 'Fri, 22 Jan 2021 00:16:58 GMT' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_correct_skillset_object.js b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_correct_skillset_object.js index 5f1aaadf2499..16c80e9b7ed8 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_correct_skillset_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_correct_skillset_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/skillsets(%27my-azureblob-skillset-1%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f0e761e3eb977efdecebdfde327bfef47d46a992d72fa7a71bd9dfd605de793b29a6c1b68dbbbd4609637d3ba58b545b5a476aff5abd4ff78f491bcf1d1a3efd901b5d72bc0fdf12f8a695d35d5793b9657c76f6898e35346f2553ead2e960560f0970448d1f9f15ecfcb75598e3e7264ba3baba6eb050d961a4eb336bfa8ea2207061fbdcceb86de187df413eb8cbba15fbfac2fb265f1838c618d3efaead573faf7749115e8f3794500e48ba704e94d41287c1fdd9f67ebb27d9e2d2fd6d9457e52cd80598e668b62592cd68b97753e2d1abca9e815cb69b99ee56f68ec65de343c4ac6ca7cbd5ab7f4175149c7c98321f255eb7a8abfeda8ee2ef2fa229ffdfe3c601ea4b439c1dff44e17e2f77fc9c8022d7d8ccd8bf4b9036e5ad86f87c11221aa75ab7fda2e564ce386de6f33c2b37d613eae5665fe918f8bd2b6d7d47df14bb813e584cbfc755e5f165347b4b7cbeaaacc6717f9ebb6aae94df9345f4eeb6b668ddf2bbf960f7fc9ff0312b0cd734d030000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f9e9c7e7afcecfeded3834ff7f77edf8fa8d5325be4f4f5e27a3bfbc1bace276535d936d0b677a9c12c6fa675b16a8b6a49ed5eeb57a9fff1e82379e3a347dfb3036aaf5780fbe35f14d3ba6aaaf3762caf8edfd030c7a78ce4ab7c5a5d2c0bc0e02f0990a2f3e3bd9e97ebb21c7de4c87477564dd70b1a2c359c666d7e51d5450e0c3e7a99d70dbd31fae827d6197743bf7e595f64cbe20719c31a7df4d5abe7f4efe9222bd0e7f38a00c8174f09d29b8250f83eba3fcfd665fb3c5b5eacb38bfca49a01b31ccd16c5b258ac172feb7c5a347853d12b96d3723dcbdfd0d8cbbc6978948c95f97ab56ee92fa2928e930743e4abd6f5147fdb51dd5de4f5453efbfd79c03c48697382bfe99d2ec4efff9291055afa189b17e97307dcb4b0df0e83254254eb56ffb45dac98c60dbddf668467fbc27c5cadcafc231f17a56dafa9fbe2977027ca0997f9ebbcbe2ca68e686f97d55599cf2ef2d76d55d39bf269be9cd6d7cc1abf577e2d1ffe92ff07a39146104d030000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', 
@@ -18,13 +19,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B333034AB"', + 'W/"0x8D8BE6AF52D8642"', 'Vary', 'Accept-Encoding', 'request-id', - '5e103131-2b15-49a5-98bf-9b66cdf41a0d', + '176d344c-2c15-4f5c-80e1-06948a5430a7', 'elapsed-time', - '335', + '30', 'OData-Version', '4.0', 'Preference-Applied', @@ -32,6 +33,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:25:41 GMT', + 'Fri, 22 Jan 2021 00:16:32 GMT', 'Content-Length', - '573' ]); + '573' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_list_of_skillset_names.js b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_list_of_skillset_names.js index 5171689eda51..099e96006cf9 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_list_of_skillset_names.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_list_of_skillset_names.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/skillsets') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde365bcb6c91dff968f4d16556aef38f1e7def177f844f08eee27a3bfbc1bace276535d936edb7773ffa25a31b9aec7df44bbeff4bfe1f2c9e0ca0a9000000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde365bcb6c91dff968f4d16556aef38f1e7def177f844f08eee27a3bfbc1bace276535d936edb7773ffa25a31b9aec7df44bbeff4bfe1f2c9e0ca0a9000000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - '7b88ce41-d371-41dc-878d-f3ed7b7120a0', + 'c4ecc703-4ed3-494f-ae8e-9a9d495000af', 'elapsed-time', - '26', + '24', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:25:28 GMT', + 'Fri, 22 Jan 2021 00:16:20 GMT', 'Content-Length', - '237' ]); + '237' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_list_of_skillsets.js b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_list_of_skillsets.js index 2866ca56f980..34ec9a85a2c0 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_list_of_skillsets.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_gets_the_list_of_skillsets.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} 
nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/skillsets') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde361f8d3ebaccca75fed1a3ef59d8d4e88200ffbe1fedbc3b787a70b0f3f0c9debd27a77bc7f7767fdf8fa8fd325b50f38f16d7dbd90fd6753e29abc9b681b7bd4b0d667933ad8b555b544b6af75abf4afd8f471fc91b7eb7edf50a707ffc8b625a574d75de8ee5d5f11b1ae8f874d916edf5ab7c5a5d2c0bc0e02f0990a2f3e3bd9e97ebb21c7de4087577564dd78b7cd952c369d6e617555de4c0e0a39779ddd01ba38f7e629d7137f4eb97f545b62c7e9031acd1475fbd7a4eff9e2eb2027d3eaf08807cf19420bd290885efa3fbf36c5db6cfb3e5c53abbc84faa1930cbd16c512c8bc57af1b2cea745833715bd62392dd7b3fc0d8dbdcc9b8647c95899af576b9a245049c7c98321f255eb7a8abfeda8ee2ef2fa229ffdfe3c601ea4b439c1dff44e17e2f77fc9c8022d7d8ccd8bf4b9036e5ad86f87c11221aa75ab7fda2e564c63f05b9b119eed0bf371b52af38f7c5c94b6bda6ee8b5fc29d28275ce6aff3fab2983aa2bd5d5657653ebbc85fb7554d6fcaa7f9725a5f336bfc5ef9b57c885e95f9623c7f727a6fefc9bd9b797e8f1a049cf7239eff11cf03d4ff2b79fefbbfe4ff016d478dc745060000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde361f8d3ebaccca75fed1a3ef59d8d4e88200ffbe1fedbc3b787af0e4f4d3e3d3fb4f760fe8dfdff7236abfcc16d4fca3c5f576f683759d4fca6ab26de06def528359de4ceb62d516d592dabdd6af52ffe3d147f286df6d7bbd02dc1fffa298d655539db7637975fc86063a3e5db6457bfd2a9f5617cb0230f84b02a4e8fc78afe7e5ba2c471f3942dd9d55d3f5225fb6d4709ab5f94555173930f8e8655e37f4c6e8a39f5867dc0dfdfa657d912d8b1f640c6bf4d157af9ed3bfa78bac409fcf2b02205f3c25486f0a42e1fbe8fe3c5b97edf36c79b1ce2ef2936a06cc72345b14cb62b15ebcacf369d1e04d45af584ecbf52c7f43632ff3a6e1513256e6ebd59a260954d271f260887cd5ba9ee26f3baabb8bbcbec867bf3f0f9807296d4ef037bdd385f8fd5f32b2404b1f63f3227dee809b16f6db61b044886addea9fb68b15d318fcd6668467fbc27c5cadcafc231f17a56dafa9fbe2977027ca0997f9ebbcbe2ca68e686f97d55599cf2ef2d76d55d39bf269be9cd6d7cc1abf577e2d1fa25765be18cf9fec3ebcf7e9a737f3fc1e350838ef473cff239e07a8ff57f2fcf77fc9ff03d0bea16c45060000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -20,9 +21,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Vary', 'Accept-Encoding', 'request-id', - 'd965c116-ae50-4429-adb1-9fb23c8814b0', + '7c064664-5805-49b5-afa7-9eeea37789d2', 'elapsed-time', - '64', + '100', 'OData-Version', '4.0', 'Preference-Applied', @@ -30,6 +31,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:25:14 GMT', + 'Fri, 22 Jan 2021 00:16:07 GMT', 'Content-Length', - '605' ]); + '604' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_modify_and_updates_the_skillsets_object.js b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_modify_and_updates_the_skillsets_object.js index 5a9b90b67d74..895c1c463a2a 100644 --- 
a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_modify_and_updates_the_skillsets_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_modify_and_updates_the_skillsets_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/skillsets(%27my-azureblob-skillset-2%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f0e761e3ed97ffaf0d3930727f77fdf8fa8d5325be4f4f5e27a3bfbc1bace276535d936d0b6f7a8c12c6fa675b16a8b6a49ed5eeb57a9fff1e82379e3a347dfb3036aaf5780fbe35f14d3ba6aaaf3762caf8edfd030c7a78ce4ab7c5a5d2c0bc0e02f0990a2f3e3bbf4bbdfc5a3e5ba2c471f3932dd9d55d3f582064b0da7599b5f54759103838f5ee675436f8c3efa8975c6ddd0af5fd617d9b2f841c6b0461f7df5ea39fd7bbac80af4f9bc2200f2c55382f4a62014be8feecfb375d93ecf9617ebec223fa966c02c47b345b12c16ebc5cb3a9f160dde54f48ae5b45ccff23734f6326f1a1e256365be5ead5bfa8ba8a4e3e4c110f9aa753dc5df76547717797d91cf7e7f1e300f52da9ce06f7aa70bf1fbbf646481963ec6e645fadc01372decb7c3608910d5bad53f6d172ba67143efb719e1d9be301f57ab32ffc8c74569db6beabef825dc8972c265fe3aaf2f8ba923dadb657555e6b38bfc755bd5f4a67c9a2fa7f535b3c6ef955fcb87bfe4ff017a3217da4d030000"], [ 'Cache-Control', + .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f9e9c7efa64e7c9c1dee9c9d307bfef47d46a992d72fa7a71bd9dfd605de793b29a6c1b68db7bd4609637d3ba58b545b5a476aff5abd4ff78f491bcf1d1a3efd901b5d72bc0fdf12f8a695d35d5793b9657c76f6898e35346f2553ead2e960560f0970448d1f9f15dfaddefe2d1725d96a38f1c99eeceaae97a4183a586d3accd2faabac881c1472ff3baa137461ffdc43ae36ee8d72feb8b6c59fc206358a38fbe7af59cfe3d5d6405fa7c5e1100f9e229417a53100adf47f7e7d9ba6c9f67cb8b7576919f54336096a3d9a258168bf5e2659d4f8b066f2a7ac5725aae67f91b1a7b99370d8f92b1325fafd62dfd4554d271f260887cd5ba9ee26f3baabb8bbcbec867bf3f0f9807296d4ef037bdd385f8fd5f32b2404b1f63f3227dee809b16f6db61b044886addea9fb68b15d3b8a1f7db8cf06c5f988fab55997fe4e3a2b4ed35755ffc12ee4439e1327f9dd797c5d411ededb2ba2af3d945febaad6a7a533ecd97d3fa9a59e3f7caafe5c35ff2ff003c9dcab04d030000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,13 +19,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B4D96C7C5"', + 'W/"0x8D8BE6B0B82ECD7"', 'Vary', 'Accept-Encoding', 'request-id', - '0b40e2b4-b27d-4511-b4c1-9366b810af5c', + '1764ddb9-322f-4b0e-b459-d6810a28a856', 'elapsed-time', - '33', + '28', 'OData-Version', '4.0', 'Preference-Applied', @@ -32,14 +33,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:26:26 GMT', + 'Fri, 22 Jan 2021 00:17:10 GMT', 'Content-Length', - '577' ]); + '577' +]); nock('https://endpoint:443', 
{"encodedQueryParams":true}) - .put('/skillsets(%27my-azureblob-skillset-2%27)', {"name":"my-azureblob-skillset-2","description":"Skillset description","skills":[{"name":"#1","description":null,"context":"/document","inputs":[{"name":"text","source":"/document/merged_content","sourceContext":null,"inputs":[]},{"name":"languageCode","source":"/document/language","sourceContext":null,"inputs":[]}],"outputs":[{"name":"persons","targetName":"people"},{"name":"locations","targetName":"locations"},{"name":"organizations","targetName":"organizations"}],"@odata.type":"#Microsoft.Skills.Text.EntityRecognitionSkill","categories":["Person","Quantity","Organization","URL","Email","Location","DateTime"],"defaultLanguageCode":"en","includeTypelessEntities":null,"minimumPrecision":null}],"cognitiveServices":null,"@odata.etag":"\"0x8D8809B4D96C7C5\"","encryptionKey":null}) + .put('/skillsets(%27my-azureblob-skillset-2%27)', {"name":"my-azureblob-skillset-2","description":"Skillset description","skills":[{"@odata.type":"#Microsoft.Skills.Text.EntityRecognitionSkill","name":"#1","description":null,"context":"/document","inputs":[{"name":"text","source":"/document/merged_content","sourceContext":null,"inputs":[]},{"name":"languageCode","source":"/document/language","sourceContext":null,"inputs":[]}],"outputs":[{"name":"persons","targetName":"people"},{"name":"locations","targetName":"locations"},{"name":"organizations","targetName":"organizations"}]}],"cognitiveServices":null,"@odata.etag":"\"0x8D8BE6B0B82ECD7\"","encryptionKey":null}) .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f0e761e3eb9bfb77ff0e0e9cebddff7236ab5cc16397dbdb8dece7eb0aef349594db60db4ed3d6a30cb9b695dacdaa25a52bbd7fa55ea7f3cfa48def8e8d1f7ec80daeb15e0fef817c5b4ae9aeabc1dcbabe33734ccf12923f92a9f5617cb0230f84b02a4e8fcf82efdee77f168b92ecbd1478e4c7767d574bda0c152c369d6e617555de4c0e0a39779ddd01ba38f7e629d7137f4eb97f545b62c7e9031acd1475fbd7a4eff9e2eb2027d3eaf08807cf19420bd290885efa3fbf36c5db6cfb3e5c53abbc84faa1930cbd16c512c8bc57af1b2cea745833715bd62392dd7b3fc0d8dbdcc9b8647c95899af57eb96fe222ae938793044be6a5d4ff1b71dd5dd455e5fe4b3df9f07cc83943627f89bdee942fcfe2f1959a0a58fb179913e77c04d0bfbed30582244b56ef54fdbc58a69dcd0fb6d4678b62fccc7d5aacc3ff27151daf69aba2fbcd695374dbd37c22f7f09a3a6fc7399bfceebcb62ea48fd76595d95f9ec227fdd5635bd2d9fe6cb697dcd0cf57be5d7f2e12ff97f0028a9e4b783030000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f9e9c7efa647767e7e0f8c9a70f7fdf8fa8d5325be4f4f5e27a3bfbc1bace276535d936d0b6f7a8c12c6fa675b16a8b6a49ed5eeb57a9fff1e82379e3a347dfb3036aaf5780fbe35f14d3ba6aaaf3762caf8edfd030c7a78ce4ab7c5a5d2c0bc0e02f0990a2f3e3bbf4bbdfc5a3e5ba2c471f3932dd9d55d3f582064b0da7599b5f5475910383efe3c5f36c5db6cfb3e5c53abbc84faa19c114008b62592cd68b97753e2d1a0f70b19c96eb59fe86b02ef3a661fc189ef97ab56ee92f1a9f62c868d0c0ab753dc5df169fbb8bbcbec867bf3fa3cae8499b13fc4def74217eff978c2cd0d2c7d8bc489f3be0a685fd76182c11a25ab7faa7ed6295d74db56ce8fd36233cdb17e6e36a55e61ff9b8544456a250afa9fbc26b5dd517d9b2f8817ed17923fcf297306a3af397f9ebbcbe2ca68ed46f97d55599cf2ef2d76d55d3dbf269be9cd6d7cc0abf577e2d1ffe92ff0726e1378f3d030000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -50,13 +53,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B52487D03"', + 'W/"0x8D8BE6B1008AB69"', 'Vary', 'Accept-Encoding', 'request-id', - '8cbd240e-b0c1-4cf4-8a52-bb0f3d6750a3', + '88419731-09de-4454-99df-c4c9bae42b13', 'elapsed-time', - '65', + '63', 'OData-Version', '4.0', 'Preference-Applied', @@ -64,14 +67,16 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:26:26 GMT', + 'Fri, 22 Jan 2021 00:17:10 GMT', 'Content-Length', - '586' ]); + '544' +]); nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/skillsets(%27my-azureblob-skillset-2%27)') .query(true) - .reply(200, ["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f0e761e3eb9bfb77ff0e0e9cebddff7236ab5cc16397dbdb8dece7eb0aef349594db60db4ed3d6a30cb9b695dacdaa25a52bbd7fa55ea7f3cfa48def8e8d1f7ec80daeb15e0fef817c5b4ae9aeabc1dcbabe33734ccf12923f92a9f5617cb0230f84b02a4e8fcf82efdee77f168b92ecbd1478e4c7767d574bda0c152c369d6e617555de4c0e0a39779ddd01ba38f7e629d7137f4eb97f545b62c7e9031acd1475fbd7a4eff9e2eb2027d3eaf08807cf19420bd290885efa3fbf36c5db6cfb3e5c53abbc84faa1930cbd16c512c8bc57af1b2cea745833715bd62392dd7b3fc0d8dbdcc9b8647c95899af57eb96fe222ae938793044be6a5d4ff1b71dd5dd455e5fe4b3df9f07cc83943627f89bdee942fcfe2f1959a0a58fb179913e77c04d0bfbed30582244b56ef54fdbc58a69dcd0fb6d4678b62fccc7d5aacc3ff27151daf69aba2fbcd695374dbd37c22f7f09a3a6fc7399bfceebcb62ea48fd76595d95f9ec227fdd5635bd2d9fe6cb697dcd0cf57be5d7f2e12ff97f0028a9e4b783030000"], [ 'Cache-Control', + .reply(200, 
["1f8b0800000000000400edbd07601c499625262f6dca7b7f4af54ad7e074a10880601324d8904010ecc188cde692ec1d69472329ab2a81ca6556655d661640cced9dbcf7de7befbdf7de7befbdf7ba3b9d4e27f7dfff3f5c6664016cf6ce4adac99e2180aac81f3f7e7c1f3f227ef147bf6735cbda6c3cad966dfeaefde8d147f3b65d358feede6df3a66df2ac9ecea759933763f97d7c552c67d555335ee6eddddf6d91b7195efff1e66d51964dde36777fb77cd916edf5472303999a5c10d8dff7a39d77074f0f9e9c7efa647767e7e0f8c9a70f7fdf8fa8d5325be4f4f5e27a3bfbc1bace276535d936d0b6f7a8c12c6fa675b16a8b6a49ed5eeb57a9fff1e82379e3a347dfb3036aaf5780fbe35f14d3ba6aaaf3762caf8edfd030c7a78ce4ab7c5a5d2c0bc0e02f0990a2f3e3bbf4bbdfc5a3e5ba2c471f3932dd9d55d3f582064b0da7599b5f54759103838f5ee675436f8c3efa8975c6ddd0af5fd617d9b2f841c6b0461f7df5ea39fd7bbac80af4f9bc2200f2c55382f4a62014be8feecfb375d93ecf9617ebec223fa966c02c47b345b12c16ebc5cb3a9f160dde54f48ae5b45ccff23734f6326f1a1e256365be5ead5bfa8ba8a4e3e4c110f9aa753dc5df76547717797d91cf7e7f1e300f52da9ce06f7aa70bf1fbbf646481963ec6e645fadc01372decb7c3608910d5bad53f6d172ba67143efb719e1d9be301f57ab32ffc8c74569db6beabef05a57de34f5de08bffc258c9af2cf65fe3aaf2f8ba923f5db657555e6b38bfc755bd5f4b67c9a2fa7f53533d4ef955fcb87bfe4ff01483ac29083030000"], [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -82,13 +87,13 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'ETag', - 'W/"0x8D8809B52487D03"', + 'W/"0x8D8BE6B1008AB69"', 'Vary', 'Accept-Encoding', 'request-id', - 'bc60a638-a574-4139-b204-9adac11626f5', + '439c420d-7a91-4ff7-8a85-41b8891552fb', 'elapsed-time', - '85', + '28', 'OData-Version', '4.0', 'Preference-Applied', @@ -96,6 +101,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:26:26 GMT', + 'Fri, 22 Jan 2021 00:17:11 GMT', 'Content-Length', - '586' ]); + '586' +]); diff --git a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_throws_error_for_invalid_skillset_object.js b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_throws_error_for_invalid_skillset_object.js index 78a223bbf5f8..c3f00999b83e 100644 --- a/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_throws_error_for_invalid_skillset_object.js +++ b/sdk/search/search-documents/recordings/node/searchindexerclient_skillsets/recording_throws_error_for_invalid_skillset_object.js @@ -7,7 +7,8 @@ module.exports.testInfo = {"uniqueName":{},"newDate":{}} nock('https://endpoint:443', {"encodedQueryParams":true}) .get('/skillsets(%27garbxyz%27)') .query(true) - .reply(404, {"error":{"code":"","message":"No skillset with the name 'garbxyz' was found in service 'Microsoft.WindowsAzure.Search.Core.Models.SearchService'."}}, [ 'Cache-Control', + .reply(404, {"error":{"code":"","message":"No skillset with the name 'garbxyz' was found in service 'Microsoft.WindowsAzure.Search.Core.Models.SearchService'."}}, [ + 'Cache-Control', 'no-cache', 'Pragma', 'no-cache', @@ -18,9 +19,9 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Expires', '-1', 'request-id', - '3f7e2cc7-9263-4cf4-98ee-fded9571740d', + '27f69e01-6eb6-4592-8037-a0b8f27c9b52', 'elapsed-time', - '21', + '22', 'OData-Version', '4.0', 'Preference-Applied', @@ -28,6 +29,7 @@ nock('https://endpoint:443', {"encodedQueryParams":true}) 'Strict-Transport-Security', 'max-age=15724800; includeSubDomains', 'Date', - 'Wed, 04 Nov 2020 08:25:55 GMT', + 'Fri, 22 Jan 2021 00:16:45 GMT', 'Content-Length', - '149' ]); + '149' +]); 
diff --git a/sdk/search/search-documents/review/search-documents.api.md b/sdk/search/search-documents/review/search-documents.api.md index 9f1ba16e7e9c..c0f80505f535 100644 --- a/sdk/search/search-documents/review/search-documents.api.md +++ b/sdk/search/search-documents/review/search-documents.api.md @@ -37,11 +37,10 @@ export interface AnalyzeResult { export type AnalyzeTextOptions = OperationOptions & AnalyzeRequest; // @public -export interface AsciiFoldingTokenFilter { - name: string; +export type AsciiFoldingTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter"; preserveOriginal?: boolean; -} +}; // @public export interface AutocompleteItem { @@ -50,7 +49,7 @@ export interface AutocompleteItem { } // @public -export type AutocompleteMode = 'oneTerm' | 'twoTerms' | 'oneTermWithContext'; +export type AutocompleteMode = "oneTerm" | "twoTerms" | "oneTermWithContext"; // @public export type AutocompleteOptions = OperationOptions & AutocompleteRequest; @@ -82,68 +81,122 @@ export interface AzureActiveDirectoryApplicationCredentials { export { AzureKeyCredential } // @public -export type BlobIndexerDataToExtract = 'storageMetadata' | 'allMetadata' | 'contentAndMetadata'; +export interface BaseCharFilter { + name: string; + odatatype: "#Microsoft.Azure.Search.MappingCharFilter" | "#Microsoft.Azure.Search.PatternReplaceCharFilter"; +} // @public -export type BlobIndexerImageAction = 'none' | 'generateNormalizedImages' | 'generateNormalizedImagePerPage'; +export interface BaseCognitiveServicesAccount { + description?: string; + odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices" | "#Microsoft.Azure.Search.CognitiveServicesByKey"; +} // @public -export type BlobIndexerParsingMode = 'default' | 'text' | 'delimitedText' | 'json' | 'jsonArray' | 'jsonLines'; +export interface BaseDataChangeDetectionPolicy { + odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy" | "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"; +} // @public -export type BlobIndexerPDFTextRotationAlgorithm = 'none' | 'detectAngles'; +export interface BaseDataDeletionDetectionPolicy { + odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"; +} // @public -export interface BM25Similarity { - b?: number; - k1?: number; - odatatype: "#Microsoft.Azure.Search.BM25Similarity"; +export interface BaseLexicalAnalyzer { + name: string; + odatatype: "#Microsoft.Azure.Search.CustomAnalyzer" | "#Microsoft.Azure.Search.PatternAnalyzer" | "#Microsoft.Azure.Search.StandardAnalyzer" | "#Microsoft.Azure.Search.StopAnalyzer"; } // @public -export type CharFilter = MappingCharFilter | PatternReplaceCharFilter; +export interface BaseLexicalTokenizer { + name: string; + odatatype: "#Microsoft.Azure.Search.ClassicTokenizer" | "#Microsoft.Azure.Search.EdgeNGramTokenizer" | "#Microsoft.Azure.Search.KeywordTokenizer" | "#Microsoft.Azure.Search.KeywordTokenizerV2" | "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer" | "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer" | "#Microsoft.Azure.Search.NGramTokenizer" | "#Microsoft.Azure.Search.PathHierarchyTokenizerV2" | "#Microsoft.Azure.Search.PatternTokenizer" | "#Microsoft.Azure.Search.StandardTokenizer" | "#Microsoft.Azure.Search.StandardTokenizerV2" | "#Microsoft.Azure.Search.UaxUrlEmailTokenizer"; +} // @public -export interface CjkBigramTokenFilter { - ignoreScripts?: CjkBigramTokenFilterScripts[]; +export interface BaseScoringFunction { + boost: number; + fieldName: string; + interpolation?: 
ScoringFunctionInterpolation; + type: "distance" | "freshness" | "magnitude" | "tag"; +} + +// @public +export interface BaseSearchIndexerSkill { + context?: string; + description?: string; + inputs: InputFieldMappingEntry[]; + name?: string; + odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Custom.WebApiSkill"; + outputs: OutputFieldMappingEntry[]; +} + +// @public +export interface BaseTokenFilter { name: string; + odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter" | "#Microsoft.Azure.Search.CjkBigramTokenFilter" | "#Microsoft.Azure.Search.CommonGramTokenFilter" | "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter" | "#Microsoft.Azure.Search.EdgeNGramTokenFilter" | "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2" | "#Microsoft.Azure.Search.ElisionTokenFilter" | "#Microsoft.Azure.Search.KeepTokenFilter" | "#Microsoft.Azure.Search.KeywordMarkerTokenFilter" | "#Microsoft.Azure.Search.LengthTokenFilter" | "#Microsoft.Azure.Search.LimitTokenFilter" | "#Microsoft.Azure.Search.NGramTokenFilter" | "#Microsoft.Azure.Search.NGramTokenFilterV2" | "#Microsoft.Azure.Search.PatternCaptureTokenFilter" | "#Microsoft.Azure.Search.PatternReplaceTokenFilter" | "#Microsoft.Azure.Search.PhoneticTokenFilter" | "#Microsoft.Azure.Search.ShingleTokenFilter" | "#Microsoft.Azure.Search.SnowballTokenFilter" | "#Microsoft.Azure.Search.StemmerTokenFilter" | "#Microsoft.Azure.Search.StemmerOverrideTokenFilter" | "#Microsoft.Azure.Search.StopwordsTokenFilter" | "#Microsoft.Azure.Search.SynonymTokenFilter" | "#Microsoft.Azure.Search.TruncateTokenFilter" | "#Microsoft.Azure.Search.UniqueTokenFilter" | "#Microsoft.Azure.Search.WordDelimiterTokenFilter"; +} + +// @public +export type BlobIndexerDataToExtract = string; + +// @public +export type BlobIndexerImageAction = string; + +// @public +export type BlobIndexerParsingMode = string; + +// @public +export type BlobIndexerPDFTextRotationAlgorithm = string; + +// @public +export type BM25Similarity = Similarity & { + odatatype: "#Microsoft.Azure.Search.BM25Similarity"; + k1?: number | null; + b?: number | null; +}; + +// @public +export type CharFilter = MappingCharFilter | PatternReplaceCharFilter; + +// @public +export type CjkBigramTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.CjkBigramTokenFilter"; + ignoreScripts?: CjkBigramTokenFilterScripts[]; outputUnigrams?: boolean; -} +}; // @public -export type CjkBigramTokenFilterScripts = 'han' | 'hiragana' | 'katakana' | 'hangul'; +export type CjkBigramTokenFilterScripts = "han" | "hiragana" | "katakana" | "hangul"; // @public -export interface ClassicSimilarity { +export type ClassicSimilarity = Similarity & { odatatype: "#Microsoft.Azure.Search.ClassicSimilarity"; -} +}; // @public -export interface ClassicTokenizer { - maxTokenLength?: number; - name: string; +export type ClassicTokenizer = BaseLexicalTokenizer & { odatatype: "#Microsoft.Azure.Search.ClassicTokenizer"; -} + maxTokenLength?: number; +}; // @public export type CognitiveServicesAccount = 
DefaultCognitiveServicesAccount | CognitiveServicesAccountKey; // @public -export interface CognitiveServicesAccountKey { - description?: string; - key: string; +export type CognitiveServicesAccountKey = BaseCognitiveServicesAccount & { odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey"; -} + key: string; +}; // @public -export interface CommonGramTokenFilter { +export type CommonGramTokenFilter = BaseTokenFilter & { + odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter"; commonWords: string[]; ignoreCase?: boolean; - name: string; - odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter"; useQueryMode?: boolean; -} +}; // @public export type ComplexDataType = "Edm.ComplexType" | "Collection(Edm.ComplexType)"; @@ -156,19 +209,14 @@ export interface ComplexField { } // @public -export interface ConditionalSkill { - context?: string; - description?: string; - inputs: InputFieldMappingEntry[]; - name?: string; +export type ConditionalSkill = BaseSearchIndexerSkill & { odatatype: "#Microsoft.Skills.Util.ConditionalSkill"; - outputs: OutputFieldMappingEntry[]; -} +}; // @public export interface CorsOptions { allowedOrigins: string[]; - maxAgeInSeconds?: number; + maxAgeInSeconds?: number | null; } // @public @@ -221,9 +269,47 @@ export interface CustomAnalyzer { name: string; odatatype: "#Microsoft.Azure.Search.CustomAnalyzer"; tokenFilters?: string[]; - tokenizerName: string; + tokenizer: string; +} + +// @public +export interface CustomEntity { + accentSensitive?: boolean | null; + aliases?: CustomEntityAlias[] | null; + caseSensitive?: boolean | null; + defaultAccentSensitive?: boolean | null; + defaultCaseSensitive?: boolean | null; + defaultFuzzyEditDistance?: number | null; + description?: string | null; + fuzzyEditDistance?: number | null; + id?: string | null; + name: string; + subtype?: string | null; + type?: string | null; +} + +// @public +export interface CustomEntityAlias { + accentSensitive?: boolean | null; + caseSensitive?: boolean | null; + fuzzyEditDistance?: number | null; + text: string; } +// @public +export type CustomEntityLookupSkill = BaseSearchIndexerSkill & { + odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill"; + defaultLanguageCode?: CustomEntityLookupSkillLanguage | null; + entitiesDefinitionUri?: string | null; + inlineEntitiesDefinition?: CustomEntity[] | null; + globalDefaultCaseSensitive?: boolean | null; + globalDefaultAccentSensitive?: boolean | null; + globalDefaultFuzzyEditDistance?: number | null; +}; + +// @public +export type CustomEntityLookupSkillLanguage = string; + // @public export type DataChangeDetectionPolicy = HighWaterMarkChangeDetectionPolicy | SqlIntegratedChangeTrackingPolicy; @@ -240,10 +326,9 @@ export const DEFAULT_FLUSH_WINDOW: number; export const DEFAULT_RETRY_COUNT: number; // @public -export interface DefaultCognitiveServicesAccount { - description?: string; +export type DefaultCognitiveServicesAccount = BaseCognitiveServicesAccount & { odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices"; -} +}; // @public export interface DeleteDataSourceConnectionOptions extends OperationOptions { @@ -274,24 +359,20 @@ export interface DeleteSynonymMapOptions extends OperationOptions { } // @public -export interface DictionaryDecompounderTokenFilter { - maxSubwordSize?: number; - minSubwordSize?: number; - minWordSize?: number; - name: string; +export type DictionaryDecompounderTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter"; - onlyLongestMatch?: boolean; 
wordList: string[]; -} + minWordSize?: number; + minSubwordSize?: number; + maxSubwordSize?: number; + onlyLongestMatch?: boolean; +}; // @public -export interface DistanceScoringFunction { - boost: number; - fieldName: string; - interpolation?: ScoringFunctionInterpolation; - parameters: DistanceScoringParameters; +export type DistanceScoringFunction = BaseScoringFunction & { type: "distance"; -} + parameters: DistanceScoringParameters; +}; // @public export interface DistanceScoringParameters { @@ -309,43 +390,36 @@ export interface EdgeNGramTokenFilter { } // @public -export type EdgeNGramTokenFilterSide = 'front' | 'back'; +export type EdgeNGramTokenFilterSide = "front" | "back"; // @public -export interface EdgeNGramTokenizer { - maxGram?: number; - minGram?: number; - name: string; +export type EdgeNGramTokenizer = BaseLexicalTokenizer & { odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer"; + minGram?: number; + maxGram?: number; tokenChars?: TokenCharacterKind[]; -} +}; // @public -export interface ElisionTokenFilter { - articles?: string[]; - name: string; +export type ElisionTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter"; -} + articles?: string[]; +}; // @public -export type EntityCategory = 'location' | 'organization' | 'person' | 'quantity' | 'datetime' | 'url' | 'email'; +export type EntityCategory = string; // @public -export interface EntityRecognitionSkill { +export type EntityRecognitionSkill = BaseSearchIndexerSkill & { + odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill"; categories?: EntityCategory[]; - context?: string; defaultLanguageCode?: EntityRecognitionSkillLanguage; - description?: string; - includeTypelessEntities?: boolean; - inputs: InputFieldMappingEntry[]; - minimumPrecision?: number; - name?: string; - odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill"; - outputs: OutputFieldMappingEntry[]; -} + includeTypelessEntities?: boolean | null; + minimumPrecision?: number | null; +}; // @public -export type EntityRecognitionSkillLanguage = 'ar' | 'cs' | 'zh-Hans' | 'zh-Hant' | 'da' | 'nl' | 'en' | 'fi' | 'fr' | 'de' | 'el' | 'hu' | 'it' | 'ja' | 'ko' | 'no' | 'pl' | 'pt-PT' | 'pt-BR' | 'ru' | 'es' | 'sv' | 'tr'; +export type EntityRecognitionSkillLanguage = string; // @public export interface FacetResult { @@ -355,7 +429,7 @@ export interface FacetResult { // @public export interface FieldMapping { - mappingFunction?: FieldMappingFunction; + mappingFunction?: FieldMappingFunction | null; sourceFieldName: string; targetFieldName?: string; } @@ -369,13 +443,10 @@ export interface FieldMappingFunction { } // @public -export interface FreshnessScoringFunction { - boost: number; - fieldName: string; - interpolation?: ScoringFunctionInterpolation; - parameters: FreshnessScoringParameters; +export type FreshnessScoringFunction = BaseScoringFunction & { type: "freshness"; -} + parameters: FreshnessScoringParameters; +}; // @public export interface FreshnessScoringParameters { @@ -420,32 +491,27 @@ export type GetSkillSetOptions = OperationOptions; export type GetSynonymMapsOptions = OperationOptions; // @public -export interface HighWaterMarkChangeDetectionPolicy { - highWaterMarkColumnName: string; +export type HighWaterMarkChangeDetectionPolicy = BaseDataChangeDetectionPolicy & { odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy"; -} + highWaterMarkColumnName: string; +}; // @public -export interface ImageAnalysisSkill { - context?: string; - defaultLanguageCode?: 
ImageAnalysisSkillLanguage; - description?: string; - details?: ImageDetail[]; - inputs: InputFieldMappingEntry[]; - name?: string; +export type ImageAnalysisSkill = BaseSearchIndexerSkill & { odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill"; - outputs: OutputFieldMappingEntry[]; + defaultLanguageCode?: ImageAnalysisSkillLanguage; visualFeatures?: VisualFeature[]; -} + details?: ImageDetail[]; +}; // @public -export type ImageAnalysisSkillLanguage = 'en' | 'es' | 'ja' | 'pt' | 'zh'; +export type ImageAnalysisSkillLanguage = string; // @public -export type ImageDetail = 'celebrities' | 'landmarks'; +export type ImageDetail = string; // @public -export type IndexActionType = 'upload' | 'merge' | 'mergeOrUpload' | 'delete'; +export type IndexActionType = "upload" | "merge" | "mergeOrUpload" | "delete"; // @public export type IndexDocumentsAction = { @@ -474,11 +540,11 @@ export interface IndexDocumentsResult { } // @public -export type IndexerExecutionEnvironment = 'standard' | 'private'; +export type IndexerExecutionEnvironment = string; // @public export interface IndexerExecutionResult { - readonly endTime?: Date; + readonly endTime?: Date | null; readonly errorMessage?: string; readonly errors: SearchIndexerError[]; readonly failedItemCount: number; @@ -491,18 +557,17 @@ export interface IndexerExecutionResult { } // @public -export type IndexerExecutionStatus = 'transientFailure' | 'success' | 'inProgress' | 'reset'; +export type IndexerExecutionStatus = "transientFailure" | "success" | "inProgress" | "reset"; // @public -export type IndexerStatus = 'unknown' | 'error' | 'running'; +export type IndexerStatus = "unknown" | "error" | "running"; // @public export interface IndexingParameters { - batchSize?: number; - // (undocumented) + batchSize?: number | null; configuration?: IndexingParametersConfiguration; - maxFailedItems?: number; - maxFailedItemsPerBatch?: number; + maxFailedItems?: number | null; + maxFailedItemsPerBatch?: number | null; } // @public @@ -555,35 +620,28 @@ export interface InputFieldMappingEntry { } // @public -export interface KeepTokenFilter { +export type KeepTokenFilter = BaseTokenFilter & { + odatatype: "#Microsoft.Azure.Search.KeepTokenFilter"; keepWords: string[]; lowerCaseKeepWords?: boolean; - name: string; - odatatype: "#Microsoft.Azure.Search.KeepTokenFilter"; -} +}; // @public -export interface KeyPhraseExtractionSkill { - context?: string; - defaultLanguageCode?: KeyPhraseExtractionSkillLanguage; - description?: string; - inputs: InputFieldMappingEntry[]; - maxKeyPhraseCount?: number; - name?: string; +export type KeyPhraseExtractionSkill = BaseSearchIndexerSkill & { odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill"; - outputs: OutputFieldMappingEntry[]; -} + defaultLanguageCode?: KeyPhraseExtractionSkillLanguage; + maxKeyPhraseCount?: number | null; +}; // @public -export type KeyPhraseExtractionSkillLanguage = 'da' | 'nl' | 'en' | 'fi' | 'fr' | 'de' | 'it' | 'ja' | 'ko' | 'no' | 'pl' | 'pt-PT' | 'pt-BR' | 'ru' | 'es' | 'sv'; +export type KeyPhraseExtractionSkillLanguage = string; // @public -export interface KeywordMarkerTokenFilter { - ignoreCase?: boolean; - keywords: string[]; - name: string; +export type KeywordMarkerTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter"; -} + keywords: string[]; + ignoreCase?: boolean; +}; // @public export interface KeywordTokenizer { @@ -689,11 +747,380 @@ export enum KnownAnalyzerNames { ZhHantMicrosoft = "zh-Hant.microsoft" } +// @public +export 
const enum KnownBlobIndexerDataToExtract { + AllMetadata = "allMetadata", + ContentAndMetadata = "contentAndMetadata", + StorageMetadata = "storageMetadata" +} + +// @public +export const enum KnownBlobIndexerImageAction { + GenerateNormalizedImagePerPage = "generateNormalizedImagePerPage", + GenerateNormalizedImages = "generateNormalizedImages", + None = "none" +} + +// @public +export const enum KnownBlobIndexerParsingMode { + Default = "default", + DelimitedText = "delimitedText", + Json = "json", + JsonArray = "jsonArray", + JsonLines = "jsonLines", + Text = "text" +} + +// @public +export const enum KnownBlobIndexerPDFTextRotationAlgorithm { + DetectAngles = "detectAngles", + None = "none" +} + // @public export enum KnownCharFilterNames { HtmlStrip = "html_strip" } +// @public +export const enum KnownCustomEntityLookupSkillLanguage { + Da = "da", + De = "de", + En = "en", + Es = "es", + Fi = "fi", + Fr = "fr", + It = "it", + Ko = "ko", + Pt = "pt" +} + +// @public +export const enum KnownEntityCategory { + Datetime = "datetime", + Email = "email", + Location = "location", + Organization = "organization", + Person = "person", + Quantity = "quantity", + Url = "url" +} + +// @public +export const enum KnownEntityRecognitionSkillLanguage { + Ar = "ar", + Cs = "cs", + Da = "da", + De = "de", + El = "el", + En = "en", + Es = "es", + Fi = "fi", + Fr = "fr", + Hu = "hu", + It = "it", + Ja = "ja", + Ko = "ko", + Nl = "nl", + No = "no", + Pl = "pl", + PtBR = "pt-BR", + PtPT = "pt-PT", + Ru = "ru", + Sv = "sv", + Tr = "tr", + ZhHans = "zh-Hans", + ZhHant = "zh-Hant" +} + +// @public +export const enum KnownImageAnalysisSkillLanguage { + En = "en", + Es = "es", + Ja = "ja", + Pt = "pt", + Zh = "zh" +} + +// @public +export const enum KnownImageDetail { + Celebrities = "celebrities", + Landmarks = "landmarks" +} + +// @public +export const enum KnownKeyPhraseExtractionSkillLanguage { + Da = "da", + De = "de", + En = "en", + Es = "es", + Fi = "fi", + Fr = "fr", + It = "it", + Ja = "ja", + Ko = "ko", + Nl = "nl", + No = "no", + Pl = "pl", + PtBR = "pt-BR", + PtPT = "pt-PT", + Ru = "ru", + Sv = "sv" +} + +// @public +export const enum KnownLexicalAnalyzerName { + ArLucene = "ar.lucene", + ArMicrosoft = "ar.microsoft", + BgLucene = "bg.lucene", + BgMicrosoft = "bg.microsoft", + BnMicrosoft = "bn.microsoft", + CaLucene = "ca.lucene", + CaMicrosoft = "ca.microsoft", + CsLucene = "cs.lucene", + CsMicrosoft = "cs.microsoft", + DaLucene = "da.lucene", + DaMicrosoft = "da.microsoft", + DeLucene = "de.lucene", + DeMicrosoft = "de.microsoft", + ElLucene = "el.lucene", + ElMicrosoft = "el.microsoft", + EnLucene = "en.lucene", + EnMicrosoft = "en.microsoft", + EsLucene = "es.lucene", + EsMicrosoft = "es.microsoft", + EtMicrosoft = "et.microsoft", + EuLucene = "eu.lucene", + FaLucene = "fa.lucene", + FiLucene = "fi.lucene", + FiMicrosoft = "fi.microsoft", + FrLucene = "fr.lucene", + FrMicrosoft = "fr.microsoft", + GaLucene = "ga.lucene", + GlLucene = "gl.lucene", + GuMicrosoft = "gu.microsoft", + HeMicrosoft = "he.microsoft", + HiLucene = "hi.lucene", + HiMicrosoft = "hi.microsoft", + HrMicrosoft = "hr.microsoft", + HuLucene = "hu.lucene", + HuMicrosoft = "hu.microsoft", + HyLucene = "hy.lucene", + IdLucene = "id.lucene", + IdMicrosoft = "id.microsoft", + IsMicrosoft = "is.microsoft", + ItLucene = "it.lucene", + ItMicrosoft = "it.microsoft", + JaLucene = "ja.lucene", + JaMicrosoft = "ja.microsoft", + Keyword = "keyword", + KnMicrosoft = "kn.microsoft", + KoLucene = "ko.lucene", + KoMicrosoft = "ko.microsoft", + 
LtMicrosoft = "lt.microsoft", + LvLucene = "lv.lucene", + LvMicrosoft = "lv.microsoft", + MlMicrosoft = "ml.microsoft", + MrMicrosoft = "mr.microsoft", + MsMicrosoft = "ms.microsoft", + NbMicrosoft = "nb.microsoft", + NlLucene = "nl.lucene", + NlMicrosoft = "nl.microsoft", + NoLucene = "no.lucene", + PaMicrosoft = "pa.microsoft", + Pattern = "pattern", + PlLucene = "pl.lucene", + PlMicrosoft = "pl.microsoft", + PtBrLucene = "pt-BR.lucene", + PtBrMicrosoft = "pt-BR.microsoft", + PtPtLucene = "pt-PT.lucene", + PtPtMicrosoft = "pt-PT.microsoft", + RoLucene = "ro.lucene", + RoMicrosoft = "ro.microsoft", + RuLucene = "ru.lucene", + RuMicrosoft = "ru.microsoft", + Simple = "simple", + SkMicrosoft = "sk.microsoft", + SlMicrosoft = "sl.microsoft", + SrCyrillicMicrosoft = "sr-cyrillic.microsoft", + SrLatinMicrosoft = "sr-latin.microsoft", + StandardAsciiFoldingLucene = "standardasciifolding.lucene", + StandardLucene = "standard.lucene", + Stop = "stop", + SvLucene = "sv.lucene", + SvMicrosoft = "sv.microsoft", + TaMicrosoft = "ta.microsoft", + TeMicrosoft = "te.microsoft", + ThLucene = "th.lucene", + ThMicrosoft = "th.microsoft", + TrLucene = "tr.lucene", + TrMicrosoft = "tr.microsoft", + UkMicrosoft = "uk.microsoft", + UrMicrosoft = "ur.microsoft", + ViMicrosoft = "vi.microsoft", + Whitespace = "whitespace", + ZhHansLucene = "zh-Hans.lucene", + ZhHansMicrosoft = "zh-Hans.microsoft", + ZhHantLucene = "zh-Hant.lucene", + ZhHantMicrosoft = "zh-Hant.microsoft" +} + +// @public +export const enum KnownOcrSkillLanguage { + Ar = "ar", + Cs = "cs", + Da = "da", + De = "de", + El = "el", + En = "en", + Es = "es", + Fi = "fi", + Fr = "fr", + Hu = "hu", + It = "it", + Ja = "ja", + Ko = "ko", + Nb = "nb", + Nl = "nl", + Pl = "pl", + Pt = "pt", + Ro = "ro", + Ru = "ru", + Sk = "sk", + SrCyrl = "sr-Cyrl", + SrLatn = "sr-Latn", + Sv = "sv", + Tr = "tr", + ZhHans = "zh-Hans", + ZhHant = "zh-Hant" +} + +// @public +export const enum KnownRegexFlags { + CanonEq = "CANON_EQ", + CaseInsensitive = "CASE_INSENSITIVE", + Comments = "COMMENTS", + DotAll = "DOTALL", + Literal = "LITERAL", + Multiline = "MULTILINE", + UnicodeCase = "UNICODE_CASE", + UnixLines = "UNIX_LINES" +} + +// @public +export const enum KnownSearchIndexerDataSourceType { + AzureBlob = "azureblob", + AzureSql = "azuresql", + AzureTable = "azuretable", + CosmosDb = "cosmosdb", + MySql = "mysql" +} + +// @public +export const enum KnownSentimentSkillLanguage { + Da = "da", + De = "de", + El = "el", + En = "en", + Es = "es", + Fi = "fi", + Fr = "fr", + It = "it", + Nl = "nl", + No = "no", + Pl = "pl", + PtPT = "pt-PT", + Ru = "ru", + Sv = "sv", + Tr = "tr" +} + +// @public +export const enum KnownSplitSkillLanguage { + Da = "da", + De = "de", + En = "en", + Es = "es", + Fi = "fi", + Fr = "fr", + It = "it", + Ko = "ko", + Pt = "pt" +} + +// @public +export const enum KnownTextSplitMode { + Pages = "pages", + Sentences = "sentences" +} + +// @public +export const enum KnownTextTranslationSkillLanguage { + Af = "af", + Ar = "ar", + Bg = "bg", + Bn = "bn", + Bs = "bs", + Ca = "ca", + Cs = "cs", + Cy = "cy", + Da = "da", + De = "de", + El = "el", + En = "en", + Es = "es", + Et = "et", + Fa = "fa", + Fi = "fi", + Fil = "fil", + Fj = "fj", + Fr = "fr", + He = "he", + Hi = "hi", + Hr = "hr", + Ht = "ht", + Hu = "hu", + Id = "id", + Is = "is", + It = "it", + Ja = "ja", + Ko = "ko", + Lt = "lt", + Lv = "lv", + Mg = "mg", + Ms = "ms", + Mt = "mt", + Mww = "mww", + Nb = "nb", + Nl = "nl", + Otq = "otq", + Pl = "pl", + Pt = "pt", + Ro = "ro", + Ru = "ru", + Sk = 
"sk", + Sl = "sl", + Sm = "sm", + SrCyrl = "sr-Cyrl", + SrLatn = "sr-Latn", + Sv = "sv", + Sw = "sw", + Ta = "ta", + Te = "te", + Th = "th", + Tlh = "tlh", + To = "to", + Tr = "tr", + Ty = "ty", + Uk = "uk", + Ur = "ur", + Vi = "vi", + Yua = "yua", + Yue = "yue", + ZhHans = "zh-Hans", + ZhHant = "zh-Hant" +} + // @public export enum KnownTokenFilterNames { Apostrophe = "apostrophe", @@ -750,39 +1177,43 @@ export enum KnownTokenizerNames { } // @public -export interface LanguageDetectionSkill { - context?: string; - description?: string; - inputs: InputFieldMappingEntry[]; - name?: string; - odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill"; - outputs: OutputFieldMappingEntry[]; +export const enum KnownVisualFeature { + Adult = "adult", + Brands = "brands", + Categories = "categories", + Description = "description", + Faces = "faces", + Objects = "objects", + Tags = "tags" } // @public -export interface LengthTokenFilter { - maxLength?: number; - minLength?: number; - name: string; +export type LanguageDetectionSkill = BaseSearchIndexerSkill & { + odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill"; +}; + +// @public +export type LengthTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.LengthTokenFilter"; -} + minLength?: number; + maxLength?: number; +}; // @public export type LexicalAnalyzer = CustomAnalyzer | PatternAnalyzer | LuceneStandardAnalyzer | StopAnalyzer; // @public -export type LexicalAnalyzerName = 'ar.microsoft' | 'ar.lucene' | 'hy.lucene' | 'bn.microsoft' | 'eu.lucene' | 'bg.microsoft' | 'bg.lucene' | 'ca.microsoft' | 'ca.lucene' | 'zh-Hans.microsoft' | 'zh-Hans.lucene' | 'zh-Hant.microsoft' | 'zh-Hant.lucene' | 'hr.microsoft' | 'cs.microsoft' | 'cs.lucene' | 'da.microsoft' | 'da.lucene' | 'nl.microsoft' | 'nl.lucene' | 'en.microsoft' | 'en.lucene' | 'et.microsoft' | 'fi.microsoft' | 'fi.lucene' | 'fr.microsoft' | 'fr.lucene' | 'gl.lucene' | 'de.microsoft' | 'de.lucene' | 'el.microsoft' | 'el.lucene' | 'gu.microsoft' | 'he.microsoft' | 'hi.microsoft' | 'hi.lucene' | 'hu.microsoft' | 'hu.lucene' | 'is.microsoft' | 'id.microsoft' | 'id.lucene' | 'ga.lucene' | 'it.microsoft' | 'it.lucene' | 'ja.microsoft' | 'ja.lucene' | 'kn.microsoft' | 'ko.microsoft' | 'ko.lucene' | 'lv.microsoft' | 'lv.lucene' | 'lt.microsoft' | 'ml.microsoft' | 'ms.microsoft' | 'mr.microsoft' | 'nb.microsoft' | 'no.lucene' | 'fa.lucene' | 'pl.microsoft' | 'pl.lucene' | 'pt-BR.microsoft' | 'pt-BR.lucene' | 'pt-PT.microsoft' | 'pt-PT.lucene' | 'pa.microsoft' | 'ro.microsoft' | 'ro.lucene' | 'ru.microsoft' | 'ru.lucene' | 'sr-cyrillic.microsoft' | 'sr-latin.microsoft' | 'sk.microsoft' | 'sl.microsoft' | 'es.microsoft' | 'es.lucene' | 'sv.microsoft' | 'sv.lucene' | 'ta.microsoft' | 'te.microsoft' | 'th.microsoft' | 'th.lucene' | 'tr.microsoft' | 'tr.lucene' | 'uk.microsoft' | 'ur.microsoft' | 'vi.microsoft' | 'standard.lucene' | 'standardasciifolding.lucene' | 'keyword' | 'pattern' | 'simple' | 'stop' | 'whitespace'; +export type LexicalAnalyzerName = string; // @public export type LexicalTokenizer = ClassicTokenizer | EdgeNGramTokenizer | KeywordTokenizer | MicrosoftLanguageTokenizer | MicrosoftLanguageStemmingTokenizer | NGramTokenizer | PathHierarchyTokenizer | PatternTokenizer | LuceneStandardTokenizer | UaxUrlEmailTokenizer; // @public -export interface LimitTokenFilter { - consumeAllTokens?: boolean; - maxTokenCount?: number; - name: string; +export type LimitTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.LimitTokenFilter"; -} + maxTokenCount?: 
number; + consumeAllTokens?: boolean; +}; // @public export type ListDataSourceConnectionsOptions = OperationOptions; @@ -805,12 +1236,11 @@ export type ListSkillsetsOptions = OperationOptions; export type ListSynonymMapsOptions = OperationOptions; // @public -export interface LuceneStandardAnalyzer { - maxTokenLength?: number; - name: string; +export type LuceneStandardAnalyzer = BaseLexicalAnalyzer & { odatatype: "#Microsoft.Azure.Search.StandardAnalyzer"; + maxTokenLength?: number; stopwords?: string[]; -} +}; // @public export interface LuceneStandardTokenizer { @@ -820,13 +1250,10 @@ export interface LuceneStandardTokenizer { } // @public -export interface MagnitudeScoringFunction { - boost: number; - fieldName: string; - interpolation?: ScoringFunctionInterpolation; - parameters: MagnitudeScoringParameters; +export type MagnitudeScoringFunction = BaseScoringFunction & { type: "magnitude"; -} + parameters: MagnitudeScoringParameters; +}; // @public export interface MagnitudeScoringParameters { @@ -836,11 +1263,10 @@ export interface MagnitudeScoringParameters { } // @public -export interface MappingCharFilter { - mappings: string[]; - name: string; +export type MappingCharFilter = BaseCharFilter & { odatatype: "#Microsoft.Azure.Search.MappingCharFilter"; -} + mappings: string[]; +}; // @public export type MergeDocumentsOptions = IndexDocumentsOptions; @@ -849,40 +1275,33 @@ export type MergeDocumentsOptions = IndexDocumentsOptions; export type MergeOrUploadDocumentsOptions = IndexDocumentsOptions; // @public -export interface MergeSkill { - context?: string; - description?: string; - inputs: InputFieldMappingEntry[]; - insertPostTag?: string; - insertPreTag?: string; - name?: string; +export type MergeSkill = BaseSearchIndexerSkill & { odatatype: "#Microsoft.Skills.Text.MergeSkill"; - outputs: OutputFieldMappingEntry[]; -} + insertPreTag?: string; + insertPostTag?: string; +}; // @public -export interface MicrosoftLanguageStemmingTokenizer { +export type MicrosoftLanguageStemmingTokenizer = BaseLexicalTokenizer & { + odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer"; + maxTokenLength?: number; isSearchTokenizer?: boolean; language?: MicrosoftStemmingTokenizerLanguage; - maxTokenLength?: number; - name: string; - odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer"; -} +}; // @public -export interface MicrosoftLanguageTokenizer { +export type MicrosoftLanguageTokenizer = BaseLexicalTokenizer & { + odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer"; + maxTokenLength?: number; isSearchTokenizer?: boolean; language?: MicrosoftTokenizerLanguage; - maxTokenLength?: number; - name: string; - odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer"; -} +}; // @public -export type MicrosoftStemmingTokenizerLanguage = 'arabic' | 'bangla' | 'bulgarian' | 'catalan' | 'croatian' | 'czech' | 'danish' | 'dutch' | 'english' | 'estonian' | 'finnish' | 'french' | 'german' | 'greek' | 'gujarati' | 'hebrew' | 'hindi' | 'hungarian' | 'icelandic' | 'indonesian' | 'italian' | 'kannada' | 'latvian' | 'lithuanian' | 'malay' | 'malayalam' | 'marathi' | 'norwegianBokmaal' | 'polish' | 'portuguese' | 'portugueseBrazilian' | 'punjabi' | 'romanian' | 'russian' | 'serbianCyrillic' | 'serbianLatin' | 'slovak' | 'slovenian' | 'spanish' | 'swedish' | 'tamil' | 'telugu' | 'turkish' | 'ukrainian' | 'urdu'; +export type MicrosoftStemmingTokenizerLanguage = "arabic" | "bangla" | "bulgarian" | "catalan" | "croatian" | "czech" | "danish" | "dutch" | "english" | 
"estonian" | "finnish" | "french" | "german" | "greek" | "gujarati" | "hebrew" | "hindi" | "hungarian" | "icelandic" | "indonesian" | "italian" | "kannada" | "latvian" | "lithuanian" | "malay" | "malayalam" | "marathi" | "norwegianBokmaal" | "polish" | "portuguese" | "portugueseBrazilian" | "punjabi" | "romanian" | "russian" | "serbianCyrillic" | "serbianLatin" | "slovak" | "slovenian" | "spanish" | "swedish" | "tamil" | "telugu" | "turkish" | "ukrainian" | "urdu"; // @public -export type MicrosoftTokenizerLanguage = 'bangla' | 'bulgarian' | 'catalan' | 'chineseSimplified' | 'chineseTraditional' | 'croatian' | 'czech' | 'danish' | 'dutch' | 'english' | 'french' | 'german' | 'greek' | 'gujarati' | 'hindi' | 'icelandic' | 'indonesian' | 'italian' | 'japanese' | 'kannada' | 'korean' | 'malay' | 'malayalam' | 'marathi' | 'norwegianBokmaal' | 'polish' | 'portuguese' | 'portugueseBrazilian' | 'punjabi' | 'romanian' | 'russian' | 'serbianCyrillic' | 'serbianLatin' | 'slovenian' | 'spanish' | 'swedish' | 'tamil' | 'telugu' | 'thai' | 'ukrainian' | 'urdu' | 'vietnamese'; +export type MicrosoftTokenizerLanguage = "bangla" | "bulgarian" | "catalan" | "chineseSimplified" | "chineseTraditional" | "croatian" | "czech" | "danish" | "dutch" | "english" | "french" | "german" | "greek" | "gujarati" | "hindi" | "icelandic" | "indonesian" | "italian" | "japanese" | "kannada" | "korean" | "malay" | "malayalam" | "marathi" | "norwegianBokmaal" | "polish" | "portuguese" | "portugueseBrazilian" | "punjabi" | "romanian" | "russian" | "serbianCyrillic" | "serbianLatin" | "slovenian" | "spanish" | "swedish" | "tamil" | "telugu" | "thai" | "ukrainian" | "urdu" | "vietnamese"; // @public export interface NGramTokenFilter { @@ -893,28 +1312,22 @@ export interface NGramTokenFilter { } // @public -export interface NGramTokenizer { - maxGram?: number; - minGram?: number; - name: string; +export type NGramTokenizer = BaseLexicalTokenizer & { odatatype: "#Microsoft.Azure.Search.NGramTokenizer"; + minGram?: number; + maxGram?: number; tokenChars?: TokenCharacterKind[]; -} +}; // @public -export interface OcrSkill { - context?: string; - defaultLanguageCode?: OcrSkillLanguage; - description?: string; - inputs: InputFieldMappingEntry[]; - name?: string; +export type OcrSkill = BaseSearchIndexerSkill & { odatatype: "#Microsoft.Skills.Vision.OcrSkill"; - outputs: OutputFieldMappingEntry[]; + defaultLanguageCode?: OcrSkillLanguage; shouldDetectOrientation?: boolean; -} +}; // @public -export type OcrSkillLanguage = 'zh-Hans' | 'zh-Hant' | 'cs' | 'da' | 'nl' | 'en' | 'fi' | 'fr' | 'de' | 'el' | 'hu' | 'it' | 'ja' | 'ko' | 'nb' | 'pl' | 'pt' | 'ru' | 'es' | 'sv' | 'tr' | 'ar' | 'ro' | 'sr-Cyrl' | 'sr-Latn' | 'sk'; +export type OcrSkillLanguage = string; // @public export function odata(strings: TemplateStringsArray, ...values: unknown[]): string; @@ -926,15 +1339,14 @@ export interface OutputFieldMappingEntry { } // @public -export interface PathHierarchyTokenizer { - delimiter?: string; - maxTokenLength?: number; - name: string; - numberOfTokensToSkip?: number; +export type PathHierarchyTokenizer = BaseLexicalTokenizer & { odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2"; + delimiter?: string; replacement?: string; + maxTokenLength?: number; reverseTokenOrder?: boolean; -} + numberOfTokensToSkip?: number; +}; // @public export interface PatternAnalyzer { @@ -947,28 +1359,25 @@ export interface PatternAnalyzer { } // @public -export interface PatternCaptureTokenFilter { - name: string; +export type 
PatternCaptureTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter"; patterns: string[]; preserveOriginal?: boolean; -} +}; // @public -export interface PatternReplaceCharFilter { - name: string; +export type PatternReplaceCharFilter = BaseCharFilter & { odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter"; pattern: string; replacement: string; -} +}; // @public -export interface PatternReplaceTokenFilter { - name: string; +export type PatternReplaceTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter"; pattern: string; replacement: string; -} +}; // @public export interface PatternTokenizer { @@ -980,28 +1389,27 @@ export interface PatternTokenizer { } // @public -export type PhoneticEncoder = 'metaphone' | 'doubleMetaphone' | 'soundex' | 'refinedSoundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerPhonetik' | 'haasePhonetik' | 'beiderMorse'; +export type PhoneticEncoder = "metaphone" | "doubleMetaphone" | "soundex" | "refinedSoundex" | "caverphone1" | "caverphone2" | "cologne" | "nysiis" | "koelnerPhonetik" | "haasePhonetik" | "beiderMorse"; // @public -export interface PhoneticTokenFilter { - encoder?: PhoneticEncoder; - name: string; +export type PhoneticTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter"; + encoder?: PhoneticEncoder; replaceOriginalTokens?: boolean; -} +}; // @public -export type QueryType = 'simple' | 'full'; +export type QueryType = "simple" | "full"; // @public -export type RegexFlags = 'CANON_EQ' | 'CASE_INSENSITIVE' | 'COMMENTS' | 'DOTALL' | 'LITERAL' | 'MULTILINE' | 'UNICODE_CASE' | 'UNIX_LINES'; +export type RegexFlags = string; // @public export type ResetIndexerOptions = OperationOptions; // @public export interface ResourceCounter { - quota?: number; + quota?: number | null; usage: number; } @@ -1012,10 +1420,10 @@ export type RunIndexerOptions = OperationOptions; export type ScoringFunction = DistanceScoringFunction | FreshnessScoringFunction | MagnitudeScoringFunction | TagScoringFunction; // @public -export type ScoringFunctionAggregation = 'sum' | 'average' | 'minimum' | 'maximum' | 'firstMatching'; +export type ScoringFunctionAggregation = "sum" | "average" | "minimum" | "maximum" | "firstMatching"; // @public -export type ScoringFunctionInterpolation = 'linear' | 'constant' | 'quadratic' | 'logarithmic'; +export type ScoringFunctionInterpolation = "linear" | "constant" | "quadratic" | "logarithmic"; // @public export interface ScoringProfile { @@ -1026,7 +1434,7 @@ export interface ScoringProfile { } // @public -export type ScoringStatistics = 'local' | 'global'; +export type ScoringStatistics = "local" | "global"; // @public export class SearchClient { @@ -1081,9 +1489,9 @@ export type SearchFieldDataType = "Edm.String" | "Edm.Int32" | "Edm.Int64" | "Ed export interface SearchIndex { analyzers?: LexicalAnalyzer[]; charFilters?: CharFilter[]; - corsOptions?: CorsOptions; + corsOptions?: CorsOptions | null; defaultScoringProfile?: string; - encryptionKey?: SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; etag?: string; fields: SearchField[]; name: string; @@ -1124,14 +1532,14 @@ export type SearchIndexClientOptions = PipelineOptions; export interface SearchIndexer { dataSourceName: string; description?: string; - encryptionKey?: SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; etag?: string; fieldMappings?: FieldMapping[]; - 
isDisabled?: boolean; + isDisabled?: boolean | null; name: string; outputFieldMappings?: FieldMapping[]; - parameters?: IndexingParameters; - schedule?: IndexingSchedule; + parameters?: IndexingParameters | null; + schedule?: IndexingSchedule | null; skillsetName?: string; targetIndexName: string; } @@ -1177,17 +1585,17 @@ export interface SearchIndexerDataContainer { export interface SearchIndexerDataSourceConnection { connectionString?: string; container: SearchIndexerDataContainer; - dataChangeDetectionPolicy?: DataChangeDetectionPolicy; - dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy; + dataChangeDetectionPolicy?: DataChangeDetectionPolicy | null; + dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy | null; description?: string; - encryptionKey?: SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; etag?: string; name: string; type: SearchIndexerDataSourceType; } // @public -export type SearchIndexerDataSourceType = 'azuresql' | 'cosmosdb' | 'azureblob' | 'azuretable' | 'mysql'; +export type SearchIndexerDataSourceType = string; // @public export interface SearchIndexerError { @@ -1199,7 +1607,7 @@ export interface SearchIndexerError { readonly statusCode: number; } -// @public +// @public (undocumented) export interface SearchIndexerLimits { readonly maxDocumentContentCharactersToExtract?: number; readonly maxDocumentExtractionSize?: number; @@ -1207,13 +1615,13 @@ export interface SearchIndexerLimits { } // @public -export type SearchIndexerSkill = ConditionalSkill | KeyPhraseExtractionSkill | OcrSkill | ImageAnalysisSkill | LanguageDetectionSkill | ShaperSkill | MergeSkill | EntityRecognitionSkill | SentimentSkill | SplitSkill | TextTranslationSkill | WebApiSkill; +export type SearchIndexerSkill = ConditionalSkill | KeyPhraseExtractionSkill | OcrSkill | ImageAnalysisSkill | LanguageDetectionSkill | ShaperSkill | MergeSkill | EntityRecognitionSkill | SentimentSkill | SplitSkill | TextTranslationSkill | WebApiSkill | CustomEntityLookupSkill; // @public export interface SearchIndexerSkillset { cognitiveServicesAccount?: CognitiveServicesAccount; description?: string; - encryptionKey?: SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; etag?: string; name: string; skills: SearchIndexerSkill[]; @@ -1295,7 +1703,7 @@ export interface SearchIndexStatistics { export type SearchIterator = PagedAsyncIterableIterator, SearchDocumentsPageResult, ListSearchResultsPageSettings>; // @public -export type SearchMode = 'any' | 'all'; +export type SearchMode = "any" | "all"; // @public export type SearchOptions = OperationOptions & SearchRequestOptions; @@ -1372,22 +1780,18 @@ export interface SearchServiceStatistics { // @public export interface SearchSuggester { name: string; + searchMode: "analyzingInfixMatching"; sourceFields: string[]; } // @public -export interface SentimentSkill { - context?: string; - defaultLanguageCode?: SentimentSkillLanguage; - description?: string; - inputs: InputFieldMappingEntry[]; - name?: string; +export type SentimentSkill = BaseSearchIndexerSkill & { odatatype: "#Microsoft.Skills.Text.SentimentSkill"; - outputs: OutputFieldMappingEntry[]; -} + defaultLanguageCode?: SentimentSkillLanguage; +}; // @public -export type SentimentSkillLanguage = 'da' | 'nl' | 'en' | 'fi' | 'fr' | 'de' | 'el' | 'it' | 'no' | 'pl' | 'pt-PT' | 'ru' | 'es' | 'sv' | 'tr'; +export type SentimentSkillLanguage = string; // @public export interface ServiceCounters { @@ -1401,32 +1805,31 @@ export interface 
ServiceCounters { // @public export interface ServiceLimits { - maxComplexCollectionFieldsPerIndex?: number; - maxComplexObjectsInCollectionsPerDocument?: number; - maxFieldNestingDepthPerIndex?: number; - maxFieldsPerIndex?: number; + maxComplexCollectionFieldsPerIndex?: number | null; + maxComplexObjectsInCollectionsPerDocument?: number | null; + maxFieldNestingDepthPerIndex?: number | null; + maxFieldsPerIndex?: number | null; } // @public -export interface ShaperSkill { - context?: string; - description?: string; - inputs: InputFieldMappingEntry[]; - name?: string; +export type ShaperSkill = BaseSearchIndexerSkill & { odatatype: "#Microsoft.Skills.Util.ShaperSkill"; - outputs: OutputFieldMappingEntry[]; -} +}; // @public -export interface ShingleTokenFilter { - filterToken?: string; +export type ShingleTokenFilter = BaseTokenFilter & { + odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter"; maxShingleSize?: number; minShingleSize?: number; - name: string; - odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter"; outputUnigrams?: boolean; outputUnigramsIfNoShingles?: boolean; tokenSeparator?: string; + filterToken?: string; +}; + +// @public +export interface Similarity { + odatatype: "#Microsoft.Azure.Search.ClassicSimilarity" | "#Microsoft.Azure.Search.BM25Similarity"; } // @public @@ -1449,79 +1852,69 @@ export interface SimpleField { } // @public -export interface SnowballTokenFilter { - language: SnowballTokenFilterLanguage; - name: string; +export type SnowballTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.SnowballTokenFilter"; -} + language: SnowballTokenFilterLanguage; +}; // @public -export type SnowballTokenFilterLanguage = 'armenian' | 'basque' | 'catalan' | 'danish' | 'dutch' | 'english' | 'finnish' | 'french' | 'german' | 'german2' | 'hungarian' | 'italian' | 'kp' | 'lovins' | 'norwegian' | 'porter' | 'portuguese' | 'romanian' | 'russian' | 'spanish' | 'swedish' | 'turkish'; +export type SnowballTokenFilterLanguage = "armenian" | "basque" | "catalan" | "danish" | "dutch" | "english" | "finnish" | "french" | "german" | "german2" | "hungarian" | "italian" | "kp" | "lovins" | "norwegian" | "porter" | "portuguese" | "romanian" | "russian" | "spanish" | "swedish" | "turkish"; // @public -export interface SoftDeleteColumnDeletionDetectionPolicy { +export type SoftDeleteColumnDeletionDetectionPolicy = BaseDataDeletionDetectionPolicy & { odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"; softDeleteColumnName?: string; softDeleteMarkerValue?: string; -} +}; // @public -export interface SplitSkill { - context?: string; - defaultLanguageCode?: SplitSkillLanguage; - description?: string; - inputs: InputFieldMappingEntry[]; - maxPageLength?: number; - name?: string; +export type SplitSkill = BaseSearchIndexerSkill & { odatatype: "#Microsoft.Skills.Text.SplitSkill"; - outputs: OutputFieldMappingEntry[]; + defaultLanguageCode?: SplitSkillLanguage; textSplitMode?: TextSplitMode; -} + maxPageLength?: number | null; +}; // @public -export type SplitSkillLanguage = 'da' | 'de' | 'en' | 'es' | 'fi' | 'fr' | 'it' | 'ko' | 'pt'; +export type SplitSkillLanguage = string; // @public -export interface SqlIntegratedChangeTrackingPolicy { +export type SqlIntegratedChangeTrackingPolicy = BaseDataChangeDetectionPolicy & { odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"; -} +}; // @public -export interface StemmerOverrideTokenFilter { - name: string; +export type StemmerOverrideTokenFilter = BaseTokenFilter & { odatatype: 
"#Microsoft.Azure.Search.StemmerOverrideTokenFilter"; rules: string[]; -} +}; // @public -export interface StemmerTokenFilter { - language: StemmerTokenFilterLanguage; - name: string; +export type StemmerTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.StemmerTokenFilter"; -} + language: StemmerTokenFilterLanguage; +}; // @public -export type StemmerTokenFilterLanguage = 'arabic' | 'armenian' | 'basque' | 'brazilian' | 'bulgarian' | 'catalan' | 'czech' | 'danish' | 'dutch' | 'dutchKp' | 'english' | 'lightEnglish' | 'minimalEnglish' | 'possessiveEnglish' | 'porter2' | 'lovins' | 'finnish' | 'lightFinnish' | 'french' | 'lightFrench' | 'minimalFrench' | 'galician' | 'minimalGalician' | 'german' | 'german2' | 'lightGerman' | 'minimalGerman' | 'greek' | 'hindi' | 'hungarian' | 'lightHungarian' | 'indonesian' | 'irish' | 'italian' | 'lightItalian' | 'sorani' | 'latvian' | 'norwegian' | 'lightNorwegian' | 'minimalNorwegian' | 'lightNynorsk' | 'minimalNynorsk' | 'portuguese' | 'lightPortuguese' | 'minimalPortuguese' | 'portugueseRslp' | 'romanian' | 'russian' | 'lightRussian' | 'spanish' | 'lightSpanish' | 'swedish' | 'lightSwedish' | 'turkish'; +export type StemmerTokenFilterLanguage = "arabic" | "armenian" | "basque" | "brazilian" | "bulgarian" | "catalan" | "czech" | "danish" | "dutch" | "dutchKp" | "english" | "lightEnglish" | "minimalEnglish" | "possessiveEnglish" | "porter2" | "lovins" | "finnish" | "lightFinnish" | "french" | "lightFrench" | "minimalFrench" | "galician" | "minimalGalician" | "german" | "german2" | "lightGerman" | "minimalGerman" | "greek" | "hindi" | "hungarian" | "lightHungarian" | "indonesian" | "irish" | "italian" | "lightItalian" | "sorani" | "latvian" | "norwegian" | "lightNorwegian" | "minimalNorwegian" | "lightNynorsk" | "minimalNynorsk" | "portuguese" | "lightPortuguese" | "minimalPortuguese" | "portugueseRslp" | "romanian" | "russian" | "lightRussian" | "spanish" | "lightSpanish" | "swedish" | "lightSwedish" | "turkish"; // @public -export interface StopAnalyzer { - name: string; +export type StopAnalyzer = BaseLexicalAnalyzer & { odatatype: "#Microsoft.Azure.Search.StopAnalyzer"; stopwords?: string[]; -} +}; // @public -export type StopwordsList = 'arabic' | 'armenian' | 'basque' | 'brazilian' | 'bulgarian' | 'catalan' | 'czech' | 'danish' | 'dutch' | 'english' | 'finnish' | 'french' | 'galician' | 'german' | 'greek' | 'hindi' | 'hungarian' | 'indonesian' | 'irish' | 'italian' | 'latvian' | 'norwegian' | 'persian' | 'portuguese' | 'romanian' | 'russian' | 'sorani' | 'spanish' | 'swedish' | 'thai' | 'turkish'; +export type StopwordsList = "arabic" | "armenian" | "basque" | "brazilian" | "bulgarian" | "catalan" | "czech" | "danish" | "dutch" | "english" | "finnish" | "french" | "galician" | "german" | "greek" | "hindi" | "hungarian" | "indonesian" | "irish" | "italian" | "latvian" | "norwegian" | "persian" | "portuguese" | "romanian" | "russian" | "sorani" | "spanish" | "swedish" | "thai" | "turkish"; // @public -export interface StopwordsTokenFilter { - ignoreCase?: boolean; - name: string; +export type StopwordsTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter"; - removeTrailingStopWords?: boolean; stopwords?: string[]; stopwordsList?: StopwordsList; -} + ignoreCase?: boolean; + removeTrailingStopWords?: boolean; +}; // @public export interface SuggestDocumentsResult { @@ -1553,29 +1946,25 @@ export type SuggestResult = { // @public export interface SynonymMap { - encryptionKey?: 
SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; etag?: string; name: string; synonyms: string[]; } // @public -export interface SynonymTokenFilter { - expand?: boolean; - ignoreCase?: boolean; - name: string; +export type SynonymTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter"; synonyms: string[]; -} + ignoreCase?: boolean; + expand?: boolean; +}; // @public -export interface TagScoringFunction { - boost: number; - fieldName: string; - interpolation?: ScoringFunctionInterpolation; - parameters: TagScoringParameters; +export type TagScoringFunction = BaseScoringFunction & { type: "tag"; -} + parameters: TagScoringParameters; +}; // @public export interface TagScoringParameters { @@ -1583,23 +1972,18 @@ export interface TagScoringParameters { } // @public -export type TextSplitMode = 'pages' | 'sentences'; +export type TextSplitMode = string; // @public -export interface TextTranslationSkill { - context?: string; - defaultFromLanguageCode?: TextTranslationSkillLanguage; - defaultToLanguageCode: TextTranslationSkillLanguage; - description?: string; - inputs: InputFieldMappingEntry[]; - name?: string; +export type TextTranslationSkill = BaseSearchIndexerSkill & { odatatype: "#Microsoft.Skills.Text.TranslationSkill"; - outputs: OutputFieldMappingEntry[]; - suggestedFrom?: TextTranslationSkillLanguage; -} + defaultToLanguageCode: TextTranslationSkillLanguage; + defaultFromLanguageCode?: TextTranslationSkillLanguage; + suggestedFrom?: TextTranslationSkillLanguage | null; +}; // @public -export type TextTranslationSkillLanguage = 'af' | 'ar' | 'bn' | 'bs' | 'bg' | 'yue' | 'ca' | 'zh-Hans' | 'zh-Hant' | 'hr' | 'cs' | 'da' | 'nl' | 'en' | 'et' | 'fj' | 'fil' | 'fi' | 'fr' | 'de' | 'el' | 'ht' | 'he' | 'hi' | 'mww' | 'hu' | 'is' | 'id' | 'it' | 'ja' | 'sw' | 'tlh' | 'ko' | 'lv' | 'lt' | 'mg' | 'ms' | 'mt' | 'nb' | 'fa' | 'pl' | 'pt' | 'otq' | 'ro' | 'ru' | 'sm' | 'sr-Cyrl' | 'sr-Latn' | 'sk' | 'sl' | 'es' | 'sv' | 'ty' | 'ta' | 'te' | 'th' | 'to' | 'tr' | 'uk' | 'ur' | 'vi' | 'cy' | 'yua'; +export type TextTranslationSkillLanguage = string; // @public export interface TextWeights { @@ -1609,71 +1993,62 @@ export interface TextWeights { } // @public -export type TokenCharacterKind = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol'; +export type TokenCharacterKind = "letter" | "digit" | "whitespace" | "punctuation" | "symbol"; // @public export type TokenFilter = AsciiFoldingTokenFilter | CjkBigramTokenFilter | CommonGramTokenFilter | DictionaryDecompounderTokenFilter | EdgeNGramTokenFilter | ElisionTokenFilter | KeepTokenFilter | KeywordMarkerTokenFilter | LengthTokenFilter | LimitTokenFilter | NGramTokenFilter | PatternCaptureTokenFilter | PatternReplaceTokenFilter | PhoneticTokenFilter | ShingleTokenFilter | SnowballTokenFilter | StemmerTokenFilter | StemmerOverrideTokenFilter | StopwordsTokenFilter | SynonymTokenFilter | TruncateTokenFilter | UniqueTokenFilter | WordDelimiterTokenFilter; // @public -export interface TruncateTokenFilter { - length?: number; - name: string; +export type TruncateTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.TruncateTokenFilter"; -} + length?: number; +}; // @public -export interface UaxUrlEmailTokenizer { - maxTokenLength?: number; - name: string; +export type UaxUrlEmailTokenizer = BaseLexicalTokenizer & { odatatype: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer"; -} + maxTokenLength?: number; +}; // @public -export interface UniqueTokenFilter { - name: 
string; +export type UniqueTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.UniqueTokenFilter"; onlyOnSamePosition?: boolean; -} +}; // @public export type UploadDocumentsOptions = IndexDocumentsOptions; // @public -export type VisualFeature = 'adult' | 'brands' | 'categories' | 'description' | 'faces' | 'objects' | 'tags'; +export type VisualFeature = string; // @public -export interface WebApiSkill { - batchSize?: number; - context?: string; - degreeOfParallelism?: number; - description?: string; +export type WebApiSkill = BaseSearchIndexerSkill & { + odatatype: "#Microsoft.Skills.Custom.WebApiSkill"; + uri: string; httpHeaders?: { [propertyName: string]: string; }; httpMethod?: string; - inputs: InputFieldMappingEntry[]; - name?: string; - odatatype: "#Microsoft.Skills.Custom.WebApiSkill"; - outputs: OutputFieldMappingEntry[]; timeout?: string; - uri: string; -} + batchSize?: number | null; + degreeOfParallelism?: number | null; +}; // @public -export interface WordDelimiterTokenFilter { - catenateAll?: boolean; - catenateNumbers?: boolean; - catenateWords?: boolean; - generateNumberParts?: boolean; - generateWordParts?: boolean; - name: string; +export type WordDelimiterTokenFilter = BaseTokenFilter & { odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter"; - preserveOriginal?: boolean; - protectedWords?: string[]; + generateWordParts?: boolean; + generateNumberParts?: boolean; + catenateWords?: boolean; + catenateNumbers?: boolean; + catenateAll?: boolean; splitOnCaseChange?: boolean; + preserveOriginal?: boolean; splitOnNumerics?: boolean; stemEnglishPossessive?: boolean; -} + protectedWords?: string[]; +}; // (No @packageDocumentation comment for this package) diff --git a/sdk/search/search-documents/samples/typescript/src/indexes/analyzeText.ts b/sdk/search/search-documents/samples/typescript/src/indexes/analyzeText.ts index 037e8722e965..9dfcc481fb4c 100644 --- a/sdk/search/search-documents/samples/typescript/src/indexes/analyzeText.ts +++ b/sdk/search/search-documents/samples/typescript/src/indexes/analyzeText.ts @@ -22,7 +22,7 @@ export async function main() { return; } const client = new SearchIndexClient(endpoint, new AzureKeyCredential(apiKey)); - const index:SearchIndex = await client.getIndex("example-index"); + const index: SearchIndex = await client.getIndex("example-index"); index.tokenizers?.push({ name: "example-tokenizer", @@ -45,7 +45,7 @@ export async function main() { index.analyzers?.push({ name: "example-analyzer", odatatype: "#Microsoft.Azure.Search.CustomAnalyzer", - tokenizerName: "example-tokenizer", + tokenizer: "example-tokenizer", charFilters: ["example-char-filter"], tokenFilters: [KnownTokenFilterNames.Lowercase, "example-token-filter"] }); @@ -60,7 +60,6 @@ export async function main() { }); console.log(result.tokens); - } main().catch((err) => { diff --git a/sdk/search/search-documents/samples/typescript/src/indexes/getIndex.ts b/sdk/search/search-documents/samples/typescript/src/indexes/getIndex.ts index 0d3aefba1c90..3fca09179e67 100644 --- a/sdk/search/search-documents/samples/typescript/src/indexes/getIndex.ts +++ b/sdk/search/search-documents/samples/typescript/src/indexes/getIndex.ts @@ -1,11 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
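For reference, a minimal sketch of how a custom analyzer is defined against the updated surface shown in the analyzeText sample above, assuming the sample's "example-index", analyzer, and tokenizer names and a placeholder endpoint/key; note that the analyzer now references its tokenizer through the `tokenizer` property rather than `tokenizerName`.

import { SearchIndexClient, AzureKeyCredential, SearchIndex } from "@azure/search-documents";

async function addCustomAnalyzer(): Promise<void> {
  const client = new SearchIndexClient("<endpoint>", new AzureKeyCredential("<api-key>"));
  const index: SearchIndex = await client.getIndex("example-index");

  // The custom analyzer now points at its tokenizer via `tokenizer`
  // (previously `tokenizerName`); the rest of the shape is unchanged.
  index.analyzers?.push({
    name: "example-analyzer",
    odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
    tokenizer: "example-tokenizer",
    tokenFilters: ["lowercase"]
  });

  await client.createOrUpdateIndex(index);
}
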
-import { - SearchIndexClient, - AzureKeyCredential, - SearchIndex -} from "@azure/search-documents"; +import { SearchIndexClient, AzureKeyCredential, SearchIndex } from "@azure/search-documents"; import * as dotenv from "dotenv"; dotenv.config(); @@ -20,7 +16,7 @@ export async function main() { } const client = new SearchIndexClient(endpoint, new AzureKeyCredential(apiKey)); console.log(`Get Index example-index`); - const index:SearchIndex = await client.getIndex("example-index"); + const index: SearchIndex = await client.getIndex("example-index"); console.log(`Name: ${index.name}`); console.log(`Similarity Algorithm: ${index.similarity?.odatatype}`); } diff --git a/sdk/search/search-documents/samples/typescript/src/utils/setup.ts b/sdk/search/search-documents/samples/typescript/src/utils/setup.ts index 07898f8702dd..b11da3d42934 100644 --- a/sdk/search/search-documents/samples/typescript/src/utils/setup.ts +++ b/sdk/search/search-documents/samples/typescript/src/utils/setup.ts @@ -189,7 +189,8 @@ export async function createIndex(client: SearchIndexClient, name: string): Prom suggesters: [ { name: "sg", - sourceFields: ["description", "hotelName"] + sourceFields: ["description", "hotelName"], + searchMode: "analyzingInfixMatching" } ], scoringProfiles: [ diff --git a/sdk/search/search-documents/src/generated/data/index.ts b/sdk/search/search-documents/src/generated/data/index.ts new file mode 100644 index 000000000000..5f9136071a08 --- /dev/null +++ b/sdk/search/search-documents/src/generated/data/index.ts @@ -0,0 +1,11 @@ +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. + */ + +export * from "./models"; +export { SearchClient } from "./searchClient"; +export { SearchClientContext } from "./searchClientContext"; diff --git a/sdk/search/search-documents/src/generated/data/models/index.ts b/sdk/search/search-documents/src/generated/data/models/index.ts index 579647d05e7f..6f98ea640204 100644 --- a/sdk/search/search-documents/src/generated/data/models/index.ts +++ b/sdk/search/search-documents/src/generated/data/models/index.ts @@ -1,988 +1,587 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ - import * as coreHttp from "@azure/core-http"; -/** - * A result containing a document found by a suggestion query, plus associated metadata. - */ -export interface SuggestResult { +/** Describes an error condition for the Azure Cognitive Search API. */ +export interface SearchError { /** - * The text of the suggestion result. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * One of a server-defined set of error codes. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - readonly _text: string; + readonly code?: string; /** - * Describes unknown properties. The value of an unknown property can be of "any" type. + * A human-readable representation of the error. + * NOTE: This property will not be serialized. It can only be populated by the server. 
*/ - [property: string]: any; + readonly message: string; + /** + * An array of details about specific errors that led to this reported error. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly details?: SearchError[]; } -/** - * Response containing suggestion query results from an index. - */ -export interface SuggestDocumentsResult { +/** Response containing search results from an index. */ +export interface SearchDocumentsResult { /** - * The sequence of results returned by the query. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * The total count of results found by the search operation, or null if the count was not requested. If present, the count may be greater than the number of results in this response. This can happen if you use the $top or $skip parameters, or if Azure Cognitive Search can't return all the requested documents in a single Search response. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - readonly results: SuggestResult[]; + readonly count?: number; /** - * A value indicating the percentage of the index that was included in the query, or null if - * minimumCoverage was not set in the request. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not specified in the request. + * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly coverage?: number; + /** + * The facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not include any facet expressions. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly facets?: { [propertyName: string]: FacetResult[] }; + /** + * Continuation JSON payload returned when Azure Cognitive Search can't return all the requested results in a single Search response. You can use this JSON along with @odata.nextLink to formulate another POST Search request to get the next part of the search response. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly nextPageParameters?: SearchRequest; + /** + * The sequence of results returned by the query. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly results: SearchResult[]; + /** + * Continuation URL returned when Azure Cognitive Search can't return all the requested results in a single Search response. You can use this URL to formulate another GET or POST Search request to get the next part of the search response. Make sure to use the same verb (GET or POST) as the request that produced this response. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly nextLink?: string; } -/** - * A single bucket of a facet query result. Reports the number of documents with a field value - * falling within a particular range or having a particular value or interval. - */ +/** A single bucket of a facet query result. Reports the number of documents with a field value falling within a particular range or having a particular value or interval. */ export interface FacetResult { + /** Describes unknown properties. The value of an unknown property can be of "any" type. 
*/ + [property: string]: any; /** * The approximate count of documents falling within the bucket described by this facet. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly count?: number; - /** - * Describes unknown properties. The value of an unknown property can be of "any" type. - */ - [property: string]: any; } -/** - * Parameters for filtering, sorting, faceting, paging, and other search query behaviors. - */ +/** Parameters for filtering, sorting, faceting, paging, and other search query behaviors. */ export interface SearchRequest { - /** - * A value that specifies whether to fetch the total count of results. Default is false. Setting - * this value to true may have a performance impact. Note that the count returned is an - * approximation. - */ + /** A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. */ includeTotalResultCount?: boolean; - /** - * The list of facet expressions to apply to the search query. Each facet expression contains a - * field name, optionally followed by a comma-separated list of name:value pairs. - */ + /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. */ facets?: string[]; - /** - * The OData $filter expression to apply to the search query. - */ + /** The OData $filter expression to apply to the search query. */ filter?: string; - /** - * The comma-separated list of field names to use for hit highlights. Only searchable fields can - * be used for hit highlighting. - */ + /** The comma-separated list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */ highlightFields?: string; - /** - * A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is - * </em>. - */ + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. */ highlightPostTag?: string; - /** - * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default - * is <em>. - */ + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. */ highlightPreTag?: string; - /** - * A number between 0 and 100 indicating the percentage of the index that must be covered by a - * search query in order for the query to be reported as a success. This parameter can be useful - * for ensuring search availability even for services with only one replica. The default is 100. - */ + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */ minimumCoverage?: number; - /** - * The comma-separated list of OData $orderby expressions by which to sort the results. Each - * expression can be either a field name or a call to either the geo.distance() or the - * search.score() functions. Each expression can be followed by asc to indicate ascending, or - * desc to indicate descending. The default is ascending order. Ties will be broken by the match - * scores of documents. 
If no $orderby is specified, the default sort order is descending by - * document match score. There can be at most 32 $orderby clauses. - */ + /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ orderBy?: string; - /** - * A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if - * your query uses the Lucene query syntax. Possible values include: 'Simple', 'Full' - */ + /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */ queryType?: QueryType; - /** - * A value that specifies whether we want to calculate scoring statistics (such as document - * frequency) globally for more consistent scoring, or locally, for lower latency. The default is - * 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global - * scoring statistics can increase latency of search queries. Possible values include: 'Local', - * 'Global' - */ + /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. */ scoringStatistics?: ScoringStatistics; - /** - * A value to be used to create a sticky session, which can help getting more consistent results. - * As long as the same sessionId is used, a best-effort attempt will be made to target the same - * replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the - * load balancing of the requests across replicas and adversely affect the performance of the - * search service. The value used as sessionId cannot start with a '_' character. - */ + /** A value to be used to create a sticky session, which can help getting more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */ sessionId?: string; - /** - * The list of parameter values to be used in scoring functions (for example, - * referencePointParameter) using the format name-values. For example, if the scoring profile - * defines a function with a parameter called 'mylocation' the parameter string would be - * "mylocation--122.2,44.8" (without the quotes). - */ + /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). 
*/ scoringParameters?: string[]; - /** - * The name of a scoring profile to evaluate match scores for matching documents in order to sort - * the results. - */ + /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ scoringProfile?: string; - /** - * A full-text search query expression; Use "*" or omit this parameter to match all documents. - */ + /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */ searchText?: string; - /** - * The comma-separated list of field names to which to scope the full-text search. When using - * fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each - * fielded search expression take precedence over any field names listed in this parameter. - */ + /** The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ searchFields?: string; - /** - * A value that specifies whether any or all of the search terms must be matched in order to - * count the document as a match. Possible values include: 'Any', 'All' - */ + /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. */ searchMode?: SearchMode; - /** - * The comma-separated list of fields to retrieve. If unspecified, all fields marked as - * retrievable in the schema are included. - */ + /** The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. */ select?: string; - /** - * The number of search results to skip. This value cannot be greater than 100,000. If you need - * to scan documents in sequence, but cannot use skip due to this limitation, consider using - * orderby on a totally-ordered key and filter with a range query instead. - */ + /** The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead. */ skip?: number; - /** - * The number of search results to retrieve. This can be used in conjunction with $skip to - * implement client-side paging of search results. If results are truncated due to server-side - * paging, the response will include a continuation token that can be used to issue another - * Search request for the next page of results. - */ + /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */ top?: number; } -/** - * Contains a document found by a search query, plus associated metadata. - */ +/** Contains a document found by a search query, plus associated metadata. */ export interface SearchResult { + /** Describes unknown properties. The value of an unknown property can be of "any" type. */ + [property: string]: any; /** * The relevance score of the document compared to other documents returned by the query. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * NOTE: This property will not be serialized. 
It can only be populated by the server. */ readonly _score: number; /** - * Text fragments from the document that indicate the matching search terms, organized by each - * applicable field; null if hit highlighting was not enabled for the query. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * Text fragments from the document that indicate the matching search terms, organized by each applicable field; null if hit highlighting was not enabled for the query. + * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly _highlights?: { [propertyName: string]: string[] }; - /** - * Describes unknown properties. The value of an unknown property can be of "any" type. - */ - [property: string]: any; } -/** - * Response containing search results from an index. - */ -export interface SearchDocumentsResult { +/** Response containing suggestion query results from an index. */ +export interface SuggestDocumentsResult { /** - * The total count of results found by the search operation, or null if the count was not - * requested. If present, the count may be greater than the number of results in this response. - * This can happen if you use the $top or $skip parameters, or if Azure Cognitive Search can't - * return all the requested documents in a single Search response. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * The sequence of results returned by the query. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - readonly count?: number; + readonly results: SuggestResult[]; /** - * A value indicating the percentage of the index that was included in the query, or null if - * minimumCoverage was not specified in the request. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not set in the request. + * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly coverage?: number; +} + +/** A result containing a document found by a suggestion query, plus associated metadata. */ +export interface SuggestResult { + /** Describes unknown properties. The value of an unknown property can be of "any" type. */ + [property: string]: any; /** - * The facet query results for the search operation, organized as a collection of buckets for - * each faceted field; null if the query did not include any facet expressions. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly facets?: { [propertyName: string]: FacetResult[] }; - /** - * Continuation JSON payload returned when Azure Cognitive Search can't return all the requested - * results in a single Search response. You can use this JSON along with @odata.nextLink to - * formulate another POST Search request to get the next part of the search response. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly nextPageParameters?: SearchRequest; - /** - * The sequence of results returned by the query. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly results: SearchResult[]; - /** - * Continuation URL returned when Azure Cognitive Search can't return all the requested results - * in a single Search response. 
You can use this URL to formulate another GET or POST Search - * request to get the next part of the search response. Make sure to use the same verb (GET or - * POST) as the request that produced this response. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * The text of the suggestion result. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - readonly nextLink?: string; + readonly _text: string; } -/** - * Represents an index action that operates on a document. - */ +/** Parameters for filtering, sorting, fuzzy matching, and other suggestions query behaviors. */ +export interface SuggestRequest { + /** An OData expression that filters the documents considered for suggestions. */ + filter?: string; + /** A value indicating whether to use fuzzy matching for the suggestion query. Default is false. When set to true, the query will find suggestions even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and consume more resources. */ + useFuzzyMatching?: boolean; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a suggestion query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ + minimumCoverage?: number; + /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string; + /** The search text to use to suggest documents. Must be at least 1 character, and no more than 100 characters. */ + searchText: string; + /** The comma-separated list of field names to search for the specified search text. Target fields must be included in the specified suggester. */ + searchFields?: string; + /** The comma-separated list of fields to retrieve. If unspecified, only the key field will be included in the results. */ + select?: string; + /** The name of the suggester as specified in the suggesters collection that's part of the index definition. */ + suggesterName: string; + /** The number of suggestions to retrieve. This must be a value between 1 and 100. The default is 5. */ + top?: number; +} + +/** Contains a batch of document write actions to send to the index. */ +export interface IndexBatch { + /** The actions in the batch. */ + actions: IndexAction[]; +} + +/** Represents an index action that operates on a document. */ export interface IndexAction { - /** - * The operation to perform on a document in an indexing batch. 
Possible values include: - * 'Upload', 'Merge', 'MergeOrUpload', 'Delete' - */ - __actionType: IndexActionType; - /** - * Describes unknown properties. The value of an unknown property can be of "any" type. - */ + /** Describes unknown properties. The value of an unknown property can be of "any" type. */ [property: string]: any; + /** The operation to perform on a document in an indexing batch. */ + __actionType: IndexActionType; } -/** - * Contains a batch of document write actions to send to the index. - */ -export interface IndexBatch { +/** Response containing the status of operations for all documents in the indexing request. */ +export interface IndexDocumentsResult { /** - * The actions in the batch. + * The list of status information for each document in the indexing request. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - actions: IndexAction[]; + readonly results: IndexingResult[]; } -/** - * Status of an indexing operation for a single document. - */ +/** Status of an indexing operation for a single document. */ export interface IndexingResult { /** * The key of a document that was in the indexing request. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly key: string; /** - * The error message explaining why the indexing operation failed for the document identified by - * the key; null if indexing succeeded. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * The error message explaining why the indexing operation failed for the document identified by the key; null if indexing succeeded. + * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly errorMessage?: string; /** - * A value indicating whether the indexing operation succeeded for the document identified by the - * key. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * A value indicating whether the indexing operation succeeded for the document identified by the key. + * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly succeeded: boolean; /** - * The status code of the indexing operation. Possible values include: 200 for a successful - * update or delete, 201 for successful document creation, 400 for a malformed input document, - * 404 for document not found, 409 for a version conflict, 422 when the index is temporarily - * unavailable, or 503 for when the service is too busy. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * The status code of the indexing operation. Possible values include: 200 for a successful update or delete, 201 for successful document creation, 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy. + * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly statusCode: number; } -/** - * Response containing the status of operations for all documents in the indexing request. - */ -export interface IndexDocumentsResult { - /** - * The list of status information for each document in the indexing request. - * **NOTE: This property will not be serialized. 
It can only be populated by the server.** - */ - readonly results: IndexingResult[]; -} - -/** - * Parameters for filtering, sorting, fuzzy matching, and other suggestions query behaviors. - */ -export interface SuggestRequest { - /** - * An OData expression that filters the documents considered for suggestions. - */ - filter?: string; - /** - * A value indicating whether to use fuzzy matching for the suggestion query. Default is false. - * When set to true, the query will find suggestions even if there's a substituted or missing - * character in the search text. While this provides a better experience in some scenarios, it - * comes at a performance cost as fuzzy suggestion searches are slower and consume more - * resources. - */ - useFuzzyMatching?: boolean; - /** - * A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, - * hit highlighting of suggestions is disabled. - */ - highlightPostTag?: string; - /** - * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If - * omitted, hit highlighting of suggestions is disabled. - */ - highlightPreTag?: string; - /** - * A number between 0 and 100 indicating the percentage of the index that must be covered by a - * suggestion query in order for the query to be reported as a success. This parameter can be - * useful for ensuring search availability even for services with only one replica. The default - * is 80. - */ - minimumCoverage?: number; - /** - * The comma-separated list of OData $orderby expressions by which to sort the results. Each - * expression can be either a field name or a call to either the geo.distance() or the - * search.score() functions. Each expression can be followed by asc to indicate ascending, or - * desc to indicate descending. The default is ascending order. Ties will be broken by the match - * scores of documents. If no $orderby is specified, the default sort order is descending by - * document match score. There can be at most 32 $orderby clauses. - */ - orderBy?: string; - /** - * The search text to use to suggest documents. Must be at least 1 character, and no more than - * 100 characters. - */ - searchText: string; +/** The result of Autocomplete query. */ +export interface AutocompleteResult { /** - * The comma-separated list of field names to search for the specified search text. Target fields - * must be included in the specified suggester. + * A value indicating the percentage of the index that was considered by the autocomplete request, or null if minimumCoverage was not specified in the request. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - searchFields?: string; + readonly coverage?: number; /** - * The comma-separated list of fields to retrieve. If unspecified, only the key field will be - * included in the results. + * The list of returned Autocompleted items. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - select?: string; + readonly results: AutocompleteItem[]; +} + +/** The result of Autocomplete requests. */ +export interface AutocompleteItem { /** - * The name of the suggester as specified in the suggesters collection that's part of the index - * definition. + * The completed term. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - suggesterName: string; + readonly text: string; /** - * The number of suggestions to retrieve. This must be a value between 1 and 100. The default is - * 5. 
+ * The query along with the completed term. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - top?: number; + readonly queryPlusText: string; } -/** - * Parameters for fuzzy matching, and other autocomplete query behaviors. - */ +/** Parameters for fuzzy matching, and other autocomplete query behaviors. */ export interface AutocompleteRequest { - /** - * The search text on which to base autocomplete results. - */ + /** The search text on which to base autocomplete results. */ searchText: string; - /** - * Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles - * and 'oneTermWithContext' to use the current context while producing auto-completed terms. - * Possible values include: 'OneTerm', 'TwoTerms', 'OneTermWithContext' - */ + /** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. */ autocompleteMode?: AutocompleteMode; - /** - * An OData expression that filters the documents used to produce completed terms for the - * Autocomplete result. - */ + /** An OData expression that filters the documents used to produce completed terms for the Autocomplete result. */ filter?: string; - /** - * A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. - * When set to true, the query will autocomplete terms even if there's a substituted or missing - * character in the search text. While this provides a better experience in some scenarios, it - * comes at a performance cost as fuzzy autocomplete queries are slower and consume more - * resources. - */ + /** A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will autocomplete terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and consume more resources. */ useFuzzyMatching?: boolean; - /** - * A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, - * hit highlighting is disabled. - */ + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. */ highlightPostTag?: string; - /** - * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If - * omitted, hit highlighting is disabled. - */ + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. */ highlightPreTag?: string; - /** - * A number between 0 and 100 indicating the percentage of the index that must be covered by an - * autocomplete query in order for the query to be reported as a success. This parameter can be - * useful for ensuring search availability even for services with only one replica. The default - * is 80. - */ + /** A number between 0 and 100 indicating the percentage of the index that must be covered by an autocomplete query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ minimumCoverage?: number; - /** - * The comma-separated list of field names to consider when querying for auto-completed terms. - * Target fields must be included in the specified suggester. 
- */ + /** The comma-separated list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. */ searchFields?: string; - /** - * The name of the suggester as specified in the suggesters collection that's part of the index - * definition. - */ + /** The name of the suggester as specified in the suggesters collection that's part of the index definition. */ suggesterName: string; - /** - * The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The - * default is 5. - */ + /** The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. */ top?: number; } -/** - * The result of Autocomplete requests. - */ -export interface AutocompleteItem { - /** - * The completed term. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly text: string; - /** - * The query along with the completed term. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly queryPlusText: string; -} - -/** - * The result of Autocomplete query. - */ -export interface AutocompleteResult { - /** - * A value indicating the percentage of the index that was considered by the autocomplete - * request, or null if minimumCoverage was not specified in the request. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly coverage?: number; - /** - * The list of returned Autocompleted items. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly results: AutocompleteItem[]; -} - -/** - * Describes an error condition for the Azure Cognitive Search API. - */ -export interface SearchError { - /** - * One of a server-defined set of error codes. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly code?: string; - /** - * A human-readable representation of the error. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly message: string; - /** - * An array of details about specific errors that led to this reported error. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly details?: SearchError[]; +/** Parameter group */ +export interface RequestOptions { + /** The tracking ID sent with the request to help with debugging. */ + xMsClientRequestId?: string; } -/** - * Additional parameters for searchGet operation. - */ +/** Parameter group */ export interface SearchOptions { - /** - * A value that specifies whether to fetch the total count of results. Default is false. Setting - * this value to true may have a performance impact. Note that the count returned is an - * approximation. - */ + /** A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. */ includeTotalResultCount?: boolean; - /** - * The list of facet expressions to apply to the search query. Each facet expression contains a - * field name, optionally followed by a comma-separated list of name:value pairs. - */ + /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. 
*/ facets?: string[]; - /** - * The OData $filter expression to apply to the search query. - */ + /** The OData $filter expression to apply to the search query. */ filter?: string; - /** - * The list of field names to use for hit highlights. Only searchable fields can be used for hit - * highlighting. - */ + /** The list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */ highlightFields?: string[]; - /** - * A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is - * </em>. - */ + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. */ highlightPostTag?: string; - /** - * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default - * is <em>. - */ + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. */ highlightPreTag?: string; - /** - * A number between 0 and 100 indicating the percentage of the index that must be covered by a - * search query in order for the query to be reported as a success. This parameter can be useful - * for ensuring search availability even for services with only one replica. The default is 100. - */ + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */ minimumCoverage?: number; - /** - * The list of OData $orderby expressions by which to sort the results. Each expression can be - * either a field name or a call to either the geo.distance() or the search.score() functions. - * Each expression can be followed by asc to indicate ascending, and desc to indicate descending. - * The default is ascending order. Ties will be broken by the match scores of documents. If no - * OrderBy is specified, the default sort order is descending by document match score. There can - * be at most 32 $orderby clauses. - */ + /** The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, and desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no OrderBy is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ orderBy?: string[]; - /** - * A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if - * your query uses the Lucene query syntax. Possible values include: 'Simple', 'Full' - */ + /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */ queryType?: QueryType; - /** - * The list of parameter values to be used in scoring functions (for example, - * referencePointParameter) using the format name-values. For example, if the scoring profile - * defines a function with a parameter called 'mylocation' the parameter string would be - * "mylocation--122.2,44.8" (without the quotes). - */ + /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. 
For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). */ scoringParameters?: string[]; - /** - * The name of a scoring profile to evaluate match scores for matching documents in order to sort - * the results. - */ + /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ scoringProfile?: string; - /** - * The list of field names to which to scope the full-text search. When using fielded search - * (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search - * expression take precedence over any field names listed in this parameter. - */ + /** The list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ searchFields?: string[]; - /** - * A value that specifies whether any or all of the search terms must be matched in order to - * count the document as a match. Possible values include: 'Any', 'All' - */ + /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. */ searchMode?: SearchMode; - /** - * A value that specifies whether we want to calculate scoring statistics (such as document - * frequency) globally for more consistent scoring, or locally, for lower latency. Possible - * values include: 'Local', 'Global' - */ + /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. */ scoringStatistics?: ScoringStatistics; - /** - * A value to be used to create a sticky session, which can help to get more consistent results. - * As long as the same sessionId is used, a best-effort attempt will be made to target the same - * replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the - * load balancing of the requests across replicas and adversely affect the performance of the - * search service. The value used as sessionId cannot start with a '_' character. - */ + /** A value to be used to create a sticky session, which can help to get more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */ sessionId?: string; - /** - * The list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema - * are included. - */ + /** The list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. */ select?: string[]; - /** - * The number of search results to skip. This value cannot be greater than 100,000. If you need - * to scan documents in sequence, but cannot use $skip due to this limitation, consider using - * $orderby on a totally-ordered key and $filter with a range query instead. - */ + /** The number of search results to skip. This value cannot be greater than 100,000. 
If you need to scan documents in sequence, but cannot use $skip due to this limitation, consider using $orderby on a totally-ordered key and $filter with a range query instead. */ skip?: number; - /** - * The number of search results to retrieve. This can be used in conjunction with $skip to - * implement client-side paging of search results. If results are truncated due to server-side - * paging, the response will include a continuation token that can be used to issue another - * Search request for the next page of results. - */ + /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */ top?: number; } -/** - * Additional parameters for suggestGet operation. - */ +/** Parameter group */ export interface SuggestOptions { - /** - * An OData expression that filters the documents considered for suggestions. - */ + /** An OData expression that filters the documents considered for suggestions. */ filter?: string; - /** - * A value indicating whether to use fuzzy matching for the suggestions query. Default is false. - * When set to true, the query will find terms even if there's a substituted or missing character - * in the search text. While this provides a better experience in some scenarios, it comes at a - * performance cost as fuzzy suggestions queries are slower and consume more resources. - */ + /** A value indicating whether to use fuzzy matching for the suggestions query. Default is false. When set to true, the query will find terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are slower and consume more resources. */ useFuzzyMatching?: boolean; - /** - * A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, - * hit highlighting of suggestions is disabled. - */ + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. */ highlightPostTag?: string; - /** - * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If - * omitted, hit highlighting of suggestions is disabled. - */ + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. */ highlightPreTag?: string; - /** - * A number between 0 and 100 indicating the percentage of the index that must be covered by a - * suggestions query in order for the query to be reported as a success. This parameter can be - * useful for ensuring search availability even for services with only one replica. The default - * is 80. - */ + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a suggestions query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ minimumCoverage?: number; - /** - * The list of OData $orderby expressions by which to sort the results. Each expression can be - * either a field name or a call to either the geo.distance() or the search.score() functions. 
- * Each expression can be followed by asc to indicate ascending, or desc to indicate descending. - * The default is ascending order. Ties will be broken by the match scores of documents. If no - * $orderby is specified, the default sort order is descending by document match score. There can - * be at most 32 $orderby clauses. - */ + /** The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ orderBy?: string[]; - /** - * The list of field names to search for the specified search text. Target fields must be - * included in the specified suggester. - */ + /** The list of field names to search for the specified search text. Target fields must be included in the specified suggester. */ searchFields?: string[]; - /** - * The list of fields to retrieve. If unspecified, only the key field will be included in the - * results. - */ + /** The list of fields to retrieve. If unspecified, only the key field will be included in the results. */ select?: string[]; - /** - * The number of suggestions to retrieve. The value must be a number between 1 and 100. The - * default is 5. - */ + /** The number of suggestions to retrieve. The value must be a number between 1 and 100. The default is 5. */ top?: number; } -/** - * Additional parameters for autocompleteGet operation. - */ +/** Parameter group */ export interface AutocompleteOptions { - /** - * Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles - * and 'oneTermWithContext' to use the current context while producing auto-completed terms. - * Possible values include: 'OneTerm', 'TwoTerms', 'OneTermWithContext' - */ + /** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. */ autocompleteMode?: AutocompleteMode; - /** - * An OData expression that filters the documents used to produce completed terms for the - * Autocomplete result. - */ + /** An OData expression that filters the documents used to produce completed terms for the Autocomplete result. */ filter?: string; - /** - * A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. - * When set to true, the query will find terms even if there's a substituted or missing character - * in the search text. While this provides a better experience in some scenarios, it comes at a - * performance cost as fuzzy autocomplete queries are slower and consume more resources. - */ + /** A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will find terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and consume more resources. */ useFuzzyMatching?: boolean; - /** - * A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, - * hit highlighting is disabled. 
- */ + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. */ highlightPostTag?: string; - /** - * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If - * omitted, hit highlighting is disabled. - */ + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. */ highlightPreTag?: string; - /** - * A number between 0 and 100 indicating the percentage of the index that must be covered by an - * autocomplete query in order for the query to be reported as a success. This parameter can be - * useful for ensuring search availability even for services with only one replica. The default - * is 80. - */ + /** A number between 0 and 100 indicating the percentage of the index that must be covered by an autocomplete query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ minimumCoverage?: number; - /** - * The list of field names to consider when querying for auto-completed terms. Target fields must - * be included in the specified suggester. - */ + /** The list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. */ searchFields?: string[]; - /** - * The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The - * default is 5. - */ + /** The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. */ top?: number; } -/** - * Optional Parameters. - */ -export interface DocumentsSearchGetOptionalParams extends coreHttp.RequestOptionsBase { - /** - * A full-text search query expression; Use "*" or omit this parameter to match all documents. - */ - searchText?: string; - /** - * Additional parameters for the operation - */ - searchOptions?: SearchOptions; -} - -/** - * Optional Parameters. - */ -export interface DocumentsGetOptionalParams extends coreHttp.RequestOptionsBase { - /** - * List of field names to retrieve for the document; Any field not retrieved will be missing from - * the returned document. - */ - selectedFields?: string[]; -} - -/** - * Optional Parameters. - */ -export interface DocumentsSuggestGetOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Additional parameters for the operation - */ - suggestOptions?: SuggestOptions; +/** Known values of {@link ApiVersion20200630} that the service accepts. */ +export const enum KnownApiVersion20200630 { + /** Api Version '2020-06-30' */ + TwoThousandTwenty0630 = "2020-06-30" } /** - * Optional Parameters. - */ -export interface DocumentsAutocompleteGetOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Additional parameters for the operation - */ - autocompleteOptions?: AutocompleteOptions; + * Defines values for ApiVersion20200630. \ + * {@link KnownApiVersion20200630} can be used interchangeably with ApiVersion20200630, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **2020-06-30**: Api Version '2020-06-30' + */ +export type ApiVersion20200630 = string; +/** Defines values for QueryType. */ +export type QueryType = "simple" | "full"; +/** Defines values for SearchMode. */ +export type SearchMode = "any" | "all"; +/** Defines values for ScoringStatistics. 
*/ +export type ScoringStatistics = "local" | "global"; +/** Defines values for IndexActionType. */ +export type IndexActionType = "upload" | "merge" | "mergeOrUpload" | "delete"; +/** Defines values for AutocompleteMode. */ +export type AutocompleteMode = "oneTerm" | "twoTerms" | "oneTermWithContext"; + +/** Optional parameters. */ +export interface DocumentsCountOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; } -/** - * Defines values for QueryType. - * Possible values include: 'Simple', 'Full' - * @readonly - * @enum {string} - */ -export type QueryType = 'simple' | 'full'; - -/** - * Defines values for ScoringStatistics. - * Possible values include: 'Local', 'Global' - * @readonly - * @enum {string} - */ -export type ScoringStatistics = 'local' | 'global'; - -/** - * Defines values for SearchMode. - * Possible values include: 'Any', 'All' - * @readonly - * @enum {string} - */ -export type SearchMode = 'any' | 'all'; - -/** - * Defines values for IndexActionType. - * Possible values include: 'Upload', 'Merge', 'MergeOrUpload', 'Delete' - * @readonly - * @enum {string} - */ -export type IndexActionType = 'upload' | 'merge' | 'mergeOrUpload' | 'delete'; - -/** - * Defines values for AutocompleteMode. - * Possible values include: 'OneTerm', 'TwoTerms', 'OneTermWithContext' - * @readonly - * @enum {string} - */ -export type AutocompleteMode = 'oneTerm' | 'twoTerms' | 'oneTermWithContext'; - -/** - * Contains response data for the count operation. - */ +/** Contains response data for the count operation. */ export type DocumentsCountResponse = { - /** - * The parsed response body. - */ + /** The parsed response body. */ body: number; - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: number; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: number; + }; }; -/** - * Contains response data for the searchGet operation. - */ +/** Optional parameters. */ +export interface DocumentsSearchGetOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Parameter group */ + searchOptions?: SearchOptions; + /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */ + searchText?: string; +} + +/** Contains response data for the searchGet operation. */ export type DocumentsSearchGetResponse = SearchDocumentsResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchDocumentsResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchDocumentsResult; + }; }; -/** - * Contains response data for the searchPost operation. - */ +/** Optional parameters. */ +export interface DocumentsSearchPostOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the searchPost operation. 
*/ export type DocumentsSearchPostResponse = SearchDocumentsResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchDocumentsResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchDocumentsResult; + }; }; -/** - * Contains response data for the get operation. - */ +/** Optional parameters. */ +export interface DocumentsGetOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** List of field names to retrieve for the document; Any field not retrieved will be missing from the returned document. */ + selectedFields?: string[]; +} + +/** Contains response data for the get operation. */ export type DocumentsGetResponse = { - /** - * The parsed response body. - */ + /** The parsed response body. */ body: any; - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: any; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: any; + }; }; -/** - * Contains response data for the suggestGet operation. - */ +/** Optional parameters. */ +export interface DocumentsSuggestGetOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Parameter group */ + suggestOptions?: SuggestOptions; +} + +/** Contains response data for the suggestGet operation. */ export type DocumentsSuggestGetResponse = SuggestDocumentsResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SuggestDocumentsResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SuggestDocumentsResult; + }; }; -/** - * Contains response data for the suggestPost operation. - */ +/** Optional parameters. */ +export interface DocumentsSuggestPostOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the suggestPost operation. */ export type DocumentsSuggestPostResponse = SuggestDocumentsResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SuggestDocumentsResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SuggestDocumentsResult; + }; }; -/** - * Contains response data for the index operation. - */ +/** Optional parameters. 
*/ +export interface DocumentsIndexOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the index operation. */ export type DocumentsIndexResponse = IndexDocumentsResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: IndexDocumentsResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: IndexDocumentsResult; + }; }; -/** - * Contains response data for the autocompleteGet operation. - */ +/** Optional parameters. */ +export interface DocumentsAutocompleteGetOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Parameter group */ + autocompleteOptions?: AutocompleteOptions; +} + +/** Contains response data for the autocompleteGet operation. */ export type DocumentsAutocompleteGetResponse = AutocompleteResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: AutocompleteResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: AutocompleteResult; + }; }; -/** - * Contains response data for the autocompletePost operation. - */ +/** Optional parameters. */ +export interface DocumentsAutocompletePostOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the autocompletePost operation. */ export type DocumentsAutocompletePostResponse = AutocompleteResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: AutocompleteResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: AutocompleteResult; + }; }; + +/** Optional parameters. */ +export interface SearchClientOptionalParams + extends coreHttp.ServiceClientOptions { + /** Overrides client endpoint. */ + endpoint?: string; +} diff --git a/sdk/search/search-documents/src/generated/data/models/mappers.ts b/sdk/search/search-documents/src/generated/data/models/mappers.ts index 544bcf8ddbb2..1c09a0595d80 100644 --- a/sdk/search/search-documents/src/generated/data/models/mappers.ts +++ b/sdk/search/search-documents/src/generated/data/models/mappers.ts @@ -1,6 +1,6 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -8,60 +8,101 @@ import * as coreHttp from "@azure/core-http"; - -export const SuggestResult: coreHttp.CompositeMapper = { - serializedName: "SuggestResult", +export const SearchError: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "SuggestResult", + className: "SearchError", modelProperties: { - _text: { + code: { + serializedName: "code", + readOnly: true, + type: { + name: "String" + } + }, + message: { + serializedName: "message", required: true, readOnly: true, - serializedName: "@search\\.text", type: { name: "String" } - } - }, - additionalProperties: { - type: { - name: "Object" + }, + details: { + serializedName: "details", + readOnly: true, + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "SearchError" + } + } + } } } } }; -export const SuggestDocumentsResult: coreHttp.CompositeMapper = { - serializedName: "SuggestDocumentsResult", +export const SearchDocumentsResult: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "SuggestDocumentsResult", + className: "SearchDocumentsResult", modelProperties: { + count: { + serializedName: "@odata\\.count", + readOnly: true, + type: { + name: "Number" + } + }, + coverage: { + serializedName: "@search\\.coverage", + readOnly: true, + type: { + name: "Number" + } + }, + facets: { + serializedName: "@search\\.facets", + readOnly: true, + type: { + name: "Dictionary", + value: { + type: { + name: "Sequence", + element: { type: { name: "Composite", className: "FacetResult" } } + } + } + } + }, + nextPageParameters: { + serializedName: "@search\\.nextPageParameters", + type: { + name: "Composite", + className: "SearchRequest" + } + }, results: { + serializedName: "value", required: true, readOnly: true, - serializedName: "value", type: { name: "Sequence", element: { type: { name: "Composite", - className: "SuggestResult", - additionalProperties: { - type: { - name: "Object" - } - } + className: "SearchResult" } } } }, - coverage: { + nextLink: { + serializedName: "@odata\\.nextLink", readOnly: true, - serializedName: "@search\\.coverage", type: { - name: "Number" + name: "String" } } } @@ -69,29 +110,23 @@ export const SuggestDocumentsResult: coreHttp.CompositeMapper = { }; export const FacetResult: coreHttp.CompositeMapper = { - serializedName: "FacetResult", type: { name: "Composite", className: "FacetResult", + additionalProperties: { type: { name: "Object" } }, modelProperties: { count: { - readOnly: true, serializedName: "count", + readOnly: true, type: { name: "Number" } } - }, - additionalProperties: { - type: { - name: "Object" - } } } }; export const SearchRequest: coreHttp.CompositeMapper = { - serializedName: "SearchRequest", type: { name: "Composite", className: "SearchRequest", @@ -153,20 +188,14 @@ export const SearchRequest: coreHttp.CompositeMapper = { serializedName: "queryType", type: { name: "Enum", - allowedValues: [ - "simple", - "full" - ] + allowedValues: ["simple", "full"] } }, scoringStatistics: { serializedName: "scoringStatistics", type: { name: "Enum", - allowedValues: [ - "local", - "global" - ] + allowedValues: ["local", "global"] } }, sessionId: { @@ -208,10 +237,7 @@ export const SearchRequest: coreHttp.CompositeMapper = { serializedName: "searchMode", type: { name: "Enum", - allowedValues: [ - "any", - "all" - ] + allowedValues: ["any", "all"] } }, select: { @@ -237,219 +263,55 @@ export const SearchRequest: coreHttp.CompositeMapper = { }; export const SearchResult: coreHttp.CompositeMapper = { - serializedName: "SearchResult", type: { 
name: "Composite", className: "SearchResult", + additionalProperties: { type: { name: "Object" } }, modelProperties: { _score: { + serializedName: "@search\\.score", required: true, - nullable: false, readOnly: true, - serializedName: "@search\\.score", type: { name: "Number" } }, _highlights: { - readOnly: true, serializedName: "@search\\.highlights", + readOnly: true, type: { name: "Dictionary", value: { - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } + type: { name: "Sequence", element: { type: { name: "String" } } } } } } - }, - additionalProperties: { - type: { - name: "Object" - } } } }; -export const SearchDocumentsResult: coreHttp.CompositeMapper = { - serializedName: "SearchDocumentsResult", +export const SuggestDocumentsResult: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "SearchDocumentsResult", + className: "SuggestDocumentsResult", modelProperties: { - count: { - readOnly: true, - serializedName: "@odata\\.count", - type: { - name: "Number" - } - }, - coverage: { - readOnly: true, - serializedName: "@search\\.coverage", - type: { - name: "Number" - } - }, - facets: { - readOnly: true, - serializedName: "@search\\.facets", - type: { - name: "Dictionary", - value: { - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "FacetResult", - additionalProperties: { - type: { - name: "Object" - } - } - } - } - } - } - } - }, - nextPageParameters: { - readOnly: true, - serializedName: "@search\\.nextPageParameters", - type: { - name: "Composite", - className: "SearchRequest" - } - }, results: { - required: true, - readOnly: true, serializedName: "value", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchResult", - additionalProperties: { - type: { - name: "Object" - } - } - } - } - } - }, - nextLink: { - readOnly: true, - serializedName: "@odata\\.nextLink", - type: { - name: "String" - } - } - } - } -}; - -export const IndexAction: coreHttp.CompositeMapper = { - serializedName: "IndexAction", - type: { - name: "Composite", - className: "IndexAction", - modelProperties: { - __actionType: { - required: true, - nullable: false, - serializedName: "@search\\.action", - type: { - name: "Enum", - allowedValues: [ - "upload", - "merge", - "mergeOrUpload", - "delete" - ] - } - } - }, - additionalProperties: { - type: { - name: "Object" - } - } - } -}; - -export const IndexBatch: coreHttp.CompositeMapper = { - serializedName: "IndexBatch", - type: { - name: "Composite", - className: "IndexBatch", - modelProperties: { - actions: { required: true, - serializedName: "value", + readOnly: true, type: { name: "Sequence", element: { type: { name: "Composite", - className: "IndexAction", - additionalProperties: { - type: { - name: "Object" - } - } + className: "SuggestResult" } } } - } - } - } -}; - -export const IndexingResult: coreHttp.CompositeMapper = { - serializedName: "IndexingResult", - type: { - name: "Composite", - className: "IndexingResult", - modelProperties: { - key: { - required: true, - readOnly: true, - serializedName: "key", - type: { - name: "String" - } - }, - errorMessage: { - readOnly: true, - serializedName: "errorMessage", - type: { - name: "String" - } - }, - succeeded: { - required: true, - nullable: false, - readOnly: true, - serializedName: "status", - type: { - name: "Boolean" - } }, - statusCode: { - required: true, - nullable: false, + coverage: { + serializedName: "@search\\.coverage", readOnly: true, - serializedName: "statusCode", type: 
{ name: "Number" } @@ -458,24 +320,18 @@ export const IndexingResult: coreHttp.CompositeMapper = { } }; -export const IndexDocumentsResult: coreHttp.CompositeMapper = { - serializedName: "IndexDocumentsResult", +export const SuggestResult: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "IndexDocumentsResult", + className: "SuggestResult", + additionalProperties: { type: { name: "Object" } }, modelProperties: { - results: { + _text: { + serializedName: "@search\\.text", required: true, readOnly: true, - serializedName: "value", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "IndexingResult" - } - } + name: "String" } } } @@ -483,7 +339,6 @@ export const IndexDocumentsResult: coreHttp.CompositeMapper = { }; export const SuggestRequest: coreHttp.CompositeMapper = { - serializedName: "SuggestRequest", type: { name: "Composite", className: "SuggestRequest", @@ -525,8 +380,8 @@ export const SuggestRequest: coreHttp.CompositeMapper = { } }, searchText: { - required: true, serializedName: "search", + required: true, type: { name: "String" } @@ -544,8 +399,8 @@ export const SuggestRequest: coreHttp.CompositeMapper = { } }, suggesterName: { - required: true, serializedName: "suggesterName", + required: true, type: { name: "String" } @@ -560,132 +415,61 @@ export const SuggestRequest: coreHttp.CompositeMapper = { } }; -export const AutocompleteRequest: coreHttp.CompositeMapper = { - serializedName: "AutocompleteRequest", +export const IndexBatch: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "AutocompleteRequest", + className: "IndexBatch", modelProperties: { - searchText: { + actions: { + serializedName: "value", required: true, - serializedName: "search", - type: { - name: "String" - } - }, - autocompleteMode: { - serializedName: "autocompleteMode", - type: { - name: "Enum", - allowedValues: [ - "oneTerm", - "twoTerms", - "oneTermWithContext" - ] - } - }, - filter: { - serializedName: "filter", type: { - name: "String" - } - }, - useFuzzyMatching: { - serializedName: "fuzzy", - type: { - name: "Boolean" - } - }, - highlightPostTag: { - serializedName: "highlightPostTag", - type: { - name: "String" - } - }, - highlightPreTag: { - serializedName: "highlightPreTag", - type: { - name: "String" - } - }, - minimumCoverage: { - serializedName: "minimumCoverage", - type: { - name: "Number" - } - }, - searchFields: { - serializedName: "searchFields", - type: { - name: "String" - } - }, - suggesterName: { - required: true, - serializedName: "suggesterName", - type: { - name: "String" - } - }, - top: { - serializedName: "top", - type: { - name: "Number" + name: "Sequence", + element: { + type: { + name: "Composite", + className: "IndexAction" + } + } } } } } }; -export const AutocompleteItem: coreHttp.CompositeMapper = { - serializedName: "AutocompleteItem", +export const IndexAction: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "AutocompleteItem", + className: "IndexAction", + additionalProperties: { type: { name: "Object" } }, modelProperties: { - text: { - required: true, - readOnly: true, - serializedName: "text", - type: { - name: "String" - } - }, - queryPlusText: { + __actionType: { + serializedName: "@search\\.action", required: true, - readOnly: true, - serializedName: "queryPlusText", type: { - name: "String" + name: "Enum", + allowedValues: ["upload", "merge", "mergeOrUpload", "delete"] } } } } }; -export const AutocompleteResult: coreHttp.CompositeMapper = { - serializedName: 
"AutocompleteResult", +export const IndexDocumentsResult: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "AutocompleteResult", + className: "IndexDocumentsResult", modelProperties: { - coverage: { - readOnly: true, - serializedName: "@search\\.coverage", - type: { - name: "Number" - } - }, results: { + serializedName: "value", required: true, readOnly: true, - serializedName: "value", type: { name: "Sequence", element: { type: { name: "Composite", - className: "AutocompleteItem" + className: "IndexingResult" } } } @@ -694,305 +478,165 @@ export const AutocompleteResult: coreHttp.CompositeMapper = { } }; -export const SearchError: coreHttp.CompositeMapper = { - serializedName: "SearchError", +export const IndexingResult: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "SearchError", + className: "IndexingResult", modelProperties: { - code: { + key: { + serializedName: "key", + required: true, readOnly: true, - serializedName: "code", type: { name: "String" } }, - message: { - required: true, + errorMessage: { + serializedName: "errorMessage", readOnly: true, - serializedName: "message", type: { name: "String" } }, - details: { + succeeded: { + serializedName: "status", + required: true, readOnly: true, - serializedName: "details", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchError" - } - } + name: "Boolean" + } + }, + statusCode: { + serializedName: "statusCode", + required: true, + readOnly: true, + type: { + name: "Number" } } } } }; -export const SearchOptions: coreHttp.CompositeMapper = { +export const AutocompleteResult: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "SearchOptions", + className: "AutocompleteResult", modelProperties: { - includeTotalResultCount: { - type: { - name: "Boolean" - } - }, - facets: { - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } - }, - filter: { - type: { - name: "String" - } - }, - highlightFields: { - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } - }, - highlightPostTag: { - type: { - name: "String" - } - }, - highlightPreTag: { - type: { - name: "String" - } - }, - minimumCoverage: { + coverage: { + serializedName: "@search\\.coverage", + readOnly: true, type: { name: "Number" } }, - orderBy: { - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } - }, - queryType: { - type: { - name: "Enum", - allowedValues: [ - "simple", - "full" - ] - } - }, - scoringParameters: { - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } - }, - scoringProfile: { - type: { - name: "String" - } - }, - searchFields: { - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } - }, - searchMode: { - type: { - name: "Enum", - allowedValues: [ - "any", - "all" - ] - } - }, - scoringStatistics: { - type: { - name: "Enum", - allowedValues: [ - "local", - "global" - ] - } - }, - sessionId: { - type: { - name: "String" - } - }, - select: { + results: { + serializedName: "value", + required: true, + readOnly: true, type: { name: "Sequence", element: { type: { - name: "String" + name: "Composite", + className: "AutocompleteItem" } } } - }, - skip: { - type: { - name: "Number" - } - }, - top: { - type: { - name: "Number" - } } } } }; -export const SuggestOptions: coreHttp.CompositeMapper = { +export const AutocompleteItem: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "SuggestOptions", + className: 
"AutocompleteItem", modelProperties: { - filter: { - type: { - name: "String" - } - }, - useFuzzyMatching: { - type: { - name: "Boolean" - } - }, - highlightPostTag: { + text: { + serializedName: "text", + required: true, + readOnly: true, type: { name: "String" } }, - highlightPreTag: { + queryPlusText: { + serializedName: "queryPlusText", + required: true, + readOnly: true, type: { name: "String" } - }, - minimumCoverage: { - type: { - name: "Number" - } - }, - orderBy: { - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } - }, - searchFields: { - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } - }, - select: { - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } - }, - top: { - type: { - name: "Number" - } } } } }; -export const AutocompleteOptions: coreHttp.CompositeMapper = { +export const AutocompleteRequest: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "AutocompleteOptions", + className: "AutocompleteRequest", modelProperties: { + searchText: { + serializedName: "search", + required: true, + type: { + name: "String" + } + }, autocompleteMode: { + serializedName: "autocompleteMode", type: { name: "Enum", - allowedValues: [ - "oneTerm", - "twoTerms", - "oneTermWithContext" - ] + allowedValues: ["oneTerm", "twoTerms", "oneTermWithContext"] } }, filter: { + serializedName: "filter", type: { name: "String" } }, useFuzzyMatching: { + serializedName: "fuzzy", type: { name: "Boolean" } }, highlightPostTag: { + serializedName: "highlightPostTag", type: { name: "String" } }, highlightPreTag: { + serializedName: "highlightPreTag", type: { name: "String" } }, minimumCoverage: { + serializedName: "minimumCoverage", type: { name: "Number" } }, searchFields: { + serializedName: "searchFields", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" + } + }, + suggesterName: { + serializedName: "suggesterName", + required: true, + type: { + name: "String" } }, top: { + serializedName: "top", type: { name: "Number" } diff --git a/sdk/search/search-documents/src/generated/data/models/parameters.ts b/sdk/search/search-documents/src/generated/data/models/parameters.ts index 14b424b1fec4..56e10b8f66de 100644 --- a/sdk/search/search-documents/src/generated/data/models/parameters.ts +++ b/sdk/search/search-documents/src/generated/data/models/parameters.ts @@ -1,121 +1,104 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ -import * as coreHttp from "@azure/core-http"; +import { + OperationParameter, + OperationURLParameter, + OperationQueryParameter, + QueryCollectionFormat +} from "@azure/core-http"; +import { + SearchRequest as SearchRequestMapper, + SuggestRequest as SuggestRequestMapper, + IndexBatch as IndexBatchMapper, + AutocompleteRequest as AutocompleteRequestMapper +} from "../models/mappers"; -export const apiVersion: coreHttp.OperationQueryParameter = { - parameterPath: "apiVersion", +export const accept: OperationParameter = { + parameterPath: "accept", mapper: { - required: true, - serializedName: "api-version", + defaultValue: "application/json", + isConstant: true, + serializedName: "Accept", type: { name: "String" } } }; -export const autocompleteMode: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "autocompleteOptions", - "autocompleteMode" - ], + +export const endpoint: OperationURLParameter = { + parameterPath: "endpoint", mapper: { - serializedName: "autocompleteMode", + serializedName: "endpoint", + required: true, type: { - name: "Enum", - allowedValues: [ - "oneTerm", - "twoTerms", - "oneTermWithContext" - ] + name: "String" } - } + }, + skipEncoding: true }; -export const endpoint: coreHttp.OperationURLParameter = { - parameterPath: "endpoint", + +export const indexName: OperationURLParameter = { + parameterPath: "indexName", mapper: { + serializedName: "indexName", required: true, - serializedName: "endpoint", - defaultValue: '', type: { name: "String" } - }, - skipEncoding: true + } }; -export const facets: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "facets" - ], + +export const xMsClientRequestId: OperationParameter = { + parameterPath: ["options", "requestOptionsParam", "xMsClientRequestId"], mapper: { - serializedName: "facet", + serializedName: "x-ms-client-request-id", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "Uuid" } - }, - collectionFormat: coreHttp.QueryCollectionFormat.Multi + } }; -export const filter0: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "filter" - ], + +export const apiVersion: OperationQueryParameter = { + parameterPath: "apiVersion", mapper: { - serializedName: "$filter", + serializedName: "api-version", + required: true, type: { name: "String" } } }; -export const filter1: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "suggestOptions", - "filter" - ], + +export const searchText: OperationQueryParameter = { + parameterPath: ["options", "searchText"], mapper: { - serializedName: "$filter", + serializedName: "search", type: { name: "String" } } }; -export const filter2: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "autocompleteOptions", - "filter" - ], + +export const includeTotalResultCount: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "includeTotalResultCount"], mapper: { - serializedName: "$filter", + serializedName: "$count", type: { - name: "String" + name: "Boolean" } } }; -export const highlightFields: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "highlightFields" - ], + +export const facets: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "facets"], mapper: { - serializedName: "highlight", + serializedName: "facet", type: { name: "Sequence", element: { @@ -125,40 +108,37 @@ export const highlightFields: coreHttp.OperationQueryParameter = { } } 
}, - collectionFormat: coreHttp.QueryCollectionFormat.Csv + collectionFormat: QueryCollectionFormat.Multi }; -export const highlightPostTag0: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "highlightPostTag" - ], + +export const filter: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "filter"], mapper: { - serializedName: "highlightPostTag", + serializedName: "$filter", type: { name: "String" } } }; -export const highlightPostTag1: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "suggestOptions", - "highlightPostTag" - ], + +export const highlightFields: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "highlightFields"], mapper: { - serializedName: "highlightPostTag", + serializedName: "highlight", type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "String" + } + } } - } + }, + collectionFormat: QueryCollectionFormat.Csv }; -export const highlightPostTag2: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "autocompleteOptions", - "highlightPostTag" - ], + +export const highlightPostTag: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "highlightPostTag"], mapper: { serializedName: "highlightPostTag", type: { @@ -166,12 +146,9 @@ export const highlightPostTag2: coreHttp.OperationQueryParameter = { } } }; -export const highlightPreTag0: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "highlightPreTag" - ], + +export const highlightPreTag: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "highlightPreTag"], mapper: { serializedName: "highlightPreTag", type: { @@ -179,132 +156,122 @@ export const highlightPreTag0: coreHttp.OperationQueryParameter = { } } }; -export const highlightPreTag1: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "suggestOptions", - "highlightPreTag" - ], + +export const minimumCoverage: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "minimumCoverage"], mapper: { - serializedName: "highlightPreTag", + serializedName: "minimumCoverage", type: { - name: "String" + name: "Number" } } }; -export const highlightPreTag2: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "autocompleteOptions", - "highlightPreTag" - ], + +export const orderBy: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "orderBy"], mapper: { - serializedName: "highlightPreTag", + serializedName: "$orderby", type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "String" + } + } } - } + }, + collectionFormat: QueryCollectionFormat.Csv }; -export const includeTotalResultCount: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "includeTotalResultCount" - ], + +export const queryType: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "queryType"], mapper: { - serializedName: "$count", + serializedName: "queryType", type: { - name: "Boolean" + name: "Enum", + allowedValues: ["simple", "full"] } } }; -export const indexName: coreHttp.OperationURLParameter = { - parameterPath: "indexName", + +export const scoringParameters: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "scoringParameters"], mapper: { - required: true, - serializedName: "indexName", - defaultValue: '', + serializedName: "scoringParameter", type: { - name: "String" + name: "Sequence", + 
element: { + type: { + name: "String" + } + } } - } + }, + collectionFormat: QueryCollectionFormat.Multi }; -export const key: coreHttp.OperationURLParameter = { - parameterPath: "key", + +export const scoringProfile: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "scoringProfile"], mapper: { - required: true, - serializedName: "key", + serializedName: "scoringProfile", type: { name: "String" } } }; -export const minimumCoverage0: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "minimumCoverage" - ], + +export const searchFields: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "searchFields"], mapper: { - serializedName: "minimumCoverage", + serializedName: "searchFields", type: { - name: "Number" + name: "Sequence", + element: { + type: { + name: "String" + } + } } - } + }, + collectionFormat: QueryCollectionFormat.Csv }; -export const minimumCoverage1: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "suggestOptions", - "minimumCoverage" - ], + +export const searchMode: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "searchMode"], mapper: { - serializedName: "minimumCoverage", + serializedName: "searchMode", type: { - name: "Number" + name: "Enum", + allowedValues: ["any", "all"] } } }; -export const minimumCoverage2: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "autocompleteOptions", - "minimumCoverage" - ], + +export const scoringStatistics: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "scoringStatistics"], mapper: { - serializedName: "minimumCoverage", + serializedName: "scoringStatistics", type: { - name: "Number" + name: "Enum", + allowedValues: ["local", "global"] } } }; -export const orderBy0: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "orderBy" - ], + +export const sessionId: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "sessionId"], mapper: { - serializedName: "$orderby", + serializedName: "sessionId", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } - }, - collectionFormat: coreHttp.QueryCollectionFormat.Csv + } }; -export const orderBy1: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "suggestOptions", - "orderBy" - ], + +export const select: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "select"], mapper: { - serializedName: "$orderby", + serializedName: "$select", type: { name: "Sequence", element: { @@ -314,82 +281,61 @@ export const orderBy1: coreHttp.OperationQueryParameter = { } } }, - collectionFormat: coreHttp.QueryCollectionFormat.Csv + collectionFormat: QueryCollectionFormat.Csv }; -export const queryType: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "queryType" - ], + +export const skip: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "skip"], mapper: { - serializedName: "queryType", + serializedName: "$skip", type: { - name: "Enum", - allowedValues: [ - "simple", - "full" - ] + name: "Number" } } }; -export const scoringParameters: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "scoringParameters" - ], + +export const top: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "top"], mapper: { - serializedName: "scoringParameter", + serializedName: "$top", type: { - 
name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "Number" } - }, - collectionFormat: coreHttp.QueryCollectionFormat.Multi + } }; -export const scoringProfile: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "scoringProfile" - ], + +export const contentType: OperationParameter = { + parameterPath: ["options", "contentType"], mapper: { - serializedName: "scoringProfile", + defaultValue: "application/json", + isConstant: true, + serializedName: "Content-Type", type: { name: "String" } } }; -export const scoringStatistics: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "scoringStatistics" - ], + +export const searchRequest: OperationParameter = { + parameterPath: "searchRequest", + mapper: SearchRequestMapper +}; + +export const key: OperationURLParameter = { + parameterPath: "key", mapper: { - serializedName: "scoringStatistics", + serializedName: "key", + required: true, type: { - name: "Enum", - allowedValues: [ - "local", - "global" - ] + name: "String" } } }; -export const searchFields0: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "searchFields" - ], + +export const selectedFields: OperationQueryParameter = { + parameterPath: ["options", "selectedFields"], mapper: { - serializedName: "searchFields", + serializedName: "$select", type: { name: "Sequence", element: { @@ -399,93 +345,85 @@ export const searchFields0: coreHttp.OperationQueryParameter = { } } }, - collectionFormat: coreHttp.QueryCollectionFormat.Csv + collectionFormat: QueryCollectionFormat.Csv }; -export const searchFields1: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "suggestOptions", - "searchFields" - ], + +export const searchText1: OperationQueryParameter = { + parameterPath: "searchText", mapper: { - serializedName: "searchFields", + serializedName: "search", + required: true, type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } - }, - collectionFormat: coreHttp.QueryCollectionFormat.Csv + } }; -export const searchFields2: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "autocompleteOptions", - "searchFields" - ], + +export const suggesterName: OperationQueryParameter = { + parameterPath: "suggesterName", mapper: { - serializedName: "searchFields", + serializedName: "suggesterName", + required: true, type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } - }, - collectionFormat: coreHttp.QueryCollectionFormat.Csv + } }; -export const searchMode: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "searchMode" - ], + +export const filter1: OperationQueryParameter = { + parameterPath: ["options", "suggestOptions", "filter"], mapper: { - serializedName: "searchMode", + serializedName: "$filter", type: { - name: "Enum", - allowedValues: [ - "any", - "all" - ] + name: "String" } } }; -export const searchText0: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchText" - ], + +export const useFuzzyMatching: OperationQueryParameter = { + parameterPath: ["options", "suggestOptions", "useFuzzyMatching"], mapper: { - serializedName: "search", + serializedName: "fuzzy", + type: { + name: "Boolean" + } + } +}; + +export const highlightPostTag1: OperationQueryParameter = { + parameterPath: ["options", "suggestOptions", "highlightPostTag"], + mapper: { + serializedName: "highlightPostTag", 
type: { name: "String" } } }; -export const searchText1: coreHttp.OperationQueryParameter = { - parameterPath: "searchText", + +export const highlightPreTag1: OperationQueryParameter = { + parameterPath: ["options", "suggestOptions", "highlightPreTag"], mapper: { - required: true, - serializedName: "search", + serializedName: "highlightPreTag", type: { name: "String" } } }; -export const select0: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "select" - ], + +export const minimumCoverage1: OperationQueryParameter = { + parameterPath: ["options", "suggestOptions", "minimumCoverage"], mapper: { - serializedName: "$select", + serializedName: "minimumCoverage", + type: { + name: "Number" + } + } +}; + +export const orderBy1: OperationQueryParameter = { + parameterPath: ["options", "suggestOptions", "orderBy"], + mapper: { + serializedName: "$orderby", type: { name: "Sequence", element: { @@ -495,16 +433,13 @@ export const select0: coreHttp.OperationQueryParameter = { } } }, - collectionFormat: coreHttp.QueryCollectionFormat.Csv + collectionFormat: QueryCollectionFormat.Csv }; -export const select1: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "suggestOptions", - "select" - ], + +export const searchFields1: OperationQueryParameter = { + parameterPath: ["options", "suggestOptions", "searchFields"], mapper: { - serializedName: "$select", + serializedName: "searchFields", type: { name: "Sequence", element: { @@ -514,13 +449,11 @@ export const select1: coreHttp.OperationQueryParameter = { } } }, - collectionFormat: coreHttp.QueryCollectionFormat.Csv + collectionFormat: QueryCollectionFormat.Csv }; -export const selectedFields: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "selectedFields" - ], + +export const select1: OperationQueryParameter = { + parameterPath: ["options", "suggestOptions", "select"], mapper: { serializedName: "$select", type: { @@ -532,106 +465,117 @@ export const selectedFields: coreHttp.OperationQueryParameter = { } } }, - collectionFormat: coreHttp.QueryCollectionFormat.Csv + collectionFormat: QueryCollectionFormat.Csv }; -export const sessionId: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "sessionId" - ], + +export const top1: OperationQueryParameter = { + parameterPath: ["options", "suggestOptions", "top"], mapper: { - serializedName: "sessionId", + serializedName: "$top", type: { - name: "String" + name: "Number" } } }; -export const skip: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "skip" - ], + +export const suggestRequest: OperationParameter = { + parameterPath: "suggestRequest", + mapper: SuggestRequestMapper +}; + +export const batch: OperationParameter = { + parameterPath: "batch", + mapper: IndexBatchMapper +}; + +export const autocompleteMode: OperationQueryParameter = { + parameterPath: ["options", "autocompleteOptions", "autocompleteMode"], mapper: { - serializedName: "$skip", + serializedName: "autocompleteMode", type: { - name: "Number" + name: "Enum", + allowedValues: ["oneTerm", "twoTerms", "oneTermWithContext"] } } }; -export const suggesterName: coreHttp.OperationQueryParameter = { - parameterPath: "suggesterName", + +export const filter2: OperationQueryParameter = { + parameterPath: ["options", "autocompleteOptions", "filter"], mapper: { - required: true, - serializedName: "suggesterName", + serializedName: "$filter", type: { name: "String" } } }; -export const top0: 
coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "searchOptions", - "top" - ], + +export const useFuzzyMatching1: OperationQueryParameter = { + parameterPath: ["options", "autocompleteOptions", "useFuzzyMatching"], mapper: { - serializedName: "$top", + serializedName: "fuzzy", type: { - name: "Number" + name: "Boolean" } } }; -export const top1: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "suggestOptions", - "top" - ], + +export const highlightPostTag2: OperationQueryParameter = { + parameterPath: ["options", "autocompleteOptions", "highlightPostTag"], mapper: { - serializedName: "$top", + serializedName: "highlightPostTag", type: { - name: "Number" + name: "String" } } }; -export const top2: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "autocompleteOptions", - "top" - ], + +export const highlightPreTag2: OperationQueryParameter = { + parameterPath: ["options", "autocompleteOptions", "highlightPreTag"], mapper: { - serializedName: "$top", + serializedName: "highlightPreTag", type: { - name: "Number" + name: "String" } } }; -export const useFuzzyMatching0: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "suggestOptions", - "useFuzzyMatching" - ], + +export const minimumCoverage2: OperationQueryParameter = { + parameterPath: ["options", "autocompleteOptions", "minimumCoverage"], mapper: { - serializedName: "fuzzy", + serializedName: "minimumCoverage", type: { - name: "Boolean" + name: "Number" } } }; -export const useFuzzyMatching1: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "autocompleteOptions", - "useFuzzyMatching" - ], + +export const searchFields2: OperationQueryParameter = { + parameterPath: ["options", "autocompleteOptions", "searchFields"], mapper: { - serializedName: "fuzzy", + serializedName: "searchFields", type: { - name: "Boolean" + name: "Sequence", + element: { + type: { + name: "String" + } + } + } + }, + collectionFormat: QueryCollectionFormat.Csv +}; + +export const top2: OperationQueryParameter = { + parameterPath: ["options", "autocompleteOptions", "top"], + mapper: { + serializedName: "$top", + type: { + name: "Number" } } }; + +export const autocompleteRequest: OperationParameter = { + parameterPath: "autocompleteRequest", + mapper: AutocompleteRequestMapper +}; diff --git a/sdk/search/search-documents/src/generated/data/operations/documents.ts b/sdk/search/search-documents/src/generated/data/operations/documents.ts index 8d692d00836e..c99a44b4e372 100644 --- a/sdk/search/search-documents/src/generated/data/operations/documents.ts +++ b/sdk/search/search-documents/src/generated/data/operations/documents.ts @@ -1,378 +1,285 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
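In the regenerated parameters module shown here, the coreHttp.* qualified types give way to named imports (OperationParameter, OperationURLParameter, OperationQueryParameter, QueryCollectionFormat), the search-options variants lose their numeric suffixes (filter, highlightPostTag, and so on) while the suggest and autocomplete variants keep them (filter1, filter2), and whole-body parameters such as searchRequest, suggestRequest, batch, and autocompleteRequest now point straight at their mappers. The parameterPath array is the path the runtime walks on the operation arguments to locate a value. A small hedged sketch of how one of these definitions is read; the call shape at the end is assumed rather than taken from the patch:

import { OperationQueryParameter } from "@azure/core-http";

// ["options", "suggestOptions", "filter"] means: read options.suggestOptions.filter from
// the operation arguments and send it as the $filter query parameter.
const filterSketch: OperationQueryParameter = {
  parameterPath: ["options", "suggestOptions", "filter"],
  mapper: {
    serializedName: "$filter",
    type: { name: "String" }
  }
};

// So a call such as
//   documents.suggestGet("sea", "mySuggester", { suggestOptions: { filter: "rating gt 4" } })
// would contribute a URL-encoded $filter=rating gt 4 query parameter.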
*/ import * as coreHttp from "@azure/core-http"; -import * as Models from "../models"; -import * as Mappers from "../models/documentsMappers"; +import * as Mappers from "../models/mappers"; import * as Parameters from "../models/parameters"; -import { SearchClientContext } from "../searchClientContext"; +import { SearchClient } from "../searchClient"; +import { + DocumentsCountOptionalParams, + DocumentsCountResponse, + DocumentsSearchGetOptionalParams, + DocumentsSearchGetResponse, + SearchRequest, + DocumentsSearchPostOptionalParams, + DocumentsSearchPostResponse, + DocumentsGetOptionalParams, + DocumentsGetResponse, + DocumentsSuggestGetOptionalParams, + DocumentsSuggestGetResponse, + SuggestRequest, + DocumentsSuggestPostOptionalParams, + DocumentsSuggestPostResponse, + IndexBatch, + DocumentsIndexOptionalParams, + DocumentsIndexResponse, + DocumentsAutocompleteGetOptionalParams, + DocumentsAutocompleteGetResponse, + AutocompleteRequest, + DocumentsAutocompletePostOptionalParams, + DocumentsAutocompletePostResponse +} from "../models"; /** Class representing a Documents. */ export class Documents { - private readonly client: SearchClientContext; + private readonly client: SearchClient; /** - * Create a Documents. - * @param {SearchClientContext} client Reference to the service client. + * Initialize a new instance of the class Documents class. + * @param client Reference to the service client */ - constructor(client: SearchClientContext) { + constructor(client: SearchClient) { this.client = client; } /** * Queries the number of documents in the index. - * @param [options] The optional parameters - * @returns Promise + * @param options The options parameters. */ - count(options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param callback The callback - */ - count(callback: coreHttp.ServiceCallback): void; - /** - * @param options The optional parameters - * @param callback The callback - */ - count(options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - count(options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + count( + options?: DocumentsCountOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - options - }, - countOperationSpec, - callback) as Promise; + operationArguments, + countOperationSpec + ) as Promise; } /** * Searches for documents in the index. - * @param [options] The optional parameters - * @returns Promise + * @param options The options parameters. 
*/ - searchGet(options?: Models.DocumentsSearchGetOptionalParams): Promise; - /** - * @param callback The callback - */ - searchGet(callback: coreHttp.ServiceCallback): void; - /** - * @param options The optional parameters - * @param callback The callback - */ - searchGet(options: Models.DocumentsSearchGetOptionalParams, callback: coreHttp.ServiceCallback): void; - searchGet(options?: Models.DocumentsSearchGetOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + searchGet( + options?: DocumentsSearchGetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - options - }, - searchGetOperationSpec, - callback) as Promise; + operationArguments, + searchGetOperationSpec + ) as Promise; } /** * Searches for documents in the index. * @param searchRequest The definition of the Search request. - * @param [options] The optional parameters - * @returns Promise - */ - searchPost(searchRequest: Models.SearchRequest, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param searchRequest The definition of the Search request. - * @param callback The callback - */ - searchPost(searchRequest: Models.SearchRequest, callback: coreHttp.ServiceCallback): void; - /** - * @param searchRequest The definition of the Search request. - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. */ - searchPost(searchRequest: Models.SearchRequest, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - searchPost(searchRequest: Models.SearchRequest, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + searchPost( + searchRequest: SearchRequest, + options?: DocumentsSearchPostOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + searchRequest, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - searchRequest, - options - }, - searchPostOperationSpec, - callback) as Promise; + operationArguments, + searchPostOperationSpec + ) as Promise; } /** * Retrieves a document from the index. * @param key The key of the document to retrieve. - * @param [options] The optional parameters - * @returns Promise - */ - get(key: string, options?: Models.DocumentsGetOptionalParams): Promise; - /** - * @param key The key of the document to retrieve. - * @param callback The callback - */ - get(key: string, callback: coreHttp.ServiceCallback): void; - /** - * @param key The key of the document to retrieve. - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. 
*/ - get(key: string, options: Models.DocumentsGetOptionalParams, callback: coreHttp.ServiceCallback): void; - get(key: string, options?: Models.DocumentsGetOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + get( + key: string, + options?: DocumentsGetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + key, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - key, - options - }, - getOperationSpec, - callback) as Promise; + operationArguments, + getOperationSpec + ) as Promise; } /** * Suggests documents in the index that match the given partial query text. - * @param searchText The search text to use to suggest documents. Must be at least 1 character, and - * no more than 100 characters. - * @param suggesterName The name of the suggester as specified in the suggesters collection that's - * part of the index definition. - * @param [options] The optional parameters - * @returns Promise + * @param searchText The search text to use to suggest documents. Must be at least 1 character, and no + * more than 100 characters. + * @param suggesterName The name of the suggester as specified in the suggesters collection that's part + * of the index definition. + * @param options The options parameters. */ - suggestGet(searchText: string, suggesterName: string, options?: Models.DocumentsSuggestGetOptionalParams): Promise; - /** - * @param searchText The search text to use to suggest documents. Must be at least 1 character, and - * no more than 100 characters. - * @param suggesterName The name of the suggester as specified in the suggesters collection that's - * part of the index definition. - * @param callback The callback - */ - suggestGet(searchText: string, suggesterName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param searchText The search text to use to suggest documents. Must be at least 1 character, and - * no more than 100 characters. - * @param suggesterName The name of the suggester as specified in the suggesters collection that's - * part of the index definition. - * @param options The optional parameters - * @param callback The callback - */ - suggestGet(searchText: string, suggesterName: string, options: Models.DocumentsSuggestGetOptionalParams, callback: coreHttp.ServiceCallback): void; - suggestGet(searchText: string, suggesterName: string, options?: Models.DocumentsSuggestGetOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + suggestGet( + searchText: string, + suggesterName: string, + options?: DocumentsSuggestGetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + searchText, + suggesterName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - searchText, - suggesterName, - options - }, - suggestGetOperationSpec, - callback) as Promise; + operationArguments, + suggestGetOperationSpec + ) as Promise; } /** * Suggests documents in the index that match the given partial query text. * @param suggestRequest The Suggest request. - * @param [options] The optional parameters - * @returns Promise - */ - suggestPost(suggestRequest: Models.SuggestRequest, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param suggestRequest The Suggest request. - * @param callback The callback + * @param options The options parameters. 
*/ - suggestPost(suggestRequest: Models.SuggestRequest, callback: coreHttp.ServiceCallback): void; - /** - * @param suggestRequest The Suggest request. - * @param options The optional parameters - * @param callback The callback - */ - suggestPost(suggestRequest: Models.SuggestRequest, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - suggestPost(suggestRequest: Models.SuggestRequest, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + suggestPost( + suggestRequest: SuggestRequest, + options?: DocumentsSuggestPostOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + suggestRequest, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - suggestRequest, - options - }, - suggestPostOperationSpec, - callback) as Promise; + operationArguments, + suggestPostOperationSpec + ) as Promise; } /** * Sends a batch of document write actions to the index. * @param batch The batch of index actions. - * @param [options] The optional parameters - * @returns Promise - */ - index(batch: Models.IndexBatch, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param batch The batch of index actions. - * @param callback The callback - */ - index(batch: Models.IndexBatch, callback: coreHttp.ServiceCallback): void; - /** - * @param batch The batch of index actions. - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. */ - index(batch: Models.IndexBatch, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - index(batch: Models.IndexBatch, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + index( + batch: IndexBatch, + options?: DocumentsIndexOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + batch, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - batch, - options - }, - indexOperationSpec, - callback) as Promise; + operationArguments, + indexOperationSpec + ) as Promise; } /** * Autocompletes incomplete query terms based on input text and matching terms in the index. * @param searchText The incomplete term which should be auto-completed. - * @param suggesterName The name of the suggester as specified in the suggesters collection that's - * part of the index definition. - * @param [options] The optional parameters - * @returns Promise + * @param suggesterName The name of the suggester as specified in the suggesters collection that's part + * of the index definition. + * @param options The options parameters. */ - autocompleteGet(searchText: string, suggesterName: string, options?: Models.DocumentsAutocompleteGetOptionalParams): Promise; - /** - * @param searchText The incomplete term which should be auto-completed. - * @param suggesterName The name of the suggester as specified in the suggesters collection that's - * part of the index definition. - * @param callback The callback - */ - autocompleteGet(searchText: string, suggesterName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param searchText The incomplete term which should be auto-completed. - * @param suggesterName The name of the suggester as specified in the suggesters collection that's - * part of the index definition. 
- * @param options The optional parameters - * @param callback The callback - */ - autocompleteGet(searchText: string, suggesterName: string, options: Models.DocumentsAutocompleteGetOptionalParams, callback: coreHttp.ServiceCallback): void; - autocompleteGet(searchText: string, suggesterName: string, options?: Models.DocumentsAutocompleteGetOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + autocompleteGet( + searchText: string, + suggesterName: string, + options?: DocumentsAutocompleteGetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + searchText, + suggesterName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - searchText, - suggesterName, - options - }, - autocompleteGetOperationSpec, - callback) as Promise; + operationArguments, + autocompleteGetOperationSpec + ) as Promise; } /** * Autocompletes incomplete query terms based on input text and matching terms in the index. * @param autocompleteRequest The definition of the Autocomplete request. - * @param [options] The optional parameters - * @returns Promise - */ - autocompletePost(autocompleteRequest: Models.AutocompleteRequest, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param autocompleteRequest The definition of the Autocomplete request. - * @param callback The callback + * @param options The options parameters. */ - autocompletePost(autocompleteRequest: Models.AutocompleteRequest, callback: coreHttp.ServiceCallback): void; - /** - * @param autocompleteRequest The definition of the Autocomplete request. - * @param options The optional parameters - * @param callback The callback - */ - autocompletePost(autocompleteRequest: Models.AutocompleteRequest, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - autocompletePost(autocompleteRequest: Models.AutocompleteRequest, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + autocompletePost( + autocompleteRequest: AutocompleteRequest, + options?: DocumentsAutocompletePostOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + autocompleteRequest, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - autocompleteRequest, - options - }, - autocompletePostOperationSpec, - callback) as Promise; + operationArguments, + autocompletePostOperationSpec + ) as Promise; } } - // Operation Specifications -const serializer = new coreHttp.Serializer(Mappers); +const serializer = new coreHttp.Serializer(Mappers, /* isXml */ false); + const countOperationSpec: coreHttp.OperationSpec = { + path: "/docs/$count", httpMethod: "GET", - path: "docs/$count", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 200: { - bodyMapper: { - serializedName: "parsedResponse", - type: { - name: "Number" - } - } + bodyMapper: { type: { name: "Number" } } }, default: { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const searchGetOperationSpec: coreHttp.OperationSpec = { + path: "/docs", httpMethod: "GET", - path: "docs", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], + 
responses: { + 200: { + bodyMapper: Mappers.SearchDocumentsResult + }, + default: { + bodyMapper: Mappers.SearchError + } + }, queryParameters: [ - Parameters.searchText0, Parameters.apiVersion, + Parameters.searchText, Parameters.includeTotalResultCount, Parameters.facets, - Parameters.filter0, + Parameters.filter, Parameters.highlightFields, - Parameters.highlightPostTag0, - Parameters.highlightPreTag0, - Parameters.minimumCoverage0, - Parameters.orderBy0, + Parameters.highlightPostTag, + Parameters.highlightPreTag, + Parameters.minimumCoverage, + Parameters.orderBy, Parameters.queryType, Parameters.scoringParameters, Parameters.scoringProfile, - Parameters.searchFields0, + Parameters.searchFields, Parameters.searchMode, Parameters.scoringStatistics, Parameters.sessionId, - Parameters.select0, + Parameters.select, Parameters.skip, - Parameters.top0 + Parameters.top ], - responses: { - 200: { - bodyMapper: Mappers.SearchDocumentsResult - }, - default: { - bodyMapper: Mappers.SearchError - } - }, + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const searchPostOperationSpec: coreHttp.OperationSpec = { + path: "/docs/search.post.search", httpMethod: "POST", - path: "docs/search.post.search", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "searchRequest", - mapper: { - ...Mappers.SearchRequest, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.SearchDocumentsResult @@ -381,50 +288,50 @@ const searchPostOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.searchRequest, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [ + Parameters.accept, + Parameters.xMsClientRequestId, + Parameters.contentType + ], + mediaType: "json", serializer }; - const getOperationSpec: coreHttp.OperationSpec = { + path: "/docs('{key}')", httpMethod: "GET", - path: "docs('{key}')", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName, - Parameters.key - ], - queryParameters: [ - Parameters.selectedFields, - Parameters.apiVersion - ], responses: { 200: { - bodyMapper: { - serializedName: "parsedResponse", - type: { - name: "Object" - } - } + bodyMapper: { type: { name: "any" } } }, default: { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion, Parameters.selectedFields], + urlParameters: [Parameters.endpoint, Parameters.indexName, Parameters.key], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const suggestGetOperationSpec: coreHttp.OperationSpec = { + path: "/docs/search.suggest", httpMethod: "GET", - path: "docs/search.suggest", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], + responses: { + 200: { + bodyMapper: Mappers.SuggestDocumentsResult + }, + default: { + bodyMapper: Mappers.SearchError + } + }, queryParameters: [ + Parameters.apiVersion, Parameters.searchText1, Parameters.suggesterName, - Parameters.apiVersion, Parameters.filter1, - Parameters.useFuzzyMatching0, + Parameters.useFuzzyMatching, Parameters.highlightPostTag1, Parameters.highlightPreTag1, Parameters.minimumCoverage1, @@ -433,34 +340,13 @@ const suggestGetOperationSpec: coreHttp.OperationSpec = { Parameters.select1, Parameters.top1 ], - responses: { - 200: { - bodyMapper: 
Mappers.SuggestDocumentsResult - }, - default: { - bodyMapper: Mappers.SearchError - } - }, + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const suggestPostOperationSpec: coreHttp.OperationSpec = { + path: "/docs/search.post.suggest", httpMethod: "POST", - path: "docs/search.post.suggest", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "suggestRequest", - mapper: { - ...Mappers.SuggestRequest, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.SuggestDocumentsResult @@ -469,26 +355,20 @@ const suggestPostOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.suggestRequest, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [ + Parameters.accept, + Parameters.xMsClientRequestId, + Parameters.contentType + ], + mediaType: "json", serializer }; - const indexOperationSpec: coreHttp.OperationSpec = { + path: "/docs/search.index", httpMethod: "POST", - path: "docs/search.index", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "batch", - mapper: { - ...Mappers.IndexBatch, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.IndexDocumentsResult @@ -500,16 +380,28 @@ const indexOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.batch, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [ + Parameters.accept, + Parameters.xMsClientRequestId, + Parameters.contentType + ], + mediaType: "json", serializer }; - const autocompleteGetOperationSpec: coreHttp.OperationSpec = { + path: "/docs/search.autocomplete", httpMethod: "GET", - path: "docs/search.autocomplete", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], + responses: { + 200: { + bodyMapper: Mappers.AutocompleteResult + }, + default: { + bodyMapper: Mappers.SearchError + } + }, queryParameters: [ Parameters.apiVersion, Parameters.searchText1, @@ -523,34 +415,13 @@ const autocompleteGetOperationSpec: coreHttp.OperationSpec = { Parameters.searchFields2, Parameters.top2 ], - responses: { - 200: { - bodyMapper: Mappers.AutocompleteResult - }, - default: { - bodyMapper: Mappers.SearchError - } - }, + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const autocompletePostOperationSpec: coreHttp.OperationSpec = { + path: "/docs/search.post.autocomplete", httpMethod: "POST", - path: "docs/search.post.autocomplete", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "autocompleteRequest", - mapper: { - ...Mappers.AutocompleteRequest, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.AutocompleteResult @@ -559,5 +430,14 @@ const autocompletePostOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.autocompleteRequest, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [ + Parameters.accept, + 
Parameters.xMsClientRequestId, + Parameters.contentType + ], + mediaType: "json", serializer }; diff --git a/sdk/search/search-documents/src/generated/data/operations/index.ts b/sdk/search/search-documents/src/generated/data/operations/index.ts index acdb150c881e..77c96e3f8b79 100644 --- a/sdk/search/search-documents/src/generated/data/operations/index.ts +++ b/sdk/search/search-documents/src/generated/data/operations/index.ts @@ -1,11 +1,9 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ export * from "./documents"; diff --git a/sdk/search/search-documents/src/generated/data/searchClient.ts b/sdk/search/search-documents/src/generated/data/searchClient.ts index fb4021f720b6..58e82643bcfa 100644 --- a/sdk/search/search-documents/src/generated/data/searchClient.ts +++ b/sdk/search/search-documents/src/generated/data/searchClient.ts @@ -1,42 +1,33 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -import * as coreHttp from "@azure/core-http"; -import * as Models from "./models"; -import * as Mappers from "./models/mappers"; -import * as operations from "./operations"; +import { Documents } from "./operations"; import { SearchClientContext } from "./searchClientContext"; +import { SearchClientOptionalParams, ApiVersion20200630 } from "./models"; -class SearchClient extends SearchClientContext { - // Operation groups - documents: operations.Documents; - +/** @hidden */ +export class SearchClient extends SearchClientContext { /** * Initializes a new instance of the SearchClient class. - * @param apiVersion Client Api Version. * @param endpoint The endpoint URL of the search service. * @param indexName The name of the index. 
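The Documents operation group above now exposes a single promise-returning overload per operation: each method packs its arguments into coreHttp.OperationArguments, converts the caller's options with operationOptionsToRequestOptionsBase, and hands both to sendOperationRequest along with the matching OperationSpec. A minimal usage sketch, assuming an already constructed instance of the generated (and @hidden) SearchClient; the import path and the sample values are placeholders, and the convenience layer in src/searchClient.ts remains the intended public surface:

import { SearchClient } from "./generated/data/searchClient";

async function countAndSuggest(client: SearchClient): Promise<void> {
  // count() now resolves to a typed response instead of taking a ServiceCallback.
  const countResponse = await client.documents.count();

  // suggestGet keeps its (searchText, suggesterName, options?) signature but returns
  // a promise of SuggestDocumentsResult plus the raw _response.
  const suggestions = await client.documents.suggestGet("sea", "mySuggester");
  console.log(countResponse, suggestions.results.length);
}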
- * @param [options] The parameter options + * @param apiVersion Api Version + * @param options The parameter options */ - constructor(apiVersion: string, endpoint: string, indexName: string, options?: coreHttp.ServiceClientOptions) { - super(apiVersion, endpoint, indexName, options); - this.documents = new operations.Documents(this); + constructor( + endpoint: string, + indexName: string, + apiVersion: ApiVersion20200630, + options?: SearchClientOptionalParams + ) { + super(endpoint, indexName, apiVersion, options); + this.documents = new Documents(this); } -} -// Operation Specifications - -export { - SearchClient, - SearchClientContext, - Models as SearchModels, - Mappers as SearchMappers -}; -export * from "./operations"; + documents: Documents; +} diff --git a/sdk/search/search-documents/src/generated/data/searchClientContext.ts b/sdk/search/search-documents/src/generated/data/searchClientContext.ts index fe52db6e5bed..d5c350e36128 100644 --- a/sdk/search/search-documents/src/generated/data/searchClientContext.ts +++ b/sdk/search/search-documents/src/generated/data/searchClientContext.ts @@ -1,41 +1,47 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ import * as coreHttp from "@azure/core-http"; +import { ApiVersion20200630, SearchClientOptionalParams } from "./models"; const packageName = "@azure/search-documents"; const packageVersion = "11.1.0-beta.2"; +/** @hidden */ export class SearchClientContext extends coreHttp.ServiceClient { - apiVersion: string; endpoint: string; indexName: string; + apiVersion: ApiVersion20200630; /** * Initializes a new instance of the SearchClientContext class. - * @param apiVersion Client Api Version. * @param endpoint The endpoint URL of the search service. * @param indexName The name of the index. 
- * @param [options] The parameter options + * @param apiVersion Api Version + * @param options The parameter options */ - constructor(apiVersion: string, endpoint: string, indexName: string, options?: coreHttp.ServiceClientOptions) { - if (apiVersion == undefined) { - throw new Error("'apiVersion' cannot be null."); + constructor( + endpoint: string, + indexName: string, + apiVersion: ApiVersion20200630, + options?: SearchClientOptionalParams + ) { + if (endpoint === undefined) { + throw new Error("'endpoint' cannot be null"); } - if (endpoint == undefined) { - throw new Error("'endpoint' cannot be null."); + if (indexName === undefined) { + throw new Error("'indexName' cannot be null"); } - if (indexName == undefined) { - throw new Error("'indexName' cannot be null."); + if (apiVersion === undefined) { + throw new Error("'apiVersion' cannot be null"); } + // Initializing default values for options if (!options) { options = {}; } @@ -47,10 +53,13 @@ export class SearchClientContext extends coreHttp.ServiceClient { super(undefined, options); - this.baseUri = "{endpoint}/indexes('{indexName}')"; this.requestContentType = "application/json; charset=utf-8"; - this.apiVersion = apiVersion; + + this.baseUri = options.endpoint || "{endpoint}/indexes('{indexName}')"; + + // Parameter assignments this.endpoint = endpoint; this.indexName = indexName; + this.apiVersion = apiVersion; } } diff --git a/sdk/search/search-documents/src/generated/service/index.ts b/sdk/search/search-documents/src/generated/service/index.ts new file mode 100644 index 000000000000..3642e4e87ed7 --- /dev/null +++ b/sdk/search/search-documents/src/generated/service/index.ts @@ -0,0 +1,11 @@ +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. + */ + +export * from "./models"; +export { SearchServiceClient } from "./searchServiceClient"; +export { SearchServiceClientContext } from "./searchServiceClientContext"; diff --git a/sdk/search/search-documents/src/generated/service/models/index.ts b/sdk/search/search-documents/src/generated/service/models/index.ts index 44dfc5371a75..b1b9f05acf3b 100644 --- a/sdk/search/search-documents/src/generated/service/models/index.ts +++ b/sdk/search/search-documents/src/generated/service/models/index.ts @@ -1,4550 +1,3853 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ - import * as coreHttp from "@azure/core-http"; -/** - * Specifies some text and analysis components used to break that text into tokens. - */ -export interface AnalyzeRequest { - /** - * The text to break into tokens. - */ - text: string; - /** - * The name of the analyzer to use to break the given text. If this parameter is not specified, - * you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually - * exclusive. KnownAnalyzerNames is an enum containing known values. - */ - analyzer?: string; - /** - * The name of the tokenizer to use to break the given text. If this parameter is not specified, - * you must specify an analyzer instead. 
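SearchClientContext now takes its constructor arguments as (endpoint, indexName, apiVersion, options), validates all three required values, types apiVersion as the ApiVersion20200630 literal, and lets options.endpoint override the default "{endpoint}/indexes('{indexName}')" baseUri. A hedged construction sketch; the service URL and index name are placeholders, and the assumption that ApiVersion20200630 is the string "2020-06-30" follows from the type name rather than from lines in the patch:

import { SearchClient } from "./generated/data/searchClient";

// Argument order changed from (apiVersion, endpoint, indexName, options)
// to (endpoint, indexName, apiVersion, options).
const generatedClient = new SearchClient(
  "https://<service-name>.search.windows.net", // endpoint
  "<index-name>",                               // indexName
  "2020-06-30",                                 // apiVersion (ApiVersion20200630)
  {}                                            // SearchClientOptionalParams
);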
The tokenizer and analyzer parameters are mutually - * exclusive. KnownTokenizerNames is an enum containing known values. - */ - tokenizer?: string; - /** - * An optional list of token filters to use when breaking the given text. This parameter can only - * be set when using the tokenizer parameter. - */ - tokenFilters?: string[]; - /** - * An optional list of character filters to use when breaking the given text. This parameter can - * only be set when using the tokenizer parameter. - */ - charFilters?: string[]; +export type DataChangeDetectionPolicyUnion = + | DataChangeDetectionPolicy + | HighWaterMarkChangeDetectionPolicy + | SqlIntegratedChangeTrackingPolicy; +export type DataDeletionDetectionPolicyUnion = + | DataDeletionDetectionPolicy + | SoftDeleteColumnDeletionDetectionPolicy; +export type SearchIndexerSkillUnion = + | SearchIndexerSkill + | ConditionalSkill + | KeyPhraseExtractionSkill + | OcrSkill + | ImageAnalysisSkill + | LanguageDetectionSkill + | ShaperSkill + | MergeSkill + | EntityRecognitionSkill + | SentimentSkill + | SplitSkill + | CustomEntityLookupSkill + | TextTranslationSkill + | WebApiSkill; +export type CognitiveServicesAccountUnion = + | CognitiveServicesAccount + | DefaultCognitiveServicesAccount + | CognitiveServicesAccountKey; +export type ScoringFunctionUnion = + | ScoringFunction + | DistanceScoringFunction + | FreshnessScoringFunction + | MagnitudeScoringFunction + | TagScoringFunction; +export type LexicalAnalyzerUnion = + | LexicalAnalyzer + | CustomAnalyzer + | PatternAnalyzer + | LuceneStandardAnalyzer + | StopAnalyzer; +export type LexicalTokenizerUnion = + | LexicalTokenizer + | ClassicTokenizer + | EdgeNGramTokenizer + | KeywordTokenizer + | KeywordTokenizerV2 + | MicrosoftLanguageTokenizer + | MicrosoftLanguageStemmingTokenizer + | NGramTokenizer + | PathHierarchyTokenizerV2 + | PatternTokenizer + | LuceneStandardTokenizer + | LuceneStandardTokenizerV2 + | UaxUrlEmailTokenizer; +export type TokenFilterUnion = + | TokenFilter + | AsciiFoldingTokenFilter + | CjkBigramTokenFilter + | CommonGramTokenFilter + | DictionaryDecompounderTokenFilter + | EdgeNGramTokenFilter + | EdgeNGramTokenFilterV2 + | ElisionTokenFilter + | KeepTokenFilter + | KeywordMarkerTokenFilter + | LengthTokenFilter + | LimitTokenFilter + | NGramTokenFilter + | NGramTokenFilterV2 + | PatternCaptureTokenFilter + | PatternReplaceTokenFilter + | PhoneticTokenFilter + | ShingleTokenFilter + | SnowballTokenFilter + | StemmerTokenFilter + | StemmerOverrideTokenFilter + | StopwordsTokenFilter + | SynonymTokenFilter + | TruncateTokenFilter + | UniqueTokenFilter + | WordDelimiterTokenFilter; +export type CharFilterUnion = + | CharFilter + | MappingCharFilter + | PatternReplaceCharFilter; +export type SimilarityUnion = Similarity | ClassicSimilarity | BM25Similarity; + +/** Represents a datasource definition, which can be used to configure an indexer. */ +export interface SearchIndexerDataSource { + /** The name of the datasource. */ + name: string; + /** The description of the datasource. */ + description?: string; + /** The type of the datasource. */ + type: SearchIndexerDataSourceType; + /** Credentials for the datasource. */ + credentials: DataSourceCredentials; + /** The data container for the datasource. */ + container: SearchIndexerDataContainer; + /** The data change detection policy for the datasource. */ + dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion | null; + /** The data deletion detection policy for the datasource. 
*/ + dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion | null; + /** The ETag of the data source. */ + etag?: string; + /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even Microsoft, can decrypt your data source definition in Azure Cognitive Search. Once you have encrypted your data source definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your datasource definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ + encryptionKey?: SearchResourceEncryptionKey | null; } -/** - * Information about a token returned by an analyzer. - */ -export interface AnalyzedTokenInfo { - /** - * The token returned by the analyzer. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly token: string; +/** Represents credentials that can be used to connect to a datasource. */ +export interface DataSourceCredentials { + /** The connection string for the datasource. Set to '' if you do not want the connection string updated. */ + connectionString?: string; +} + +/** Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. */ +export interface SearchIndexerDataContainer { + /** The name of the table or view (for Azure SQL data source) or collection (for CosmosDB data source) that will be indexed. */ + name: string; + /** A query that is applied to this data container. The syntax and meaning of this parameter is datasource-specific. Not supported by Azure SQL datasources. */ + query?: string; +} + +/** Base type for data change detection policies. */ +export interface DataChangeDetectionPolicy { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: + | "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy" + | "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"; +} + +/** Base type for data deletion detection policies. */ +export interface DataDeletionDetectionPolicy { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"; +} + +/** A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. */ +export interface SearchResourceEncryptionKey { + /** The name of your Azure Key Vault key to be used to encrypt your data at rest. */ + keyName: string; + /** The version of your Azure Key Vault key to be used to encrypt your data at rest. */ + keyVersion: string; + /** The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be https://my-keyvault-name.vault.azure.net. */ + vaultUri: string; + /** Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. 
*/ + accessCredentials?: AzureActiveDirectoryApplicationCredentials; +} + +/** Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. */ +export interface AzureActiveDirectoryApplicationCredentials { + /** An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. */ + applicationId: string; + /** The authentication key of the specified AAD application. */ + applicationSecret?: string; +} + +/** Describes an error condition for the Azure Cognitive Search API. */ +export interface SearchError { /** - * The index of the first character of the token in the input text. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * One of a server-defined set of error codes. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - readonly startOffset: number; + readonly code?: string; /** - * The index of the last character of the token in the input text. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * A human-readable representation of the error. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - readonly endOffset: number; + readonly message: string; /** - * The position of the token in the input text relative to other tokens. The first token in the - * input text has position 0, the next has position 1, and so on. Depending on the analyzer used, - * some tokens might have the same position, for example if they are synonyms of each other. - * **NOTE: This property will not be serialized. It can only be populated by the server.** + * An array of details about specific errors that led to this reported error. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - readonly position: number; + readonly details?: SearchError[]; } -/** - * The result of testing an analyzer on text. - */ -export interface AnalyzeResult { +/** Response from a List Datasources request. If successful, it includes the full definitions of all datasources. */ +export interface ListDataSourcesResult { /** - * The list of tokens returned by the analyzer specified in the request. + * The datasources in the Search service. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - tokens: AnalyzedTokenInfo[]; + readonly dataSources: SearchIndexerDataSource[]; } -/** - * Contains the possible cases for LexicalAnalyzer. - */ -export type LexicalAnalyzerUnion = LexicalAnalyzer | CustomAnalyzer | PatternAnalyzer | LuceneStandardAnalyzer | StopAnalyzer; +/** Represents an indexer. */ +export interface SearchIndexer { + /** The name of the indexer. */ + name: string; + /** The description of the indexer. */ + description?: string; + /** The name of the datasource from which this indexer reads data. */ + dataSourceName: string; + /** The name of the skillset executing with this indexer. */ + skillsetName?: string; + /** The name of the index to which this indexer writes data. */ + targetIndexName: string; + /** The schedule for this indexer. */ + schedule?: IndexingSchedule | null; + /** Parameters for indexer execution. 
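// [Editor's sketch — not part of the generated patch] A minimal object satisfying the regenerated
// SearchIndexerDataSource shape documented above (name, type, credentials, container, optional
// change-detection policy). Only the property names and discriminator strings come from this hunk;
// the import path, the "azuresql" type value, and the placeholder connection string are assumptions.
import { SearchIndexerDataSource } from "./models";

const dataSource: SearchIndexerDataSource = {
  name: "my-sql-datasource",
  description: "Hotels table in Azure SQL",
  type: "azuresql",                                        // SearchIndexerDataSourceType (assumed value)
  credentials: { connectionString: "<connection-string>" }, // placeholder; set to '' to leave unchanged
  container: { name: "Hotels" },                           // table/view (SQL) or collection (Cosmos DB)
  dataChangeDetectionPolicy: {
    odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"
  }
};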
*/ + parameters?: IndexingParameters | null; + /** Defines mappings between fields in the data source and corresponding target fields in the index. */ + fieldMappings?: FieldMapping[]; + /** Output field mappings are applied after enrichment and immediately before indexing. */ + outputFieldMappings?: FieldMapping[]; + /** A value indicating whether the indexer is disabled. Default is false. */ + isDisabled?: boolean | null; + /** The ETag of the indexer. */ + etag?: string; + /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them in Azure Cognitive Search. Once you have encrypted your indexer definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ + encryptionKey?: SearchResourceEncryptionKey | null; +} -/** - * Base type for analyzers. - */ -export interface LexicalAnalyzer { - /** - * Polymorphic Discriminator - */ - odatatype: "LexicalAnalyzer"; - /** - * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, - * can only start and end with alphanumeric characters, and is limited to 128 characters. - */ +/** Represents a schedule for indexer execution. */ +export interface IndexingSchedule { + /** The interval of time between indexer executions. */ + interval: string; + /** The time when an indexer should start running. */ + startTime?: Date; +} + +/** Represents parameters for indexer execution. */ +export interface IndexingParameters { + /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */ + batchSize?: number | null; + /** The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. */ + maxFailedItems?: number | null; + /** The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. */ + maxFailedItemsPerBatch?: number | null; + /** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ + configuration?: IndexingParametersConfiguration; +} + +/** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ +export interface IndexingParametersConfiguration { + /** Describes unknown properties. The value of an unknown property can be of "any" type. */ + [property: string]: any; + /** Represents the parsing mode for indexing from an Azure blob data source. */ + parsingMode?: BlobIndexerParsingMode; + /** Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over those files during indexing. 
*/ + excludedFileNameExtensions?: string; + /** Comma-delimited list of filename extensions to select when processing from Azure blob storage. For example, you could focus indexing on specific application files ".docx, .pptx, .msg" to specifically include those file types. */ + indexedFileNameExtensions?: string; + /** For Azure blobs, set to false if you want to continue indexing when an unsupported content type is encountered, and you don't know all the content types (file extensions) in advance. */ + failOnUnsupportedContentType?: boolean; + /** For Azure blobs, set to false if you want to continue indexing if a document fails indexing. */ + failOnUnprocessableDocument?: boolean; + /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://docs.microsoft.com/azure/search/search-limits-quotas-capacity. */ + indexStorageMetadataOnlyForOversizedDocuments?: boolean; + /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */ + delimitedTextHeaders?: string; + /** For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, "|"). */ + delimitedTextDelimiter?: string; + /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */ + firstLineContainsHeaders?: boolean; + /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */ + documentRoot?: string; + /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */ + dataToExtract?: BlobIndexerDataToExtract; + /** Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. */ + imageAction?: BlobIndexerImageAction; + /** If true, will create a path //document//file_data that is an object representing the original file data downloaded from your blob data source. This allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. */ + allowSkillsetToReadFileData?: boolean; + /** Determines algorithm for text extraction from PDF files in Azure blob storage. */ + pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm; + /** Specifies the environment in which the indexer should execute. */ + executionEnvironment?: IndexerExecutionEnvironment; + /** Increases the timeout beyond the 5-minute default for Azure SQL database data sources, specified in the format "hh:mm:ss". */ + queryTimeout?: string; +} + +/** Defines a mapping between a field in a data source and a target field in an index. */ +export interface FieldMapping { + /** The name of the field in the data source. */ + sourceFieldName: string; + /** The name of the target field in the index. Same as the source field name by default. */ + targetFieldName?: string; + /** A function to apply to each source field value before indexing. 
*/ + mappingFunction?: FieldMappingFunction | null; +} + +/** Represents a function that transforms a value from a data source before indexing. */ +export interface FieldMappingFunction { + /** The name of the field mapping function. */ name: string; + /** A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. */ + parameters?: { [propertyName: string]: any }; } -/** - * Allows you to take control over the process of converting text into indexable/searchable tokens. - * It's a user-defined configuration consisting of a single predefined tokenizer and one or more - * filters. The tokenizer is responsible for breaking text into tokens, and the filters for - * modifying tokens emitted by the tokenizer. - */ -export interface CustomAnalyzer { +/** Response from a List Indexers request. If successful, it includes the full definitions of all indexers. */ +export interface ListIndexersResult { /** - * Polymorphic Discriminator + * The indexers in the Search service. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - odatatype: "#Microsoft.Azure.Search.CustomAnalyzer"; + readonly indexers: SearchIndexer[]; +} + +/** Represents the current status and execution history of an indexer. */ +export interface SearchIndexerStatus { /** - * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, - * can only start and end with alphanumeric characters, and is limited to 128 characters. + * Overall indexer status. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - name: string; + readonly status: IndexerStatus; /** - * The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as - * breaking a sentence into words. KnownTokenizerNames is an enum containing known values. + * The result of the most recent or an in-progress indexer execution. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - tokenizer: string; + readonly lastResult?: IndexerExecutionResult; /** - * A list of token filters used to filter out or modify the tokens generated by a tokenizer. For - * example, you can specify a lowercase filter that converts all characters to lowercase. The - * filters are run in the order in which they are listed. + * History of the recent indexer executions, sorted in reverse chronological order. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - tokenFilters?: string[]; + readonly executionHistory: IndexerExecutionResult[]; /** - * A list of character filters used to prepare input text before it is processed by the - * tokenizer. For instance, they can replace certain characters or symbols. The filters are run - * in the order in which they are listed. + * The execution limits for the indexer. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - charFilters?: string[]; + readonly limits: SearchIndexerLimits; } -/** - * Flexibly separates text into terms via a regular expression pattern. This analyzer is - * implemented using Apache Lucene. - */ -export interface PatternAnalyzer { +/** Represents the result of an individual indexer execution. */ +export interface IndexerExecutionResult { /** - * Polymorphic Discriminator + * The outcome of this indexer execution. + * NOTE: This property will not be serialized. It can only be populated by the server. 
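// [Editor's sketch — not part of the generated patch] Wiring together the regenerated SearchIndexer,
// IndexingSchedule, IndexingParameters and FieldMapping shapes documented above. The import path and
// the "PT1H" interval literal are assumptions; everything else mirrors the property shapes in this hunk.
import { SearchIndexer } from "./models";

const indexer: SearchIndexer = {
  name: "my-indexer",
  dataSourceName: "my-sql-datasource",   // must name an existing data source
  targetIndexName: "my-index",           // index that receives the indexed documents
  schedule: { interval: "PT1H" },        // assumed duration format for hourly execution
  parameters: { batchSize: 100, maxFailedItems: 0, maxFailedItemsPerBatch: 0 },
  fieldMappings: [
    // targetFieldName defaults to the source field name when omitted
    { sourceFieldName: "HotelId", targetFieldName: "id" }
  ],
  isDisabled: false
};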
*/ - odatatype: "#Microsoft.Azure.Search.PatternAnalyzer"; + readonly status: IndexerExecutionStatus; /** - * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, - * can only start and end with alphanumeric characters, and is limited to 128 characters. + * The error message indicating the top-level error, if any. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - name: string; + readonly errorMessage?: string; /** - * A value indicating whether terms should be lower-cased. Default is true. Default value: true. + * The start time of this indexer execution. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - lowerCaseTerms?: boolean; + readonly startTime?: Date; /** - * A regular expression pattern to match token separators. Default is an expression that matches - * one or more non-word characters. Default value: '\W+'. + * The end time of this indexer execution, if the execution has already completed. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - pattern?: string; + readonly endTime?: Date | null; /** - * Regular expression flags. + * The item-level indexing errors. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - flags?: string; + readonly errors: SearchIndexerError[]; /** - * A list of stopwords. + * The item-level indexing warnings. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - stopwords?: string[]; -} - -/** - * Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop - * filter. - */ -export interface LuceneStandardAnalyzer { + readonly warnings: SearchIndexerWarning[]; /** - * Polymorphic Discriminator + * The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - odatatype: "#Microsoft.Azure.Search.StandardAnalyzer"; + readonly itemCount: number; /** - * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, - * can only start and end with alphanumeric characters, and is limited to 128 characters. + * The number of items that failed to be indexed during this indexer execution. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - name: string; + readonly failedItemCount: number; /** - * The maximum token length. Default is 255. Tokens longer than the maximum length are split. The - * maximum token length that can be used is 300 characters. Default value: 255. + * Change tracking state with which an indexer execution started. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - maxTokenLength?: number; + readonly initialTrackingState?: string; /** - * A list of stopwords. + * Change tracking state with which an indexer execution finished. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - stopwords?: string[]; + readonly finalTrackingState?: string; } -/** - * Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is - * implemented using Apache Lucene. 
- */ -export interface StopAnalyzer { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.StopAnalyzer"; - /** - * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, - * can only start and end with alphanumeric characters, and is limited to 128 characters. - */ - name: string; +/** Represents an item- or document-level indexing error. */ +export interface SearchIndexerError { /** - * A list of stopwords. + * The key of the item for which indexing failed. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - stopwords?: string[]; -} - -/** - * Contains the possible cases for LexicalTokenizer. - */ -export type LexicalTokenizerUnion = LexicalTokenizer | ClassicTokenizer | EdgeNGramTokenizer | KeywordTokenizer | KeywordTokenizerV2 | MicrosoftLanguageTokenizer | MicrosoftLanguageStemmingTokenizer | NGramTokenizer | PathHierarchyTokenizerV2 | PatternTokenizer | LuceneStandardTokenizer | LuceneStandardTokenizerV2 | UaxUrlEmailTokenizer; - -/** - * Base type for tokenizers. - */ -export interface LexicalTokenizer { + readonly key?: string; /** - * Polymorphic Discriminator + * The message describing the error that occurred while processing the item. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - odatatype: "LexicalTokenizer"; + readonly errorMessage: string; /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. + * The status code indicating why the indexing operation failed. Possible values include: 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - name: string; -} - -/** - * Grammar-based tokenizer that is suitable for processing most European-language documents. This - * tokenizer is implemented using Apache Lucene. - */ -export interface ClassicTokenizer { + readonly statusCode: number; /** - * Polymorphic Discriminator + * The name of the source at which the error originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - odatatype: "#Microsoft.Azure.Search.ClassicTokenizer"; + readonly name?: string; /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. + * Additional, verbose details about the error to assist in debugging the indexer. This may not be always available. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - name: string; + readonly details?: string; /** - * The maximum token length. Default is 255. Tokens longer than the maximum length are split. The - * maximum token length that can be used is 300 characters. Default value: 255. + * A link to a troubleshooting guide for these classes of errors. This may not be always available. + * NOTE: This property will not be serialized. It can only be populated by the server. 
*/ - maxTokenLength?: number; + readonly documentationLink?: string; } -/** - * Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is - * implemented using Apache Lucene. - */ -export interface EdgeNGramTokenizer { +/** Represents an item-level warning. */ +export interface SearchIndexerWarning { /** - * Polymorphic Discriminator + * The key of the item which generated a warning. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer"; + readonly key?: string; /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. + * The message describing the warning that occurred while processing the item. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - name: string; + readonly message: string; /** - * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of - * maxGram. Default value: 1. + * The name of the source at which the warning originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - minGram?: number; + readonly name?: string; /** - * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2. + * Additional, verbose details about the warning to assist in debugging the indexer. This may not be always available. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - maxGram?: number; + readonly details?: string; /** - * Character classes to keep in the tokens. + * A link to a troubleshooting guide for these classes of warnings. This may not be always available. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - tokenChars?: TokenCharacterKind[]; + readonly documentationLink?: string; } -/** - * Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. - */ -export interface KeywordTokenizer { +export interface SearchIndexerLimits { /** - * Polymorphic Discriminator + * The maximum duration that the indexer is permitted to run for one execution. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - odatatype: "#Microsoft.Azure.Search.KeywordTokenizer"; + readonly maxRunTime?: string; /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. + * The maximum size of a document, in bytes, which will be considered valid for indexing. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - name: string; + readonly maxDocumentExtractionSize?: number; /** - * The read buffer size in bytes. Default is 256. Default value: 256. + * The maximum number of characters that will be extracted from a document picked up for indexing. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - bufferSize?: number; + readonly maxDocumentContentCharactersToExtract?: number; } -/** - * Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. 
- */ -export interface KeywordTokenizerV2 { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.KeywordTokenizerV2"; - /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ +/** A list of skills. */ +export interface SearchIndexerSkillset { + /** The name of the skillset. */ name: string; - /** - * The maximum token length. Default is 256. Tokens longer than the maximum length are split. The - * maximum token length that can be used is 300 characters. Default value: 256. - */ - maxTokenLength?: number; + /** The description of the skillset. */ + description?: string; + /** A list of skills in the skillset. */ + skills: SearchIndexerSkillUnion[]; + /** Details about cognitive services to be used when running skills. */ + cognitiveServicesAccount?: CognitiveServicesAccountUnion; + /** The ETag of the skillset. */ + etag?: string; + /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can decrypt your skillset definition in Azure Cognitive Search. Once you have encrypted your skillset definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your skillset definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ + encryptionKey?: SearchResourceEncryptionKey | null; } -/** - * Divides text using language-specific rules. - */ -export interface MicrosoftLanguageTokenizer { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer"; - /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ +/** Base type for skills. */ +export interface SearchIndexerSkill { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: + | "#Microsoft.Skills.Util.ConditionalSkill" + | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" + | "#Microsoft.Skills.Vision.OcrSkill" + | "#Microsoft.Skills.Vision.ImageAnalysisSkill" + | "#Microsoft.Skills.Text.LanguageDetectionSkill" + | "#Microsoft.Skills.Util.ShaperSkill" + | "#Microsoft.Skills.Text.MergeSkill" + | "#Microsoft.Skills.Text.EntityRecognitionSkill" + | "#Microsoft.Skills.Text.SentimentSkill" + | "#Microsoft.Skills.Text.SplitSkill" + | "#Microsoft.Skills.Text.CustomEntityLookupSkill" + | "#Microsoft.Skills.Text.TranslationSkill" + | "#Microsoft.Skills.Custom.WebApiSkill"; + /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */ + name?: string; + /** The description of the skill which describes the inputs, outputs, and usage of the skill. */ + description?: string; + /** Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. 
*/ + context?: string; + /** Inputs of the skills could be a column in the source data set, or the output of an upstream skill. */ + inputs: InputFieldMappingEntry[]; + /** The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. */ + outputs: OutputFieldMappingEntry[]; +} + +/** Input field mapping for a skill. */ +export interface InputFieldMappingEntry { + /** The name of the input. */ name: string; - /** - * The maximum token length. Tokens longer than the maximum length are split. Maximum token - * length that can be used is 300 characters. Tokens longer than 300 characters are first split - * into tokens of length 300 and then each of those tokens is split based on the max token length - * set. Default is 255. Default value: 255. - */ - maxTokenLength?: number; - /** - * A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set - * to false if used as the indexing tokenizer. Default is false. Default value: false. - */ - isSearchTokenizer?: boolean; - /** - * The language to use. The default is English. Possible values include: 'Bangla', 'Bulgarian', - * 'Catalan', 'ChineseSimplified', 'ChineseTraditional', 'Croatian', 'Czech', 'Danish', 'Dutch', - * 'English', 'French', 'German', 'Greek', 'Gujarati', 'Hindi', 'Icelandic', 'Indonesian', - * 'Italian', 'Japanese', 'Kannada', 'Korean', 'Malay', 'Malayalam', 'Marathi', - * 'NorwegianBokmaal', 'Polish', 'Portuguese', 'PortugueseBrazilian', 'Punjabi', 'Romanian', - * 'Russian', 'SerbianCyrillic', 'SerbianLatin', 'Slovenian', 'Spanish', 'Swedish', 'Tamil', - * 'Telugu', 'Thai', 'Ukrainian', 'Urdu', 'Vietnamese' - */ - language?: MicrosoftTokenizerLanguage; + /** The source of the input. */ + source?: string; + /** The source context used for selecting recursive inputs. */ + sourceContext?: string; + /** The recursive inputs used when creating a complex type. */ + inputs?: InputFieldMappingEntry[]; } -/** - * Divides text using language-specific rules and reduces words to their base forms. - */ -export interface MicrosoftLanguageStemmingTokenizer { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer"; - /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ +/** Output field mapping for a skill. */ +export interface OutputFieldMappingEntry { + /** The name of the output defined by the skill. */ name: string; - /** - * The maximum token length. Tokens longer than the maximum length are split. Maximum token - * length that can be used is 300 characters. Tokens longer than 300 characters are first split - * into tokens of length 300 and then each of those tokens is split based on the max token length - * set. Default is 255. Default value: 255. - */ - maxTokenLength?: number; - /** - * A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set - * to false if used as the indexing tokenizer. Default is false. Default value: false. - */ - isSearchTokenizer?: boolean; - /** - * The language to use. The default is English. 
Possible values include: 'Arabic', 'Bangla', - * 'Bulgarian', 'Catalan', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'Estonian', - * 'Finnish', 'French', 'German', 'Greek', 'Gujarati', 'Hebrew', 'Hindi', 'Hungarian', - * 'Icelandic', 'Indonesian', 'Italian', 'Kannada', 'Latvian', 'Lithuanian', 'Malay', - * 'Malayalam', 'Marathi', 'NorwegianBokmaal', 'Polish', 'Portuguese', 'PortugueseBrazilian', - * 'Punjabi', 'Romanian', 'Russian', 'SerbianCyrillic', 'SerbianLatin', 'Slovak', 'Slovenian', - * 'Spanish', 'Swedish', 'Tamil', 'Telugu', 'Turkish', 'Ukrainian', 'Urdu' - */ - language?: MicrosoftStemmingTokenizerLanguage; + /** The target name of the output. It is optional and default to name. */ + targetName?: string; } -/** - * Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using - * Apache Lucene. - */ -export interface NGramTokenizer { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.NGramTokenizer"; +/** Base type for describing any cognitive service resource attached to a skillset. */ +export interface CognitiveServicesAccount { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: + | "#Microsoft.Azure.Search.DefaultCognitiveServices" + | "#Microsoft.Azure.Search.CognitiveServicesByKey"; + /** Description of the cognitive service resource attached to a skillset. */ + description?: string; +} + +/** Response from a list skillset request. If successful, it includes the full definitions of all skillsets. */ +export interface ListSkillsetsResult { /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. + * The skillsets defined in the Search service. + * NOTE: This property will not be serialized. It can only be populated by the server. */ + readonly skillsets: SearchIndexerSkillset[]; +} + +/** Represents a synonym map definition. */ +export interface SynonymMap { + /** The name of the synonym map. */ name: string; - /** - * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of - * maxGram. Default value: 1. - */ - minGram?: number; - /** - * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2. - */ - maxGram?: number; - /** - * Character classes to keep in the tokens. - */ - tokenChars?: TokenCharacterKind[]; + /** The format of the synonym map. Only the 'solr' format is currently supported. */ + format: "solr"; + /** A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. */ + synonyms: string; + /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ + encryptionKey?: SearchResourceEncryptionKey | null; + /** The ETag of the synonym map. */ + etag?: string; } -/** - * Tokenizer for path-like hierarchies. 
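// [Editor's sketch — not part of the generated patch] A skillset built only from the base
// SearchIndexerSkill shape shown above (odatatype discriminator, context, inputs, outputs) plus the
// SearchIndexerSkillset and CognitiveServicesAccount shapes. The import path, the input/output names,
// and the "/document/normalized_images/*" context path are illustrative assumptions; the discriminator
// strings come from this hunk.
import { SearchIndexerSkill, SearchIndexerSkillset } from "./models";

const ocrSkill: SearchIndexerSkill = {
  odatatype: "#Microsoft.Skills.Vision.OcrSkill",
  name: "#1",                                // a skill with no name defaults to "#" + its 1-based index
  context: "/document/normalized_images/*",  // level at which the skill operates
  inputs: [{ name: "image", source: "/document/normalized_images/*" }],
  outputs: [{ name: "text", targetName: "extractedText" }]
};

const skillset: SearchIndexerSkillset = {
  name: "my-skillset",
  description: "Extracts text from embedded images",
  skills: [ocrSkill],
  cognitiveServicesAccount: { odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices" }
};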
This tokenizer is implemented using Apache Lucene. - */ -export interface PathHierarchyTokenizerV2 { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2"; +/** Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. */ +export interface ListSynonymMapsResult { /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. + * The synonym maps in the Search service. + * NOTE: This property will not be serialized. It can only be populated by the server. */ + readonly synonymMaps: SynonymMap[]; +} + +/** Represents a search index definition, which describes the fields and search behavior of an index. */ +export interface SearchIndex { + /** The name of the index. */ name: string; - /** - * The delimiter character to use. Default is "/". Default value: '/'. - */ - delimiter?: string; - /** - * A value that, if set, replaces the delimiter character. Default is "/". Default value: '/'. - */ - replacement?: string; - /** - * The maximum token length. Default and maximum is 300. Default value: 300. - */ - maxTokenLength?: number; - /** - * A value indicating whether to generate tokens in reverse order. Default is false. Default - * value: false. - */ - reverseTokenOrder?: boolean; - /** - * The number of initial tokens to skip. Default is 0. Default value: 0. - */ - numberOfTokensToSkip?: number; + /** The fields of the index. */ + fields: SearchField[]; + /** The scoring profiles for the index. */ + scoringProfiles?: ScoringProfile[]; + /** The name of the scoring profile to use if none is specified in the query. If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. */ + defaultScoringProfile?: string; + /** Options to control Cross-Origin Resource Sharing (CORS) for the index. */ + corsOptions?: CorsOptions | null; + /** The suggesters for the index. */ + suggesters?: Suggester[]; + /** The analyzers for the index. */ + analyzers?: LexicalAnalyzerUnion[]; + /** The tokenizers for the index. */ + tokenizers?: LexicalTokenizerUnion[]; + /** The token filters for the index. */ + tokenFilters?: TokenFilterUnion[]; + /** The character filters for the index. */ + charFilters?: CharFilterUnion[]; + /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ + encryptionKey?: SearchResourceEncryptionKey | null; + /** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */ + similarity?: SimilarityUnion; + /** The ETag of the index. 
*/ + etag?: string; } -/** - * Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is - * implemented using Apache Lucene. - */ -export interface PatternTokenizer { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.PatternTokenizer"; - /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ +/** Represents a field in an index definition, which describes the name, data type, and search behavior of a field. */ +export interface SearchField { + /** The name of the field, which must be unique within the fields collection of the index or parent field. */ name: string; - /** - * A regular expression pattern to match token separators. Default is an expression that matches - * one or more non-word characters. Default value: '\W+'. - */ - pattern?: string; - /** - * Regular expression flags. - */ - flags?: string; - /** - * The zero-based ordinal of the matching group in the regular expression pattern to extract into - * tokens. Use -1 if you want to use the entire pattern to split the input into tokens, - * irrespective of matching groups. Default is -1. Default value: -1. - */ - group?: number; + /** The data type of the field. */ + type: SearchFieldDataType; + /** A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields. */ + key?: boolean; + /** A value indicating whether the field can be returned in a search result. You can disable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible to the end user. This property must be true for key fields, and it must be null for complex fields. This property can be changed on existing fields. Enabling this property does not cause any increase in index storage requirements. Default is true for simple fields and null for complex fields. */ + retrievable?: boolean; + /** A value indicating whether the field is full-text searchable. This means it will undergo analysis such as word-breaking during indexing. If you set a searchable field to a value like "sunny day", internally it will be split into the individual tokens "sunny" and "day". This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) are searchable by default. This property must be false for simple fields of other non-string data types, and it must be null for complex fields. Note: searchable fields consume extra space in your index since Azure Cognitive Search will store an additional tokenized version of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false. */ + searchable?: boolean; + /** A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. 
For example, if you set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields. */ + filterable?: boolean; + /** A value indicating whether to enable the field to be referenced in $orderby expressions. By default Azure Cognitive Search sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field can be sortable only if it is single-valued (it has a single value in the scope of the parent document). Simple collection fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex collections are also multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent field, or an ancestor field, that's the complex collection. Complex fields cannot be sortable and the sortable property must be null for such fields. The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. */ + sortable?: boolean; + /** A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. */ + facetable?: boolean; + /** The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */ + analyzer?: LexicalAnalyzerName | null; + /** The name of the analyzer used at search time for the field. This option can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. */ + searchAnalyzer?: LexicalAnalyzerName | null; + /** The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */ + indexAnalyzer?: LexicalAnalyzerName | null; + /** A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. Must be null or an empty collection for complex fields. */ + synonymMaps?: string[]; + /** A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. 
*/ + fields?: SearchField[]; } -/** - * Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using - * Apache Lucene. - */ -export interface LuceneStandardTokenizer { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.StandardTokenizer"; - /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ +/** Defines parameters for a search index that influence scoring in search queries. */ +export interface ScoringProfile { + /** The name of the scoring profile. */ name: string; - /** - * The maximum token length. Default is 255. Tokens longer than the maximum length are split. - * Default value: 255. - */ - maxTokenLength?: number; + /** Parameters that boost scoring based on text matches in certain index fields. */ + textWeights?: TextWeights | null; + /** The collection of functions that influence the scoring of documents. */ + functions?: ScoringFunctionUnion[]; + /** A value indicating how the results of individual scoring functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. */ + functionAggregation?: ScoringFunctionAggregation; } -/** - * Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using - * Apache Lucene. - */ -export interface LuceneStandardTokenizerV2 { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.StandardTokenizerV2"; - /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ +/** Defines weights on index fields for which matches should boost scoring in search queries. */ +export interface TextWeights { + /** The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. */ + weights: { [propertyName: string]: number }; +} + +/** Base type for functions that can modify document scores during ranking. */ +export interface ScoringFunction { + /** Polymorphic discriminator, which specifies the different types this object can be */ + type: "distance" | "freshness" | "magnitude" | "tag"; + /** The name of the field used as input to the scoring function. */ + fieldName: string; + /** A multiplier for the raw score. Must be a positive number not equal to 1.0. */ + boost: number; + /** A value indicating how boosting will be interpolated across document scores; defaults to "Linear". */ + interpolation?: ScoringFunctionInterpolation; +} + +/** Defines options to control Cross-Origin Resource Sharing (CORS) for an index. */ +export interface CorsOptions { + /** The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). */ + allowedOrigins: string[]; + /** The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. */ + maxAgeInSeconds?: number | null; +} + +/** Defines how the Suggest API should apply to a group of fields in the index. */ +export interface Suggester { + /** The name of the suggester. */ name: string; - /** - * The maximum token length. Default is 255. Tokens longer than the maximum length are split. 
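// [Editor's sketch — not part of the generated patch] A minimal index definition using the
// regenerated SearchIndex, SearchField, ScoringProfile, Suggester and CorsOptions shapes documented
// above. The import path is an assumption; "Edm.String" and "analyzingInfixMatching" come from the
// descriptions in this hunk.
import { SearchIndex } from "./models";

const index: SearchIndex = {
  name: "my-index",
  fields: [
    // exactly one top-level field must be the key, and it must be Edm.String
    { name: "id", type: "Edm.String", key: true, filterable: true },
    { name: "description", type: "Edm.String", searchable: true },
    { name: "category", type: "Edm.String", filterable: true, facetable: true, sortable: true }
  ],
  scoringProfiles: [
    { name: "boost-description", textWeights: { weights: { description: 2 } } }
  ],
  suggesters: [
    { name: "description-suggester", searchMode: "analyzingInfixMatching", sourceFields: ["description"] }
  ],
  corsOptions: { allowedOrigins: ["*"], maxAgeInSeconds: 300 }
};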
The - * maximum token length that can be used is 300 characters. Default value: 255. - */ - maxTokenLength?: number; + /** A value indicating the capabilities of the suggester. */ + searchMode: "analyzingInfixMatching"; + /** The list of field names to which the suggester applies. Each field must be searchable. */ + sourceFields: string[]; } -/** - * Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. - */ -export interface UaxUrlEmailTokenizer { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer"; - /** - * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ +/** Base type for analyzers. */ +export interface LexicalAnalyzer { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: + | "#Microsoft.Azure.Search.CustomAnalyzer" + | "#Microsoft.Azure.Search.PatternAnalyzer" + | "#Microsoft.Azure.Search.StandardAnalyzer" + | "#Microsoft.Azure.Search.StopAnalyzer"; + /** The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */ name: string; - /** - * The maximum token length. Default is 255. Tokens longer than the maximum length are split. The - * maximum token length that can be used is 300 characters. Default value: 255. - */ - maxTokenLength?: number; } -/** - * Contains the possible cases for TokenFilter. - */ -export type TokenFilterUnion = TokenFilter | AsciiFoldingTokenFilter | CjkBigramTokenFilter | CommonGramTokenFilter | DictionaryDecompounderTokenFilter | EdgeNGramTokenFilter | EdgeNGramTokenFilterV2 | ElisionTokenFilter | KeepTokenFilter | KeywordMarkerTokenFilter | LengthTokenFilter | LimitTokenFilter | NGramTokenFilter | NGramTokenFilterV2 | PatternCaptureTokenFilter | PatternReplaceTokenFilter | PhoneticTokenFilter | ShingleTokenFilter | SnowballTokenFilter | StemmerTokenFilter | StemmerOverrideTokenFilter | StopwordsTokenFilter | SynonymTokenFilter | TruncateTokenFilter | UniqueTokenFilter | WordDelimiterTokenFilter; +/** Base type for tokenizers. */ +export interface LexicalTokenizer { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: + | "#Microsoft.Azure.Search.ClassicTokenizer" + | "#Microsoft.Azure.Search.EdgeNGramTokenizer" + | "#Microsoft.Azure.Search.KeywordTokenizer" + | "#Microsoft.Azure.Search.KeywordTokenizerV2" + | "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer" + | "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer" + | "#Microsoft.Azure.Search.NGramTokenizer" + | "#Microsoft.Azure.Search.PathHierarchyTokenizerV2" + | "#Microsoft.Azure.Search.PatternTokenizer" + | "#Microsoft.Azure.Search.StandardTokenizer" + | "#Microsoft.Azure.Search.StandardTokenizerV2" + | "#Microsoft.Azure.Search.UaxUrlEmailTokenizer"; + /** The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */ + name: string; +} -/** - * Base type for token filters. - */ +/** Base type for token filters. */ export interface TokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "TokenFilter"; - /** - * The name of the token filter. 
It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: + | "#Microsoft.Azure.Search.AsciiFoldingTokenFilter" + | "#Microsoft.Azure.Search.CjkBigramTokenFilter" + | "#Microsoft.Azure.Search.CommonGramTokenFilter" + | "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter" + | "#Microsoft.Azure.Search.EdgeNGramTokenFilter" + | "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2" + | "#Microsoft.Azure.Search.ElisionTokenFilter" + | "#Microsoft.Azure.Search.KeepTokenFilter" + | "#Microsoft.Azure.Search.KeywordMarkerTokenFilter" + | "#Microsoft.Azure.Search.LengthTokenFilter" + | "#Microsoft.Azure.Search.LimitTokenFilter" + | "#Microsoft.Azure.Search.NGramTokenFilter" + | "#Microsoft.Azure.Search.NGramTokenFilterV2" + | "#Microsoft.Azure.Search.PatternCaptureTokenFilter" + | "#Microsoft.Azure.Search.PatternReplaceTokenFilter" + | "#Microsoft.Azure.Search.PhoneticTokenFilter" + | "#Microsoft.Azure.Search.ShingleTokenFilter" + | "#Microsoft.Azure.Search.SnowballTokenFilter" + | "#Microsoft.Azure.Search.StemmerTokenFilter" + | "#Microsoft.Azure.Search.StemmerOverrideTokenFilter" + | "#Microsoft.Azure.Search.StopwordsTokenFilter" + | "#Microsoft.Azure.Search.SynonymTokenFilter" + | "#Microsoft.Azure.Search.TruncateTokenFilter" + | "#Microsoft.Azure.Search.UniqueTokenFilter" + | "#Microsoft.Azure.Search.WordDelimiterTokenFilter"; + /** The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */ name: string; } -/** - * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 - * ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such - * equivalents exist. This token filter is implemented using Apache Lucene. - */ -export interface AsciiFoldingTokenFilter { +/** Base type for character filters. */ +export interface CharFilter { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: + | "#Microsoft.Azure.Search.MappingCharFilter" + | "#Microsoft.Azure.Search.PatternReplaceCharFilter"; + /** The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */ + name: string; +} + +/** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */ +export interface Similarity { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: + | "#Microsoft.Azure.Search.ClassicSimilarity" + | "#Microsoft.Azure.Search.BM25Similarity"; +} + +/** Response from a List Indexes request. If successful, it includes the full definitions of all indexes. */ +export interface ListIndexesResult { /** - * Polymorphic Discriminator + * The indexes in the Search service. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter"; + readonly indexes: SearchIndex[]; +} + +/** Statistics for a given index. 
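// Editorial sketch: with the new base types above, a concrete filter is just a name plus the
// matching odatatype discriminator. The filter name here is hypothetical.
const asciiFolding: TokenFilter = {
  odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter",
  name: "my_ascii_folding"
};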
Statistics are collected periodically and are not guaranteed to always be up-to-date. */ +export interface GetIndexStatisticsResult { /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. + * The number of documents in the index. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - name: string; + readonly documentCount: number; /** - * A value indicating whether the original token will be kept. Default is false. Default value: - * false. + * The amount of storage in bytes consumed by the index. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - preserveOriginal?: boolean; + readonly storageSize: number; } -/** - * Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is - * implemented using Apache Lucene. - */ -export interface CjkBigramTokenFilter { +/** Specifies some text and analysis components used to break that text into tokens. */ +export interface AnalyzeRequest { + /** The text to break into tokens. */ + text: string; + /** The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownAnalyzerNames is an enum containing known values. */ + analyzer?: string; + /** The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownTokenizerNames is an enum containing known values. */ + tokenizer?: string; + /** An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ + tokenFilters?: string[]; + /** An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ + charFilters?: string[]; +} + +/** The result of testing an analyzer on text. */ +export interface AnalyzeResult { + /** The list of tokens returned by the analyzer specified in the request. */ + tokens: AnalyzedTokenInfo[]; +} + +/** Information about a token returned by an analyzer. */ +export interface AnalyzedTokenInfo { /** - * Polymorphic Discriminator + * The token returned by the analyzer. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - odatatype: "#Microsoft.Azure.Search.CjkBigramTokenFilter"; + readonly token: string; /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. + * The index of the first character of the token in the input text. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - name: string; + readonly startOffset: number; /** - * The scripts to ignore. + * The index of the last character of the token in the input text. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - ignoreScripts?: CjkBigramTokenFilterScripts[]; + readonly endOffset: number; /** - * A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if - * false). Default is false. Default value: false. 
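// Editorial sketch: an AnalyzeRequest as defined above. Exactly one of analyzer or tokenizer may
// be set; "standard.lucene" is one of the known analyzer names.
const analyzeRequest: AnalyzeRequest = {
  text: "The quick brown fox",
  analyzer: "standard.lucene"
};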
+ * The position of the token in the input text relative to other tokens. The first token in the input text has position 0, the next has position 1, and so on. Depending on the analyzer used, some tokens might have the same position, for example if they are synonyms of each other. + * NOTE: This property will not be serialized. It can only be populated by the server. */ - outputUnigrams?: boolean; + readonly position: number; } -/** - * Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed - * too, with bigrams overlaid. This token filter is implemented using Apache Lucene. - */ -export interface CommonGramTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The set of common words. - */ - commonWords: string[]; - /** - * A value indicating whether common words matching will be case insensitive. Default is false. - * Default value: false. - */ - ignoreCase?: boolean; - /** - * A value that indicates whether the token filter is in query mode. When in query mode, the - * token filter generates bigrams and then removes common words and single terms followed by a - * common word. Default is false. Default value: false. - */ - useQueryMode?: boolean; -} - -/** - * Decomposes compound words found in many Germanic languages. This token filter is implemented - * using Apache Lucene. - */ -export interface DictionaryDecompounderTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The list of words to match against. - */ - wordList: string[]; - /** - * The minimum word size. Only words longer than this get processed. Default is 5. Maximum is - * 300. Default value: 5. - */ - minWordSize?: number; - /** - * The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum - * is 300. Default value: 2. - */ - minSubwordSize?: number; - /** - * The maximum subword size. Only subwords shorter than this are outputted. Default is 15. - * Maximum is 300. Default value: 15. - */ - maxSubwordSize?: number; - /** - * A value indicating whether to add only the longest matching subword to the output. Default is - * false. Default value: false. - */ - onlyLongestMatch?: boolean; -} - -/** - * Generates n-grams of the given size(s) starting from the front or the back of an input token. - * This token filter is implemented using Apache Lucene. - */ -export interface EdgeNGramTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The minimum n-gram length. Default is 1. Must be less than the value of maxGram. Default - * value: 1. - */ - minGram?: number; - /** - * The maximum n-gram length. Default is 2. Default value: 2. 
- */ - maxGram?: number; - /** - * Specifies which side of the input the n-gram should be generated from. Default is "front". - * Possible values include: 'Front', 'Back' - */ - side?: EdgeNGramTokenFilterSide; -} - -/** - * Generates n-grams of the given size(s) starting from the front or the back of an input token. - * This token filter is implemented using Apache Lucene. - */ -export interface EdgeNGramTokenFilterV2 { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of - * maxGram. Default value: 1. - */ - minGram?: number; - /** - * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2. - */ - maxGram?: number; - /** - * Specifies which side of the input the n-gram should be generated from. Default is "front". - * Possible values include: 'Front', 'Back' - */ - side?: EdgeNGramTokenFilterSide; -} - -/** - * Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This - * token filter is implemented using Apache Lucene. - */ -export interface ElisionTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The set of articles to remove. - */ - articles?: string[]; -} - -/** - * A token filter that only keeps tokens with text contained in a specified list of words. This - * token filter is implemented using Apache Lucene. - */ -export interface KeepTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.KeepTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The list of words to keep. - */ - keepWords: string[]; - /** - * A value indicating whether to lower case all words first. Default is false. Default value: - * false. - */ - lowerCaseKeepWords?: boolean; -} - -/** - * Marks terms as keywords. This token filter is implemented using Apache Lucene. - */ -export interface KeywordMarkerTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * A list of words to mark as keywords. - */ - keywords: string[]; - /** - * A value indicating whether to ignore case. If true, all words are converted to lower case - * first. Default is false. Default value: false. - */ - ignoreCase?: boolean; -} - -/** - * Removes words that are too long or too short. This token filter is implemented using Apache - * Lucene. 
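// Editorial sketch: a KeepTokenFilter using the shape shown in the removed lines above (the
// concrete filter types keep this shape in the public SDK models). Name and word list are
// hypothetical.
const keepFilter: KeepTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.KeepTokenFilter",
  name: "keep_city_names",
  keepWords: ["seattle", "portland"],
  lowerCaseKeepWords: true
};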
- */ -export interface LengthTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.LengthTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of - * max. Default value: 0. - */ - minLength?: number; - /** - * The maximum length in characters. Default and maximum is 300. Default value: 300. - */ - maxLength?: number; -} - -/** - * Limits the number of tokens while indexing. This token filter is implemented using Apache - * Lucene. - */ -export interface LimitTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.LimitTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The maximum number of tokens to produce. Default is 1. Default value: 1. - */ - maxTokenCount?: number; - /** - * A value indicating whether all tokens from the input must be consumed even if maxTokenCount is - * reached. Default is false. Default value: false. - */ - consumeAllTokens?: boolean; -} - -/** - * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. - */ -export interface NGramTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.NGramTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The minimum n-gram length. Default is 1. Must be less than the value of maxGram. Default - * value: 1. - */ - minGram?: number; - /** - * The maximum n-gram length. Default is 2. Default value: 2. - */ - maxGram?: number; -} - -/** - * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. - */ -export interface NGramTokenFilterV2 { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.NGramTokenFilterV2"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of - * maxGram. Default value: 1. - */ - minGram?: number; - /** - * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2. - */ - maxGram?: number; -} - -/** - * Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. - * This token filter is implemented using Apache Lucene. - */ -export interface PatternCaptureTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * A list of patterns to match against each token. 
- */ - patterns: string[]; - /** - * A value indicating whether to return the original token even if one of the patterns matches. - * Default is true. Default value: true. - */ - preserveOriginal?: boolean; -} - -/** - * A character filter that replaces characters in the input string. It uses a regular expression to - * identify character sequences to preserve and a replacement pattern to identify characters to - * replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement - * "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache - * Lucene. - */ -export interface PatternReplaceTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * A regular expression pattern. - */ - pattern: string; - /** - * The replacement text. - */ - replacement: string; -} - -/** - * Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. - */ -export interface PhoneticTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The phonetic encoder to use. Default is "metaphone". Possible values include: 'Metaphone', - * 'DoubleMetaphone', 'Soundex', 'RefinedSoundex', 'Caverphone1', 'Caverphone2', 'Cologne', - * 'Nysiis', 'KoelnerPhonetik', 'HaasePhonetik', 'BeiderMorse' - */ - encoder?: PhoneticEncoder; - /** - * A value indicating whether encoded tokens should replace original tokens. If false, encoded - * tokens are added as synonyms. Default is true. Default value: true. - */ - replaceOriginalTokens?: boolean; -} - -/** - * Creates combinations of tokens as a single token. This token filter is implemented using Apache - * Lucene. - */ -export interface ShingleTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The maximum shingle size. Default and minimum value is 2. Default value: 2. - */ - maxShingleSize?: number; - /** - * The minimum shingle size. Default and minimum value is 2. Must be less than the value of - * maxShingleSize. Default value: 2. - */ - minShingleSize?: number; - /** - * A value indicating whether the output stream will contain the input tokens (unigrams) as well - * as shingles. Default is true. Default value: true. - */ - outputUnigrams?: boolean; - /** - * A value indicating whether to output unigrams for those times when no shingles are available. - * This property takes precedence when outputUnigrams is set to false. Default is false. Default - * value: false. - */ - outputUnigramsIfNoShingles?: boolean; - /** - * The string to use when joining adjacent tokens to form a shingle. Default is a single space (" - * "). Default value: ''. 
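// Editorial sketch: a ShingleTokenFilter with the shape shown above. minShingleSize must not
// exceed maxShingleSize; both default to 2. The filter name is hypothetical.
const shingleFilter: ShingleTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter",
  name: "two_to_three_word_shingles",
  minShingleSize: 2,
  maxShingleSize: 3,
  outputUnigrams: true
};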
- */ - tokenSeparator?: string; - /** - * The string to insert for each position at which there is no token. Default is an underscore - * ("_"). Default value: '_'. - */ - filterToken?: string; -} - -/** - * A filter that stems words using a Snowball-generated stemmer. This token filter is implemented - * using Apache Lucene. - */ -export interface SnowballTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.SnowballTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The language to use. Possible values include: 'Armenian', 'Basque', 'Catalan', 'Danish', - * 'Dutch', 'English', 'Finnish', 'French', 'German', 'German2', 'Hungarian', 'Italian', 'Kp', - * 'Lovins', 'Norwegian', 'Porter', 'Portuguese', 'Romanian', 'Russian', 'Spanish', 'Swedish', - * 'Turkish' - */ - language: SnowballTokenFilterLanguage; -} - -/** - * Language specific stemming filter. This token filter is implemented using Apache Lucene. - */ -export interface StemmerTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.StemmerTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The language to use. Possible values include: 'Arabic', 'Armenian', 'Basque', 'Brazilian', - * 'Bulgarian', 'Catalan', 'Czech', 'Danish', 'Dutch', 'DutchKp', 'English', 'LightEnglish', - * 'MinimalEnglish', 'PossessiveEnglish', 'Porter2', 'Lovins', 'Finnish', 'LightFinnish', - * 'French', 'LightFrench', 'MinimalFrench', 'Galician', 'MinimalGalician', 'German', 'German2', - * 'LightGerman', 'MinimalGerman', 'Greek', 'Hindi', 'Hungarian', 'LightHungarian', 'Indonesian', - * 'Irish', 'Italian', 'LightItalian', 'Sorani', 'Latvian', 'Norwegian', 'LightNorwegian', - * 'MinimalNorwegian', 'LightNynorsk', 'MinimalNynorsk', 'Portuguese', 'LightPortuguese', - * 'MinimalPortuguese', 'PortugueseRslp', 'Romanian', 'Russian', 'LightRussian', 'Spanish', - * 'LightSpanish', 'Swedish', 'LightSwedish', 'Turkish' - */ - language: StemmerTokenFilterLanguage; -} - -/** - * Provides the ability to override other stemming filters with custom dictionary-based stemming. - * Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with - * stemmers down the chain. Must be placed before any stemming filters. This token filter is - * implemented using Apache Lucene. - */ -export interface StemmerOverrideTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * A list of stemming rules in the following format: "word => stem", for example: "ran => run". - */ - rules: string[]; -} - -/** - * Removes stop words from a token stream. This token filter is implemented using Apache Lucene. - */ -export interface StopwordsTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter"; - /** - * The name of the token filter. 
It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The list of stopwords. This property and the stopwords list property cannot both be set. - */ - stopwords?: string[]; - /** - * A predefined list of stopwords to use. This property and the stopwords property cannot both be - * set. Default is English. Possible values include: 'Arabic', 'Armenian', 'Basque', 'Brazilian', - * 'Bulgarian', 'Catalan', 'Czech', 'Danish', 'Dutch', 'English', 'Finnish', 'French', - * 'Galician', 'German', 'Greek', 'Hindi', 'Hungarian', 'Indonesian', 'Irish', 'Italian', - * 'Latvian', 'Norwegian', 'Persian', 'Portuguese', 'Romanian', 'Russian', 'Sorani', 'Spanish', - * 'Swedish', 'Thai', 'Turkish' - */ - stopwordsList?: StopwordsList; - /** - * A value indicating whether to ignore case. If true, all words are converted to lower case - * first. Default is false. Default value: false. - */ - ignoreCase?: boolean; - /** - * A value indicating whether to ignore the last search term if it's a stop word. Default is - * true. Default value: true. - */ - removeTrailingStopWords?: boolean; -} - -/** - * Matches single or multi-word synonyms in a token stream. This token filter is implemented using - * Apache Lucene. - */ -export interface SynonymTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * A list of synonyms in following one of two formats: 1. incredible, unbelievable, fabulous => - * amazing - all terms on the left side of => symbol will be replaced with all terms on its right - * side; 2. incredible, unbelievable, fabulous, amazing - comma separated list of equivalent - * words. Set the expand option to change how this list is interpreted. - */ - synonyms: string[]; - /** - * A value indicating whether to case-fold input for matching. Default is false. Default value: - * false. - */ - ignoreCase?: boolean; - /** - * A value indicating whether all words in the list of synonyms (if => notation is not used) will - * map to one another. If true, all words in the list of synonyms (if => notation is not used) - * will map to one another. The following list: incredible, unbelievable, fabulous, amazing is - * equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, - * fabulous, amazing. If false, the following list: incredible, unbelievable, fabulous, amazing - * will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is - * true. Default value: true. - */ - expand?: boolean; -} - -/** - * Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. - */ -export interface TruncateTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.TruncateTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * The length at which terms will be truncated. Default and maximum is 300. Default value: 300. 
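// Editorial sketch: a SynonymTokenFilter as described above, mixing the comma-separated
// equivalence format and the explicit "=>" mapping format. The synonym rules are only examples.
const synonymFilter: SynonymTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter",
  name: "us_synonyms",
  synonyms: ["USA, United States, United States of America", "Wash., Washington => WA"],
  ignoreCase: true,
  expand: true
};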
- */ - length?: number; -} - -/** - * Filters out tokens with same text as the previous token. This token filter is implemented using - * Apache Lucene. - */ -export interface UniqueTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.UniqueTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * A value indicating whether to remove duplicates only at the same position. Default is false. - * Default value: false. - */ - onlyOnSamePosition?: boolean; -} - -/** - * Splits words into subwords and performs optional transformations on subword groups. This token - * filter is implemented using Apache Lucene. - */ -export interface WordDelimiterTokenFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter"; - /** - * The name of the token filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * A value indicating whether to generate part words. If set, causes parts of words to be - * generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. Default value: - * true. - */ - generateWordParts?: boolean; - /** - * A value indicating whether to generate number subwords. Default is true. Default value: true. - */ - generateNumberParts?: boolean; - /** - * A value indicating whether maximum runs of word parts will be catenated. For example, if this - * is set to true, "Azure-Search" becomes "AzureSearch". Default is false. Default value: false. - */ - catenateWords?: boolean; - /** - * A value indicating whether maximum runs of number parts will be catenated. For example, if - * this is set to true, "1-2" becomes "12". Default is false. Default value: false. - */ - catenateNumbers?: boolean; - /** - * A value indicating whether all subword parts will be catenated. For example, if this is set to - * true, "Azure-Search-1" becomes "AzureSearch1". Default is false. Default value: false. - */ - catenateAll?: boolean; - /** - * A value indicating whether to split words on caseChange. For example, if this is set to true, - * "AzureSearch" becomes "Azure" "Search". Default is true. Default value: true. - */ - splitOnCaseChange?: boolean; - /** - * A value indicating whether original words will be preserved and added to the subword list. - * Default is false. Default value: false. - */ - preserveOriginal?: boolean; - /** - * A value indicating whether to split on numbers. For example, if this is set to true, - * "Azure1Search" becomes "Azure" "1" "Search". Default is true. Default value: true. - */ - splitOnNumerics?: boolean; - /** - * A value indicating whether to remove trailing "'s" for each subword. Default is true. Default - * value: true. - */ - stemEnglishPossessive?: boolean; - /** - * A list of tokens to protect from being delimited. - */ - protectedWords?: string[]; -} - -/** - * Contains the possible cases for CharFilter. - */ -export type CharFilterUnion = CharFilter | MappingCharFilter | PatternReplaceCharFilter; - -/** - * Base type for character filters. - */ -export interface CharFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "CharFilter"; - /** - * The name of the char filter. 
It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; -} - -/** - * A character filter that applies mappings defined with the mappings option. Matching is greedy - * (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. - * This character filter is implemented using Apache Lucene. - */ -export interface MappingCharFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.MappingCharFilter"; - /** - * The name of the char filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * A list of mappings of the following format: "a=>b" (all occurrences of the character "a" will - * be replaced with character "b"). - */ - mappings: string[]; -} - -/** - * A character filter that replaces characters in the input string. It uses a regular expression to - * identify character sequences to preserve and a replacement pattern to identify characters to - * replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement - * "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache - * Lucene. - */ -export interface PatternReplaceCharFilter { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter"; - /** - * The name of the char filter. It must only contain letters, digits, spaces, dashes or - * underscores, can only start and end with alphanumeric characters, and is limited to 128 - * characters. - */ - name: string; - /** - * A regular expression pattern. - */ - pattern: string; - /** - * The replacement text. - */ - replacement: string; -} - -/** - * Contains the possible cases for Similarity. - */ -export type SimilarityUnion = Similarity | ClassicSimilarity | BM25Similarity; - -/** - * Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie - * queries to documents. The higher the score, the more relevant the document is to that specific - * query. Those scores are used to rank the search results. - */ -export interface Similarity { - /** - * Polymorphic Discriminator - */ - odatatype: "Similarity"; -} - -/** - * Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This - * variation of TF-IDF introduces static document length normalization as well as coordinating - * factors that penalize documents that only partially match the searched queries. - */ -export interface ClassicSimilarity { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.ClassicSimilarity"; -} - -/** - * Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm - * that includes length normalization (controlled by the 'b' parameter) as well as term frequency - * saturation (controlled by the 'k1' parameter). - */ -export interface BM25Similarity { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.BM25Similarity"; - /** - * This property controls the scaling function between the term frequency of each matching terms - * and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A - * value of 0.0 means the score does not scale with an increase in term frequency. 
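// Editorial sketch: a MappingCharFilter as defined above. Each mapping uses the "a=>b" format and
// the replacement may be empty, so these rules strip dashes and parentheses from the input text.
const phoneCharFilter: MappingCharFilter = {
  odatatype: "#Microsoft.Azure.Search.MappingCharFilter",
  name: "strip_phone_punctuation",
  mappings: ["-=>", "(=>", ")=>"]
};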
- */ - k1?: number; - /** - * This property controls how the length of a document affects the relevance score. By default, a - * value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value - * of 1.0 means the score is fully normalized by the length of the document. - */ - b?: number; -} - -/** - * Represents credentials that can be used to connect to a datasource. - */ -export interface DataSourceCredentials { - /** - * The connection string for the datasource. - */ - connectionString?: string; -} - -/** - * Represents information about the entity (such as Azure SQL table or CosmosDB collection) that - * will be indexed. - */ -export interface SearchIndexerDataContainer { - /** - * The name of the table or view (for Azure SQL data source) or collection (for CosmosDB data - * source) that will be indexed. - */ - name: string; - /** - * A query that is applied to this data container. The syntax and meaning of this parameter is - * datasource-specific. Not supported by Azure SQL datasources. - */ - query?: string; -} - -/** - * Contains the possible cases for DataChangeDetectionPolicy. - */ -export type DataChangeDetectionPolicyUnion = DataChangeDetectionPolicy | HighWaterMarkChangeDetectionPolicy | SqlIntegratedChangeTrackingPolicy; - -/** - * Base type for data change detection policies. - */ -export interface DataChangeDetectionPolicy { - /** - * Polymorphic Discriminator - */ - odatatype: "DataChangeDetectionPolicy"; -} - -/** - * Defines a data change detection policy that captures changes based on the value of a high water - * mark column. - */ -export interface HighWaterMarkChangeDetectionPolicy { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy"; - /** - * The name of the high water mark column. - */ - highWaterMarkColumnName: string; -} - -/** - * Defines a data change detection policy that captures changes using the Integrated Change - * Tracking feature of Azure SQL Database. - */ -export interface SqlIntegratedChangeTrackingPolicy { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"; -} - -/** - * Contains the possible cases for DataDeletionDetectionPolicy. - */ -export type DataDeletionDetectionPolicyUnion = DataDeletionDetectionPolicy | SoftDeleteColumnDeletionDetectionPolicy; - -/** - * Base type for data deletion detection policies. - */ -export interface DataDeletionDetectionPolicy { - /** - * Polymorphic Discriminator - */ - odatatype: "DataDeletionDetectionPolicy"; -} - -/** - * Defines a data deletion detection policy that implements a soft-deletion strategy. It determines - * whether an item should be deleted based on the value of a designated 'soft delete' column. - */ -export interface SoftDeleteColumnDeletionDetectionPolicy { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"; - /** - * The name of the column to use for soft-deletion detection. - */ - softDeleteColumnName?: string; - /** - * The marker value that identifies an item as deleted. - */ - softDeleteMarkerValue?: string; -} - -/** - * Credentials of a registered application created for your search service, used for authenticated - * access to the encryption keys stored in Azure Key Vault. 
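// Editorial sketch: tuning BM25 ranking and declaring a soft-delete policy with the shapes shown
// above. The k1/b values mirror the documented defaults; column name and marker are hypothetical.
const bm25: BM25Similarity = {
  odatatype: "#Microsoft.Azure.Search.BM25Similarity",
  k1: 1.2,
  b: 0.75
};

const softDelete: SoftDeleteColumnDeletionDetectionPolicy = {
  odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
  softDeleteColumnName: "isDeleted",
  softDeleteMarkerValue: "true"
};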
- */ -export interface AzureActiveDirectoryApplicationCredentials { - /** - * An AAD Application ID that was granted the required access permissions to the Azure Key Vault - * that is to be used when encrypting your data at rest. The Application ID should not be - * confused with the Object ID for your AAD Application. - */ - applicationId: string; - /** - * The authentication key of the specified AAD application. - */ - applicationSecret?: string; -} - -/** - * A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be - * used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym - * maps. - */ -export interface SearchResourceEncryptionKey { - /** - * The name of your Azure Key Vault key to be used to encrypt your data at rest. - */ - keyName: string; - /** - * The version of your Azure Key Vault key to be used to encrypt your data at rest. - */ - keyVersion: string; - /** - * The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be - * used to encrypt your data at rest. An example URI might be - * https://my-keyvault-name.vault.azure.net. - */ - vaultUri: string; - /** - * Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not - * required if using managed identity instead. - */ - accessCredentials?: AzureActiveDirectoryApplicationCredentials; -} - -/** - * Represents a datasource definition, which can be used to configure an indexer. - */ -export interface SearchIndexerDataSource { - /** - * The name of the datasource. - */ - name: string; - /** - * The description of the datasource. - */ - description?: string; - /** - * The type of the datasource. Possible values include: 'AzureSql', 'CosmosDb', 'AzureBlob', - * 'AzureTable', 'MySql' - */ - type: SearchIndexerDataSourceType; - /** - * Credentials for the datasource. - */ - credentials: DataSourceCredentials; - /** - * The data container for the datasource. - */ - container: SearchIndexerDataContainer; - /** - * The data change detection policy for the datasource. - */ - dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion; - /** - * The data deletion detection policy for the datasource. - */ - dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion; - /** - * The ETag of the data source. - */ - etag?: string; - /** - * A description of an encryption key that you create in Azure Key Vault. This key is used to - * provide an additional level of encryption-at-rest for your datasource definition when you want - * full assurance that no one, not even Microsoft, can decrypt your data source definition in - * Azure Cognitive Search. Once you have encrypted your data source definition, it will always - * remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. - * You can change this property as needed if you want to rotate your encryption key; Your - * datasource definition will be unaffected. Encryption with customer-managed keys is not - * available for free search services, and is only available for paid services created on or - * after January 1, 2019. - */ - encryptionKey?: SearchResourceEncryptionKey; -} - -/** - * Response from a List Datasources request. If successful, it includes the full definitions of all - * datasources. - */ -export interface ListDataSourcesResult { - /** - * The datasources in the Search service. - * **NOTE: This property will not be serialized. 
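// Editorial sketch: a SearchIndexerDataSource as defined above. All names are hypothetical, and
// "azuresql" is assumed here to be the wire value behind the 'AzureSql' data source type.
const sqlDataSource: SearchIndexerDataSource = {
  name: "hotels-sql-ds",
  type: "azuresql",
  credentials: { connectionString: "<connection string elided>" },
  container: { name: "Hotels" },
  dataChangeDetectionPolicy: {
    odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
    highWaterMarkColumnName: "lastModifiedUtc"
  }
};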
It can only be populated by the server.** - */ - readonly dataSources: SearchIndexerDataSource[]; -} - -/** - * Represents a schedule for indexer execution. - */ -export interface IndexingSchedule { - /** - * The interval of time between indexer executions. - */ - interval: string; - /** - * The time when an indexer should start running. - */ - startTime?: Date; -} - -/** - * A dictionary of indexer-specific configuration properties. Each name is the name of a specific - * property. Each value must be of a primitive type. - */ -export interface IndexingParametersConfiguration { - /** - * Possible values include: 'Default', 'Text', 'DelimitedText', 'Json', 'JsonArray', 'JsonLines'. - * Default value: 'default'. - */ - parsingMode?: BlobIndexerParsingMode; - /** - * Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. - * For example, you could exclude ".png, .mp4" to skip over those files during indexing. Default - * value: ''. - */ - excludedFileNameExtensions?: string; - /** - * Comma-delimited list of filename extensions to select when processing from Azure blob storage. - * For example, you could focus indexing on specific application files ".docx, .pptx, .msg" to - * specifically include those file types. Default value: ''. - */ - indexedFileNameExtensions?: string; - /** - * For Azure blobs, set to false if you want to continue indexing when an unsupported content - * type is encountered, and you don't know all the content types (file extensions) in advance. - * Default value: false. - */ - failOnUnsupportedContentType?: boolean; - /** - * For Azure blobs, set to false if you want to continue indexing if a document fails indexing. - * Default value: false. - */ - failOnUnprocessableDocument?: boolean; - /** - * For Azure blobs, set this property to true to still index storage metadata for blob content - * that is too large to process. Oversized blobs are treated as errors by default. For limits on - * blob size, see https://docs.microsoft.com/azure/search/search-limits-quotas-capacity. Default - * value: false. - */ - indexStorageMetadataOnlyForOversizedDocuments?: boolean; - /** - * For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source - * fields to destination fields in an index. - */ - delimitedTextHeaders?: string; - /** - * For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each - * line starts a new document (for example, "|"). - */ - delimitedTextDelimiter?: string; - /** - * For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. - * Default value: true. - */ - firstLineContainsHeaders?: boolean; - /** - * For JSON arrays, given a structured or semi-structured document, you can specify a path to the - * array using this property. - */ - documentRoot?: string; - /** - * Possible values include: 'StorageMetadata', 'AllMetadata', 'ContentAndMetadata'. Default - * value: 'contentAndMetadata'. - */ - dataToExtract?: BlobIndexerDataToExtract; - /** - * Possible values include: 'None', 'GenerateNormalizedImages', 'GenerateNormalizedImagePerPage'. - * Default value: 'none'. - */ - imageAction?: BlobIndexerImageAction; - /** - * If true, will create a path //document//file_data that is an object representing the original - * file data downloaded from your blob data source. This allows you to pass the original file - * data to a custom skill for processing within the enrichment pipeline, or to the Document - * Extraction skill. 
Default value: false. - */ - allowSkillsetToReadFileData?: boolean; - /** - * Possible values include: 'None', 'DetectAngles'. Default value: 'none'. - */ - pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm; - /** - * Possible values include: 'standard', 'private'. Default value: 'standard'. - */ - executionEnvironment?: IndexerExecutionEnvironment; - /** - * Increases the timeout beyond the 5-minute default for Azure SQL database data sources, - * specified in the format "hh:mm:ss". Default value: '00:05:00'. - */ - queryTimeout?: string; - /** - * Describes unknown properties. The value of an unknown property can be of "any" type. - */ - [property: string]: any; -} - -/** - * Represents parameters for indexer execution. - */ -export interface IndexingParameters { - /** - * The number of items that are read from the data source and indexed as a single batch in order - * to improve performance. The default depends on the data source type. - */ - batchSize?: number; - /** - * The maximum number of items that can fail indexing for indexer execution to still be - * considered successful. -1 means no limit. Default is 0. Default value: 0. - */ - maxFailedItems?: number; - /** - * The maximum number of items in a single batch that can fail indexing for the batch to still be - * considered successful. -1 means no limit. Default is 0. Default value: 0. - */ - maxFailedItemsPerBatch?: number; - configuration?: IndexingParametersConfiguration; -} - -/** - * Represents a function that transforms a value from a data source before indexing. - */ -export interface FieldMappingFunction { - /** - * The name of the field mapping function. - */ - name: string; - /** - * A dictionary of parameter name/value pairs to pass to the function. Each value must be of a - * primitive type. - */ - parameters?: { [propertyName: string]: any }; -} - -/** - * Defines a mapping between a field in a data source and a target field in an index. - */ -export interface FieldMapping { - /** - * The name of the field in the data source. - */ - sourceFieldName: string; - /** - * The name of the target field in the index. Same as the source field name by default. - */ - targetFieldName?: string; - /** - * A function to apply to each source field value before indexing. - */ - mappingFunction?: FieldMappingFunction; -} - -/** - * Represents an indexer. - */ -export interface SearchIndexer { - /** - * The name of the indexer. - */ - name: string; - /** - * The description of the indexer. - */ - description?: string; - /** - * The name of the datasource from which this indexer reads data. - */ - dataSourceName: string; - /** - * The name of the skillset executing with this indexer. - */ - skillsetName?: string; - /** - * The name of the index to which this indexer writes data. - */ - targetIndexName: string; - /** - * The schedule for this indexer. - */ - schedule?: IndexingSchedule; - /** - * Parameters for indexer execution. - */ - parameters?: IndexingParameters; - /** - * Defines mappings between fields in the data source and corresponding target fields in the - * index. - */ - fieldMappings?: FieldMapping[]; - /** - * Output field mappings are applied after enrichment and immediately before indexing. - */ - outputFieldMappings?: FieldMapping[]; - /** - * A value indicating whether the indexer is disabled. Default is false. Default value: false. - */ - isDisabled?: boolean; - /** - * The ETag of the indexer. - */ - etag?: string; - /** - * A description of an encryption key that you create in Azure Key Vault. 
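// Editorial sketch: indexer execution parameters and a field mapping using the shapes above.
// The blob extension lists and field names are hypothetical; "base64Encode" is one of the
// documented field mapping functions.
const parameters: IndexingParameters = {
  batchSize: 100,
  maxFailedItems: 0,
  configuration: {
    excludedFileNameExtensions: ".png,.jpeg",
    indexedFileNameExtensions: ".pdf,.docx",
    failOnUnsupportedContentType: false
  }
};

const keyMapping: FieldMapping = {
  sourceFieldName: "metadata_storage_path",
  targetFieldName: "id",
  // Blob paths contain characters that are not valid in document keys, so they are commonly
  // base64-encoded before being used as the key.
  mappingFunction: { name: "base64Encode" }
};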
This key is used to - * provide an additional level of encryption-at-rest for your indexer definition (as well as - * indexer execution status) when you want full assurance that no one, not even Microsoft, can - * decrypt them in Azure Cognitive Search. Once you have encrypted your indexer definition, it - * will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property - * to null. You can change this property as needed if you want to rotate your encryption key; - * Your indexer definition (and indexer execution status) will be unaffected. Encryption with - * customer-managed keys is not available for free search services, and is only available for - * paid services created on or after January 1, 2019. - */ - encryptionKey?: SearchResourceEncryptionKey; -} - -/** - * Response from a List Indexers request. If successful, it includes the full definitions of all - * indexers. - */ -export interface ListIndexersResult { - /** - * The indexers in the Search service. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly indexers: SearchIndexer[]; -} - -/** - * Represents an item- or document-level indexing error. - */ -export interface SearchIndexerError { - /** - * The key of the item for which indexing failed. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly key?: string; - /** - * The message describing the error that occurred while processing the item. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly errorMessage: string; - /** - * The status code indicating why the indexing operation failed. Possible values include: 400 for - * a malformed input document, 404 for document not found, 409 for a version conflict, 422 when - * the index is temporarily unavailable, or 503 for when the service is too busy. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly statusCode: number; - /** - * The name of the source at which the error originated. For example, this could refer to a - * particular skill in the attached skillset. This may not be always available. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly name?: string; - /** - * Additional, verbose details about the error to assist in debugging the indexer. This may not - * be always available. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly details?: string; - /** - * A link to a troubleshooting guide for these classes of errors. This may not be always - * available. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly documentationLink?: string; -} - -/** - * Represents an item-level warning. - */ -export interface SearchIndexerWarning { - /** - * The key of the item which generated a warning. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly key?: string; - /** - * The message describing the warning that occurred while processing the item. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly message: string; - /** - * The name of the source at which the warning originated. For example, this could refer to a - * particular skill in the attached skillset. This may not be always available. 
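// Editorial sketch: a minimal SearchIndexer tying the pieces above together. All names are
// hypothetical; the schedule interval is an ISO-8601 duration (run every two hours).
const indexer: SearchIndexer = {
  name: "hotels-indexer",
  dataSourceName: "hotels-sql-ds",
  targetIndexName: "hotels",
  schedule: { interval: "PT2H" },
  parameters: { maxFailedItems: 0 },
  isDisabled: false
};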
- * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly name?: string; - /** - * Additional, verbose details about the warning to assist in debugging the indexer. This may not - * be always available. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly details?: string; - /** - * A link to a troubleshooting guide for these classes of warnings. This may not be always - * available. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly documentationLink?: string; -} - -/** - * Represents the result of an individual indexer execution. - */ -export interface IndexerExecutionResult { - /** - * The outcome of this indexer execution. Possible values include: 'TransientFailure', 'Success', - * 'InProgress', 'Reset' - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly status: IndexerExecutionStatus; - /** - * The error message indicating the top-level error, if any. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly errorMessage?: string; - /** - * The start time of this indexer execution. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly startTime?: Date; - /** - * The end time of this indexer execution, if the execution has already completed. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly endTime?: Date; - /** - * The item-level indexing errors. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly errors: SearchIndexerError[]; - /** - * The item-level indexing warnings. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly warnings: SearchIndexerWarning[]; - /** - * The number of items that were processed during this indexer execution. This includes both - * successfully processed items and items where indexing was attempted but failed. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly itemCount: number; - /** - * The number of items that failed to be indexed during this indexer execution. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly failedItemCount: number; - /** - * Change tracking state with which an indexer execution started. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly initialTrackingState?: string; - /** - * Change tracking state with which an indexer execution finished. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly finalTrackingState?: string; -} - -/** - * An interface representing SearchIndexerLimits. - */ -export interface SearchIndexerLimits { - /** - * The maximum duration that the indexer is permitted to run for one execution. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly maxRunTime?: string; - /** - * The maximum size of a document, in bytes, which will be considered valid for indexing. - * **NOTE: This property will not be serialized. 
It can only be populated by the server.** - */ - readonly maxDocumentExtractionSize?: number; - /** - * The maximum number of characters that will be extracted from a document picked up for - * indexing. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly maxDocumentContentCharactersToExtract?: number; -} - -/** - * Represents the current status and execution history of an indexer. - */ -export interface SearchIndexerStatus { - /** - * Overall indexer status. Possible values include: 'Unknown', 'Error', 'Running' - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly status: IndexerStatus; - /** - * The result of the most recent or an in-progress indexer execution. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly lastResult?: IndexerExecutionResult; - /** - * History of the recent indexer executions, sorted in reverse chronological order. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly executionHistory: IndexerExecutionResult[]; - /** - * The execution limits for the indexer. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly limits: SearchIndexerLimits; -} - -/** - * Represents a field in an index definition, which describes the name, data type, and search - * behavior of a field. - */ -export interface SearchField { - /** - * The name of the field, which must be unique within the fields collection of the index or - * parent field. - */ - name: string; - /** - * The data type of the field. Possible values include: 'String', 'Int32', 'Int64', 'Double', - * 'Boolean', 'DateTimeOffset', 'GeographyPoint', 'Complex', 'Collection(Edm.String)', - * 'Collection(Edm.Int32)', 'Collection(Edm.Int64)', 'Collection(Edm.Double)', - * 'Collection(Edm.Boolean)', 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)', - * 'Collection(Edm.ComplexType)' - */ - type: SearchFieldDataType; - /** - * A value indicating whether the field uniquely identifies documents in the index. Exactly one - * top-level field in each index must be chosen as the key field and it must be of type - * Edm.String. Key fields can be used to look up documents directly and update or delete specific - * documents. Default is false for simple fields and null for complex fields. - */ - key?: boolean; - /** - * A value indicating whether the field can be returned in a search result. You can disable this - * option if you want to use a field (for example, margin) as a filter, sorting, or scoring - * mechanism but do not want the field to be visible to the end user. This property must be true - * for key fields, and it must be null for complex fields. This property can be changed on - * existing fields. Enabling this property does not cause any increase in index storage - * requirements. Default is true for simple fields and null for complex fields. - */ - retrievable?: boolean; - /** - * A value indicating whether the field is full-text searchable. This means it will undergo - * analysis such as word-breaking during indexing. If you set a searchable field to a value like - * "sunny day", internally it will be split into the individual tokens "sunny" and "day". This - * enables full-text searches for these terms. Fields of type Edm.String or - * Collection(Edm.String) are searchable by default. 
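// Editorial sketch: reading the server-populated status shapes above, for example after fetching
// a SearchIndexerStatus from the service.
function summarizeIndexerStatus(status: SearchIndexerStatus): void {
  const lastRun = status.lastResult;
  if (!lastRun) {
    console.log(`Indexer is ${status.status}; no executions recorded yet.`);
    return;
  }
  console.log(`Last run ${lastRun.status}: ${lastRun.failedItemCount} of ${lastRun.itemCount} items failed.`);
  for (const error of lastRun.errors) {
    console.log(`  error (${error.statusCode}) on key ${error.key ?? "<unknown>"}: ${error.errorMessage}`);
  }
  for (const warning of lastRun.warnings) {
    console.log(`  warning: ${warning.message}`);
  }
}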
This property must be false for simple - * fields of other non-string data types, and it must be null for complex fields. Note: - * searchable fields consume extra space in your index since Azure Cognitive Search will store an - * additional tokenized version of the field value for full-text searches. If you want to save - * space in your index and you don't need a field to be included in searches, set searchable to - * false. - */ - searchable?: boolean; - /** - * A value indicating whether to enable the field to be referenced in $filter queries. filterable - * differs from searchable in how strings are handled. Fields of type Edm.String or - * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are - * for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq - * 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null - * for complex fields. Default is true for simple fields and null for complex fields. - */ - filterable?: boolean; - /** - * A value indicating whether to enable the field to be referenced in $orderby expressions. By - * default Azure Cognitive Search sorts results by score, but in many experiences users will want - * to sort by fields in the documents. A simple field can be sortable only if it is single-valued - * (it has a single value in the scope of the parent document). Simple collection fields cannot - * be sortable, since they are multi-valued. Simple sub-fields of complex collections are also - * multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent - * field, or an ancestor field, that's the complex collection. Complex fields cannot be sortable - * and the sortable property must be null for such fields. The default for sortable is true for - * single-valued simple fields, false for multi-valued simple fields, and null for complex - * fields. - */ - sortable?: boolean; - /** - * A value indicating whether to enable the field to be referenced in facet queries. Typically - * used in a presentation of search results that includes hit count by category (for example, - * search for digital cameras and see hits by brand, by megapixels, by price, and so on). This - * property must be null for complex fields. Fields of type Edm.GeographyPoint or - * Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple - * fields. - */ - facetable?: boolean; - /** - * The name of the analyzer to use for the field. This option can be used only with searchable - * fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the - * analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. 
- * Possible values include: 'ArMicrosoft', 'ArLucene', 'HyLucene', 'BnMicrosoft', 'EuLucene', - * 'BgMicrosoft', 'BgLucene', 'CaMicrosoft', 'CaLucene', 'ZhHansMicrosoft', 'ZhHansLucene', - * 'ZhHantMicrosoft', 'ZhHantLucene', 'HrMicrosoft', 'CsMicrosoft', 'CsLucene', 'DaMicrosoft', - * 'DaLucene', 'NlMicrosoft', 'NlLucene', 'EnMicrosoft', 'EnLucene', 'EtMicrosoft', - * 'FiMicrosoft', 'FiLucene', 'FrMicrosoft', 'FrLucene', 'GlLucene', 'DeMicrosoft', 'DeLucene', - * 'ElMicrosoft', 'ElLucene', 'GuMicrosoft', 'HeMicrosoft', 'HiMicrosoft', 'HiLucene', - * 'HuMicrosoft', 'HuLucene', 'IsMicrosoft', 'IdMicrosoft', 'IdLucene', 'GaLucene', - * 'ItMicrosoft', 'ItLucene', 'JaMicrosoft', 'JaLucene', 'KnMicrosoft', 'KoMicrosoft', - * 'KoLucene', 'LvMicrosoft', 'LvLucene', 'LtMicrosoft', 'MlMicrosoft', 'MsMicrosoft', - * 'MrMicrosoft', 'NbMicrosoft', 'NoLucene', 'FaLucene', 'PlMicrosoft', 'PlLucene', - * 'PtBrMicrosoft', 'PtBrLucene', 'PtPtMicrosoft', 'PtPtLucene', 'PaMicrosoft', 'RoMicrosoft', - * 'RoLucene', 'RuMicrosoft', 'RuLucene', 'SrCyrillicMicrosoft', 'SrLatinMicrosoft', - * 'SkMicrosoft', 'SlMicrosoft', 'EsMicrosoft', 'EsLucene', 'SvMicrosoft', 'SvLucene', - * 'TaMicrosoft', 'TeMicrosoft', 'ThMicrosoft', 'ThLucene', 'TrMicrosoft', 'TrLucene', - * 'UkMicrosoft', 'UrMicrosoft', 'ViMicrosoft', 'StandardLucene', 'StandardAsciiFoldingLucene', - * 'Keyword', 'Pattern', 'Simple', 'Stop', 'Whitespace' - */ - analyzer?: LexicalAnalyzerName; - /** - * The name of the analyzer used at search time for the field. This option can be used only with - * searchable fields. It must be set together with indexAnalyzer and it cannot be set together - * with the analyzer option. This property cannot be set to the name of a language analyzer; use - * the analyzer property instead if you need a language analyzer. This analyzer can be updated on - * an existing field. Must be null for complex fields. Possible values include: 'ArMicrosoft', - * 'ArLucene', 'HyLucene', 'BnMicrosoft', 'EuLucene', 'BgMicrosoft', 'BgLucene', 'CaMicrosoft', - * 'CaLucene', 'ZhHansMicrosoft', 'ZhHansLucene', 'ZhHantMicrosoft', 'ZhHantLucene', - * 'HrMicrosoft', 'CsMicrosoft', 'CsLucene', 'DaMicrosoft', 'DaLucene', 'NlMicrosoft', - * 'NlLucene', 'EnMicrosoft', 'EnLucene', 'EtMicrosoft', 'FiMicrosoft', 'FiLucene', - * 'FrMicrosoft', 'FrLucene', 'GlLucene', 'DeMicrosoft', 'DeLucene', 'ElMicrosoft', 'ElLucene', - * 'GuMicrosoft', 'HeMicrosoft', 'HiMicrosoft', 'HiLucene', 'HuMicrosoft', 'HuLucene', - * 'IsMicrosoft', 'IdMicrosoft', 'IdLucene', 'GaLucene', 'ItMicrosoft', 'ItLucene', - * 'JaMicrosoft', 'JaLucene', 'KnMicrosoft', 'KoMicrosoft', 'KoLucene', 'LvMicrosoft', - * 'LvLucene', 'LtMicrosoft', 'MlMicrosoft', 'MsMicrosoft', 'MrMicrosoft', 'NbMicrosoft', - * 'NoLucene', 'FaLucene', 'PlMicrosoft', 'PlLucene', 'PtBrMicrosoft', 'PtBrLucene', - * 'PtPtMicrosoft', 'PtPtLucene', 'PaMicrosoft', 'RoMicrosoft', 'RoLucene', 'RuMicrosoft', - * 'RuLucene', 'SrCyrillicMicrosoft', 'SrLatinMicrosoft', 'SkMicrosoft', 'SlMicrosoft', - * 'EsMicrosoft', 'EsLucene', 'SvMicrosoft', 'SvLucene', 'TaMicrosoft', 'TeMicrosoft', - * 'ThMicrosoft', 'ThLucene', 'TrMicrosoft', 'TrLucene', 'UkMicrosoft', 'UrMicrosoft', - * 'ViMicrosoft', 'StandardLucene', 'StandardAsciiFoldingLucene', 'Keyword', 'Pattern', 'Simple', - * 'Stop', 'Whitespace' - */ - searchAnalyzer?: LexicalAnalyzerName; - /** - * The name of the analyzer used at indexing time for the field. This option can be used only - * with searchable fields. 
It must be set together with searchAnalyzer and it cannot be set - * together with the analyzer option. This property cannot be set to the name of a language - * analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer - * is chosen, it cannot be changed for the field. Must be null for complex fields. Possible - * values include: 'ArMicrosoft', 'ArLucene', 'HyLucene', 'BnMicrosoft', 'EuLucene', - * 'BgMicrosoft', 'BgLucene', 'CaMicrosoft', 'CaLucene', 'ZhHansMicrosoft', 'ZhHansLucene', - * 'ZhHantMicrosoft', 'ZhHantLucene', 'HrMicrosoft', 'CsMicrosoft', 'CsLucene', 'DaMicrosoft', - * 'DaLucene', 'NlMicrosoft', 'NlLucene', 'EnMicrosoft', 'EnLucene', 'EtMicrosoft', - * 'FiMicrosoft', 'FiLucene', 'FrMicrosoft', 'FrLucene', 'GlLucene', 'DeMicrosoft', 'DeLucene', - * 'ElMicrosoft', 'ElLucene', 'GuMicrosoft', 'HeMicrosoft', 'HiMicrosoft', 'HiLucene', - * 'HuMicrosoft', 'HuLucene', 'IsMicrosoft', 'IdMicrosoft', 'IdLucene', 'GaLucene', - * 'ItMicrosoft', 'ItLucene', 'JaMicrosoft', 'JaLucene', 'KnMicrosoft', 'KoMicrosoft', - * 'KoLucene', 'LvMicrosoft', 'LvLucene', 'LtMicrosoft', 'MlMicrosoft', 'MsMicrosoft', - * 'MrMicrosoft', 'NbMicrosoft', 'NoLucene', 'FaLucene', 'PlMicrosoft', 'PlLucene', - * 'PtBrMicrosoft', 'PtBrLucene', 'PtPtMicrosoft', 'PtPtLucene', 'PaMicrosoft', 'RoMicrosoft', - * 'RoLucene', 'RuMicrosoft', 'RuLucene', 'SrCyrillicMicrosoft', 'SrLatinMicrosoft', - * 'SkMicrosoft', 'SlMicrosoft', 'EsMicrosoft', 'EsLucene', 'SvMicrosoft', 'SvLucene', - * 'TaMicrosoft', 'TeMicrosoft', 'ThMicrosoft', 'ThLucene', 'TrMicrosoft', 'TrLucene', - * 'UkMicrosoft', 'UrMicrosoft', 'ViMicrosoft', 'StandardLucene', 'StandardAsciiFoldingLucene', - * 'Keyword', 'Pattern', 'Simple', 'Stop', 'Whitespace' - */ - indexAnalyzer?: LexicalAnalyzerName; - /** - * A list of the names of synonym maps to associate with this field. This option can be used only - * with searchable fields. Currently only one synonym map per field is supported. Assigning a - * synonym map to a field ensures that query terms targeting that field are expanded at - * query-time using the rules in the synonym map. This attribute can be changed on existing - * fields. Must be null or an empty collection for complex fields. - */ - synonymMaps?: string[]; - /** - * A list of sub-fields if this is a field of type Edm.ComplexType or - * Collection(Edm.ComplexType). Must be null or empty for simple fields. - */ - fields?: SearchField[]; -} - -/** - * Defines weights on index fields for which matches should boost scoring in search queries. - */ -export interface TextWeights { - /** - * The dictionary of per-field weights to boost document scoring. The keys are field names and - * the values are the weights for each field. - */ - weights: { [propertyName: string]: number }; -} - -/** - * Contains the possible cases for ScoringFunction. - */ -export type ScoringFunctionUnion = ScoringFunction | DistanceScoringFunction | FreshnessScoringFunction | MagnitudeScoringFunction | TagScoringFunction; - -/** - * Base type for functions that can modify document scores during ranking. - */ -export interface ScoringFunction { - /** - * Polymorphic Discriminator - */ - type: "ScoringFunction"; - /** - * The name of the field used as input to the scoring function. - */ - fieldName: string; - /** - * A multiplier for the raw score. Must be a positive number not equal to 1.0. - */ - boost: number; - /** - * A value indicating how boosting will be interpolated across document scores; defaults to - * "Linear". 
Possible values include: 'Linear', 'Constant', 'Quadratic', 'Logarithmic' - */ - interpolation?: ScoringFunctionInterpolation; -} - -/** - * Provides parameter values to a distance scoring function. - */ -export interface DistanceScoringParameters { - /** - * The name of the parameter passed in search queries to specify the reference location. - */ - referencePointParameter: string; - /** - * The distance in kilometers from the reference location where the boosting range ends. - */ - boostingDistance: number; -} - -/** - * Defines a function that boosts scores based on distance from a geographic location. - */ -export interface DistanceScoringFunction { - /** - * Polymorphic Discriminator - */ - type: "distance"; - /** - * The name of the field used as input to the scoring function. - */ - fieldName: string; - /** - * A multiplier for the raw score. Must be a positive number not equal to 1.0. - */ - boost: number; - /** - * A value indicating how boosting will be interpolated across document scores; defaults to - * "Linear". Possible values include: 'Linear', 'Constant', 'Quadratic', 'Logarithmic' - */ - interpolation?: ScoringFunctionInterpolation; - /** - * Parameter values for the distance scoring function. - */ - parameters: DistanceScoringParameters; -} - -/** - * Provides parameter values to a freshness scoring function. - */ -export interface FreshnessScoringParameters { - /** - * The expiration period after which boosting will stop for a particular document. - */ - boostingDuration: string; -} - -/** - * Defines a function that boosts scores based on the value of a date-time field. - */ -export interface FreshnessScoringFunction { - /** - * Polymorphic Discriminator - */ - type: "freshness"; - /** - * The name of the field used as input to the scoring function. - */ - fieldName: string; - /** - * A multiplier for the raw score. Must be a positive number not equal to 1.0. - */ - boost: number; - /** - * A value indicating how boosting will be interpolated across document scores; defaults to - * "Linear". Possible values include: 'Linear', 'Constant', 'Quadratic', 'Logarithmic' - */ - interpolation?: ScoringFunctionInterpolation; - /** - * Parameter values for the freshness scoring function. - */ - parameters: FreshnessScoringParameters; -} - -/** - * Provides parameter values to a magnitude scoring function. - */ -export interface MagnitudeScoringParameters { - /** - * The field value at which boosting starts. - */ - boostingRangeStart: number; - /** - * The field value at which boosting ends. - */ - boostingRangeEnd: number; - /** - * A value indicating whether to apply a constant boost for field values beyond the range end - * value; default is false. - */ - shouldBoostBeyondRangeByConstant?: boolean; -} - -/** - * Defines a function that boosts scores based on the magnitude of a numeric field. - */ -export interface MagnitudeScoringFunction { - /** - * Polymorphic Discriminator - */ - type: "magnitude"; - /** - * The name of the field used as input to the scoring function. - */ - fieldName: string; - /** - * A multiplier for the raw score. Must be a positive number not equal to 1.0. - */ - boost: number; - /** - * A value indicating how boosting will be interpolated across document scores; defaults to - * "Linear". Possible values include: 'Linear', 'Constant', 'Quadratic', 'Logarithmic' - */ - interpolation?: ScoringFunctionInterpolation; - /** - * Parameter values for the magnitude scoring function. 
- */ - parameters: MagnitudeScoringParameters; -} - -/** - * Provides parameter values to a tag scoring function. - */ -export interface TagScoringParameters { - /** - * The name of the parameter passed in search queries to specify the list of tags to compare - * against the target field. - */ - tagsParameter: string; -} - -/** - * Defines a function that boosts scores of documents with string values matching a given list of - * tags. - */ -export interface TagScoringFunction { - /** - * Polymorphic Discriminator - */ - type: "tag"; - /** - * The name of the field used as input to the scoring function. - */ - fieldName: string; - /** - * A multiplier for the raw score. Must be a positive number not equal to 1.0. - */ - boost: number; - /** - * A value indicating how boosting will be interpolated across document scores; defaults to - * "Linear". Possible values include: 'Linear', 'Constant', 'Quadratic', 'Logarithmic' - */ - interpolation?: ScoringFunctionInterpolation; - /** - * Parameter values for the tag scoring function. - */ - parameters: TagScoringParameters; -} - -/** - * Defines parameters for a search index that influence scoring in search queries. - */ -export interface ScoringProfile { - /** - * The name of the scoring profile. - */ - name: string; - /** - * Parameters that boost scoring based on text matches in certain index fields. - */ - textWeights?: TextWeights; - /** - * The collection of functions that influence the scoring of documents. - */ - functions?: ScoringFunctionUnion[]; - /** - * A value indicating how the results of individual scoring functions should be combined. - * Defaults to "Sum". Ignored if there are no scoring functions. Possible values include: 'Sum', - * 'Average', 'Minimum', 'Maximum', 'FirstMatching' - */ - functionAggregation?: ScoringFunctionAggregation; -} - -/** - * Defines options to control Cross-Origin Resource Sharing (CORS) for an index. - */ -export interface CorsOptions { - /** - * The list of origins from which JavaScript code will be granted access to your index. Can - * contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a - * single '*' to allow all origins (not recommended). - */ - allowedOrigins: string[]; - /** - * The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. - */ - maxAgeInSeconds?: number; -} - -/** - * Defines how the Suggest API should apply to a group of fields in the index. - */ -export interface Suggester { - /** - * The name of the suggester. - */ - name: string; - /** - * The list of field names to which the suggester applies. Each field must be searchable. - */ - sourceFields: string[]; -} - -/** - * Represents a search index definition, which describes the fields and search behavior of an - * index. - */ -export interface SearchIndex { - /** - * The name of the index. - */ - name: string; - /** - * The fields of the index. - */ - fields: SearchField[]; - /** - * The scoring profiles for the index. - */ - scoringProfiles?: ScoringProfile[]; - /** - * The name of the scoring profile to use if none is specified in the query. If this property is - * not set and no scoring profile is specified in the query, then default scoring (tf-idf) will - * be used. - */ - defaultScoringProfile?: string; - /** - * Options to control Cross-Origin Resource Sharing (CORS) for the index. - */ - corsOptions?: CorsOptions; - /** - * The suggesters for the index. - */ - suggesters?: Suggester[]; - /** - * The analyzers for the index. 
- */ - analyzers?: LexicalAnalyzerUnion[]; - /** - * The tokenizers for the index. - */ - tokenizers?: LexicalTokenizerUnion[]; - /** - * The token filters for the index. - */ - tokenFilters?: TokenFilterUnion[]; - /** - * The character filters for the index. - */ - charFilters?: CharFilterUnion[]; - /** - * A description of an encryption key that you create in Azure Key Vault. This key is used to - * provide an additional level of encryption-at-rest for your data when you want full assurance - * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you - * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore - * attempts to set this property to null. You can change this property as needed if you want to - * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed - * keys is not available for free search services, and is only available for paid services - * created on or after January 1, 2019. - */ - encryptionKey?: SearchResourceEncryptionKey; - /** - * The type of similarity algorithm to be used when scoring and ranking the documents matching a - * search query. The similarity algorithm can only be defined at index creation time and cannot - * be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. - */ - similarity?: SimilarityUnion; - /** - * The ETag of the index. - */ - etag?: string; -} - -/** - * Statistics for a given index. Statistics are collected periodically and are not guaranteed to - * always be up-to-date. - */ -export interface GetIndexStatisticsResult { - /** - * The number of documents in the index. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly documentCount: number; - /** - * The amount of storage in bytes consumed by the index. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly storageSize: number; -} - -/** - * Response from a List Indexes request. If successful, it includes the full definitions of all - * indexes. - */ -export interface ListIndexesResult { - /** - * The indexes in the Search service. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly indexes: SearchIndex[]; -} - -/** - * Input field mapping for a skill. - */ -export interface InputFieldMappingEntry { - /** - * The name of the input. - */ - name: string; - /** - * The source of the input. - */ - source?: string; - /** - * The source context used for selecting recursive inputs. - */ - sourceContext?: string; - /** - * The recursive inputs used when creating a complex type. - */ - inputs?: InputFieldMappingEntry[]; -} - -/** - * Output field mapping for a skill. - */ -export interface OutputFieldMappingEntry { - /** - * The name of the output defined by the skill. - */ - name: string; - /** - * The target name of the output. It is optional and default to name. - */ - targetName?: string; -} - -/** - * Contains the possible cases for SearchIndexerSkill. - */ -export type SearchIndexerSkillUnion = SearchIndexerSkill | ConditionalSkill | KeyPhraseExtractionSkill | OcrSkill | ImageAnalysisSkill | LanguageDetectionSkill | ShaperSkill | MergeSkill | EntityRecognitionSkill | SentimentSkill | SplitSkill | TextTranslationSkill | WebApiSkill; - -/** - * Base type for skills. 
- */ -export interface SearchIndexerSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "SearchIndexerSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; -} - -/** - * Contains the possible cases for CognitiveServicesAccount. - */ -export type CognitiveServicesAccountUnion = CognitiveServicesAccount | DefaultCognitiveServicesAccount | CognitiveServicesAccountKey; - -/** - * Base type for describing any cognitive service resource attached to a skillset. - */ -export interface CognitiveServicesAccount { - /** - * Polymorphic Discriminator - */ - odatatype: "CognitiveServicesAccount"; - /** - * Description of the cognitive service resource attached to a skillset. - */ - description?: string; -} - -/** - * A list of skills. - */ -export interface SearchIndexerSkillset { - /** - * The name of the skillset. - */ - name: string; - /** - * The description of the skillset. - */ - description?: string; - /** - * A list of skills in the skillset. - */ - skills: SearchIndexerSkillUnion[]; - /** - * Details about cognitive services to be used when running skills. - */ - cognitiveServicesAccount?: CognitiveServicesAccountUnion; - /** - * The ETag of the skillset. - */ - etag?: string; - /** - * A description of an encryption key that you create in Azure Key Vault. This key is used to - * provide an additional level of encryption-at-rest for your skillset definition when you want - * full assurance that no one, not even Microsoft, can decrypt your skillset definition in Azure - * Cognitive Search. Once you have encrypted your skillset definition, it will always remain - * encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can - * change this property as needed if you want to rotate your encryption key; Your skillset - * definition will be unaffected. Encryption with customer-managed keys is not available for free - * search services, and is only available for paid services created on or after January 1, 2019. - */ - encryptionKey?: SearchResourceEncryptionKey; -} - -/** - * An empty object that represents the default cognitive service resource for a skillset. - */ -export interface DefaultCognitiveServicesAccount { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices"; - /** - * Description of the cognitive service resource attached to a skillset. - */ - description?: string; -} - -/** - * A cognitive service resource provisioned with a key that is attached to a skillset. 
- */ -export interface CognitiveServicesAccountKey { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey"; - /** - * Description of the cognitive service resource attached to a skillset. - */ - description?: string; - /** - * The key used to provision the cognitive service resource attached to a skillset. - */ - key: string; -} - -/** - * A skill that enables scenarios that require a Boolean operation to determine the data to assign - * to an output. - */ -export interface ConditionalSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Util.ConditionalSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; -} - -/** - * A skill that uses text analytics for key phrase extraction. - */ -export interface KeyPhraseExtractionSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; - /** - * A value indicating which language code to use. Default is en. Possible values include: 'da', - * 'nl', 'en', 'fi', 'fr', 'de', 'it', 'ja', 'ko', 'no', 'pl', 'pt-PT', 'pt-BR', 'ru', 'es', 'sv' - */ - defaultLanguageCode?: KeyPhraseExtractionSkillLanguage; - /** - * A number indicating how many key phrases to return. If absent, all identified key phrases will - * be returned. - */ - maxKeyPhraseCount?: number; -} - -/** - * A skill that extracts text from image files. - */ -export interface OcrSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Vision.OcrSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. 
- */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; - /** - * A value indicating which language code to use. Default is en. Possible values include: - * 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', - * 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' - */ - defaultLanguageCode?: OcrSkillLanguage; - /** - * A value indicating to turn orientation detection on or not. Default is false. Default value: - * false. - */ - shouldDetectOrientation?: boolean; -} - -/** - * A skill that analyzes image files. It extracts a rich set of visual features based on the image - * content. - */ -export interface ImageAnalysisSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; - /** - * A value indicating which language code to use. Default is en. Possible values include: 'en', - * 'es', 'ja', 'pt', 'zh' - */ - defaultLanguageCode?: ImageAnalysisSkillLanguage; - /** - * A list of visual features. - */ - visualFeatures?: VisualFeature[]; - /** - * A string indicating which domain-specific details to return. - */ - details?: ImageDetail[]; -} - -/** - * A skill that detects the language of input text and reports a single language code for every - * document submitted on the request. The language code is paired with a score indicating the - * confidence of the analysis. - */ -export interface LanguageDetectionSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. 
- */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; -} - -/** - * A skill for reshaping the outputs. It creates a complex type to support composite fields (also - * known as multipart fields). - */ -export interface ShaperSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Util.ShaperSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; -} - -/** - * A skill for merging two or more strings into a single unified string, with an optional - * user-defined delimiter separating each component part. - */ -export interface MergeSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Text.MergeSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; - /** - * The tag indicates the start of the merged text. By default, the tag is an empty space. Default - * value: ''. - */ - insertPreTag?: string; - /** - * The tag indicates the end of the merged text. By default, the tag is an empty space. Default - * value: ''. - */ - insertPostTag?: string; -} - -/** - * Text analytics entity recognition. - */ -export interface EntityRecognitionSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. 
A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; - /** - * A list of entity categories that should be extracted. - */ - categories?: EntityCategory[]; - /** - * A value indicating which language code to use. Default is en. Possible values include: 'ar', - * 'cs', 'zh-Hans', 'zh-Hant', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', - * 'no', 'pl', 'pt-PT', 'pt-BR', 'ru', 'es', 'sv', 'tr' - */ - defaultLanguageCode?: EntityRecognitionSkillLanguage; - /** - * Determines whether or not to include entities which are well known but don't conform to a - * pre-defined type. If this configuration is not set (default), set to null or set to false, - * entities which don't conform to one of the pre-defined types will not be surfaced. - */ - includeTypelessEntities?: boolean; - /** - * A value between 0 and 1 that be used to only include entities whose confidence score is - * greater than the value specified. If not set (default), or if explicitly set to null, all - * entities will be included. - */ - minimumPrecision?: number; -} - -/** - * Text analytics positive-negative sentiment analysis, scored as a floating point value in a range - * of zero to 1. - */ -export interface SentimentSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Text.SentimentSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; - /** - * A value indicating which language code to use. Default is en. Possible values include: 'da', - * 'nl', 'en', 'fi', 'fr', 'de', 'el', 'it', 'no', 'pl', 'pt-PT', 'ru', 'es', 'sv', 'tr' - */ - defaultLanguageCode?: SentimentSkillLanguage; -} - -/** - * A skill to split a string into chunks of text. - */ -export interface SplitSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Text.SplitSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. 
A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; - /** - * A value indicating which language code to use. Default is en. Possible values include: 'da', - * 'de', 'en', 'es', 'fi', 'fr', 'it', 'ko', 'pt' - */ - defaultLanguageCode?: SplitSkillLanguage; - /** - * A value indicating which split mode to perform. Possible values include: 'Pages', 'Sentences' - */ - textSplitMode?: TextSplitMode; - /** - * The desired maximum page length. Default is 10000. - */ - maxPageLength?: number; -} - -/** - * A skill to translate text from one language to another. - */ -export interface TextTranslationSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Text.TranslationSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; - /** - * The language code to translate documents into for documents that don't specify the to language - * explicitly. Possible values include: 'af', 'ar', 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', - * 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', - * 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', - * 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', - * 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', 'yua' - */ - defaultToLanguageCode: TextTranslationSkillLanguage; - /** - * The language code to translate documents from for documents that don't specify the from - * language explicitly. 
Possible values include: 'af', 'ar', 'bn', 'bs', 'bg', 'yue', 'ca', - * 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', - * 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', 'tlh', 'ko', 'lv', 'lt', 'mg', - * 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', - * 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', 'yua' - */ - defaultFromLanguageCode?: TextTranslationSkillLanguage; - /** - * The language code to translate documents from when neither the fromLanguageCode input nor the - * defaultFromLanguageCode parameter are provided, and the automatic language detection is - * unsuccessful. Default is en. Possible values include: 'af', 'ar', 'bn', 'bs', 'bg', 'yue', - * 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', - * 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', 'tlh', 'ko', 'lv', 'lt', - * 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', - * 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', 'yua' - */ - suggestedFrom?: TextTranslationSkillLanguage; -} - -/** - * A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call - * your custom code. - */ -export interface WebApiSkill { - /** - * Polymorphic Discriminator - */ - odatatype: "#Microsoft.Skills.Custom.WebApiSkill"; - /** - * The name of the skill which uniquely identifies it within the skillset. A skill with no name - * defined will be given a default name of its 1-based index in the skills array, prefixed with - * the character '#'. - */ - name?: string; - /** - * The description of the skill which describes the inputs, outputs, and usage of the skill. - */ - description?: string; - /** - * Represents the level at which operations take place, such as the document root or document - * content (for example, /document or /document/content). The default is /document. - */ - context?: string; - /** - * Inputs of the skills could be a column in the source data set, or the output of an upstream - * skill. - */ - inputs: InputFieldMappingEntry[]; - /** - * The output of a skill is either a field in a search index, or a value that can be consumed as - * an input by another skill. - */ - outputs: OutputFieldMappingEntry[]; - /** - * The url for the Web API. - */ - uri: string; - /** - * The headers required to make the http request. - */ - httpHeaders?: { [propertyName: string]: string }; - /** - * The method for the http request. - */ - httpMethod?: string; - /** - * The desired timeout for the request. Default is 30 seconds. - */ - timeout?: string; - /** - * The desired batch size which indicates number of documents. - */ - batchSize?: number; - /** - * If set, the number of parallel calls that can be made to the Web API. - */ - degreeOfParallelism?: number; -} - -/** - * Response from a list skillset request. If successful, it includes the full definitions of all - * skillsets. - */ -export interface ListSkillsetsResult { - /** - * The skillsets defined in the Search service. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly skillsets: SearchIndexerSkillset[]; -} - -/** - * Represents a synonym map definition. - */ -export interface SynonymMap { - /** - * The name of the synonym map. - */ - name: string; - /** - * A series of synonym rules in the specified synonym map format. 
The rules must be separated by - * newlines. - */ - synonyms: string; - /** - * A description of an encryption key that you create in Azure Key Vault. This key is used to - * provide an additional level of encryption-at-rest for your data when you want full assurance - * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you - * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore - * attempts to set this property to null. You can change this property as needed if you want to - * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed - * keys is not available for free search services, and is only available for paid services - * created on or after January 1, 2019. - */ - encryptionKey?: SearchResourceEncryptionKey; - /** - * The ETag of the synonym map. - */ - etag?: string; -} - -/** - * Response from a List SynonymMaps request. If successful, it includes the full definitions of all - * synonym maps. - */ -export interface ListSynonymMapsResult { - /** - * The synonym maps in the Search service. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly synonymMaps: SynonymMap[]; -} - -/** - * Represents a resource's usage and quota. - */ -export interface ResourceCounter { - /** - * The resource usage amount. - */ - usage: number; - /** - * The resource amount quota. - */ - quota?: number; +/** Response from a get service statistics request. If successful, it includes service level counters and limits. */ +export interface ServiceStatistics { + /** Service level resource counters. */ + counters: ServiceCounters; + /** Service level general limits. */ + limits: ServiceLimits; } -/** - * Represents service-level resource counters and quotas. - */ +/** Represents service-level resource counters and quotas. */ export interface ServiceCounters { - /** - * Total number of documents across all indexes in the service. - */ + /** Total number of documents across all indexes in the service. */ documentCounter: ResourceCounter; - /** - * Total number of indexes. - */ + /** Total number of indexes. */ indexCounter: ResourceCounter; - /** - * Total number of indexers. - */ + /** Total number of indexers. */ indexerCounter: ResourceCounter; - /** - * Total number of data sources. - */ + /** Total number of data sources. */ dataSourceCounter: ResourceCounter; - /** - * Total size of used storage in bytes. - */ + /** Total size of used storage in bytes. */ storageSizeCounter: ResourceCounter; - /** - * Total number of synonym maps. - */ + /** Total number of synonym maps. */ synonymMapCounter: ResourceCounter; } -/** - * Represents various service level limits. - */ -export interface ServiceLimits { - /** - * The maximum allowed fields per index. - */ - maxFieldsPerIndex?: number; - /** - * The maximum depth which you can nest sub-fields in an index, including the top-level complex - * field. For example, a/b/c has a nesting depth of 3. - */ - maxFieldNestingDepthPerIndex?: number; - /** - * The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. - */ - maxComplexCollectionFieldsPerIndex?: number; - /** - * The maximum number of objects in complex collections allowed per document. - */ - maxComplexObjectsInCollectionsPerDocument?: number; +/** Represents a resource's usage and quota. */ +export interface ResourceCounter { + /** The resource usage amount. */ + usage: number; + /** The resource amount quota. 
*/ + quota?: number | null; } -/** - * Response from a get service statistics request. If successful, it includes service level - * counters and limits. - */ -export interface ServiceStatistics { - /** - * Service level resource counters. - */ - counters: ServiceCounters; - /** - * Service level general limits. - */ - limits: ServiceLimits; +/** Represents various service level limits. */ +export interface ServiceLimits { + /** The maximum allowed fields per index. */ + maxFieldsPerIndex?: number | null; + /** The maximum depth which you can nest sub-fields in an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. */ + maxFieldNestingDepthPerIndex?: number | null; + /** The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. */ + maxComplexCollectionFieldsPerIndex?: number | null; + /** The maximum number of objects in complex collections allowed per document. */ + maxComplexObjectsInCollectionsPerDocument?: number | null; } -/** - * Describes an error condition for the Azure Cognitive Search API. - */ -export interface SearchError { - /** - * One of a server-defined set of error codes. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly code?: string; - /** - * A human-readable representation of the error. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly message: string; - /** - * An array of details about specific errors that led to this reported error. - * **NOTE: This property will not be serialized. It can only be populated by the server.** - */ - readonly details?: SearchError[]; +/** Provides parameter values to a distance scoring function. */ +export interface DistanceScoringParameters { + /** The name of the parameter passed in search queries to specify the reference location. */ + referencePointParameter: string; + /** The distance in kilometers from the reference location where the boosting range ends. */ + boostingDistance: number; } -/** - * Optional Parameters. - */ -export interface DataSourcesCreateOrUpdateOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; +/** Provides parameter values to a freshness scoring function. */ +export interface FreshnessScoringParameters { + /** The expiration period after which boosting will stop for a particular document. */ + boostingDuration: string; } -/** - * Optional Parameters. - */ -export interface DataSourcesDeleteMethodOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; +/** Provides parameter values to a magnitude scoring function. */ +export interface MagnitudeScoringParameters { + /** The field value at which boosting starts. */ + boostingRangeStart: number; + /** The field value at which boosting ends. 
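For context while reviewing the nullable quota and limit fields above, a minimal consumption sketch may help. It assumes, as in earlier releases, that these shapes come back from the public SearchIndexClient.getServiceStatistics(); the endpoint and API key below are placeholders, not values from this change.

import { AzureKeyCredential, SearchIndexClient } from "@azure/search-documents";

// Placeholders; a real endpoint looks like https://<service-name>.search.windows.net.
const client = new SearchIndexClient("<endpoint>", new AzureKeyCredential("<api-key>"));

async function reportIndexUsage(): Promise<void> {
  const stats = await client.getServiceStatistics();
  // quota is now number | null, so guard before computing a percentage.
  const { usage, quota } = stats.counters.indexCounter;
  const percent = quota ? Math.round((usage / quota) * 100) : undefined;
  console.log(`Indexes in use: ${usage}${percent !== undefined ? ` (${percent}% of quota)` : ""}`);
  console.log(`Max fields per index: ${stats.limits.maxFieldsPerIndex ?? "no documented limit"}`);
}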
*/ + boostingRangeEnd: number; + /** A value indicating whether to apply a constant boost for field values beyond the range end value; default is false. */ + shouldBoostBeyondRangeByConstant?: boolean; } -/** - * Optional Parameters. - */ -export interface DataSourcesListOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Selects which top-level properties of the data sources to retrieve. Specified as a - * comma-separated list of JSON property names, or '*' for all properties. The default is all - * properties. - */ - select?: string; +/** Provides parameter values to a tag scoring function. */ +export interface TagScoringParameters { + /** The name of the parameter passed in search queries to specify the list of tags to compare against the target field. */ + tagsParameter: string; } -/** - * Optional Parameters. - */ -export interface IndexersCreateOrUpdateOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; +/** An object that contains information about the matches that were found, and related metadata. */ +export interface CustomEntity { + /** The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the "normalized" form of the text being found. */ + name: string; + /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */ + description?: string | null; + /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */ + type?: string | null; + /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */ + subtype?: string | null; + /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */ + id?: string | null; + /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case insensitive matches of "Microsoft" could be: microsoft, microSoft, MICROSOFT. */ + caseSensitive?: boolean | null; + /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to accent. */ + accentSensitive?: boolean | null; + /** Defaults to 0. Maximum value of 5. Denotes the acceptable number of divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. For instance, if the edit distance is set to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do. */ + fuzzyEditDistance?: number | null; + /** Changes the default case sensitivity value for this entity. It can be used to change the default value of all aliases caseSensitive values.
*/ + defaultCaseSensitive?: boolean | null; + /** Changes the default accent sensitivity value for this entity. It can be used to change the default value of all aliases accentSensitive values. */ + defaultAccentSensitive?: boolean | null; + /** Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases fuzzyEditDistance values. */ + defaultFuzzyEditDistance?: number | null; + /** An array of complex objects that can be used to specify alternative spellings or synonyms to the root entity name. */ + aliases?: CustomEntityAlias[] | null; +} + +/** A complex object that can be used to specify alternative spellings or synonyms to the root entity name. */ +export interface CustomEntityAlias { + /** The text of the alias. */ + text: string; + /** Determine if the alias is case sensitive. */ + caseSensitive?: boolean | null; + /** Determine if the alias is accent sensitive. */ + accentSensitive?: boolean | null; + /** Determine the fuzzy edit distance of the alias. */ + fuzzyEditDistance?: number | null; } -/** - * Optional Parameters. - */ -export interface IndexersDeleteMethodOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; -} +/** Defines a data change detection policy that captures changes based on the value of a high water mark column. */ +export type HighWaterMarkChangeDetectionPolicy = DataChangeDetectionPolicy & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy"; + /** The name of the high water mark column. */ + highWaterMarkColumnName: string; +}; -/** - * Optional Parameters. - */ -export interface IndexersListOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Selects which top-level properties of the indexers to retrieve. Specified as a comma-separated - * list of JSON property names, or '*' for all properties. The default is all properties. - */ - select?: string; -} +/** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */ +export type SqlIntegratedChangeTrackingPolicy = DataChangeDetectionPolicy & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"; +}; -/** - * Optional Parameters. - */ -export interface SkillsetsCreateOrUpdateOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; -} +/** Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column.
*/ +export type SoftDeleteColumnDeletionDetectionPolicy = DataDeletionDetectionPolicy & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"; + /** The name of the column to use for soft-deletion detection. */ + softDeleteColumnName?: string; + /** The marker value that identifies an item as deleted. */ + softDeleteMarkerValue?: string; +}; -/** - * Optional Parameters. - */ -export interface SkillsetsDeleteMethodOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; -} +/** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */ +export type ConditionalSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Util.ConditionalSkill"; +}; -/** - * Optional Parameters. - */ -export interface SkillsetsListOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Selects which top-level properties of the skillsets to retrieve. Specified as a - * comma-separated list of JSON property names, or '*' for all properties. The default is all - * properties. - */ - select?: string; -} +/** A skill that uses text analytics for key phrase extraction. */ +export type KeyPhraseExtractionSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill"; + /** A value indicating which language code to use. Default is en. */ + defaultLanguageCode?: KeyPhraseExtractionSkillLanguage; + /** A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. */ + maxKeyPhraseCount?: number | null; +}; + +/** A skill that extracts text from image files. */ +export type OcrSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Vision.OcrSkill"; + /** A value indicating which language code to use. Default is en. */ + defaultLanguageCode?: OcrSkillLanguage; + /** A value indicating to turn orientation detection on or not. Default is false. */ + shouldDetectOrientation?: boolean; +}; + +/** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */ +export type ImageAnalysisSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill"; + /** A value indicating which language code to use. Default is en. */ + defaultLanguageCode?: ImageAnalysisSkillLanguage; + /** A list of visual features. */ + visualFeatures?: VisualFeature[]; + /** A string indicating which domain-specific details to return. */ + details?: ImageDetail[]; +}; + +/** A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. 
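A sketch of a soft-delete deletion detection policy literal matching the type above; the column name and marker value are hypothetical, and it assumes the DataDeletionDetectionPolicy base adds no further required members in these generated models.

import { SoftDeleteColumnDeletionDetectionPolicy } from "./generated/service/models"; // assumed path

const deletionPolicy: SoftDeleteColumnDeletionDetectionPolicy = {
  odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
  softDeleteColumnName: "IsDeleted",  // hypothetical column in the data source
  softDeleteMarkerValue: "true"       // rows carrying this value are treated as deleted
};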
*/ +export type LanguageDetectionSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill"; +}; + +/** A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). */ +export type ShaperSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Util.ShaperSkill"; +}; + +/** A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. */ +export type MergeSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Text.MergeSkill"; + /** The tag indicates the start of the merged text. By default, the tag is an empty space. */ + insertPreTag?: string; + /** The tag indicates the end of the merged text. By default, the tag is an empty space. */ + insertPostTag?: string; +}; + +/** Text analytics entity recognition. */ +export type EntityRecognitionSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill"; + /** A list of entity categories that should be extracted. */ + categories?: EntityCategory[]; + /** A value indicating which language code to use. Default is en. */ + defaultLanguageCode?: EntityRecognitionSkillLanguage; + /** Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not be surfaced. */ + includeTypelessEntities?: boolean | null; + /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */ + minimumPrecision?: number | null; +}; + +/** Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1. */ +export type SentimentSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Text.SentimentSkill"; + /** A value indicating which language code to use. Default is en. */ + defaultLanguageCode?: SentimentSkillLanguage; +}; + +/** A skill to split a string into chunks of text. */ +export type SplitSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Text.SplitSkill"; + /** A value indicating which language code to use. Default is en. */ + defaultLanguageCode?: SplitSkillLanguage; + /** A value indicating which split mode to perform. */ + textSplitMode?: TextSplitMode; + /** The desired maximum page length. Default is 10000. */ + maxPageLength?: number | null; +}; + +/** A skill that looks for text from a custom, user-defined list of words and phrases. */ +export type CustomEntityLookupSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill"; + /** A value indicating which language code to use. Default is en. 
*/ + defaultLanguageCode?: CustomEntityLookupSkillLanguage | null; + /** Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. */ + entitiesDefinitionUri?: string | null; + /** The inline CustomEntity definition. */ + inlineEntitiesDefinition?: CustomEntity[] | null; + /** A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, this value will be the default value. */ + globalDefaultCaseSensitive?: boolean | null; + /** A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. */ + globalDefaultAccentSensitive?: boolean | null; + /** A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. */ + globalDefaultFuzzyEditDistance?: number | null; +}; + +/** A skill to translate text from one language to another. */ +export type TextTranslationSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Text.TranslationSkill"; + /** The language code to translate documents into for documents that don't specify the to language explicitly. */ + defaultToLanguageCode: TextTranslationSkillLanguage; + /** The language code to translate documents from for documents that don't specify the from language explicitly. */ + defaultFromLanguageCode?: TextTranslationSkillLanguage; + /** The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is en. */ + suggestedFrom?: TextTranslationSkillLanguage | null; +}; + +/** A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. */ +export type WebApiSkill = SearchIndexerSkill & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Custom.WebApiSkill"; + /** The url for the Web API. */ + uri: string; + /** The headers required to make the http request. */ + httpHeaders?: { [propertyName: string]: string }; + /** The method for the http request. */ + httpMethod?: string; + /** The desired timeout for the request. Default is 30 seconds. */ + timeout?: string; + /** The desired batch size which indicates number of documents. */ + batchSize?: number | null; + /** If set, the number of parallel calls that can be made to the Web API. */ + degreeOfParallelism?: number | null; +}; + +/** An empty object that represents the default cognitive service resource for a skillset. */ +export type DefaultCognitiveServicesAccount = CognitiveServicesAccount & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices"; +}; + +/** A cognitive service resource provisioned with a key that is attached to a skillset. */ +export type CognitiveServicesAccountKey = CognitiveServicesAccount & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey"; + /** The key used to provision the cognitive service resource attached to a skillset. 
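A hedged sketch of a CustomEntityLookupSkill literal using the shape above. The inputs and outputs come from the SearchIndexerSkill base declared earlier in these models, and the field names, inline entity, and import path are illustrative assumptions only.

import { CustomEntityLookupSkill } from "./generated/service/models"; // assumed path

const lookupSkill: CustomEntityLookupSkill = {
  odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill",
  // Base SearchIndexerSkill members (declared earlier in these generated models):
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "entities", targetName: "foundEntities" }],
  // Inline definition instead of entitiesDefinitionUri; only `name` is required per CustomEntity.
  inlineEntitiesDefinition: [{ name: "Contoso" }],
  globalDefaultCaseSensitive: false
};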
*/ + key: string; +}; + +/** Defines a function that boosts scores based on distance from a geographic location. */ +export type DistanceScoringFunction = ScoringFunction & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + type: "distance"; + /** Parameter values for the distance scoring function. */ + parameters: DistanceScoringParameters; +}; + +/** Defines a function that boosts scores based on the value of a date-time field. */ +export type FreshnessScoringFunction = ScoringFunction & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + type: "freshness"; + /** Parameter values for the freshness scoring function. */ + parameters: FreshnessScoringParameters; +}; + +/** Defines a function that boosts scores based on the magnitude of a numeric field. */ +export type MagnitudeScoringFunction = ScoringFunction & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + type: "magnitude"; + /** Parameter values for the magnitude scoring function. */ + parameters: MagnitudeScoringParameters; +}; + +/** Defines a function that boosts scores of documents with string values matching a given list of tags. */ +export type TagScoringFunction = ScoringFunction & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + type: "tag"; + /** Parameter values for the tag scoring function. */ + parameters: TagScoringParameters; +}; + +/** Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. */ +export type CustomAnalyzer = LexicalAnalyzer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.CustomAnalyzer"; + /** The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. KnownTokenizerNames is an enum containing known values. */ + tokenizer: string; + /** A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */ + tokenFilters?: string[]; + /** A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */ + charFilters?: string[]; +}; + +/** Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. */ +export type PatternAnalyzer = LexicalAnalyzer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.PatternAnalyzer"; + /** A value indicating whether terms should be lower-cased. Default is true. */ + lowerCaseTerms?: boolean; + /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */ + pattern?: string; + /** Regular expression flags. */ + flags?: string; + /** A list of stopwords. 
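A sketch of a TagScoringFunction literal matching the discriminated type above; fieldName and boost are members of the ScoringFunction base declared earlier in these models, and the field and parameter names are invented for the example.

import { TagScoringFunction } from "./generated/service/models"; // assumed path

const tagBoost: TagScoringFunction = {
  type: "tag",                              // polymorphic discriminator
  fieldName: "tags",                        // base ScoringFunction member (hypothetical index field)
  boost: 2,                                 // base ScoringFunction member
  parameters: { tagsParameter: "mytags" }   // TagScoringParameters: query parameter carrying the tag list
};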
*/ + stopwords?: string[]; +}; + +/** Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. */ +export type LuceneStandardAnalyzer = LexicalAnalyzer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.StandardAnalyzer"; + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; + /** A list of stopwords. */ + stopwords?: string[]; +}; -/** - * Optional Parameters. - */ -export interface SynonymMapsCreateOrUpdateOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; -} +/** Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. */ +export type StopAnalyzer = LexicalAnalyzer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.StopAnalyzer"; + /** A list of stopwords. */ + stopwords?: string[]; +}; -/** - * Optional Parameters. - */ -export interface SynonymMapsDeleteMethodOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; -} +/** Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. */ +export type ClassicTokenizer = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.ClassicTokenizer"; + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; +}; -/** - * Optional Parameters. - */ -export interface SynonymMapsListOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Selects which top-level properties of the synonym maps to retrieve. Specified as a - * comma-separated list of JSON property names, or '*' for all properties. The default is all - * properties. - */ - select?: string; -} +/** Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */ +export type EdgeNGramTokenizer = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer"; + /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */ + minGram?: number; + /** The maximum n-gram length. Default is 2. Maximum is 300. */ + maxGram?: number; + /** Character classes to keep in the tokens. */ + tokenChars?: TokenCharacterKind[]; +}; -/** - * Optional Parameters. 
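A sketch of a CustomAnalyzer literal using the analyzer and tokenizer shapes above; `name` comes from the LexicalAnalyzer base declared earlier in these models, and the analyzer name and import path are invented. The tokenizer and token filter strings are taken from the LexicalTokenizerName and TokenFilterName values listed further down.

import { CustomAnalyzer } from "./generated/service/models"; // assumed path

const lowercaseStandardAnalyzer: CustomAnalyzer = {
  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
  name: "my_lowercase_standard",              // base LexicalAnalyzer member
  tokenizer: "standard_v2",                   // a LexicalTokenizerName value
  tokenFilters: ["lowercase", "asciifolding"] // run in the order listed
};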
- */ -export interface IndexesListOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Selects which top-level properties of the index definitions to retrieve. Specified as a - * comma-separated list of JSON property names, or '*' for all properties. The default is all - * properties. - */ - select?: string; -} +/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */ +export type KeywordTokenizer = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.KeywordTokenizer"; + /** The read buffer size in bytes. Default is 256. */ + bufferSize?: number; +}; -/** - * Optional Parameters. - */ -export interface IndexesCreateOrUpdateOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by - * taking the index offline for at least a few seconds. This temporarily causes indexing and - * query requests to fail. Performance and write availability of the index can be impaired for - * several minutes after the index is updated, or longer for very large indexes. - */ - allowIndexDowntime?: boolean; - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; -} +/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */ +export type KeywordTokenizerV2 = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.KeywordTokenizerV2"; + /** The maximum token length. Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; +}; -/** - * Optional Parameters. - */ -export interface IndexesDeleteMethodOptionalParams extends coreHttp.RequestOptionsBase { - /** - * Defines the If-Match condition. The operation will be performed only if the ETag on the server - * matches this value. - */ - ifMatch?: string; - /** - * Defines the If-None-Match condition. The operation will be performed only if the ETag on the - * server does not match this value. - */ - ifNoneMatch?: string; -} +/** Divides text using language-specific rules. */ +export type MicrosoftLanguageTokenizer = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer"; + /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */ + maxTokenLength?: number; + /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */ + isSearchTokenizer?: boolean; + /** The language to use. The default is English. */ + language?: MicrosoftTokenizerLanguage; +}; -/** - * Defines values for LexicalAnalyzerName. 
- * Possible values include: 'ArMicrosoft', 'ArLucene', 'HyLucene', 'BnMicrosoft', 'EuLucene', - * 'BgMicrosoft', 'BgLucene', 'CaMicrosoft', 'CaLucene', 'ZhHansMicrosoft', 'ZhHansLucene', - * 'ZhHantMicrosoft', 'ZhHantLucene', 'HrMicrosoft', 'CsMicrosoft', 'CsLucene', 'DaMicrosoft', - * 'DaLucene', 'NlMicrosoft', 'NlLucene', 'EnMicrosoft', 'EnLucene', 'EtMicrosoft', 'FiMicrosoft', - * 'FiLucene', 'FrMicrosoft', 'FrLucene', 'GlLucene', 'DeMicrosoft', 'DeLucene', 'ElMicrosoft', - * 'ElLucene', 'GuMicrosoft', 'HeMicrosoft', 'HiMicrosoft', 'HiLucene', 'HuMicrosoft', 'HuLucene', - * 'IsMicrosoft', 'IdMicrosoft', 'IdLucene', 'GaLucene', 'ItMicrosoft', 'ItLucene', 'JaMicrosoft', - * 'JaLucene', 'KnMicrosoft', 'KoMicrosoft', 'KoLucene', 'LvMicrosoft', 'LvLucene', 'LtMicrosoft', - * 'MlMicrosoft', 'MsMicrosoft', 'MrMicrosoft', 'NbMicrosoft', 'NoLucene', 'FaLucene', - * 'PlMicrosoft', 'PlLucene', 'PtBrMicrosoft', 'PtBrLucene', 'PtPtMicrosoft', 'PtPtLucene', - * 'PaMicrosoft', 'RoMicrosoft', 'RoLucene', 'RuMicrosoft', 'RuLucene', 'SrCyrillicMicrosoft', - * 'SrLatinMicrosoft', 'SkMicrosoft', 'SlMicrosoft', 'EsMicrosoft', 'EsLucene', 'SvMicrosoft', - * 'SvLucene', 'TaMicrosoft', 'TeMicrosoft', 'ThMicrosoft', 'ThLucene', 'TrMicrosoft', 'TrLucene', - * 'UkMicrosoft', 'UrMicrosoft', 'ViMicrosoft', 'StandardLucene', 'StandardAsciiFoldingLucene', - * 'Keyword', 'Pattern', 'Simple', 'Stop', 'Whitespace' - * @readonly - * @enum {string} - */ -export type LexicalAnalyzerName = 'ar.microsoft' | 'ar.lucene' | 'hy.lucene' | 'bn.microsoft' | 'eu.lucene' | 'bg.microsoft' | 'bg.lucene' | 'ca.microsoft' | 'ca.lucene' | 'zh-Hans.microsoft' | 'zh-Hans.lucene' | 'zh-Hant.microsoft' | 'zh-Hant.lucene' | 'hr.microsoft' | 'cs.microsoft' | 'cs.lucene' | 'da.microsoft' | 'da.lucene' | 'nl.microsoft' | 'nl.lucene' | 'en.microsoft' | 'en.lucene' | 'et.microsoft' | 'fi.microsoft' | 'fi.lucene' | 'fr.microsoft' | 'fr.lucene' | 'gl.lucene' | 'de.microsoft' | 'de.lucene' | 'el.microsoft' | 'el.lucene' | 'gu.microsoft' | 'he.microsoft' | 'hi.microsoft' | 'hi.lucene' | 'hu.microsoft' | 'hu.lucene' | 'is.microsoft' | 'id.microsoft' | 'id.lucene' | 'ga.lucene' | 'it.microsoft' | 'it.lucene' | 'ja.microsoft' | 'ja.lucene' | 'kn.microsoft' | 'ko.microsoft' | 'ko.lucene' | 'lv.microsoft' | 'lv.lucene' | 'lt.microsoft' | 'ml.microsoft' | 'ms.microsoft' | 'mr.microsoft' | 'nb.microsoft' | 'no.lucene' | 'fa.lucene' | 'pl.microsoft' | 'pl.lucene' | 'pt-BR.microsoft' | 'pt-BR.lucene' | 'pt-PT.microsoft' | 'pt-PT.lucene' | 'pa.microsoft' | 'ro.microsoft' | 'ro.lucene' | 'ru.microsoft' | 'ru.lucene' | 'sr-cyrillic.microsoft' | 'sr-latin.microsoft' | 'sk.microsoft' | 'sl.microsoft' | 'es.microsoft' | 'es.lucene' | 'sv.microsoft' | 'sv.lucene' | 'ta.microsoft' | 'te.microsoft' | 'th.microsoft' | 'th.lucene' | 'tr.microsoft' | 'tr.lucene' | 'uk.microsoft' | 'ur.microsoft' | 'vi.microsoft' | 'standard.lucene' | 'standardasciifolding.lucene' | 'keyword' | 'pattern' | 'simple' | 'stop' | 'whitespace'; +/** Divides text using language-specific rules and reduces words to their base forms. */ +export type MicrosoftLanguageStemmingTokenizer = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer"; + /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. 
Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */ + maxTokenLength?: number; + /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */ + isSearchTokenizer?: boolean; + /** The language to use. The default is English. */ + language?: MicrosoftStemmingTokenizerLanguage; +}; -/** - * Defines values for LexicalTokenizerName. - * Possible values include: 'Classic', 'EdgeNGram', 'Keyword', 'Letter', 'Lowercase', - * 'MicrosoftLanguageTokenizer', 'MicrosoftLanguageStemmingTokenizer', 'NGram', 'PathHierarchy', - * 'Pattern', 'Standard', 'UaxUrlEmail', 'Whitespace' - * @readonly - * @enum {string} - */ -export type LexicalTokenizerName = 'classic' | 'edgeNGram' | 'keyword_v2' | 'letter' | 'lowercase' | 'microsoft_language_tokenizer' | 'microsoft_language_stemming_tokenizer' | 'nGram' | 'path_hierarchy_v2' | 'pattern' | 'standard_v2' | 'uax_url_email' | 'whitespace'; +/** Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */ +export type NGramTokenizer = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.NGramTokenizer"; + /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */ + minGram?: number; + /** The maximum n-gram length. Default is 2. Maximum is 300. */ + maxGram?: number; + /** Character classes to keep in the tokens. */ + tokenChars?: TokenCharacterKind[]; +}; -/** - * Defines values for TokenFilterName. - * Possible values include: 'ArabicNormalization', 'Apostrophe', 'AsciiFolding', 'CjkBigram', - * 'CjkWidth', 'Classic', 'CommonGram', 'EdgeNGram', 'Elision', 'GermanNormalization', - * 'HindiNormalization', 'IndicNormalization', 'KeywordRepeat', 'KStem', 'Length', 'Limit', - * 'Lowercase', 'NGram', 'PersianNormalization', 'Phonetic', 'PorterStem', 'Reverse', - * 'ScandinavianNormalization', 'ScandinavianFoldingNormalization', 'Shingle', 'Snowball', - * 'SoraniNormalization', 'Stemmer', 'Stopwords', 'Trim', 'Truncate', 'Unique', 'Uppercase', - * 'WordDelimiter' - * @readonly - * @enum {string} - */ -export type TokenFilterName = 'arabic_normalization' | 'apostrophe' | 'asciifolding' | 'cjk_bigram' | 'cjk_width' | 'classic' | 'common_grams' | 'edgeNGram_v2' | 'elision' | 'german_normalization' | 'hindi_normalization' | 'indic_normalization' | 'keyword_repeat' | 'kstem' | 'length' | 'limit' | 'lowercase' | 'nGram_v2' | 'persian_normalization' | 'phonetic' | 'porter_stem' | 'reverse' | 'scandinavian_normalization' | 'scandinavian_folding' | 'shingle' | 'snowball' | 'sorani_normalization' | 'stemmer' | 'stopwords' | 'trim' | 'truncate' | 'unique' | 'uppercase' | 'word_delimiter'; +/** Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. */ +export type PathHierarchyTokenizerV2 = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2"; + /** The delimiter character to use. Default is "/". */ + delimiter?: string; + /** A value that, if set, replaces the delimiter character. Default is "/". */ + replacement?: string; + /** The maximum token length. Default and maximum is 300. 
*/ + maxTokenLength?: number; + /** A value indicating whether to generate tokens in reverse order. Default is false. */ + reverseTokenOrder?: boolean; + /** The number of initial tokens to skip. Default is 0. */ + numberOfTokensToSkip?: number; +}; -/** - * Defines values for CharFilterName. - * Possible values include: 'HtmlStrip' - * @readonly - * @enum {string} - */ -export type CharFilterName = 'html_strip'; +/** Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. */ +export type PatternTokenizer = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.PatternTokenizer"; + /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */ + pattern?: string; + /** Regular expression flags. */ + flags?: string; + /** The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. */ + group?: number; +}; -/** - * Defines values for RegexFlags. - * Possible values include: 'CanonEq', 'CaseInsensitive', 'Comments', 'DotAll', 'Literal', - * 'Multiline', 'UnicodeCase', 'UnixLines' - * @readonly - * @enum {string} - */ -export type RegexFlags = 'CANON_EQ' | 'CASE_INSENSITIVE' | 'COMMENTS' | 'DOTALL' | 'LITERAL' | 'MULTILINE' | 'UNICODE_CASE' | 'UNIX_LINES'; +/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */ +export type LuceneStandardTokenizer = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.StandardTokenizer"; + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. */ + maxTokenLength?: number; +}; -/** - * Defines values for SearchFieldDataType. - * Possible values include: 'String', 'Int32', 'Int64', 'Double', 'Boolean', 'DateTimeOffset', - * 'GeographyPoint', 'Complex', 'Collection(Edm.String)', 'Collection(Edm.Int32)', - * 'Collection(Edm.Int64)', 'Collection(Edm.Double)', 'Collection(Edm.Boolean)', - * 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)', - * 'Collection(Edm.ComplexType)' - * @readonly - * @enum {string} - */ -export type SearchFieldDataType = 'Edm.String' | 'Edm.Int32' | 'Edm.Int64' | 'Edm.Double' | 'Edm.Boolean' | 'Edm.DateTimeOffset' | 'Edm.GeographyPoint' | 'Edm.ComplexType' | 'Collection(Edm.String)' | 'Collection(Edm.Int32)' | 'Collection(Edm.Int64)' | 'Collection(Edm.Double)' | 'Collection(Edm.Boolean)' | 'Collection(Edm.DateTimeOffset)' | 'Collection(Edm.GeographyPoint)' | 'Collection(Edm.ComplexType)'; +/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */ +export type LuceneStandardTokenizerV2 = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.StandardTokenizerV2"; + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; +}; -/** - * Defines values for TokenCharacterKind. 
- * Possible values include: 'Letter', 'Digit', 'Whitespace', 'Punctuation', 'Symbol' - * @readonly - * @enum {string} - */ -export type TokenCharacterKind = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol'; +/** Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. */ +export type UaxUrlEmailTokenizer = LexicalTokenizer & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer"; + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; +}; -/** - * Defines values for MicrosoftTokenizerLanguage. - * Possible values include: 'Bangla', 'Bulgarian', 'Catalan', 'ChineseSimplified', - * 'ChineseTraditional', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'French', 'German', - * 'Greek', 'Gujarati', 'Hindi', 'Icelandic', 'Indonesian', 'Italian', 'Japanese', 'Kannada', - * 'Korean', 'Malay', 'Malayalam', 'Marathi', 'NorwegianBokmaal', 'Polish', 'Portuguese', - * 'PortugueseBrazilian', 'Punjabi', 'Romanian', 'Russian', 'SerbianCyrillic', 'SerbianLatin', - * 'Slovenian', 'Spanish', 'Swedish', 'Tamil', 'Telugu', 'Thai', 'Ukrainian', 'Urdu', 'Vietnamese' - * @readonly - * @enum {string} - */ -export type MicrosoftTokenizerLanguage = 'bangla' | 'bulgarian' | 'catalan' | 'chineseSimplified' | 'chineseTraditional' | 'croatian' | 'czech' | 'danish' | 'dutch' | 'english' | 'french' | 'german' | 'greek' | 'gujarati' | 'hindi' | 'icelandic' | 'indonesian' | 'italian' | 'japanese' | 'kannada' | 'korean' | 'malay' | 'malayalam' | 'marathi' | 'norwegianBokmaal' | 'polish' | 'portuguese' | 'portugueseBrazilian' | 'punjabi' | 'romanian' | 'russian' | 'serbianCyrillic' | 'serbianLatin' | 'slovenian' | 'spanish' | 'swedish' | 'tamil' | 'telugu' | 'thai' | 'ukrainian' | 'urdu' | 'vietnamese'; +/** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. */ +export type AsciiFoldingTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter"; + /** A value indicating whether the original token will be kept. Default is false. */ + preserveOriginal?: boolean; +}; -/** - * Defines values for MicrosoftStemmingTokenizerLanguage. 
- * Possible values include: 'Arabic', 'Bangla', 'Bulgarian', 'Catalan', 'Croatian', 'Czech', - * 'Danish', 'Dutch', 'English', 'Estonian', 'Finnish', 'French', 'German', 'Greek', 'Gujarati', - * 'Hebrew', 'Hindi', 'Hungarian', 'Icelandic', 'Indonesian', 'Italian', 'Kannada', 'Latvian', - * 'Lithuanian', 'Malay', 'Malayalam', 'Marathi', 'NorwegianBokmaal', 'Polish', 'Portuguese', - * 'PortugueseBrazilian', 'Punjabi', 'Romanian', 'Russian', 'SerbianCyrillic', 'SerbianLatin', - * 'Slovak', 'Slovenian', 'Spanish', 'Swedish', 'Tamil', 'Telugu', 'Turkish', 'Ukrainian', 'Urdu' - * @readonly - * @enum {string} - */ -export type MicrosoftStemmingTokenizerLanguage = 'arabic' | 'bangla' | 'bulgarian' | 'catalan' | 'croatian' | 'czech' | 'danish' | 'dutch' | 'english' | 'estonian' | 'finnish' | 'french' | 'german' | 'greek' | 'gujarati' | 'hebrew' | 'hindi' | 'hungarian' | 'icelandic' | 'indonesian' | 'italian' | 'kannada' | 'latvian' | 'lithuanian' | 'malay' | 'malayalam' | 'marathi' | 'norwegianBokmaal' | 'polish' | 'portuguese' | 'portugueseBrazilian' | 'punjabi' | 'romanian' | 'russian' | 'serbianCyrillic' | 'serbianLatin' | 'slovak' | 'slovenian' | 'spanish' | 'swedish' | 'tamil' | 'telugu' | 'turkish' | 'ukrainian' | 'urdu'; +/** Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. */ +export type CjkBigramTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.CjkBigramTokenFilter"; + /** The scripts to ignore. */ + ignoreScripts?: CjkBigramTokenFilterScripts[]; + /** A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. */ + outputUnigrams?: boolean; +}; -/** - * Defines values for CjkBigramTokenFilterScripts. - * Possible values include: 'Han', 'Hiragana', 'Katakana', 'Hangul' - * @readonly - * @enum {string} - */ -export type CjkBigramTokenFilterScripts = 'han' | 'hiragana' | 'katakana' | 'hangul'; +/** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. */ +export type CommonGramTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter"; + /** The set of common words. */ + commonWords: string[]; + /** A value indicating whether common words matching will be case insensitive. Default is false. */ + ignoreCase?: boolean; + /** A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false. */ + useQueryMode?: boolean; +}; -/** - * Defines values for EdgeNGramTokenFilterSide. - * Possible values include: 'Front', 'Back' - * @readonly - * @enum {string} - */ -export type EdgeNGramTokenFilterSide = 'front' | 'back'; +/** Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. */ +export type DictionaryDecompounderTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter"; + /** The list of words to match against. */ + wordList: string[]; + /** The minimum word size. 
Only words longer than this get processed. Default is 5. Maximum is 300. */ + minWordSize?: number; + /** The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum is 300. */ + minSubwordSize?: number; + /** The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. */ + maxSubwordSize?: number; + /** A value indicating whether to add only the longest matching subword to the output. Default is false. */ + onlyLongestMatch?: boolean; +}; -/** - * Defines values for PhoneticEncoder. - * Possible values include: 'Metaphone', 'DoubleMetaphone', 'Soundex', 'RefinedSoundex', - * 'Caverphone1', 'Caverphone2', 'Cologne', 'Nysiis', 'KoelnerPhonetik', 'HaasePhonetik', - * 'BeiderMorse' - * @readonly - * @enum {string} - */ -export type PhoneticEncoder = 'metaphone' | 'doubleMetaphone' | 'soundex' | 'refinedSoundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerPhonetik' | 'haasePhonetik' | 'beiderMorse'; +/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */ +export type EdgeNGramTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilter"; + /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */ + minGram?: number; + /** The maximum n-gram length. Default is 2. */ + maxGram?: number; + /** Specifies which side of the input the n-gram should be generated from. Default is "front". */ + side?: EdgeNGramTokenFilterSide; +}; -/** - * Defines values for SnowballTokenFilterLanguage. - * Possible values include: 'Armenian', 'Basque', 'Catalan', 'Danish', 'Dutch', 'English', - * 'Finnish', 'French', 'German', 'German2', 'Hungarian', 'Italian', 'Kp', 'Lovins', 'Norwegian', - * 'Porter', 'Portuguese', 'Romanian', 'Russian', 'Spanish', 'Swedish', 'Turkish' - * @readonly - * @enum {string} - */ -export type SnowballTokenFilterLanguage = 'armenian' | 'basque' | 'catalan' | 'danish' | 'dutch' | 'english' | 'finnish' | 'french' | 'german' | 'german2' | 'hungarian' | 'italian' | 'kp' | 'lovins' | 'norwegian' | 'porter' | 'portuguese' | 'romanian' | 'russian' | 'spanish' | 'swedish' | 'turkish'; +/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */ +export type EdgeNGramTokenFilterV2 = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2"; + /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */ + minGram?: number; + /** The maximum n-gram length. Default is 2. Maximum is 300. */ + maxGram?: number; + /** Specifies which side of the input the n-gram should be generated from. Default is "front". */ + side?: EdgeNGramTokenFilterSide; +}; -/** - * Defines values for StemmerTokenFilterLanguage. 
- * Possible values include: 'Arabic', 'Armenian', 'Basque', 'Brazilian', 'Bulgarian', 'Catalan', - * 'Czech', 'Danish', 'Dutch', 'DutchKp', 'English', 'LightEnglish', 'MinimalEnglish', - * 'PossessiveEnglish', 'Porter2', 'Lovins', 'Finnish', 'LightFinnish', 'French', 'LightFrench', - * 'MinimalFrench', 'Galician', 'MinimalGalician', 'German', 'German2', 'LightGerman', - * 'MinimalGerman', 'Greek', 'Hindi', 'Hungarian', 'LightHungarian', 'Indonesian', 'Irish', - * 'Italian', 'LightItalian', 'Sorani', 'Latvian', 'Norwegian', 'LightNorwegian', - * 'MinimalNorwegian', 'LightNynorsk', 'MinimalNynorsk', 'Portuguese', 'LightPortuguese', - * 'MinimalPortuguese', 'PortugueseRslp', 'Romanian', 'Russian', 'LightRussian', 'Spanish', - * 'LightSpanish', 'Swedish', 'LightSwedish', 'Turkish' - * @readonly - * @enum {string} - */ -export type StemmerTokenFilterLanguage = 'arabic' | 'armenian' | 'basque' | 'brazilian' | 'bulgarian' | 'catalan' | 'czech' | 'danish' | 'dutch' | 'dutchKp' | 'english' | 'lightEnglish' | 'minimalEnglish' | 'possessiveEnglish' | 'porter2' | 'lovins' | 'finnish' | 'lightFinnish' | 'french' | 'lightFrench' | 'minimalFrench' | 'galician' | 'minimalGalician' | 'german' | 'german2' | 'lightGerman' | 'minimalGerman' | 'greek' | 'hindi' | 'hungarian' | 'lightHungarian' | 'indonesian' | 'irish' | 'italian' | 'lightItalian' | 'sorani' | 'latvian' | 'norwegian' | 'lightNorwegian' | 'minimalNorwegian' | 'lightNynorsk' | 'minimalNynorsk' | 'portuguese' | 'lightPortuguese' | 'minimalPortuguese' | 'portugueseRslp' | 'romanian' | 'russian' | 'lightRussian' | 'spanish' | 'lightSpanish' | 'swedish' | 'lightSwedish' | 'turkish'; +/** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene. */ +export type ElisionTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter"; + /** The set of articles to remove. */ + articles?: string[]; +}; -/** - * Defines values for StopwordsList. - * Possible values include: 'Arabic', 'Armenian', 'Basque', 'Brazilian', 'Bulgarian', 'Catalan', - * 'Czech', 'Danish', 'Dutch', 'English', 'Finnish', 'French', 'Galician', 'German', 'Greek', - * 'Hindi', 'Hungarian', 'Indonesian', 'Irish', 'Italian', 'Latvian', 'Norwegian', 'Persian', - * 'Portuguese', 'Romanian', 'Russian', 'Sorani', 'Spanish', 'Swedish', 'Thai', 'Turkish' - * @readonly - * @enum {string} - */ -export type StopwordsList = 'arabic' | 'armenian' | 'basque' | 'brazilian' | 'bulgarian' | 'catalan' | 'czech' | 'danish' | 'dutch' | 'english' | 'finnish' | 'french' | 'galician' | 'german' | 'greek' | 'hindi' | 'hungarian' | 'indonesian' | 'irish' | 'italian' | 'latvian' | 'norwegian' | 'persian' | 'portuguese' | 'romanian' | 'russian' | 'sorani' | 'spanish' | 'swedish' | 'thai' | 'turkish'; +/** A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. */ +export type KeepTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.KeepTokenFilter"; + /** The list of words to keep. */ + keepWords: string[]; + /** A value indicating whether to lower case all words first. Default is false. */ + lowerCaseKeepWords?: boolean; +}; -/** - * Defines values for SearchIndexerDataSourceType. 
- * Possible values include: 'AzureSql', 'CosmosDb', 'AzureBlob', 'AzureTable', 'MySql' - * @readonly - * @enum {string} - */ -export type SearchIndexerDataSourceType = 'azuresql' | 'cosmosdb' | 'azureblob' | 'azuretable' | 'mysql'; +/** Marks terms as keywords. This token filter is implemented using Apache Lucene. */ +export type KeywordMarkerTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter"; + /** A list of words to mark as keywords. */ + keywords: string[]; + /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */ + ignoreCase?: boolean; +}; -/** - * Defines values for BlobIndexerParsingMode. - * Possible values include: 'Default', 'Text', 'DelimitedText', 'Json', 'JsonArray', 'JsonLines' - * @readonly - * @enum {string} - */ -export type BlobIndexerParsingMode = 'default' | 'text' | 'delimitedText' | 'json' | 'jsonArray' | 'jsonLines'; +/** Removes words that are too long or too short. This token filter is implemented using Apache Lucene. */ +export type LengthTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.LengthTokenFilter"; + /** The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max. */ + minLength?: number; + /** The maximum length in characters. Default and maximum is 300. */ + maxLength?: number; +}; -/** - * Defines values for BlobIndexerDataToExtract. - * Possible values include: 'StorageMetadata', 'AllMetadata', 'ContentAndMetadata' - * @readonly - * @enum {string} - */ -export type BlobIndexerDataToExtract = 'storageMetadata' | 'allMetadata' | 'contentAndMetadata'; +/** Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. */ +export type LimitTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.LimitTokenFilter"; + /** The maximum number of tokens to produce. Default is 1. */ + maxTokenCount?: number; + /** A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false. */ + consumeAllTokens?: boolean; +}; -/** - * Defines values for BlobIndexerImageAction. - * Possible values include: 'None', 'GenerateNormalizedImages', 'GenerateNormalizedImagePerPage' - * @readonly - * @enum {string} - */ -export type BlobIndexerImageAction = 'none' | 'generateNormalizedImages' | 'generateNormalizedImagePerPage'; +/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */ +export type NGramTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.NGramTokenFilter"; + /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */ + minGram?: number; + /** The maximum n-gram length. Default is 2. */ + maxGram?: number; +}; -/** - * Defines values for BlobIndexerPDFTextRotationAlgorithm. - * Possible values include: 'None', 'DetectAngles' - * @readonly - * @enum {string} - */ -export type BlobIndexerPDFTextRotationAlgorithm = 'none' | 'detectAngles'; +/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. 
*/ +export type NGramTokenFilterV2 = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.NGramTokenFilterV2"; + /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */ + minGram?: number; + /** The maximum n-gram length. Default is 2. Maximum is 300. */ + maxGram?: number; +}; -/** - * Defines values for IndexerExecutionEnvironment. - * Possible values include: 'standard', 'private' - * @readonly - * @enum {string} - */ -export type IndexerExecutionEnvironment = 'standard' | 'private'; +/** Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. */ +export type PatternCaptureTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter"; + /** A list of patterns to match against each token. */ + patterns: string[]; + /** A value indicating whether to return the original token even if one of the patterns matches. Default is true. */ + preserveOriginal?: boolean; +}; -/** - * Defines values for IndexerExecutionStatus. - * Possible values include: 'TransientFailure', 'Success', 'InProgress', 'Reset' - * @readonly - * @enum {string} - */ -export type IndexerExecutionStatus = 'transientFailure' | 'success' | 'inProgress' | 'reset'; +/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene. */ +export type PatternReplaceTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter"; + /** A regular expression pattern. */ + pattern: string; + /** The replacement text. */ + replacement: string; +}; -/** - * Defines values for IndexerStatus. - * Possible values include: 'Unknown', 'Error', 'Running' - * @readonly - * @enum {string} - */ -export type IndexerStatus = 'unknown' | 'error' | 'running'; +/** Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. */ +export type PhoneticTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter"; + /** The phonetic encoder to use. Default is "metaphone". */ + encoder?: PhoneticEncoder; + /** A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. */ + replaceOriginalTokens?: boolean; +}; -/** - * Defines values for ScoringFunctionInterpolation. - * Possible values include: 'Linear', 'Constant', 'Quadratic', 'Logarithmic' - * @readonly - * @enum {string} - */ -export type ScoringFunctionInterpolation = 'linear' | 'constant' | 'quadratic' | 'logarithmic'; +/** Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. 
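A sketch of a PatternReplaceTokenFilter literal reproducing the example from the description above (input "aa bb aa bb" with pattern "(aa)\s+(bb)" and replacement "$1#$2" yields "aa#bb aa#bb"); `name` is the TokenFilter base member declared earlier, and the filter name is invented.

import { PatternReplaceTokenFilter } from "./generated/service/models"; // assumed path

const aaBbJoiner: PatternReplaceTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter",
  name: "aa_bb_joiner",          // base TokenFilter member
  pattern: "(aa)\\s+(bb)",       // regular expression from the description above
  replacement: "$1#$2"           // "aa bb aa bb" -> "aa#bb aa#bb"
};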
*/ +export type ShingleTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter"; + /** The maximum shingle size. Default and minimum value is 2. */ + maxShingleSize?: number; + /** The minimum shingle size. Default and minimum value is 2. Must be less than the value of maxShingleSize. */ + minShingleSize?: number; + /** A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. Default is true. */ + outputUnigrams?: boolean; + /** A value indicating whether to output unigrams for those times when no shingles are available. This property takes precedence when outputUnigrams is set to false. Default is false. */ + outputUnigramsIfNoShingles?: boolean; + /** The string to use when joining adjacent tokens to form a shingle. Default is a single space (" "). */ + tokenSeparator?: string; + /** The string to insert for each position at which there is no token. Default is an underscore ("_"). */ + filterToken?: string; +}; -/** - * Defines values for ScoringFunctionAggregation. - * Possible values include: 'Sum', 'Average', 'Minimum', 'Maximum', 'FirstMatching' - * @readonly - * @enum {string} - */ -export type ScoringFunctionAggregation = 'sum' | 'average' | 'minimum' | 'maximum' | 'firstMatching'; +/** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */ +export type SnowballTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.SnowballTokenFilter"; + /** The language to use. */ + language: SnowballTokenFilterLanguage; +}; -/** - * Defines values for KeyPhraseExtractionSkillLanguage. - * Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'it', 'ja', 'ko', 'no', 'pl', - * 'pt-PT', 'pt-BR', 'ru', 'es', 'sv' - * @readonly - * @enum {string} - */ -export type KeyPhraseExtractionSkillLanguage = 'da' | 'nl' | 'en' | 'fi' | 'fr' | 'de' | 'it' | 'ja' | 'ko' | 'no' | 'pl' | 'pt-PT' | 'pt-BR' | 'ru' | 'es' | 'sv'; +/** Language specific stemming filter. This token filter is implemented using Apache Lucene. */ +export type StemmerTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.StemmerTokenFilter"; + /** The language to use. */ + language: StemmerTokenFilterLanguage; +}; -/** - * Defines values for OcrSkillLanguage. - * Possible values include: 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', - * 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', - * 'sr-Latn', 'sk' - * @readonly - * @enum {string} - */ -export type OcrSkillLanguage = 'zh-Hans' | 'zh-Hant' | 'cs' | 'da' | 'nl' | 'en' | 'fi' | 'fr' | 'de' | 'el' | 'hu' | 'it' | 'ja' | 'ko' | 'nb' | 'pl' | 'pt' | 'ru' | 'es' | 'sv' | 'tr' | 'ar' | 'ro' | 'sr-Cyrl' | 'sr-Latn' | 'sk'; +/** Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. 
*/ +export type StemmerOverrideTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter"; + /** A list of stemming rules in the following format: "word => stem", for example: "ran => run". */ + rules: string[]; +}; -/** - * Defines values for ImageAnalysisSkillLanguage. - * Possible values include: 'en', 'es', 'ja', 'pt', 'zh' - * @readonly - * @enum {string} - */ -export type ImageAnalysisSkillLanguage = 'en' | 'es' | 'ja' | 'pt' | 'zh'; +/** Removes stop words from a token stream. This token filter is implemented using Apache Lucene. */ +export type StopwordsTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter"; + /** The list of stopwords. This property and the stopwords list property cannot both be set. */ + stopwords?: string[]; + /** A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. */ + stopwordsList?: StopwordsList; + /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */ + ignoreCase?: boolean; + /** A value indicating whether to ignore the last search term if it's a stop word. Default is true. */ + removeTrailingStopWords?: boolean; +}; + +/** Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene. */ +export type SynonymTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter"; + /** A list of synonyms in one of the following two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma separated list of equivalent words. Set the expand option to change how this list is interpreted. */ + synonyms: string[]; + /** A value indicating whether to case-fold input for matching. Default is false. */ + ignoreCase?: boolean; + /** A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. If true, all words in the list of synonyms (if => notation is not used) will map to one another. The following list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. */ + expand?: boolean; +}; -/** - * Defines values for VisualFeature. - * Possible values include: 'Adult', 'Brands', 'Categories', 'Description', 'Faces', 'Objects', - * 'Tags' - * @readonly - * @enum {string} - */ -export type VisualFeature = 'adult' | 'brands' | 'categories' | 'description' | 'faces' | 'objects' | 'tags'; +/** Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. */ +export type TruncateTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.TruncateTokenFilter"; + /** The length at which terms will be truncated. Default and maximum is 300. 
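A sketch of a SynonymTokenFilter literal showing both synonym formats described above (an explicit => mapping and a comma-separated equivalence list); `name` is the TokenFilter base member declared earlier, and the filter name and import path are invented.

import { SynonymTokenFilter } from "./generated/service/models"; // assumed path

const synonymFilter: SynonymTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter",
  name: "my_synonyms",                                  // base TokenFilter member
  synonyms: [
    "incredible, unbelievable, fabulous => amazing",    // explicit mapping form
    "incredible, unbelievable, fabulous, amazing"       // equivalence form; interpretation depends on `expand`
  ],
  expand: true
};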
*/ + length?: number; +}; -/** - * Defines values for ImageDetail. - * Possible values include: 'Celebrities', 'Landmarks' - * @readonly - * @enum {string} - */ -export type ImageDetail = 'celebrities' | 'landmarks'; +/** Filters out tokens with same text as the previous token. This token filter is implemented using Apache Lucene. */ +export type UniqueTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.UniqueTokenFilter"; + /** A value indicating whether to remove duplicates only at the same position. Default is false. */ + onlyOnSamePosition?: boolean; +}; -/** - * Defines values for EntityCategory. - * Possible values include: 'Location', 'Organization', 'Person', 'Quantity', 'Datetime', 'Url', - * 'Email' - * @readonly - * @enum {string} - */ -export type EntityCategory = 'location' | 'organization' | 'person' | 'quantity' | 'datetime' | 'url' | 'email'; +/** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */ +export type WordDelimiterTokenFilter = TokenFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter"; + /** A value indicating whether to generate part words. If set, causes parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. */ + generateWordParts?: boolean; + /** A value indicating whether to generate number subwords. Default is true. */ + generateNumberParts?: boolean; + /** A value indicating whether maximum runs of word parts will be catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false. */ + catenateWords?: boolean; + /** A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. */ + catenateNumbers?: boolean; + /** A value indicating whether all subword parts will be catenated. For example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. */ + catenateAll?: boolean; + /** A value indicating whether to split words on caseChange. For example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. */ + splitOnCaseChange?: boolean; + /** A value indicating whether original words will be preserved and added to the subword list. Default is false. */ + preserveOriginal?: boolean; + /** A value indicating whether to split on numbers. For example, if this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. */ + splitOnNumerics?: boolean; + /** A value indicating whether to remove trailing "'s" for each subword. Default is true. */ + stemEnglishPossessive?: boolean; + /** A list of tokens to protect from being delimited. */ + protectedWords?: string[]; +}; -/** - * Defines values for EntityRecognitionSkillLanguage. 
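// Illustrative sketch, not part of this patch: a WordDelimiterTokenFilter configured to split
// "AzureSearch"-style tokens on case changes while keeping the original token, per the option
// descriptions above. `name` is assumed to come from the TokenFilter base type; the protected
// word is a hypothetical example.
const wordDelimiter: WordDelimiterTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter",
  name: "split-on-case", // hypothetical filter name
  splitOnCaseChange: true,
  generateWordParts: true,
  preserveOriginal: true,
  protectedWords: ["iPhone"] // hypothetical token to keep intact
};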
- * Possible values include: 'ar', 'cs', 'zh-Hans', 'zh-Hant', 'da', 'nl', 'en', 'fi', 'fr', 'de', - * 'el', 'hu', 'it', 'ja', 'ko', 'no', 'pl', 'pt-PT', 'pt-BR', 'ru', 'es', 'sv', 'tr' - * @readonly - * @enum {string} - */ -export type EntityRecognitionSkillLanguage = 'ar' | 'cs' | 'zh-Hans' | 'zh-Hant' | 'da' | 'nl' | 'en' | 'fi' | 'fr' | 'de' | 'el' | 'hu' | 'it' | 'ja' | 'ko' | 'no' | 'pl' | 'pt-PT' | 'pt-BR' | 'ru' | 'es' | 'sv' | 'tr'; +/** A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. */ +export type MappingCharFilter = CharFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.MappingCharFilter"; + /** A list of mappings of the following format: "a=>b" (all occurrences of the character "a" will be replaced with character "b"). */ + mappings: string[]; +}; -/** - * Defines values for SentimentSkillLanguage. - * Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'it', 'no', 'pl', 'pt-PT', - * 'ru', 'es', 'sv', 'tr' - * @readonly - * @enum {string} - */ -export type SentimentSkillLanguage = 'da' | 'nl' | 'en' | 'fi' | 'fr' | 'de' | 'el' | 'it' | 'no' | 'pl' | 'pt-PT' | 'ru' | 'es' | 'sv' | 'tr'; +/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene. */ +export type PatternReplaceCharFilter = CharFilter & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter"; + /** A regular expression pattern. */ + pattern: string; + /** The replacement text. */ + replacement: string; +}; -/** - * Defines values for SplitSkillLanguage. - * Possible values include: 'da', 'de', 'en', 'es', 'fi', 'fr', 'it', 'ko', 'pt' - * @readonly - * @enum {string} - */ -export type SplitSkillLanguage = 'da' | 'de' | 'en' | 'es' | 'fi' | 'fr' | 'it' | 'ko' | 'pt'; +/** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. */ +export type ClassicSimilarity = Similarity & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.ClassicSimilarity"; +}; -/** - * Defines values for TextSplitMode. - * Possible values include: 'Pages', 'Sentences' - * @readonly - * @enum {string} - */ -export type TextSplitMode = 'pages' | 'sentences'; +/** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). 
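// Illustrative sketch, not part of this patch: the "a=>b" mapping format described above, used
// to strip dashes before tokenization. An empty right-hand side is allowed, per the doc comment.
// The `name` property is assumed to come from the CharFilter base type defined earlier in this file.
const dashRemover: MappingCharFilter = {
  odatatype: "#Microsoft.Azure.Search.MappingCharFilter",
  name: "remove-dashes", // hypothetical char filter name
  mappings: ["-=>"] // replace "-" with the empty string
};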
*/ +export type BM25Similarity = Similarity & { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.BM25Similarity"; + /** This property controls the scaling function between the term frequency of each matching terms and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. */ + k1?: number | null; + /** This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. */ + b?: number | null; +}; -/** - * Defines values for TextTranslationSkillLanguage. - * Possible values include: 'af', 'ar', 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', - * 'cs', 'da', 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', - * 'hu', 'is', 'id', 'it', 'ja', 'sw', 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', - * 'pt', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', - * 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', 'yua' - * @readonly - * @enum {string} - */ -export type TextTranslationSkillLanguage = 'af' | 'ar' | 'bn' | 'bs' | 'bg' | 'yue' | 'ca' | 'zh-Hans' | 'zh-Hant' | 'hr' | 'cs' | 'da' | 'nl' | 'en' | 'et' | 'fj' | 'fil' | 'fi' | 'fr' | 'de' | 'el' | 'ht' | 'he' | 'hi' | 'mww' | 'hu' | 'is' | 'id' | 'it' | 'ja' | 'sw' | 'tlh' | 'ko' | 'lv' | 'lt' | 'mg' | 'ms' | 'mt' | 'nb' | 'fa' | 'pl' | 'pt' | 'otq' | 'ro' | 'ru' | 'sm' | 'sr-Cyrl' | 'sr-Latn' | 'sk' | 'sl' | 'es' | 'sv' | 'ty' | 'ta' | 'te' | 'th' | 'to' | 'tr' | 'uk' | 'ur' | 'vi' | 'cy' | 'yua'; +/** Parameter group */ +export interface RequestOptions { + /** The tracking ID sent with the request to help with debugging. */ + xMsClientRequestId?: string; +} + +/** Known values of {@link ApiVersion20200630} that the service accepts. */ +export const enum KnownApiVersion20200630 { + /** Api Version '2020-06-30' */ + TwoThousandTwenty0630 = "2020-06-30" +} + +/** + * Defines values for ApiVersion20200630. \ + * {@link KnownApiVersion20200630} can be used interchangeably with ApiVersion20200630, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **2020-06-30**: Api Version '2020-06-30' + */ +export type ApiVersion20200630 = string; + +/** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */ +export const enum KnownSearchIndexerDataSourceType { + /** Indicates an Azure SQL datasource. */ + AzureSql = "azuresql", + /** Indicates a CosmosDB datasource. */ + CosmosDb = "cosmosdb", + /** Indicates a Azure Blob datasource. */ + AzureBlob = "azureblob", + /** Indicates a Azure Table datasource. */ + AzureTable = "azuretable", + /** Indicates a MySql datasource. */ + MySql = "mysql" +} + +/** + * Defines values for SearchIndexerDataSourceType. \ + * {@link KnownSearchIndexerDataSourceType} can be used interchangeably with SearchIndexerDataSourceType, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **azuresql**: Indicates an Azure SQL datasource. \ + * **cosmosdb**: Indicates a CosmosDB datasource. \ + * **azureblob**: Indicates a Azure Blob datasource. \ + * **azuretable**: Indicates a Azure Table datasource. 
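// Illustrative sketch, not part of this patch: a BM25Similarity with the defaults noted in the
// property comments above made explicit (k1 = 1.2, b = 0.75).
const bm25: BM25Similarity = {
  odatatype: "#Microsoft.Azure.Search.BM25Similarity",
  k1: 1.2,  // term-frequency scaling; 0.0 disables scaling
  b: 0.75   // document-length normalization; 0.0 disables it, 1.0 normalizes fully
};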
\ + * **mysql**: Indicates a MySql datasource. + */ +export type SearchIndexerDataSourceType = string; + +/** Known values of {@link BlobIndexerParsingMode} that the service accepts. */ +export const enum KnownBlobIndexerParsingMode { + /** Set to default for normal file processing. */ + Default = "default", + /** Set to text to improve indexing performance on plain text files in blob storage. */ + Text = "text", + /** Set to delimitedText when blobs are plain CSV files. */ + DelimitedText = "delimitedText", + /** Set to json to extract structured content from JSON files. */ + Json = "json", + /** Set to jsonArray to extract individual elements of a JSON array as separate documents in Azure Cognitive Search. */ + JsonArray = "jsonArray", + /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents in Azure Cognitive Search. */ + JsonLines = "jsonLines" +} + +/** + * Defines values for BlobIndexerParsingMode. \ + * {@link KnownBlobIndexerParsingMode} can be used interchangeably with BlobIndexerParsingMode, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **default**: Set to default for normal file processing. \ + * **text**: Set to text to improve indexing performance on plain text files in blob storage. \ + * **delimitedText**: Set to delimitedText when blobs are plain CSV files. \ + * **json**: Set to json to extract structured content from JSON files. \ + * **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents in Azure Cognitive Search. \ + * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents in Azure Cognitive Search. + */ +export type BlobIndexerParsingMode = string; + +/** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */ +export const enum KnownBlobIndexerDataToExtract { + /** Indexes just the standard blob properties and user-specified metadata. */ + StorageMetadata = "storageMetadata", + /** Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). */ + AllMetadata = "allMetadata", + /** Extracts all metadata and textual content from each blob. */ + ContentAndMetadata = "contentAndMetadata" +} + +/** + * Defines values for BlobIndexerDataToExtract. \ + * {@link KnownBlobIndexerDataToExtract} can be used interchangeably with BlobIndexerDataToExtract, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **storageMetadata**: Indexes just the standard blob properties and user-specified metadata. \ + * **allMetadata**: Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). \ + * **contentAndMetadata**: Extracts all metadata and textual content from each blob. + */ +export type BlobIndexerDataToExtract = string; + +/** Known values of {@link BlobIndexerImageAction} that the service accepts. */ +export const enum KnownBlobIndexerImageAction { + /** Ignores embedded images or image files in the data set. This is the default. */ + None = "none", + /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". 
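// Illustrative sketch, not part of this patch: as the doc comments above state, the "Known*"
// const enums are interchangeable with their plain string types, so both assignments are valid.
const parsingMode: BlobIndexerParsingMode = KnownBlobIndexerParsingMode.JsonArray; // same as "jsonArray"
const dataToExtract: BlobIndexerDataToExtract = "contentAndMetadata"; // raw string also accepted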
A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. */ + GenerateNormalizedImages = "generateNormalizedImages", + /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set. */ + GenerateNormalizedImagePerPage = "generateNormalizedImagePerPage" +} + +/** + * Defines values for BlobIndexerImageAction. \ + * {@link KnownBlobIndexerImageAction} can be used interchangeably with BlobIndexerImageAction, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **none**: Ignores embedded images or image files in the data set. This is the default. \ + * **generateNormalizedImages**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. \ + * **generateNormalizedImagePerPage**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set. + */ +export type BlobIndexerImageAction = string; + +/** Known values of {@link BlobIndexerPDFTextRotationAlgorithm} that the service accepts. */ +export const enum KnownBlobIndexerPDFTextRotationAlgorithm { + /** Leverages normal text extraction. This is the default. */ + None = "none", + /** May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply. */ + DetectAngles = "detectAngles" +} + +/** + * Defines values for BlobIndexerPDFTextRotationAlgorithm. \ + * {@link KnownBlobIndexerPDFTextRotationAlgorithm} can be used interchangeably with BlobIndexerPDFTextRotationAlgorithm, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **none**: Leverages normal text extraction. This is the default. \ + * **detectAngles**: May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply. 
+ */ +export type BlobIndexerPDFTextRotationAlgorithm = string; + +/** Known values of {@link IndexerExecutionEnvironment} that the service accepts. */ +export const enum KnownIndexerExecutionEnvironment { + /** Indicates that Azure Cognitive Search can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */ + Standard = "standard", + /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */ + Private = "private" +} + +/** + * Defines values for IndexerExecutionEnvironment. \ + * {@link KnownIndexerExecutionEnvironment} can be used interchangeably with IndexerExecutionEnvironment, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **standard**: Indicates that Azure Cognitive Search can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. \ + * **private**: Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. + */ +export type IndexerExecutionEnvironment = string; + +/** Known values of {@link SearchFieldDataType} that the service accepts. */ +export const enum KnownSearchFieldDataType { + /** Indicates that a field contains a string. */ + String = "Edm.String", + /** Indicates that a field contains a 32-bit signed integer. */ + Int32 = "Edm.Int32", + /** Indicates that a field contains a 64-bit signed integer. */ + Int64 = "Edm.Int64", + /** Indicates that a field contains an IEEE double-precision floating point number. */ + Double = "Edm.Double", + /** Indicates that a field contains a Boolean value (true or false). */ + Boolean = "Edm.Boolean", + /** Indicates that a field contains a date/time value, including timezone information. */ + DateTimeOffset = "Edm.DateTimeOffset", + /** Indicates that a field contains a geo-location in terms of longitude and latitude. */ + GeographyPoint = "Edm.GeographyPoint", + /** Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. */ + Complex = "Edm.ComplexType", + CollectionEdmString = "Collection(Edm.String)", + CollectionEdmInt32 = "Collection(Edm.Int32)", + CollectionEdmInt64 = "Collection(Edm.Int64)", + CollectionEdmDouble = "Collection(Edm.Double)", + CollectionEdmBoolean = "Collection(Edm.Boolean)", + CollectionEdmDateTimeOffset = "Collection(Edm.DateTimeOffset)", + CollectionEdmGeographyPoint = "Collection(Edm.GeographyPoint)", + CollectionEdmComplexType = "Collection(Edm.ComplexType)" +} + +/** + * Defines values for SearchFieldDataType. \ + * {@link KnownSearchFieldDataType} can be used interchangeably with SearchFieldDataType, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **Edm.String**: Indicates that a field contains a string. \ + * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer. \ + * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer. \ + * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number. 
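// Illustrative sketch, not part of this patch: SearchFieldDataType is an extensible string, so
// the known EDM values above and their collection forms can be written either way.
const keyFieldType: SearchFieldDataType = KnownSearchFieldDataType.String;              // "Edm.String"
const tagsFieldType: SearchFieldDataType = KnownSearchFieldDataType.CollectionEdmString; // "Collection(Edm.String)"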
\ + * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false). \ + * **Edm.DateTimeOffset**: Indicates that a field contains a date/time value, including timezone information. \ + * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and latitude. \ + * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. \ + * **Collection(Edm.String)** \ + * **Collection(Edm.Int32)** \ + * **Collection(Edm.Int64)** \ + * **Collection(Edm.Double)** \ + * **Collection(Edm.Boolean)** \ + * **Collection(Edm.DateTimeOffset)** \ + * **Collection(Edm.GeographyPoint)** \ + * **Collection(Edm.ComplexType)** + */ +export type SearchFieldDataType = string; + +/** Known values of {@link LexicalAnalyzerName} that the service accepts. */ +export const enum KnownLexicalAnalyzerName { + /** Microsoft analyzer for Arabic. */ + ArMicrosoft = "ar.microsoft", + /** Lucene analyzer for Arabic. */ + ArLucene = "ar.lucene", + /** Lucene analyzer for Armenian. */ + HyLucene = "hy.lucene", + /** Microsoft analyzer for Bangla. */ + BnMicrosoft = "bn.microsoft", + /** Lucene analyzer for Basque. */ + EuLucene = "eu.lucene", + /** Microsoft analyzer for Bulgarian. */ + BgMicrosoft = "bg.microsoft", + /** Lucene analyzer for Bulgarian. */ + BgLucene = "bg.lucene", + /** Microsoft analyzer for Catalan. */ + CaMicrosoft = "ca.microsoft", + /** Lucene analyzer for Catalan. */ + CaLucene = "ca.lucene", + /** Microsoft analyzer for Chinese (Simplified). */ + ZhHansMicrosoft = "zh-Hans.microsoft", + /** Lucene analyzer for Chinese (Simplified). */ + ZhHansLucene = "zh-Hans.lucene", + /** Microsoft analyzer for Chinese (Traditional). */ + ZhHantMicrosoft = "zh-Hant.microsoft", + /** Lucene analyzer for Chinese (Traditional). */ + ZhHantLucene = "zh-Hant.lucene", + /** Microsoft analyzer for Croatian. */ + HrMicrosoft = "hr.microsoft", + /** Microsoft analyzer for Czech. */ + CsMicrosoft = "cs.microsoft", + /** Lucene analyzer for Czech. */ + CsLucene = "cs.lucene", + /** Microsoft analyzer for Danish. */ + DaMicrosoft = "da.microsoft", + /** Lucene analyzer for Danish. */ + DaLucene = "da.lucene", + /** Microsoft analyzer for Dutch. */ + NlMicrosoft = "nl.microsoft", + /** Lucene analyzer for Dutch. */ + NlLucene = "nl.lucene", + /** Microsoft analyzer for English. */ + EnMicrosoft = "en.microsoft", + /** Lucene analyzer for English. */ + EnLucene = "en.lucene", + /** Microsoft analyzer for Estonian. */ + EtMicrosoft = "et.microsoft", + /** Microsoft analyzer for Finnish. */ + FiMicrosoft = "fi.microsoft", + /** Lucene analyzer for Finnish. */ + FiLucene = "fi.lucene", + /** Microsoft analyzer for French. */ + FrMicrosoft = "fr.microsoft", + /** Lucene analyzer for French. */ + FrLucene = "fr.lucene", + /** Lucene analyzer for Galician. */ + GlLucene = "gl.lucene", + /** Microsoft analyzer for German. */ + DeMicrosoft = "de.microsoft", + /** Lucene analyzer for German. */ + DeLucene = "de.lucene", + /** Microsoft analyzer for Greek. */ + ElMicrosoft = "el.microsoft", + /** Lucene analyzer for Greek. */ + ElLucene = "el.lucene", + /** Microsoft analyzer for Gujarati. */ + GuMicrosoft = "gu.microsoft", + /** Microsoft analyzer for Hebrew. */ + HeMicrosoft = "he.microsoft", + /** Microsoft analyzer for Hindi. */ + HiMicrosoft = "hi.microsoft", + /** Lucene analyzer for Hindi. */ + HiLucene = "hi.lucene", + /** Microsoft analyzer for Hungarian. 
*/ + HuMicrosoft = "hu.microsoft", + /** Lucene analyzer for Hungarian. */ + HuLucene = "hu.lucene", + /** Microsoft analyzer for Icelandic. */ + IsMicrosoft = "is.microsoft", + /** Microsoft analyzer for Indonesian (Bahasa). */ + IdMicrosoft = "id.microsoft", + /** Lucene analyzer for Indonesian. */ + IdLucene = "id.lucene", + /** Lucene analyzer for Irish. */ + GaLucene = "ga.lucene", + /** Microsoft analyzer for Italian. */ + ItMicrosoft = "it.microsoft", + /** Lucene analyzer for Italian. */ + ItLucene = "it.lucene", + /** Microsoft analyzer for Japanese. */ + JaMicrosoft = "ja.microsoft", + /** Lucene analyzer for Japanese. */ + JaLucene = "ja.lucene", + /** Microsoft analyzer for Kannada. */ + KnMicrosoft = "kn.microsoft", + /** Microsoft analyzer for Korean. */ + KoMicrosoft = "ko.microsoft", + /** Lucene analyzer for Korean. */ + KoLucene = "ko.lucene", + /** Microsoft analyzer for Latvian. */ + LvMicrosoft = "lv.microsoft", + /** Lucene analyzer for Latvian. */ + LvLucene = "lv.lucene", + /** Microsoft analyzer for Lithuanian. */ + LtMicrosoft = "lt.microsoft", + /** Microsoft analyzer for Malayalam. */ + MlMicrosoft = "ml.microsoft", + /** Microsoft analyzer for Malay (Latin). */ + MsMicrosoft = "ms.microsoft", + /** Microsoft analyzer for Marathi. */ + MrMicrosoft = "mr.microsoft", + /** Microsoft analyzer for Norwegian (Bokmål). */ + NbMicrosoft = "nb.microsoft", + /** Lucene analyzer for Norwegian. */ + NoLucene = "no.lucene", + /** Lucene analyzer for Persian. */ + FaLucene = "fa.lucene", + /** Microsoft analyzer for Polish. */ + PlMicrosoft = "pl.microsoft", + /** Lucene analyzer for Polish. */ + PlLucene = "pl.lucene", + /** Microsoft analyzer for Portuguese (Brazil). */ + PtBrMicrosoft = "pt-BR.microsoft", + /** Lucene analyzer for Portuguese (Brazil). */ + PtBrLucene = "pt-BR.lucene", + /** Microsoft analyzer for Portuguese (Portugal). */ + PtPtMicrosoft = "pt-PT.microsoft", + /** Lucene analyzer for Portuguese (Portugal). */ + PtPtLucene = "pt-PT.lucene", + /** Microsoft analyzer for Punjabi. */ + PaMicrosoft = "pa.microsoft", + /** Microsoft analyzer for Romanian. */ + RoMicrosoft = "ro.microsoft", + /** Lucene analyzer for Romanian. */ + RoLucene = "ro.lucene", + /** Microsoft analyzer for Russian. */ + RuMicrosoft = "ru.microsoft", + /** Lucene analyzer for Russian. */ + RuLucene = "ru.lucene", + /** Microsoft analyzer for Serbian (Cyrillic). */ + SrCyrillicMicrosoft = "sr-cyrillic.microsoft", + /** Microsoft analyzer for Serbian (Latin). */ + SrLatinMicrosoft = "sr-latin.microsoft", + /** Microsoft analyzer for Slovak. */ + SkMicrosoft = "sk.microsoft", + /** Microsoft analyzer for Slovenian. */ + SlMicrosoft = "sl.microsoft", + /** Microsoft analyzer for Spanish. */ + EsMicrosoft = "es.microsoft", + /** Lucene analyzer for Spanish. */ + EsLucene = "es.lucene", + /** Microsoft analyzer for Swedish. */ + SvMicrosoft = "sv.microsoft", + /** Lucene analyzer for Swedish. */ + SvLucene = "sv.lucene", + /** Microsoft analyzer for Tamil. */ + TaMicrosoft = "ta.microsoft", + /** Microsoft analyzer for Telugu. */ + TeMicrosoft = "te.microsoft", + /** Microsoft analyzer for Thai. */ + ThMicrosoft = "th.microsoft", + /** Lucene analyzer for Thai. */ + ThLucene = "th.lucene", + /** Microsoft analyzer for Turkish. */ + TrMicrosoft = "tr.microsoft", + /** Lucene analyzer for Turkish. */ + TrLucene = "tr.lucene", + /** Microsoft analyzer for Ukrainian. */ + UkMicrosoft = "uk.microsoft", + /** Microsoft analyzer for Urdu. 
*/ + UrMicrosoft = "ur.microsoft", + /** Microsoft analyzer for Vietnamese. */ + ViMicrosoft = "vi.microsoft", + /** Standard Lucene analyzer. */ + StandardLucene = "standard.lucene", + /** Standard ASCII Folding Lucene analyzer. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#Analyzers */ + StandardAsciiFoldingLucene = "standardasciifolding.lucene", + /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordAnalyzer.html */ + Keyword = "keyword", + /** Flexibly separates text into terms via a regular expression pattern. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.html */ + Pattern = "pattern", + /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/SimpleAnalyzer.html */ + Simple = "simple", + /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html */ + Stop = "stop", + /** An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html */ + Whitespace = "whitespace" +} + +/** + * Defines values for LexicalAnalyzerName. \ + * {@link KnownLexicalAnalyzerName} can be used interchangeably with LexicalAnalyzerName, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **ar.microsoft**: Microsoft analyzer for Arabic. \ + * **ar.lucene**: Lucene analyzer for Arabic. \ + * **hy.lucene**: Lucene analyzer for Armenian. \ + * **bn.microsoft**: Microsoft analyzer for Bangla. \ + * **eu.lucene**: Lucene analyzer for Basque. \ + * **bg.microsoft**: Microsoft analyzer for Bulgarian. \ + * **bg.lucene**: Lucene analyzer for Bulgarian. \ + * **ca.microsoft**: Microsoft analyzer for Catalan. \ + * **ca.lucene**: Lucene analyzer for Catalan. \ + * **zh-Hans.microsoft**: Microsoft analyzer for Chinese (Simplified). \ + * **zh-Hans.lucene**: Lucene analyzer for Chinese (Simplified). \ + * **zh-Hant.microsoft**: Microsoft analyzer for Chinese (Traditional). \ + * **zh-Hant.lucene**: Lucene analyzer for Chinese (Traditional). \ + * **hr.microsoft**: Microsoft analyzer for Croatian. \ + * **cs.microsoft**: Microsoft analyzer for Czech. \ + * **cs.lucene**: Lucene analyzer for Czech. \ + * **da.microsoft**: Microsoft analyzer for Danish. \ + * **da.lucene**: Lucene analyzer for Danish. \ + * **nl.microsoft**: Microsoft analyzer for Dutch. \ + * **nl.lucene**: Lucene analyzer for Dutch. \ + * **en.microsoft**: Microsoft analyzer for English. \ + * **en.lucene**: Lucene analyzer for English. \ + * **et.microsoft**: Microsoft analyzer for Estonian. \ + * **fi.microsoft**: Microsoft analyzer for Finnish. \ + * **fi.lucene**: Lucene analyzer for Finnish. \ + * **fr.microsoft**: Microsoft analyzer for French. \ + * **fr.lucene**: Lucene analyzer for French. \ + * **gl.lucene**: Lucene analyzer for Galician. \ + * **de.microsoft**: Microsoft analyzer for German. \ + * **de.lucene**: Lucene analyzer for German. \ + * **el.microsoft**: Microsoft analyzer for Greek. \ + * **el.lucene**: Lucene analyzer for Greek. 
\ + * **gu.microsoft**: Microsoft analyzer for Gujarati. \ + * **he.microsoft**: Microsoft analyzer for Hebrew. \ + * **hi.microsoft**: Microsoft analyzer for Hindi. \ + * **hi.lucene**: Lucene analyzer for Hindi. \ + * **hu.microsoft**: Microsoft analyzer for Hungarian. \ + * **hu.lucene**: Lucene analyzer for Hungarian. \ + * **is.microsoft**: Microsoft analyzer for Icelandic. \ + * **id.microsoft**: Microsoft analyzer for Indonesian (Bahasa). \ + * **id.lucene**: Lucene analyzer for Indonesian. \ + * **ga.lucene**: Lucene analyzer for Irish. \ + * **it.microsoft**: Microsoft analyzer for Italian. \ + * **it.lucene**: Lucene analyzer for Italian. \ + * **ja.microsoft**: Microsoft analyzer for Japanese. \ + * **ja.lucene**: Lucene analyzer for Japanese. \ + * **kn.microsoft**: Microsoft analyzer for Kannada. \ + * **ko.microsoft**: Microsoft analyzer for Korean. \ + * **ko.lucene**: Lucene analyzer for Korean. \ + * **lv.microsoft**: Microsoft analyzer for Latvian. \ + * **lv.lucene**: Lucene analyzer for Latvian. \ + * **lt.microsoft**: Microsoft analyzer for Lithuanian. \ + * **ml.microsoft**: Microsoft analyzer for Malayalam. \ + * **ms.microsoft**: Microsoft analyzer for Malay (Latin). \ + * **mr.microsoft**: Microsoft analyzer for Marathi. \ + * **nb.microsoft**: Microsoft analyzer for Norwegian (Bokmål). \ + * **no.lucene**: Lucene analyzer for Norwegian. \ + * **fa.lucene**: Lucene analyzer for Persian. \ + * **pl.microsoft**: Microsoft analyzer for Polish. \ + * **pl.lucene**: Lucene analyzer for Polish. \ + * **pt-BR.microsoft**: Microsoft analyzer for Portuguese (Brazil). \ + * **pt-BR.lucene**: Lucene analyzer for Portuguese (Brazil). \ + * **pt-PT.microsoft**: Microsoft analyzer for Portuguese (Portugal). \ + * **pt-PT.lucene**: Lucene analyzer for Portuguese (Portugal). \ + * **pa.microsoft**: Microsoft analyzer for Punjabi. \ + * **ro.microsoft**: Microsoft analyzer for Romanian. \ + * **ro.lucene**: Lucene analyzer for Romanian. \ + * **ru.microsoft**: Microsoft analyzer for Russian. \ + * **ru.lucene**: Lucene analyzer for Russian. \ + * **sr-cyrillic.microsoft**: Microsoft analyzer for Serbian (Cyrillic). \ + * **sr-latin.microsoft**: Microsoft analyzer for Serbian (Latin). \ + * **sk.microsoft**: Microsoft analyzer for Slovak. \ + * **sl.microsoft**: Microsoft analyzer for Slovenian. \ + * **es.microsoft**: Microsoft analyzer for Spanish. \ + * **es.lucene**: Lucene analyzer for Spanish. \ + * **sv.microsoft**: Microsoft analyzer for Swedish. \ + * **sv.lucene**: Lucene analyzer for Swedish. \ + * **ta.microsoft**: Microsoft analyzer for Tamil. \ + * **te.microsoft**: Microsoft analyzer for Telugu. \ + * **th.microsoft**: Microsoft analyzer for Thai. \ + * **th.lucene**: Lucene analyzer for Thai. \ + * **tr.microsoft**: Microsoft analyzer for Turkish. \ + * **tr.lucene**: Lucene analyzer for Turkish. \ + * **uk.microsoft**: Microsoft analyzer for Ukrainian. \ + * **ur.microsoft**: Microsoft analyzer for Urdu. \ + * **vi.microsoft**: Microsoft analyzer for Vietnamese. \ + * **standard.lucene**: Standard Lucene analyzer. \ + * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#Analyzers \ + * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordAnalyzer.html \ + * **pattern**: Flexibly separates text into terms via a regular expression pattern. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.html \ + * **simple**: Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/SimpleAnalyzer.html \ + * **stop**: Divides text at non-letters; Applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html \ + * **whitespace**: An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html + */ +export type LexicalAnalyzerName = string; + +/** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */ +export const enum KnownKeyPhraseExtractionSkillLanguage { + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Korean */ + Ko = "ko", + /** Norwegian (Bokmaal) */ + No = "no", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Portuguese (Brazil) */ + PtBR = "pt-BR", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv" +} + +/** + * Defines values for KeyPhraseExtractionSkillLanguage. \ + * {@link KnownKeyPhraseExtractionSkillLanguage} can be used interchangeably with KeyPhraseExtractionSkillLanguage, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **it**: Italian \ + * **ja**: Japanese \ + * **ko**: Korean \ + * **no**: Norwegian (Bokmaal) \ + * **pl**: Polish \ + * **pt-PT**: Portuguese (Portugal) \ + * **pt-BR**: Portuguese (Brazil) \ + * **ru**: Russian \ + * **es**: Spanish \ + * **sv**: Swedish + */ +export type KeyPhraseExtractionSkillLanguage = string; + +/** Known values of {@link OcrSkillLanguage} that the service accepts. */ +export const enum KnownOcrSkillLanguage { + /** Chinese-Simplified */ + ZhHans = "zh-Hans", + /** Chinese-Traditional */ + ZhHant = "zh-Hant", + /** Czech */ + Cs = "cs", + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Hungarian */ + Hu = "hu", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Korean */ + Ko = "ko", + /** Norwegian (Bokmaal) */ + Nb = "nb", + /** Polish */ + Pl = "pl", + /** Portuguese */ + Pt = "pt", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Turkish */ + Tr = "tr", + /** Arabic */ + Ar = "ar", + /** Romanian */ + Ro = "ro", + /** Serbian (Cyrillic, Serbia) */ + SrCyrl = "sr-Cyrl", + /** Serbian (Latin, Serbia) */ + SrLatn = "sr-Latn", + /** Slovak */ + Sk = "sk" +} + +/** + * Defines values for OcrSkillLanguage. 
\ + * {@link KnownOcrSkillLanguage} can be used interchangeably with OcrSkillLanguage, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **zh-Hans**: Chinese-Simplified \ + * **zh-Hant**: Chinese-Traditional \ + * **cs**: Czech \ + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **el**: Greek \ + * **hu**: Hungarian \ + * **it**: Italian \ + * **ja**: Japanese \ + * **ko**: Korean \ + * **nb**: Norwegian (Bokmaal) \ + * **pl**: Polish \ + * **pt**: Portuguese \ + * **ru**: Russian \ + * **es**: Spanish \ + * **sv**: Swedish \ + * **tr**: Turkish \ + * **ar**: Arabic \ + * **ro**: Romanian \ + * **sr-Cyrl**: Serbian (Cyrillic, Serbia) \ + * **sr-Latn**: Serbian (Latin, Serbia) \ + * **sk**: Slovak + */ +export type OcrSkillLanguage = string; + +/** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. */ +export const enum KnownImageAnalysisSkillLanguage { + /** English */ + En = "en", + /** Spanish */ + Es = "es", + /** Japanese */ + Ja = "ja", + /** Portuguese */ + Pt = "pt", + /** Chinese */ + Zh = "zh" +} + +/** + * Defines values for ImageAnalysisSkillLanguage. \ + * {@link KnownImageAnalysisSkillLanguage} can be used interchangeably with ImageAnalysisSkillLanguage, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **en**: English \ + * **es**: Spanish \ + * **ja**: Japanese \ + * **pt**: Portuguese \ + * **zh**: Chinese + */ +export type ImageAnalysisSkillLanguage = string; + +/** Known values of {@link VisualFeature} that the service accepts. */ +export const enum KnownVisualFeature { + /** Visual features recognized as adult persons. */ + Adult = "adult", + /** Visual features recognized as commercial brands. */ + Brands = "brands", + /** Categories. */ + Categories = "categories", + /** Description. */ + Description = "description", + /** Visual features recognized as people faces. */ + Faces = "faces", + /** Visual features recognized as objects. */ + Objects = "objects", + /** Tags. */ + Tags = "tags" +} + +/** + * Defines values for VisualFeature. \ + * {@link KnownVisualFeature} can be used interchangeably with VisualFeature, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **adult**: Visual features recognized as adult persons. \ + * **brands**: Visual features recognized as commercial brands. \ + * **categories**: Categories. \ + * **description**: Description. \ + * **faces**: Visual features recognized as people faces. \ + * **objects**: Visual features recognized as objects. \ + * **tags**: Tags. + */ +export type VisualFeature = string; + +/** Known values of {@link ImageDetail} that the service accepts. */ +export const enum KnownImageDetail { + /** Details recognized as celebrities. */ + Celebrities = "celebrities", + /** Details recognized as landmarks. */ + Landmarks = "landmarks" +} + +/** + * Defines values for ImageDetail. \ + * {@link KnownImageDetail} can be used interchangeably with ImageDetail, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **celebrities**: Details recognized as celebrities. \ + * **landmarks**: Details recognized as landmarks. + */ +export type ImageDetail = string; + +/** Known values of {@link EntityCategory} that the service accepts. 
*/ +export const enum KnownEntityCategory { + /** Entities describing a physical location. */ + Location = "location", + /** Entities describing an organization. */ + Organization = "organization", + /** Entities describing a person. */ + Person = "person", + /** Entities describing a quantity. */ + Quantity = "quantity", + /** Entities describing a date and time. */ + Datetime = "datetime", + /** Entities describing a URL. */ + Url = "url", + /** Entities describing an email address. */ + Email = "email" +} + +/** + * Defines values for EntityCategory. \ + * {@link KnownEntityCategory} can be used interchangeably with EntityCategory, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **location**: Entities describing a physical location. \ + * **organization**: Entities describing an organization. \ + * **person**: Entities describing a person. \ + * **quantity**: Entities describing a quantity. \ + * **datetime**: Entities describing a date and time. \ + * **url**: Entities describing a URL. \ + * **email**: Entities describing an email address. + */ +export type EntityCategory = string; + +/** Known values of {@link EntityRecognitionSkillLanguage} that the service accepts. */ +export const enum KnownEntityRecognitionSkillLanguage { + /** Arabic */ + Ar = "ar", + /** Czech */ + Cs = "cs", + /** Chinese-Simplified */ + ZhHans = "zh-Hans", + /** Chinese-Traditional */ + ZhHant = "zh-Hant", + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Hungarian */ + Hu = "hu", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Korean */ + Ko = "ko", + /** Norwegian (Bokmaal) */ + No = "no", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Portuguese (Brazil) */ + PtBR = "pt-BR", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Turkish */ + Tr = "tr" +} + +/** + * Defines values for EntityRecognitionSkillLanguage. \ + * {@link KnownEntityRecognitionSkillLanguage} can be used interchangeably with EntityRecognitionSkillLanguage, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **ar**: Arabic \ + * **cs**: Czech \ + * **zh-Hans**: Chinese-Simplified \ + * **zh-Hant**: Chinese-Traditional \ + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **el**: Greek \ + * **hu**: Hungarian \ + * **it**: Italian \ + * **ja**: Japanese \ + * **ko**: Korean \ + * **no**: Norwegian (Bokmaal) \ + * **pl**: Polish \ + * **pt-PT**: Portuguese (Portugal) \ + * **pt-BR**: Portuguese (Brazil) \ + * **ru**: Russian \ + * **es**: Spanish \ + * **sv**: Swedish \ + * **tr**: Turkish + */ +export type EntityRecognitionSkillLanguage = string; + +/** Known values of {@link SentimentSkillLanguage} that the service accepts. 
*/ +export const enum KnownSentimentSkillLanguage { + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Italian */ + It = "it", + /** Norwegian (Bokmaal) */ + No = "no", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Turkish */ + Tr = "tr" +} + +/** + * Defines values for SentimentSkillLanguage. \ + * {@link KnownSentimentSkillLanguage} can be used interchangeably with SentimentSkillLanguage, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **el**: Greek \ + * **it**: Italian \ + * **no**: Norwegian (Bokmaal) \ + * **pl**: Polish \ + * **pt-PT**: Portuguese (Portugal) \ + * **ru**: Russian \ + * **es**: Spanish \ + * **sv**: Swedish \ + * **tr**: Turkish + */ +export type SentimentSkillLanguage = string; + +/** Known values of {@link SplitSkillLanguage} that the service accepts. */ +export const enum KnownSplitSkillLanguage { + /** Danish */ + Da = "da", + /** German */ + De = "de", + /** English */ + En = "en", + /** Spanish */ + Es = "es", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** Italian */ + It = "it", + /** Korean */ + Ko = "ko", + /** Portuguese */ + Pt = "pt" +} + +/** + * Defines values for SplitSkillLanguage. \ + * {@link KnownSplitSkillLanguage} can be used interchangeably with SplitSkillLanguage, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **da**: Danish \ + * **de**: German \ + * **en**: English \ + * **es**: Spanish \ + * **fi**: Finnish \ + * **fr**: French \ + * **it**: Italian \ + * **ko**: Korean \ + * **pt**: Portuguese + */ +export type SplitSkillLanguage = string; + +/** Known values of {@link TextSplitMode} that the service accepts. */ +export const enum KnownTextSplitMode { + /** Split the text into individual pages. */ + Pages = "pages", + /** Split the text into individual sentences. */ + Sentences = "sentences" +} + +/** + * Defines values for TextSplitMode. \ + * {@link KnownTextSplitMode} can be used interchangeably with TextSplitMode, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **pages**: Split the text into individual pages. \ + * **sentences**: Split the text into individual sentences. + */ +export type TextSplitMode = string; + +/** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */ +export const enum KnownCustomEntityLookupSkillLanguage { + /** Danish */ + Da = "da", + /** German */ + De = "de", + /** English */ + En = "en", + /** Spanish */ + Es = "es", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** Italian */ + It = "it", + /** Korean */ + Ko = "ko", + /** Portuguese */ + Pt = "pt" +} + +/** + * Defines values for CustomEntityLookupSkillLanguage. \ + * {@link KnownCustomEntityLookupSkillLanguage} can be used interchangeably with CustomEntityLookupSkillLanguage, + * this enum contains the known values that the service supports. 
+ * ### Know values supported by the service + * **da**: Danish \ + * **de**: German \ + * **en**: English \ + * **es**: Spanish \ + * **fi**: Finnish \ + * **fr**: French \ + * **it**: Italian \ + * **ko**: Korean \ + * **pt**: Portuguese + */ +export type CustomEntityLookupSkillLanguage = string; + +/** Known values of {@link TextTranslationSkillLanguage} that the service accepts. */ +export const enum KnownTextTranslationSkillLanguage { + /** Afrikaans */ + Af = "af", + /** Arabic */ + Ar = "ar", + /** Bangla */ + Bn = "bn", + /** Bosnian (Latin) */ + Bs = "bs", + /** Bulgarian */ + Bg = "bg", + /** Cantonese (Traditional) */ + Yue = "yue", + /** Catalan */ + Ca = "ca", + /** Chinese Simplified */ + ZhHans = "zh-Hans", + /** Chinese Traditional */ + ZhHant = "zh-Hant", + /** Croatian */ + Hr = "hr", + /** Czech */ + Cs = "cs", + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Estonian */ + Et = "et", + /** Fijian */ + Fj = "fj", + /** Filipino */ + Fil = "fil", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Haitian Creole */ + Ht = "ht", + /** Hebrew */ + He = "he", + /** Hindi */ + Hi = "hi", + /** Hmong Daw */ + Mww = "mww", + /** Hungarian */ + Hu = "hu", + /** Icelandic */ + Is = "is", + /** Indonesian */ + Id = "id", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Kiswahili */ + Sw = "sw", + /** Klingon */ + Tlh = "tlh", + /** Korean */ + Ko = "ko", + /** Latvian */ + Lv = "lv", + /** Lithuanian */ + Lt = "lt", + /** Malagasy */ + Mg = "mg", + /** Malay */ + Ms = "ms", + /** Maltese */ + Mt = "mt", + /** Norwegian */ + Nb = "nb", + /** Persian */ + Fa = "fa", + /** Polish */ + Pl = "pl", + /** Portuguese */ + Pt = "pt", + /** Queretaro Otomi */ + Otq = "otq", + /** Romanian */ + Ro = "ro", + /** Russian */ + Ru = "ru", + /** Samoan */ + Sm = "sm", + /** Serbian (Cyrillic) */ + SrCyrl = "sr-Cyrl", + /** Serbian (Latin) */ + SrLatn = "sr-Latn", + /** Slovak */ + Sk = "sk", + /** Slovenian */ + Sl = "sl", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Tahitian */ + Ty = "ty", + /** Tamil */ + Ta = "ta", + /** Telugu */ + Te = "te", + /** Thai */ + Th = "th", + /** Tongan */ + To = "to", + /** Turkish */ + Tr = "tr", + /** Ukrainian */ + Uk = "uk", + /** Urdu */ + Ur = "ur", + /** Vietnamese */ + Vi = "vi", + /** Welsh */ + Cy = "cy", + /** Yucatec Maya */ + Yua = "yua" +} + +/** + * Defines values for TextTranslationSkillLanguage. \ + * {@link KnownTextTranslationSkillLanguage} can be used interchangeably with TextTranslationSkillLanguage, + * this enum contains the known values that the service supports. 
+ * ### Know values supported by the service + * **af**: Afrikaans \ + * **ar**: Arabic \ + * **bn**: Bangla \ + * **bs**: Bosnian (Latin) \ + * **bg**: Bulgarian \ + * **yue**: Cantonese (Traditional) \ + * **ca**: Catalan \ + * **zh-Hans**: Chinese Simplified \ + * **zh-Hant**: Chinese Traditional \ + * **hr**: Croatian \ + * **cs**: Czech \ + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **et**: Estonian \ + * **fj**: Fijian \ + * **fil**: Filipino \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **el**: Greek \ + * **ht**: Haitian Creole \ + * **he**: Hebrew \ + * **hi**: Hindi \ + * **mww**: Hmong Daw \ + * **hu**: Hungarian \ + * **is**: Icelandic \ + * **id**: Indonesian \ + * **it**: Italian \ + * **ja**: Japanese \ + * **sw**: Kiswahili \ + * **tlh**: Klingon \ + * **ko**: Korean \ + * **lv**: Latvian \ + * **lt**: Lithuanian \ + * **mg**: Malagasy \ + * **ms**: Malay \ + * **mt**: Maltese \ + * **nb**: Norwegian \ + * **fa**: Persian \ + * **pl**: Polish \ + * **pt**: Portuguese \ + * **otq**: Queretaro Otomi \ + * **ro**: Romanian \ + * **ru**: Russian \ + * **sm**: Samoan \ + * **sr-Cyrl**: Serbian (Cyrillic) \ + * **sr-Latn**: Serbian (Latin) \ + * **sk**: Slovak \ + * **sl**: Slovenian \ + * **es**: Spanish \ + * **sv**: Swedish \ + * **ty**: Tahitian \ + * **ta**: Tamil \ + * **te**: Telugu \ + * **th**: Thai \ + * **to**: Tongan \ + * **tr**: Turkish \ + * **uk**: Ukrainian \ + * **ur**: Urdu \ + * **vi**: Vietnamese \ + * **cy**: Welsh \ + * **yua**: Yucatec Maya + */ +export type TextTranslationSkillLanguage = string; + +/** Known values of {@link LexicalTokenizerName} that the service accepts. */ +export const enum KnownLexicalTokenizerName { + /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html */ + Classic = "classic", + /** Tokenizes the input from an edge into n-grams of the given size(s). See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html */ + EdgeNGram = "edgeNGram", + /** Emits the entire input as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html */ + Keyword = "keyword_v2", + /** Divides text at non-letters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html */ + Letter = "letter", + /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html */ + Lowercase = "lowercase", + /** Divides text using language-specific rules. */ + MicrosoftLanguageTokenizer = "microsoft_language_tokenizer", + /** Divides text using language-specific rules and reduces words to their base forms. */ + MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer", + /** Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html */ + NGram = "nGram", + /** Tokenizer for path-like hierarchies. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html */ + PathHierarchy = "path_hierarchy_v2", + /** Tokenizer that uses regex pattern matching to construct distinct tokens. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html */ + Pattern = "pattern", + /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html */ + Standard = "standard_v2", + /** Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html */ + UaxUrlEmail = "uax_url_email", + /** Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html */ + Whitespace = "whitespace" +} + +/** + * Defines values for LexicalTokenizerName. \ + * {@link KnownLexicalTokenizerName} can be used interchangeably with LexicalTokenizerName, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **classic**: Grammar-based tokenizer that is suitable for processing most European-language documents. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html \ + * **edgeNGram**: Tokenizes the input from an edge into n-grams of the given size(s). See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html \ + * **keyword_v2**: Emits the entire input as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html \ + * **letter**: Divides text at non-letters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html \ + * **lowercase**: Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html \ + * **microsoft_language_tokenizer**: Divides text using language-specific rules. \ + * **microsoft_language_stemming_tokenizer**: Divides text using language-specific rules and reduces words to their base forms. \ + * **nGram**: Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html \ + * **path_hierarchy_v2**: Tokenizer for path-like hierarchies. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html \ + * **pattern**: Tokenizer that uses regex pattern matching to construct distinct tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html \ + * **standard_v2**: Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html \ + * **uax_url_email**: Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html \ + * **whitespace**: Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html + */ +export type LexicalTokenizerName = string; + +/** Known values of {@link TokenFilterName} that the service accepts. 
*/ +export const enum KnownTokenFilterName { + /** A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html */ + ArabicNormalization = "arabic_normalization", + /** Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html */ + Apostrophe = "apostrophe", + /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */ + AsciiFolding = "asciifolding", + /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html */ + CjkBigram = "cjk_bigram", + /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html */ + CjkWidth = "cjk_width", + /** Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html */ + Classic = "classic", + /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html */ + CommonGram = "common_grams", + /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html */ + EdgeNGram = "edgeNGram_v2", + /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */ + Elision = "elision", + /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html */ + GermanNormalization = "german_normalization", + /** Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html */ + HindiNormalization = "hindi_normalization", + /** Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html */ + IndicNormalization = "indic_normalization", + /** Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html */ + KeywordRepeat = "keyword_repeat", + /** A high-performance kstem filter for English. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html */ + KStem = "kstem", + /** Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html */ + Length = "length", + /** Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html */ + Limit = "limit", + /** Normalizes token text to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm */ + Lowercase = "lowercase", + /** Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html */ + NGram = "nGram_v2", + /** Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html */ + PersianNormalization = "persian_normalization", + /** Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html */ + Phonetic = "phonetic", + /** Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer */ + PorterStem = "porter_stem", + /** Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */ + Reverse = "reverse", + /** Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html */ + ScandinavianNormalization = "scandinavian_normalization", + /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html */ + ScandinavianFoldingNormalization = "scandinavian_folding", + /** Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html */ + Shingle = "shingle", + /** A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html */ + Snowball = "snowball", + /** Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html */ + SoraniNormalization = "sorani_normalization", + /** Language specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters */ + Stemmer = "stemmer", + /** Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html */ + Stopwords = "stopwords", + /** Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html */ + Trim = "trim", + /** Truncates the terms to a specific length. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html */ + Truncate = "truncate", + /** Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html */ + Unique = "unique", + /** Normalizes token text to upper case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */ + Uppercase = "uppercase", + /** Splits words into subwords and performs optional transformations on subword groups. */ + WordDelimiter = "word_delimiter" +} + +/** + * Defines values for TokenFilterName. \ + * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName, + * this enum contains the known values that the service supports. + * ### Know values supported by the service + * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html \ + * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html \ + * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html \ + * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html \ + * **cjk_width**: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html \ + * **classic**: Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html \ + * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html \ + * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html \ + * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html \ + * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html \ + * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html \ + * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html \ + * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html \ + * **kstem**: A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html \ + * **length**: Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html \ + * **limit**: Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html \ + * **lowercase**: Normalizes token text to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm \ + * **nGram_v2**: Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html \ + * **persian_normalization**: Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html \ + * **phonetic**: Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html \ + * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer \ + * **reverse**: Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html \ + * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html \ + * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html \ + * **shingle**: Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html \ + * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html \ + * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html \ + * **stemmer**: Language specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters \ + * **stopwords**: Removes stop words from a token stream. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html \ + * **trim**: Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html \ + * **truncate**: Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html \ + * **unique**: Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html \ + * **uppercase**: Normalizes token text to upper case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html \ + * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups. + */ +export type TokenFilterName = string; + +/** Known values of {@link CharFilterName} that the service accepts. */ +export const enum KnownCharFilterName { + /** A character filter that attempts to strip out HTML constructs. See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html */ + HtmlStrip = "html_strip" +} + +/** + * Defines values for CharFilterName. \ + * {@link KnownCharFilterName} can be used interchangeably with CharFilterName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **html_strip**: A character filter that attempts to strip out HTML constructs. See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html + */ +export type CharFilterName = string; + +/** Known values of {@link RegexFlags} that the service accepts. */ +export const enum KnownRegexFlags { + /** Enables canonical equivalence. */ + CanonEq = "CANON_EQ", + /** Enables case-insensitive matching. */ + CaseInsensitive = "CASE_INSENSITIVE", + /** Permits whitespace and comments in the pattern. */ + Comments = "COMMENTS", + /** Enables dotall mode. */ + DotAll = "DOTALL", + /** Enables literal parsing of the pattern. */ + Literal = "LITERAL", + /** Enables multiline mode. */ + Multiline = "MULTILINE", + /** Enables Unicode-aware case folding. */ + UnicodeCase = "UNICODE_CASE", + /** Enables Unix lines mode. */ + UnixLines = "UNIX_LINES" +} + +/** + * Defines values for RegexFlags. \ + * {@link KnownRegexFlags} can be used interchangeably with RegexFlags, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **CANON_EQ**: Enables canonical equivalence. \ + * **CASE_INSENSITIVE**: Enables case-insensitive matching. \ + * **COMMENTS**: Permits whitespace and comments in the pattern. \ + * **DOTALL**: Enables dotall mode. \ + * **LITERAL**: Enables literal parsing of the pattern. \ + * **MULTILINE**: Enables multiline mode. \ + * **UNICODE_CASE**: Enables Unicode-aware case folding. \ + * **UNIX_LINES**: Enables Unix lines mode. + */ +export type RegexFlags = string; +/** Defines values for IndexerStatus. */ +export type IndexerStatus = "unknown" | "error" | "running"; +/** Defines values for IndexerExecutionStatus. */ +export type IndexerExecutionStatus = + | "transientFailure" + | "success" + | "inProgress" + | "reset"; +/** Defines values for ScoringFunctionInterpolation. 
*/ +export type ScoringFunctionInterpolation = + | "linear" + | "constant" + | "quadratic" + | "logarithmic"; +/** Defines values for ScoringFunctionAggregation. */ +export type ScoringFunctionAggregation = + | "sum" + | "average" + | "minimum" + | "maximum" + | "firstMatching"; +/** Defines values for TokenCharacterKind. */ +export type TokenCharacterKind = + | "letter" + | "digit" + | "whitespace" + | "punctuation" + | "symbol"; +/** Defines values for MicrosoftTokenizerLanguage. */ +export type MicrosoftTokenizerLanguage = + | "bangla" + | "bulgarian" + | "catalan" + | "chineseSimplified" + | "chineseTraditional" + | "croatian" + | "czech" + | "danish" + | "dutch" + | "english" + | "french" + | "german" + | "greek" + | "gujarati" + | "hindi" + | "icelandic" + | "indonesian" + | "italian" + | "japanese" + | "kannada" + | "korean" + | "malay" + | "malayalam" + | "marathi" + | "norwegianBokmaal" + | "polish" + | "portuguese" + | "portugueseBrazilian" + | "punjabi" + | "romanian" + | "russian" + | "serbianCyrillic" + | "serbianLatin" + | "slovenian" + | "spanish" + | "swedish" + | "tamil" + | "telugu" + | "thai" + | "ukrainian" + | "urdu" + | "vietnamese"; +/** Defines values for MicrosoftStemmingTokenizerLanguage. */ +export type MicrosoftStemmingTokenizerLanguage = + | "arabic" + | "bangla" + | "bulgarian" + | "catalan" + | "croatian" + | "czech" + | "danish" + | "dutch" + | "english" + | "estonian" + | "finnish" + | "french" + | "german" + | "greek" + | "gujarati" + | "hebrew" + | "hindi" + | "hungarian" + | "icelandic" + | "indonesian" + | "italian" + | "kannada" + | "latvian" + | "lithuanian" + | "malay" + | "malayalam" + | "marathi" + | "norwegianBokmaal" + | "polish" + | "portuguese" + | "portugueseBrazilian" + | "punjabi" + | "romanian" + | "russian" + | "serbianCyrillic" + | "serbianLatin" + | "slovak" + | "slovenian" + | "spanish" + | "swedish" + | "tamil" + | "telugu" + | "turkish" + | "ukrainian" + | "urdu"; +/** Defines values for CjkBigramTokenFilterScripts. */ +export type CjkBigramTokenFilterScripts = + | "han" + | "hiragana" + | "katakana" + | "hangul"; +/** Defines values for EdgeNGramTokenFilterSide. */ +export type EdgeNGramTokenFilterSide = "front" | "back"; +/** Defines values for PhoneticEncoder. */ +export type PhoneticEncoder = + | "metaphone" + | "doubleMetaphone" + | "soundex" + | "refinedSoundex" + | "caverphone1" + | "caverphone2" + | "cologne" + | "nysiis" + | "koelnerPhonetik" + | "haasePhonetik" + | "beiderMorse"; +/** Defines values for SnowballTokenFilterLanguage. */ +export type SnowballTokenFilterLanguage = + | "armenian" + | "basque" + | "catalan" + | "danish" + | "dutch" + | "english" + | "finnish" + | "french" + | "german" + | "german2" + | "hungarian" + | "italian" + | "kp" + | "lovins" + | "norwegian" + | "porter" + | "portuguese" + | "romanian" + | "russian" + | "spanish" + | "swedish" + | "turkish"; +/** Defines values for StemmerTokenFilterLanguage. 
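
// Illustrative sketch, not from the generated sources: the Known* const enums above are
// interchangeable with their string-typed counterparts, so a value typed as, say,
// TokenFilterName accepts either an enum member or any raw string the service may add later.
// The variable names below are hypothetical.
const knownFilter: TokenFilterName = KnownTokenFilterName.Lowercase; // inlines to "lowercase"
const futureFilter: TokenFilterName = "some_future_filter";          // unknown values still type-check
// The closed unions that follow (IndexerStatus, TokenCharacterKind, ...) accept only the
// string literals listed in their definitions.
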
*/ +export type StemmerTokenFilterLanguage = + | "arabic" + | "armenian" + | "basque" + | "brazilian" + | "bulgarian" + | "catalan" + | "czech" + | "danish" + | "dutch" + | "dutchKp" + | "english" + | "lightEnglish" + | "minimalEnglish" + | "possessiveEnglish" + | "porter2" + | "lovins" + | "finnish" + | "lightFinnish" + | "french" + | "lightFrench" + | "minimalFrench" + | "galician" + | "minimalGalician" + | "german" + | "german2" + | "lightGerman" + | "minimalGerman" + | "greek" + | "hindi" + | "hungarian" + | "lightHungarian" + | "indonesian" + | "irish" + | "italian" + | "lightItalian" + | "sorani" + | "latvian" + | "norwegian" + | "lightNorwegian" + | "minimalNorwegian" + | "lightNynorsk" + | "minimalNynorsk" + | "portuguese" + | "lightPortuguese" + | "minimalPortuguese" + | "portugueseRslp" + | "romanian" + | "russian" + | "lightRussian" + | "spanish" + | "lightSpanish" + | "swedish" + | "lightSwedish" + | "turkish"; +/** Defines values for StopwordsList. */ +export type StopwordsList = + | "arabic" + | "armenian" + | "basque" + | "brazilian" + | "bulgarian" + | "catalan" + | "czech" + | "danish" + | "dutch" + | "english" + | "finnish" + | "french" + | "galician" + | "german" + | "greek" + | "hindi" + | "hungarian" + | "indonesian" + | "irish" + | "italian" + | "latvian" + | "norwegian" + | "persian" + | "portuguese" + | "romanian" + | "russian" + | "sorani" + | "spanish" + | "swedish" + | "thai" + | "turkish"; + +/** Optional parameters. */ +export interface DataSourcesCreateOrUpdateOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; +} -/** - * Contains response data for the createOrUpdate operation. - */ +/** Contains response data for the createOrUpdate operation. */ export type DataSourcesCreateOrUpdateResponse = SearchIndexerDataSource & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexerDataSource; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexerDataSource; + }; }; -/** - * Contains response data for the get operation. - */ +/** Optional parameters. */ +export interface DataSourcesDeleteOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; +} + +/** Optional parameters. */ +export interface DataSourcesGetOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the get operation. */ export type DataSourcesGetResponse = SearchIndexerDataSource & { - /** - * The underlying HTTP response. 
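
// Hedged sketch, not from the generated sources: the ifMatch / ifNoneMatch options above map to
// the HTTP If-Match / If-None-Match headers and enable optimistic concurrency. The ETag literal
// below is hypothetical and would normally come from a previous response.
const onlyIfUnchanged: DataSourcesCreateOrUpdateOptionalParams = {
  ifMatch: '"0x8D8C3F1ABCDEF01"' // update succeeds only while the server-side ETag still matches
};
const onlyIfMissing: DataSourcesCreateOrUpdateOptionalParams = {
  ifNoneMatch: "*" // create-only: the service rejects the call if the data source already exists
};
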
- */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexerDataSource; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexerDataSource; + }; }; -/** - * Contains response data for the list operation. - */ +/** Optional parameters. */ +export interface DataSourcesListOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Selects which top-level properties of the data sources to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; +} + +/** Contains response data for the list operation. */ export type DataSourcesListResponse = ListDataSourcesResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: ListDataSourcesResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: ListDataSourcesResult; + }; }; -/** - * Contains response data for the create operation. - */ +/** Optional parameters. */ +export interface DataSourcesCreateOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the create operation. */ export type DataSourcesCreateResponse = SearchIndexerDataSource & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexerDataSource; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexerDataSource; + }; }; -/** - * Contains response data for the createOrUpdate operation. - */ +/** Optional parameters. */ +export interface IndexersResetOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Optional parameters. */ +export interface IndexersRunOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Optional parameters. */ +export interface IndexersCreateOrUpdateOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; +} + +/** Contains response data for the createOrUpdate operation. */ export type IndexersCreateOrUpdateResponse = SearchIndexer & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. 
*/ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexer; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexer; + }; }; -/** - * Contains response data for the get operation. - */ +/** Optional parameters. */ +export interface IndexersDeleteOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; +} + +/** Optional parameters. */ +export interface IndexersGetOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the get operation. */ export type IndexersGetResponse = SearchIndexer & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexer; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexer; + }; }; -/** - * Contains response data for the list operation. - */ +/** Optional parameters. */ +export interface IndexersListOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Selects which top-level properties of the indexers to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; +} + +/** Contains response data for the list operation. */ export type IndexersListResponse = ListIndexersResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: ListIndexersResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: ListIndexersResult; + }; }; -/** - * Contains response data for the create operation. - */ +/** Optional parameters. */ +export interface IndexersCreateOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the create operation. */ export type IndexersCreateResponse = SearchIndexer & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexer; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexer; + }; }; -/** - * Contains response data for the getStatus operation. 
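
// Hedged sketch, not from the generated sources: each *Response type intersects the service model
// with an `_response` object, so callers read model properties directly and can still reach the
// raw HTTP payload. `result` is assumed to come from the corresponding generated operation.
declare const result: IndexersGetResponse;
const indexerName: string = result.name;                       // SearchIndexer model property
const statusCode: number = result._response.status;            // underlying HTTP status code
const rawBody: string = result._response.bodyAsText;           // response body as text
const typedBody: SearchIndexer = result._response.parsedBody;  // response body parsed as SearchIndexer
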
- */ +/** Optional parameters. */ +export interface IndexersGetStatusOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the getStatus operation. */ export type IndexersGetStatusResponse = SearchIndexerStatus & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexerStatus; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexerStatus; + }; }; -/** - * Contains response data for the createOrUpdate operation. - */ +/** Optional parameters. */ +export interface SkillsetsCreateOrUpdateOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; +} + +/** Contains response data for the createOrUpdate operation. */ export type SkillsetsCreateOrUpdateResponse = SearchIndexerSkillset & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexerSkillset; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexerSkillset; + }; }; -/** - * Contains response data for the get operation. - */ +/** Optional parameters. */ +export interface SkillsetsDeleteOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; +} + +/** Optional parameters. */ +export interface SkillsetsGetOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the get operation. */ export type SkillsetsGetResponse = SearchIndexerSkillset & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexerSkillset; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexerSkillset; + }; }; -/** - * Contains response data for the list operation. - */ +/** Optional parameters. 
*/ +export interface SkillsetsListOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Selects which top-level properties of the skillsets to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; +} + +/** Contains response data for the list operation. */ export type SkillsetsListResponse = ListSkillsetsResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: ListSkillsetsResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: ListSkillsetsResult; + }; }; -/** - * Contains response data for the create operation. - */ +/** Optional parameters. */ +export interface SkillsetsCreateOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the create operation. */ export type SkillsetsCreateResponse = SearchIndexerSkillset & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndexerSkillset; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndexerSkillset; + }; }; -/** - * Contains response data for the createOrUpdate operation. - */ +/** Optional parameters. */ +export interface SynonymMapsCreateOrUpdateOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; +} + +/** Contains response data for the createOrUpdate operation. */ export type SynonymMapsCreateOrUpdateResponse = SynonymMap & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SynonymMap; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SynonymMap; + }; }; -/** - * Contains response data for the get operation. - */ +/** Optional parameters. */ +export interface SynonymMapsDeleteOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; +} + +/** Optional parameters. 
*/ +export interface SynonymMapsGetOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the get operation. */ export type SynonymMapsGetResponse = SynonymMap & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SynonymMap; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SynonymMap; + }; }; -/** - * Contains response data for the list operation. - */ +/** Optional parameters. */ +export interface SynonymMapsListOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Selects which top-level properties of the synonym maps to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; +} + +/** Contains response data for the list operation. */ export type SynonymMapsListResponse = ListSynonymMapsResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: ListSynonymMapsResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: ListSynonymMapsResult; + }; }; -/** - * Contains response data for the create operation. - */ +/** Optional parameters. */ +export interface SynonymMapsCreateOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the create operation. */ export type SynonymMapsCreateResponse = SynonymMap & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SynonymMap; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SynonymMap; + }; }; -/** - * Contains response data for the create operation. - */ +/** Optional parameters. */ +export interface IndexesCreateOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the create operation. */ export type IndexesCreateResponse = SearchIndex & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndex; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndex; + }; }; -/** - * Contains response data for the list operation. - */ +/** Optional parameters. 
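
// Hedged sketch, not from the generated sources: the `select` option restricts which top-level
// properties the list operations return; the property names below are illustrative.
const namesOnly: SynonymMapsListOptionalParams = {
  select: "name" // return just the name of each synonym map
};
const everything: SynonymMapsListOptionalParams = {
  select: "*" // equivalent to the default: all top-level properties
};
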
*/ +export interface IndexesListOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Selects which top-level properties of the index definitions to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; +} + +/** Contains response data for the list operation. */ export type IndexesListResponse = ListIndexesResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: ListIndexesResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: ListIndexesResult; + }; }; -/** - * Contains response data for the createOrUpdate operation. - */ +/** Optional parameters. */ +export interface IndexesCreateOrUpdateOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. */ + allowIndexDowntime?: boolean; +} + +/** Contains response data for the createOrUpdate operation. */ export type IndexesCreateOrUpdateResponse = SearchIndex & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndex; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndex; + }; }; -/** - * Contains response data for the get operation. - */ +/** Optional parameters. */ +export interface IndexesDeleteOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; +} + +/** Optional parameters. */ +export interface IndexesGetOptionalParams extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the get operation. */ export type IndexesGetResponse = SearchIndex & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. 
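
// Hedged sketch, not from the generated sources: adding analyzers, tokenizers, token filters or
// char filters to an existing index requires opting in to brief downtime, and is typically paired
// with an ETag check. The ETag literal below is hypothetical.
const updateAnalyzers: IndexesCreateOrUpdateOptionalParams = {
  allowIndexDowntime: true,       // let the service take the index offline for a few seconds
  ifMatch: '"0x8D8C3F1ABCDEF01"'  // only apply if the index definition has not changed meanwhile
};
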
*/ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: SearchIndex; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: SearchIndex; + }; }; -/** - * Contains response data for the getStatistics operation. - */ +/** Optional parameters. */ +export interface IndexesGetStatisticsOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the getStatistics operation. */ export type IndexesGetStatisticsResponse = GetIndexStatisticsResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: GetIndexStatisticsResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: GetIndexStatisticsResult; + }; }; -/** - * Contains response data for the analyze operation. - */ +/** Optional parameters. */ +export interface IndexesAnalyzeOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the analyze operation. */ export type IndexesAnalyzeResponse = AnalyzeResult & { - /** - * The underlying HTTP response. - */ + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: AnalyzeResult; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: AnalyzeResult; + }; }; -/** - * Contains response data for the getServiceStatistics operation. - */ -export type GetServiceStatisticsResponse = ServiceStatistics & { - /** - * The underlying HTTP response. - */ +/** Optional parameters. */ +export interface SearchServiceClientGetServiceStatisticsOptionalParams + extends coreHttp.OperationOptions { + /** Parameter group */ + requestOptionsParam?: RequestOptions; +} + +/** Contains response data for the getServiceStatistics operation. */ +export type SearchServiceClientGetServiceStatisticsResponse = ServiceStatistics & { + /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { - /** - * The response body as text (string format) - */ - bodyAsText: string; - - /** - * The response body as parsed JSON or XML - */ - parsedBody: ServiceStatistics; - }; + /** The response body as text (string format) */ + bodyAsText: string; + + /** The response body as parsed JSON or XML */ + parsedBody: ServiceStatistics; + }; }; + +/** Optional parameters. */ +export interface SearchServiceClientOptionalParams + extends coreHttp.ServiceClientOptions { + /** Overrides client endpoint. 
*/ + endpoint?: string; +} diff --git a/sdk/search/search-documents/src/generated/service/models/mappers.ts b/sdk/search/search-documents/src/generated/service/models/mappers.ts index 52e25f943200..d1093c5cb3fd 100644 --- a/sdk/search/search-documents/src/generated/service/models/mappers.ts +++ b/sdk/search/search-documents/src/generated/service/models/mappers.ts @@ -1,6 +1,6 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -8,147 +8,105 @@ import * as coreHttp from "@azure/core-http"; - -export const AnalyzeRequest: coreHttp.CompositeMapper = { - serializedName: "AnalyzeRequest", +export const SearchIndexerDataSource: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "AnalyzeRequest", + className: "SearchIndexerDataSource", modelProperties: { - text: { + name: { + serializedName: "name", required: true, - serializedName: "text", type: { name: "String" } }, - analyzer: { - serializedName: "analyzer", + description: { + serializedName: "description", type: { name: "String" } }, - tokenizer: { - serializedName: "tokenizer", + type: { + serializedName: "type", + required: true, type: { name: "String" } }, - tokenFilters: { - serializedName: "tokenFilters", + credentials: { + serializedName: "credentials", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "Composite", + className: "DataSourceCredentials" } }, - charFilters: { - serializedName: "charFilters", + container: { + serializedName: "container", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "Composite", + className: "SearchIndexerDataContainer" } - } - } - } -}; - -export const AnalyzedTokenInfo: coreHttp.CompositeMapper = { - serializedName: "AnalyzedTokenInfo", - type: { - name: "Composite", - className: "AnalyzedTokenInfo", - modelProperties: { - token: { - required: true, - readOnly: true, - serializedName: "token", + }, + dataChangeDetectionPolicy: { + serializedName: "dataChangeDetectionPolicy", type: { - name: "String" + name: "Composite", + className: "DataChangeDetectionPolicy" } }, - startOffset: { - required: true, - nullable: false, - readOnly: true, - serializedName: "startOffset", + dataDeletionDetectionPolicy: { + serializedName: "dataDeletionDetectionPolicy", type: { - name: "Number" + name: "Composite", + className: "DataDeletionDetectionPolicy" } }, - endOffset: { - required: true, - nullable: false, - readOnly: true, - serializedName: "endOffset", + etag: { + serializedName: "@odata\\.etag", type: { - name: "Number" + name: "String" } }, - position: { - required: true, - nullable: false, - readOnly: true, - serializedName: "position", + encryptionKey: { + serializedName: "encryptionKey", type: { - name: "Number" + name: "Composite", + className: "SearchResourceEncryptionKey" } } } } }; -export const AnalyzeResult: coreHttp.CompositeMapper = { - serializedName: "AnalyzeResult", +export const DataSourceCredentials: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "AnalyzeResult", + className: "DataSourceCredentials", modelProperties: { - tokens: { - required: true, - serializedName: "tokens", + connectionString: { + serializedName: "connectionString", type: { - name: 
"Sequence", - element: { - type: { - name: "Composite", - className: "AnalyzedTokenInfo" - } - } + name: "String" } } } } }; -export const LexicalAnalyzer: coreHttp.CompositeMapper = { - serializedName: "LexicalAnalyzer", +export const SearchIndexerDataContainer: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: { - serializedName: "@odata.type", - clientName: "odatatype" - }, - uberParent: "LexicalAnalyzer", - className: "LexicalAnalyzer", + className: "SearchIndexerDataContainer", modelProperties: { name: { - required: true, serializedName: "name", + required: true, type: { name: "String" } }, - odatatype: { - required: true, - serializedName: "@odata\\.type", + query: { + serializedName: "query", type: { name: "String" } @@ -157,1186 +115,987 @@ export const LexicalAnalyzer: coreHttp.CompositeMapper = { } }; -export const CustomAnalyzer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.CustomAnalyzer", +export const DataChangeDetectionPolicy: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, - uberParent: "LexicalAnalyzer", - className: "CustomAnalyzer", + className: "DataChangeDetectionPolicy", + uberParent: "DataChangeDetectionPolicy", + polymorphicDiscriminator: { + serializedName: "@odata\\.type", + clientName: "@odata\\.type" + }, modelProperties: { - ...LexicalAnalyzer.type.modelProperties, - tokenizer: { + odatatype: { + serializedName: "@odata\\.type", required: true, - serializedName: "tokenizer", type: { name: "String" } - }, - tokenFilters: { - serializedName: "tokenFilters", - type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } - } - }, - charFilters: { - serializedName: "charFilters", + } + } + } +}; + +export const DataDeletionDetectionPolicy: coreHttp.CompositeMapper = { + type: { + name: "Composite", + className: "DataDeletionDetectionPolicy", + uberParent: "DataDeletionDetectionPolicy", + polymorphicDiscriminator: { + serializedName: "@odata\\.type", + clientName: "@odata\\.type" + }, + modelProperties: { + odatatype: { + serializedName: "@odata\\.type", + required: true, type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } } } } }; -export const PatternAnalyzer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PatternAnalyzer", +export const SearchResourceEncryptionKey: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, - uberParent: "LexicalAnalyzer", - className: "PatternAnalyzer", + className: "SearchResourceEncryptionKey", modelProperties: { - ...LexicalAnalyzer.type.modelProperties, - lowerCaseTerms: { - serializedName: "lowercase", - defaultValue: true, + keyName: { + serializedName: "keyVaultKeyName", + required: true, type: { - name: "Boolean" + name: "String" } }, - pattern: { - serializedName: "pattern", - defaultValue: '\W+', + keyVersion: { + serializedName: "keyVaultKeyVersion", + required: true, type: { name: "String" } }, - flags: { - serializedName: "flags", + vaultUri: { + serializedName: "keyVaultUri", + required: true, type: { name: "String" } }, - stopwords: { - serializedName: "stopwords", + accessCredentials: { + serializedName: "accessCredentials", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "Composite", + className: "AzureActiveDirectoryApplicationCredentials" } } } } }; -export const 
LuceneStandardAnalyzer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StandardAnalyzer", +export const AzureActiveDirectoryApplicationCredentials: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, - uberParent: "LexicalAnalyzer", - className: "LuceneStandardAnalyzer", + className: "AzureActiveDirectoryApplicationCredentials", modelProperties: { - ...LexicalAnalyzer.type.modelProperties, - maxTokenLength: { - serializedName: "maxTokenLength", - defaultValue: 255, - constraints: { - InclusiveMaximum: 300 - }, + applicationId: { + serializedName: "applicationId", + required: true, type: { - name: "Number" + name: "String" } }, - stopwords: { - serializedName: "stopwords", + applicationSecret: { + serializedName: "applicationSecret", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } } } } }; -export const StopAnalyzer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StopAnalyzer", +export const SearchError: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, - uberParent: "LexicalAnalyzer", - className: "StopAnalyzer", + className: "SearchError", modelProperties: { - ...LexicalAnalyzer.type.modelProperties, - stopwords: { - serializedName: "stopwords", + code: { + serializedName: "code", + readOnly: true, type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } - } - } - } -}; - -export const LexicalTokenizer: coreHttp.CompositeMapper = { - serializedName: "LexicalTokenizer", - type: { - name: "Composite", - polymorphicDiscriminator: { - serializedName: "@odata.type", - clientName: "odatatype" - }, - uberParent: "LexicalTokenizer", - className: "LexicalTokenizer", - modelProperties: { - name: { + }, + message: { + serializedName: "message", required: true, - serializedName: "name", + readOnly: true, type: { name: "String" } }, - odatatype: { - required: true, - serializedName: "@odata\\.type", + details: { + serializedName: "details", + readOnly: true, type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "Composite", + className: "SearchError" + } + } } } } } }; -export const ClassicTokenizer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.ClassicTokenizer", +export const ListDataSourcesResult: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "ClassicTokenizer", + className: "ListDataSourcesResult", modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - serializedName: "maxTokenLength", - defaultValue: 255, - constraints: { - InclusiveMaximum: 300 - }, + dataSources: { + serializedName: "value", + required: true, + readOnly: true, type: { - name: "Number" + name: "Sequence", + element: { + type: { + name: "Composite", + className: "SearchIndexerDataSource" + } + } } } } } }; -export const EdgeNGramTokenizer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.EdgeNGramTokenizer", +export const SearchIndexer: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "EdgeNGramTokenizer", + className: "SearchIndexer", modelProperties: { - 
...LexicalTokenizer.type.modelProperties, - minGram: { - serializedName: "minGram", - defaultValue: 1, - constraints: { - InclusiveMaximum: 300 - }, + name: { + serializedName: "name", + required: true, type: { - name: "Number" + name: "String" } }, - maxGram: { - serializedName: "maxGram", - defaultValue: 2, - constraints: { - InclusiveMaximum: 300 - }, + description: { + serializedName: "description", type: { - name: "Number" + name: "String" } }, - tokenChars: { - serializedName: "tokenChars", + dataSourceName: { + serializedName: "dataSourceName", + required: true, + type: { + name: "String" + } + }, + skillsetName: { + serializedName: "skillsetName", + type: { + name: "String" + } + }, + targetIndexName: { + serializedName: "targetIndexName", + required: true, + type: { + name: "String" + } + }, + schedule: { + serializedName: "schedule", + type: { + name: "Composite", + className: "IndexingSchedule" + } + }, + parameters: { + serializedName: "parameters", + type: { + name: "Composite", + className: "IndexingParameters" + } + }, + fieldMappings: { + serializedName: "fieldMappings", type: { name: "Sequence", element: { type: { - name: "Enum", - allowedValues: [ - "letter", - "digit", - "whitespace", - "punctuation", - "symbol" - ] + name: "Composite", + className: "FieldMapping" + } + } + } + }, + outputFieldMappings: { + serializedName: "outputFieldMappings", + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "FieldMapping" } } } + }, + isDisabled: { + serializedName: "disabled", + nullable: true, + type: { + name: "Boolean" + } + }, + etag: { + serializedName: "@odata\\.etag", + type: { + name: "String" + } + }, + encryptionKey: { + serializedName: "encryptionKey", + type: { + name: "Composite", + className: "SearchResourceEncryptionKey" + } } } } }; -export const KeywordTokenizer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.KeywordTokenizer", +export const IndexingSchedule: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "KeywordTokenizer", + className: "IndexingSchedule", modelProperties: { - ...LexicalTokenizer.type.modelProperties, - bufferSize: { - serializedName: "bufferSize", - defaultValue: 256, + interval: { + serializedName: "interval", + required: true, type: { - name: "Number" + name: "TimeSpan" + } + }, + startTime: { + serializedName: "startTime", + type: { + name: "DateTime" } } } } }; -export const KeywordTokenizerV2: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.KeywordTokenizerV2", +export const IndexingParameters: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "KeywordTokenizerV2", + className: "IndexingParameters", modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - serializedName: "maxTokenLength", - defaultValue: 256, - constraints: { - InclusiveMaximum: 300 - }, + batchSize: { + serializedName: "batchSize", + nullable: true, + type: { + name: "Number" + } + }, + maxFailedItems: { + serializedName: "maxFailedItems", + nullable: true, + type: { + name: "Number" + } + }, + maxFailedItemsPerBatch: { + serializedName: "maxFailedItemsPerBatch", + nullable: true, type: { name: "Number" } + }, + configuration: { + serializedName: "configuration", + type: { + name: "Composite", + className: 
"IndexingParametersConfiguration" + } } } } }; -export const MicrosoftLanguageTokenizer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer", +export const IndexingParametersConfiguration: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "MicrosoftLanguageTokenizer", + className: "IndexingParametersConfiguration", + additionalProperties: { type: { name: "Object" } }, modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - serializedName: "maxTokenLength", - defaultValue: 255, - constraints: { - InclusiveMaximum: 300 - }, + parsingMode: { + defaultValue: "default", + serializedName: "parsingMode", type: { - name: "Number" + name: "String" } }, - isSearchTokenizer: { - serializedName: "isSearchTokenizer", - defaultValue: false, + excludedFileNameExtensions: { + serializedName: "excludedFileNameExtensions", type: { - name: "Boolean" + name: "String" } }, - language: { - serializedName: "language", + indexedFileNameExtensions: { + serializedName: "indexedFileNameExtensions", type: { - name: "Enum", - allowedValues: [ - "bangla", - "bulgarian", - "catalan", - "chineseSimplified", - "chineseTraditional", - "croatian", - "czech", - "danish", - "dutch", - "english", - "french", - "german", - "greek", - "gujarati", - "hindi", - "icelandic", - "indonesian", - "italian", - "japanese", - "kannada", - "korean", - "malay", - "malayalam", - "marathi", - "norwegianBokmaal", - "polish", - "portuguese", - "portugueseBrazilian", - "punjabi", - "romanian", - "russian", - "serbianCyrillic", - "serbianLatin", - "slovenian", - "spanish", - "swedish", - "tamil", - "telugu", - "thai", - "ukrainian", - "urdu", - "vietnamese" - ] + name: "String" } - } - } - } -}; - -export const MicrosoftLanguageStemmingTokenizer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer", - type: { - name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "MicrosoftLanguageStemmingTokenizer", - modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - serializedName: "maxTokenLength", - defaultValue: 255, - constraints: { - InclusiveMaximum: 300 - }, + }, + failOnUnsupportedContentType: { + serializedName: "failOnUnsupportedContentType", type: { - name: "Number" + name: "Boolean" } }, - isSearchTokenizer: { - serializedName: "isSearchTokenizer", - defaultValue: false, + failOnUnprocessableDocument: { + serializedName: "failOnUnprocessableDocument", type: { name: "Boolean" } }, - language: { - serializedName: "language", + indexStorageMetadataOnlyForOversizedDocuments: { + serializedName: "indexStorageMetadataOnlyForOversizedDocuments", type: { - name: "Enum", - allowedValues: [ - "arabic", - "bangla", - "bulgarian", - "catalan", - "croatian", - "czech", - "danish", - "dutch", - "english", - "estonian", - "finnish", - "french", - "german", - "greek", - "gujarati", - "hebrew", - "hindi", - "hungarian", - "icelandic", - "indonesian", - "italian", - "kannada", - "latvian", - "lithuanian", - "malay", - "malayalam", - "marathi", - "norwegianBokmaal", - "polish", - "portuguese", - "portugueseBrazilian", - "punjabi", - "romanian", - "russian", - "serbianCyrillic", - "serbianLatin", - "slovak", - "slovenian", - "spanish", - "swedish", - "tamil", - "telugu", - "turkish", - 
"ukrainian", - "urdu" - ] + name: "Boolean" } - } - } - } -}; - -export const NGramTokenizer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.NGramTokenizer", - type: { - name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "NGramTokenizer", - modelProperties: { - ...LexicalTokenizer.type.modelProperties, - minGram: { - serializedName: "minGram", - defaultValue: 1, - constraints: { - InclusiveMaximum: 300 - }, + }, + delimitedTextHeaders: { + serializedName: "delimitedTextHeaders", type: { - name: "Number" + name: "String" } }, - maxGram: { - serializedName: "maxGram", - defaultValue: 2, - constraints: { - InclusiveMaximum: 300 - }, + delimitedTextDelimiter: { + serializedName: "delimitedTextDelimiter", type: { - name: "Number" + name: "String" } }, - tokenChars: { - serializedName: "tokenChars", + firstLineContainsHeaders: { + defaultValue: true, + serializedName: "firstLineContainsHeaders", type: { - name: "Sequence", - element: { - type: { - name: "Enum", - allowedValues: [ - "letter", - "digit", - "whitespace", - "punctuation", - "symbol" - ] - } - } + name: "Boolean" } - } - } - } -}; - -export const PathHierarchyTokenizerV2: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2", - type: { - name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "PathHierarchyTokenizerV2", - modelProperties: { - ...LexicalTokenizer.type.modelProperties, - delimiter: { - serializedName: "delimiter", - defaultValue: '/', + }, + documentRoot: { + serializedName: "documentRoot", type: { name: "String" } }, - replacement: { - serializedName: "replacement", - defaultValue: '/', + dataToExtract: { + defaultValue: "contentAndMetadata", + serializedName: "dataToExtract", type: { name: "String" } }, - maxTokenLength: { - serializedName: "maxTokenLength", - defaultValue: 300, - constraints: { - InclusiveMaximum: 300 - }, + imageAction: { + defaultValue: "none", + serializedName: "imageAction", type: { - name: "Number" + name: "String" } }, - reverseTokenOrder: { - serializedName: "reverse", - defaultValue: false, + allowSkillsetToReadFileData: { + serializedName: "allowSkillsetToReadFileData", type: { name: "Boolean" } }, - numberOfTokensToSkip: { - serializedName: "skip", - defaultValue: 0, + pdfTextRotationAlgorithm: { + defaultValue: "none", + serializedName: "pdfTextRotationAlgorithm", type: { - name: "Number" + name: "String" + } + }, + executionEnvironment: { + defaultValue: "standard", + serializedName: "executionEnvironment", + type: { + name: "String" + } + }, + queryTimeout: { + defaultValue: "00:05:00", + serializedName: "queryTimeout", + type: { + name: "String" } } } } }; -export const PatternTokenizer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PatternTokenizer", +export const FieldMapping: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "PatternTokenizer", + className: "FieldMapping", modelProperties: { - ...LexicalTokenizer.type.modelProperties, - pattern: { - serializedName: "pattern", - defaultValue: '\W+', + sourceFieldName: { + serializedName: "sourceFieldName", + required: true, type: { name: "String" } }, - flags: { - serializedName: "flags", + targetFieldName: { + serializedName: 
"targetFieldName", type: { name: "String" } }, - group: { - serializedName: "group", - defaultValue: -1, + mappingFunction: { + serializedName: "mappingFunction", type: { - name: "Number" + name: "Composite", + className: "FieldMappingFunction" } } } } }; -export const LuceneStandardTokenizer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StandardTokenizer", +export const FieldMappingFunction: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "LuceneStandardTokenizer", + className: "FieldMappingFunction", modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - serializedName: "maxTokenLength", - defaultValue: 255, + name: { + serializedName: "name", + required: true, type: { - name: "Number" + name: "String" } - } - } - } -}; - -export const LuceneStandardTokenizerV2: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StandardTokenizerV2", - type: { - name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "LuceneStandardTokenizerV2", - modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - serializedName: "maxTokenLength", - defaultValue: 255, - constraints: { - InclusiveMaximum: 300 - }, + }, + parameters: { + serializedName: "parameters", type: { - name: "Number" + name: "Dictionary", + value: { type: { name: "any" } } } } } } }; -export const UaxUrlEmailTokenizer: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer", +export const ListIndexersResult: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - uberParent: "LexicalTokenizer", - className: "UaxUrlEmailTokenizer", + className: "ListIndexersResult", modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - serializedName: "maxTokenLength", - defaultValue: 255, - constraints: { - InclusiveMaximum: 300 - }, + indexers: { + serializedName: "value", + required: true, + readOnly: true, type: { - name: "Number" + name: "Sequence", + element: { + type: { + name: "Composite", + className: "SearchIndexer" + } + } } } } } }; -export const TokenFilter: coreHttp.CompositeMapper = { - serializedName: "TokenFilter", +export const SearchIndexerStatus: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: { - serializedName: "@odata.type", - clientName: "odatatype" - }, - uberParent: "TokenFilter", - className: "TokenFilter", + className: "SearchIndexerStatus", modelProperties: { - name: { + status: { + serializedName: "status", required: true, - serializedName: "name", + readOnly: true, type: { - name: "String" + name: "Enum", + allowedValues: ["unknown", "error", "running"] } }, - odatatype: { - required: true, - serializedName: "@odata\\.type", - type: { - name: "String" - } - } - } - } -}; - -export const AsciiFoldingTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "AsciiFoldingTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - preserveOriginal: { - serializedName: "preserveOriginal", - defaultValue: false, + lastResult: { + 
serializedName: "lastResult", type: { - name: "Boolean" + name: "Composite", + className: "IndexerExecutionResult" } - } - } - } -}; - -export const CjkBigramTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.CjkBigramTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "CjkBigramTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - ignoreScripts: { - serializedName: "ignoreScripts", + }, + executionHistory: { + serializedName: "executionHistory", + required: true, + readOnly: true, type: { name: "Sequence", element: { type: { - name: "Enum", - allowedValues: [ - "han", - "hiragana", - "katakana", - "hangul" - ] + name: "Composite", + className: "IndexerExecutionResult" } } } }, - outputUnigrams: { - serializedName: "outputUnigrams", - defaultValue: false, + limits: { + serializedName: "limits", type: { - name: "Boolean" + name: "Composite", + className: "SearchIndexerLimits" } } } } }; -export const CommonGramTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.CommonGramTokenFilter", +export const IndexerExecutionResult: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "CommonGramTokenFilter", + className: "IndexerExecutionResult", modelProperties: { - ...TokenFilter.type.modelProperties, - commonWords: { + status: { + serializedName: "status", required: true, - serializedName: "commonWords", + readOnly: true, type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "Enum", + allowedValues: ["transientFailure", "success", "inProgress", "reset"] } }, - ignoreCase: { - serializedName: "ignoreCase", - defaultValue: false, + errorMessage: { + serializedName: "errorMessage", + readOnly: true, type: { - name: "Boolean" + name: "String" } }, - useQueryMode: { - serializedName: "queryMode", - defaultValue: false, + startTime: { + serializedName: "startTime", + readOnly: true, type: { - name: "Boolean" + name: "DateTime" } - } - } - } -}; - -export const DictionaryDecompounderTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "DictionaryDecompounderTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - wordList: { + }, + endTime: { + serializedName: "endTime", + readOnly: true, + nullable: true, + type: { + name: "DateTime" + } + }, + errors: { + serializedName: "errors", required: true, - serializedName: "wordList", + readOnly: true, type: { name: "Sequence", element: { type: { - name: "String" + name: "Composite", + className: "SearchIndexerError" } } } }, - minWordSize: { - serializedName: "minWordSize", - defaultValue: 5, - constraints: { - InclusiveMaximum: 300 - }, + warnings: { + serializedName: "warnings", + required: true, + readOnly: true, type: { - name: "Number" + name: "Sequence", + element: { + type: { + name: "Composite", + className: "SearchIndexerWarning" + } + } } }, - minSubwordSize: { - serializedName: "minSubwordSize", - defaultValue: 2, - constraints: { - InclusiveMaximum: 300 - }, + itemCount: { + serializedName: "itemsProcessed", + required: true, + readOnly: true, type: { name: "Number" } }, - 
maxSubwordSize: { - serializedName: "maxSubwordSize", - defaultValue: 15, - constraints: { - InclusiveMaximum: 300 - }, + failedItemCount: { + serializedName: "itemsFailed", + required: true, + readOnly: true, type: { name: "Number" } }, - onlyLongestMatch: { - serializedName: "onlyLongestMatch", - defaultValue: false, - type: { - name: "Boolean" + initialTrackingState: { + serializedName: "initialTrackingState", + readOnly: true, + type: { + name: "String" + } + }, + finalTrackingState: { + serializedName: "finalTrackingState", + readOnly: true, + type: { + name: "String" } } } } }; -export const EdgeNGramTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.EdgeNGramTokenFilter", +export const SearchIndexerError: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "EdgeNGramTokenFilter", + className: "SearchIndexerError", modelProperties: { - ...TokenFilter.type.modelProperties, - minGram: { - serializedName: "minGram", - defaultValue: 1, + key: { + serializedName: "key", + readOnly: true, type: { - name: "Number" + name: "String" } }, - maxGram: { - serializedName: "maxGram", - defaultValue: 2, + errorMessage: { + serializedName: "errorMessage", + required: true, + readOnly: true, + type: { + name: "String" + } + }, + statusCode: { + serializedName: "statusCode", + required: true, + readOnly: true, type: { name: "Number" } }, - side: { - serializedName: "side", + name: { + serializedName: "name", + readOnly: true, type: { - name: "Enum", - allowedValues: [ - "front", - "back" - ] + name: "String" + } + }, + details: { + serializedName: "details", + readOnly: true, + type: { + name: "String" + } + }, + documentationLink: { + serializedName: "documentationLink", + readOnly: true, + type: { + name: "String" } } } } }; -export const EdgeNGramTokenFilterV2: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2", +export const SearchIndexerWarning: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "EdgeNGramTokenFilterV2", + className: "SearchIndexerWarning", modelProperties: { - ...TokenFilter.type.modelProperties, - minGram: { - serializedName: "minGram", - defaultValue: 1, - constraints: { - InclusiveMaximum: 300 - }, + key: { + serializedName: "key", + readOnly: true, type: { - name: "Number" + name: "String" } }, - maxGram: { - serializedName: "maxGram", - defaultValue: 2, - constraints: { - InclusiveMaximum: 300 - }, + message: { + serializedName: "message", + required: true, + readOnly: true, type: { - name: "Number" + name: "String" } }, - side: { - serializedName: "side", + name: { + serializedName: "name", + readOnly: true, type: { - name: "Enum", - allowedValues: [ - "front", - "back" - ] + name: "String" + } + }, + details: { + serializedName: "details", + readOnly: true, + type: { + name: "String" + } + }, + documentationLink: { + serializedName: "documentationLink", + readOnly: true, + type: { + name: "String" } } } } }; -export const ElisionTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.ElisionTokenFilter", +export const SearchIndexerLimits: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "ElisionTokenFilter", + 
className: "SearchIndexerLimits", modelProperties: { - ...TokenFilter.type.modelProperties, - articles: { - serializedName: "articles", + maxRunTime: { + serializedName: "maxRunTime", + readOnly: true, type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "TimeSpan" + } + }, + maxDocumentExtractionSize: { + serializedName: "maxDocumentExtractionSize", + readOnly: true, + type: { + name: "Number" + } + }, + maxDocumentContentCharactersToExtract: { + serializedName: "maxDocumentContentCharactersToExtract", + readOnly: true, + type: { + name: "Number" } } } } }; -export const KeepTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.KeepTokenFilter", +export const SearchIndexerSkillset: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "KeepTokenFilter", + className: "SearchIndexerSkillset", modelProperties: { - ...TokenFilter.type.modelProperties, - keepWords: { + name: { + serializedName: "name", required: true, - serializedName: "keepWords", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } }, - lowerCaseKeepWords: { - serializedName: "keepWordsCase", - defaultValue: false, + description: { + serializedName: "description", type: { - name: "Boolean" + name: "String" } - } - } - } -}; - -export const KeywordMarkerTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "KeywordMarkerTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - keywords: { + }, + skills: { + serializedName: "skills", required: true, - serializedName: "keywords", type: { name: "Sequence", element: { type: { - name: "String" + name: "Composite", + className: "SearchIndexerSkill" } } } }, - ignoreCase: { - serializedName: "ignoreCase", - defaultValue: false, + cognitiveServicesAccount: { + serializedName: "cognitiveServices", type: { - name: "Boolean" + name: "Composite", + className: "CognitiveServicesAccount" } - } - } - } -}; - -export const LengthTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.LengthTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "LengthTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - minLength: { - serializedName: "min", - defaultValue: 0, - constraints: { - InclusiveMaximum: 300 - }, + }, + etag: { + serializedName: "@odata\\.etag", type: { - name: "Number" + name: "String" } }, - maxLength: { - serializedName: "max", - defaultValue: 300, - constraints: { - InclusiveMaximum: 300 - }, + encryptionKey: { + serializedName: "encryptionKey", type: { - name: "Number" + name: "Composite", + className: "SearchResourceEncryptionKey" } } } } }; -export const LimitTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.LimitTokenFilter", +export const SearchIndexerSkill: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "LimitTokenFilter", + className: "SearchIndexerSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: { + 
serializedName: "@odata\\.type", + clientName: "@odata\\.type" + }, modelProperties: { - ...TokenFilter.type.modelProperties, - maxTokenCount: { - serializedName: "maxTokenCount", - defaultValue: 1, + odatatype: { + serializedName: "@odata\\.type", + required: true, type: { - name: "Number" + name: "String" } }, - consumeAllTokens: { - serializedName: "consumeAllTokens", - defaultValue: false, + name: { + serializedName: "name", type: { - name: "Boolean" + name: "String" } - } - } - } -}; - -export const NGramTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.NGramTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "NGramTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - minGram: { - serializedName: "minGram", - defaultValue: 1, + }, + description: { + serializedName: "description", type: { - name: "Number" + name: "String" } }, - maxGram: { - serializedName: "maxGram", - defaultValue: 2, + context: { + serializedName: "context", type: { - name: "Number" + name: "String" + } + }, + inputs: { + serializedName: "inputs", + required: true, + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "InputFieldMappingEntry" + } + } + } + }, + outputs: { + serializedName: "outputs", + required: true, + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "OutputFieldMappingEntry" + } + } } } } } }; -export const NGramTokenFilterV2: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.NGramTokenFilterV2", +export const InputFieldMappingEntry: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "NGramTokenFilterV2", + className: "InputFieldMappingEntry", modelProperties: { - ...TokenFilter.type.modelProperties, - minGram: { - serializedName: "minGram", - defaultValue: 1, - constraints: { - InclusiveMaximum: 300 - }, + name: { + serializedName: "name", + required: true, type: { - name: "Number" + name: "String" } }, - maxGram: { - serializedName: "maxGram", - defaultValue: 2, - constraints: { - InclusiveMaximum: 300 - }, + source: { + serializedName: "source", type: { - name: "Number" + name: "String" + } + }, + sourceContext: { + serializedName: "sourceContext", + type: { + name: "String" + } + }, + inputs: { + serializedName: "inputs", + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "InputFieldMappingEntry" + } + } } } } } }; -export const PatternCaptureTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PatternCaptureTokenFilter", +export const OutputFieldMappingEntry: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "PatternCaptureTokenFilter", + className: "OutputFieldMappingEntry", modelProperties: { - ...TokenFilter.type.modelProperties, - patterns: { + name: { + serializedName: "name", required: true, - serializedName: "patterns", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } }, - preserveOriginal: { - serializedName: "preserveOriginal", - defaultValue: true, + targetName: { + serializedName: "targetName", type: { - name: "Boolean" + name: "String" } } } } }; -export const PatternReplaceTokenFilter: 
coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PatternReplaceTokenFilter", +export const CognitiveServicesAccount: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "PatternReplaceTokenFilter", + className: "CognitiveServicesAccount", + uberParent: "CognitiveServicesAccount", + polymorphicDiscriminator: { + serializedName: "@odata\\.type", + clientName: "@odata\\.type" + }, modelProperties: { - ...TokenFilter.type.modelProperties, - pattern: { + odatatype: { + serializedName: "@odata\\.type", required: true, - serializedName: "pattern", type: { name: "String" } }, - replacement: { - required: true, - serializedName: "replacement", + description: { + serializedName: "description", type: { name: "String" } @@ -1345,98 +1104,65 @@ export const PatternReplaceTokenFilter: coreHttp.CompositeMapper = { } }; -export const PhoneticTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PhoneticTokenFilter", +export const ListSkillsetsResult: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "PhoneticTokenFilter", + className: "ListSkillsetsResult", modelProperties: { - ...TokenFilter.type.modelProperties, - encoder: { - serializedName: "encoder", - type: { - name: "Enum", - allowedValues: [ - "metaphone", - "doubleMetaphone", - "soundex", - "refinedSoundex", - "caverphone1", - "caverphone2", - "cologne", - "nysiis", - "koelnerPhonetik", - "haasePhonetik", - "beiderMorse" - ] - } - }, - replaceOriginalTokens: { - serializedName: "replace", - defaultValue: true, + skillsets: { + serializedName: "value", + required: true, + readOnly: true, type: { - name: "Boolean" + name: "Sequence", + element: { + type: { + name: "Composite", + className: "SearchIndexerSkillset" + } + } } } } } }; -export const ShingleTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.ShingleTokenFilter", +export const SynonymMap: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "ShingleTokenFilter", + className: "SynonymMap", modelProperties: { - ...TokenFilter.type.modelProperties, - maxShingleSize: { - serializedName: "maxShingleSize", - defaultValue: 2, - constraints: { - InclusiveMinimum: 2 - }, - type: { - name: "Number" - } - }, - minShingleSize: { - serializedName: "minShingleSize", - defaultValue: 2, - constraints: { - InclusiveMinimum: 2 - }, + name: { + serializedName: "name", + required: true, type: { - name: "Number" + name: "String" } }, - outputUnigrams: { - serializedName: "outputUnigrams", - defaultValue: true, + format: { + defaultValue: "solr", + isConstant: true, + serializedName: "format", type: { - name: "Boolean" + name: "String" } }, - outputUnigramsIfNoShingles: { - serializedName: "outputUnigramsIfNoShingles", - defaultValue: false, + synonyms: { + serializedName: "synonyms", + required: true, type: { - name: "Boolean" + name: "String" } }, - tokenSeparator: { - serializedName: "tokenSeparator", - defaultValue: '', + encryptionKey: { + serializedName: "encryptionKey", type: { - name: "String" + name: "Composite", + className: "SearchResourceEncryptionKey" } }, - filterToken: { - serializedName: "filterToken", - defaultValue: '_', + etag: { + serializedName: 
"@odata\\.etag", type: { name: "String" } @@ -1445,438 +1171,381 @@ export const ShingleTokenFilter: coreHttp.CompositeMapper = { } }; -export const SnowballTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.SnowballTokenFilter", +export const ListSynonymMapsResult: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "SnowballTokenFilter", + className: "ListSynonymMapsResult", modelProperties: { - ...TokenFilter.type.modelProperties, - language: { + synonymMaps: { + serializedName: "value", required: true, - serializedName: "language", + readOnly: true, type: { - name: "Enum", - allowedValues: [ - "armenian", - "basque", - "catalan", - "danish", - "dutch", - "english", - "finnish", - "french", - "german", - "german2", - "hungarian", - "italian", - "kp", - "lovins", - "norwegian", - "porter", - "portuguese", - "romanian", - "russian", - "spanish", - "swedish", - "turkish" - ] + name: "Sequence", + element: { + type: { + name: "Composite", + className: "SynonymMap" + } + } } } } } }; -export const StemmerTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StemmerTokenFilter", +export const SearchIndex: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "StemmerTokenFilter", + className: "SearchIndex", modelProperties: { - ...TokenFilter.type.modelProperties, - language: { + name: { + serializedName: "name", required: true, - serializedName: "language", type: { - name: "Enum", - allowedValues: [ - "arabic", - "armenian", - "basque", - "brazilian", - "bulgarian", - "catalan", - "czech", - "danish", - "dutch", - "dutchKp", - "english", - "lightEnglish", - "minimalEnglish", - "possessiveEnglish", - "porter2", - "lovins", - "finnish", - "lightFinnish", - "french", - "lightFrench", - "minimalFrench", - "galician", - "minimalGalician", - "german", - "german2", - "lightGerman", - "minimalGerman", - "greek", - "hindi", - "hungarian", - "lightHungarian", - "indonesian", - "irish", - "italian", - "lightItalian", - "sorani", - "latvian", - "norwegian", - "lightNorwegian", - "minimalNorwegian", - "lightNynorsk", - "minimalNynorsk", - "portuguese", - "lightPortuguese", - "minimalPortuguese", - "portugueseRslp", - "romanian", - "russian", - "lightRussian", - "spanish", - "lightSpanish", - "swedish", - "lightSwedish", - "turkish" - ] + name: "String" } - } - } - } -}; - -export const StemmerOverrideTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "StemmerOverrideTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - rules: { + }, + fields: { + serializedName: "fields", required: true, - serializedName: "rules", type: { name: "Sequence", element: { type: { - name: "String" + name: "Composite", + className: "SearchField" + } + } + } + }, + scoringProfiles: { + serializedName: "scoringProfiles", + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "ScoringProfile" + } + } + } + }, + defaultScoringProfile: { + serializedName: "defaultScoringProfile", + type: { + name: "String" + } + }, + corsOptions: { + serializedName: "corsOptions", + type: { + name: 
"Composite", + className: "CorsOptions" + } + }, + suggesters: { + serializedName: "suggesters", + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "Suggester" + } + } + } + }, + analyzers: { + serializedName: "analyzers", + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "LexicalAnalyzer" + } + } + } + }, + tokenizers: { + serializedName: "tokenizers", + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "LexicalTokenizer" + } + } + } + }, + tokenFilters: { + serializedName: "tokenFilters", + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "TokenFilter" } } } - } - } - } -}; - -export const StopwordsTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StopwordsTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "StopwordsTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - stopwords: { - serializedName: "stopwords", + }, + charFilters: { + serializedName: "charFilters", type: { name: "Sequence", element: { type: { - name: "String" + name: "Composite", + className: "CharFilter" } } } }, - stopwordsList: { - serializedName: "stopwordsList", + encryptionKey: { + serializedName: "encryptionKey", type: { - name: "Enum", - allowedValues: [ - "arabic", - "armenian", - "basque", - "brazilian", - "bulgarian", - "catalan", - "czech", - "danish", - "dutch", - "english", - "finnish", - "french", - "galician", - "german", - "greek", - "hindi", - "hungarian", - "indonesian", - "irish", - "italian", - "latvian", - "norwegian", - "persian", - "portuguese", - "romanian", - "russian", - "sorani", - "spanish", - "swedish", - "thai", - "turkish" - ] + name: "Composite", + className: "SearchResourceEncryptionKey" } }, - ignoreCase: { - serializedName: "ignoreCase", - defaultValue: false, + similarity: { + serializedName: "similarity", type: { - name: "Boolean" + name: "Composite", + className: "Similarity" } }, - removeTrailingStopWords: { - serializedName: "removeTrailing", - defaultValue: true, + etag: { + serializedName: "@odata\\.etag", type: { - name: "Boolean" + name: "String" } } } } }; -export const SynonymTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.SynonymTokenFilter", +export const SearchField: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "SynonymTokenFilter", + className: "SearchField", modelProperties: { - ...TokenFilter.type.modelProperties, - synonyms: { + name: { + serializedName: "name", required: true, - serializedName: "synonyms", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } }, - ignoreCase: { - serializedName: "ignoreCase", - defaultValue: false, + type: { + serializedName: "type", + required: true, type: { - name: "Boolean" + name: "String" } }, - expand: { - serializedName: "expand", - defaultValue: true, + key: { + serializedName: "key", type: { name: "Boolean" } - } - } - } -}; - -export const TruncateTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.TruncateTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "TruncateTokenFilter", - 
modelProperties: { - ...TokenFilter.type.modelProperties, - length: { - serializedName: "length", - defaultValue: 300, - constraints: { - InclusiveMaximum: 300 - }, + }, + retrievable: { + serializedName: "retrievable", type: { - name: "Number" + name: "Boolean" } - } - } - } -}; - -export const UniqueTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.UniqueTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "UniqueTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - onlyOnSamePosition: { - serializedName: "onlyOnSamePosition", - defaultValue: false, + }, + searchable: { + serializedName: "searchable", type: { name: "Boolean" } - } - } - } -}; - -export const WordDelimiterTokenFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.WordDelimiterTokenFilter", - type: { - name: "Composite", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - uberParent: "TokenFilter", - className: "WordDelimiterTokenFilter", - modelProperties: { - ...TokenFilter.type.modelProperties, - generateWordParts: { - serializedName: "generateWordParts", - defaultValue: true, + }, + filterable: { + serializedName: "filterable", type: { name: "Boolean" } }, - generateNumberParts: { - serializedName: "generateNumberParts", - defaultValue: true, + sortable: { + serializedName: "sortable", type: { name: "Boolean" } }, - catenateWords: { - serializedName: "catenateWords", - defaultValue: false, + facetable: { + serializedName: "facetable", type: { name: "Boolean" } }, - catenateNumbers: { - serializedName: "catenateNumbers", - defaultValue: false, + analyzer: { + serializedName: "analyzer", + nullable: true, type: { - name: "Boolean" + name: "String" } }, - catenateAll: { - serializedName: "catenateAll", - defaultValue: false, + searchAnalyzer: { + serializedName: "searchAnalyzer", + nullable: true, type: { - name: "Boolean" + name: "String" } }, - splitOnCaseChange: { - serializedName: "splitOnCaseChange", - defaultValue: true, + indexAnalyzer: { + serializedName: "indexAnalyzer", + nullable: true, type: { - name: "Boolean" + name: "String" } }, - preserveOriginal: { - serializedName: "preserveOriginal", - defaultValue: false, + synonymMaps: { + serializedName: "synonymMaps", type: { - name: "Boolean" + name: "Sequence", + element: { + type: { + name: "String" + } + } } }, - splitOnNumerics: { - serializedName: "splitOnNumerics", - defaultValue: true, + fields: { + serializedName: "fields", type: { - name: "Boolean" + name: "Sequence", + element: { + type: { + name: "Composite", + className: "SearchField" + } + } + } + } + } + } +}; + +export const ScoringProfile: coreHttp.CompositeMapper = { + type: { + name: "Composite", + className: "ScoringProfile", + modelProperties: { + name: { + serializedName: "name", + required: true, + type: { + name: "String" } }, - stemEnglishPossessive: { - serializedName: "stemEnglishPossessive", - defaultValue: true, + textWeights: { + serializedName: "text", type: { - name: "Boolean" + name: "Composite", + className: "TextWeights" } }, - protectedWords: { - serializedName: "protectedWords", + functions: { + serializedName: "functions", type: { name: "Sequence", element: { type: { - name: "String" + name: "Composite", + className: "ScoringFunction" } } } + }, + functionAggregation: { + serializedName: "functionAggregation", + type: { + name: "Enum", + allowedValues: [ + "sum", + 
"average", + "minimum", + "maximum", + "firstMatching" + ] + } } } } }; -export const CharFilter: coreHttp.CompositeMapper = { - serializedName: "CharFilter", +export const TextWeights: coreHttp.CompositeMapper = { + type: { + name: "Composite", + className: "TextWeights", + modelProperties: { + weights: { + serializedName: "weights", + required: true, + type: { + name: "Dictionary", + value: { type: { name: "Number" } } + } + } + } + } +}; + +export const ScoringFunction: coreHttp.CompositeMapper = { type: { name: "Composite", + className: "ScoringFunction", + uberParent: "ScoringFunction", polymorphicDiscriminator: { - serializedName: "@odata.type", - clientName: "odatatype" + serializedName: "type", + clientName: "type" }, - uberParent: "CharFilter", - className: "CharFilter", modelProperties: { - name: { + type: { + serializedName: "type", required: true, - serializedName: "name", type: { name: "String" } }, - odatatype: { + fieldName: { + serializedName: "fieldName", required: true, - serializedName: "@odata\\.type", type: { name: "String" } + }, + boost: { + serializedName: "boost", + required: true, + type: { + name: "Number" + } + }, + interpolation: { + serializedName: "interpolation", + type: { + name: "Enum", + allowedValues: ["linear", "constant", "quadratic", "logarithmic"] + } } } } }; -export const MappingCharFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.MappingCharFilter", +export const CorsOptions: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: CharFilter.type.polymorphicDiscriminator, - uberParent: "CharFilter", - className: "MappingCharFilter", + className: "CorsOptions", modelProperties: { - ...CharFilter.type.modelProperties, - mappings: { + allowedOrigins: { + serializedName: "allowedOrigins", required: true, - serializedName: "mappings", type: { name: "Sequence", element: { @@ -1885,52 +1554,74 @@ export const MappingCharFilter: coreHttp.CompositeMapper = { } } } + }, + maxAgeInSeconds: { + serializedName: "maxAgeInSeconds", + nullable: true, + type: { + name: "Number" + } } } } }; -export const PatternReplaceCharFilter: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PatternReplaceCharFilter", +export const Suggester: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: CharFilter.type.polymorphicDiscriminator, - uberParent: "CharFilter", - className: "PatternReplaceCharFilter", + className: "Suggester", modelProperties: { - ...CharFilter.type.modelProperties, - pattern: { + name: { + serializedName: "name", required: true, - serializedName: "pattern", type: { name: "String" } }, - replacement: { - required: true, - serializedName: "replacement", + searchMode: { + defaultValue: "analyzingInfixMatching", + isConstant: true, + serializedName: "searchMode", type: { name: "String" } + }, + sourceFields: { + serializedName: "sourceFields", + required: true, + type: { + name: "Sequence", + element: { + type: { + name: "String" + } + } + } } } } }; -export const Similarity: coreHttp.CompositeMapper = { - serializedName: "Similarity", +export const LexicalAnalyzer: coreHttp.CompositeMapper = { type: { name: "Composite", + className: "LexicalAnalyzer", + uberParent: "LexicalAnalyzer", polymorphicDiscriminator: { - serializedName: "@odata.type", - clientName: "odatatype" + serializedName: "@odata\\.type", + clientName: "@odata\\.type" }, - uberParent: "Similarity", - className: "Similarity", modelProperties: { odatatype: { - required: true, 
serializedName: "@odata\\.type", + required: true, + type: { + name: "String" + } + }, + name: { + serializedName: "name", + required: true, type: { name: "String" } @@ -1939,54 +1630,82 @@ export const Similarity: coreHttp.CompositeMapper = { } }; -export const ClassicSimilarity: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.ClassicSimilarity", +export const LexicalTokenizer: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: Similarity.type.polymorphicDiscriminator, - uberParent: "Similarity", - className: "ClassicSimilarity", + className: "LexicalTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: { + serializedName: "@odata\\.type", + clientName: "@odata\\.type" + }, modelProperties: { - ...Similarity.type.modelProperties + odatatype: { + serializedName: "@odata\\.type", + required: true, + type: { + name: "String" + } + }, + name: { + serializedName: "name", + required: true, + type: { + name: "String" + } + } } } }; -export const BM25Similarity: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.BM25Similarity", +export const TokenFilter: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: Similarity.type.polymorphicDiscriminator, - uberParent: "Similarity", - className: "BM25Similarity", + className: "TokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: { + serializedName: "@odata\\.type", + clientName: "@odata\\.type" + }, modelProperties: { - ...Similarity.type.modelProperties, - k1: { - nullable: true, - serializedName: "k1", + odatatype: { + serializedName: "@odata\\.type", + required: true, type: { - name: "Number" + name: "String" } }, - b: { - nullable: true, - serializedName: "b", + name: { + serializedName: "name", + required: true, type: { - name: "Number" + name: "String" } } } } }; -export const DataSourceCredentials: coreHttp.CompositeMapper = { - serializedName: "DataSourceCredentials", +export const CharFilter: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "DataSourceCredentials", + className: "CharFilter", + uberParent: "CharFilter", + polymorphicDiscriminator: { + serializedName: "@odata\\.type", + clientName: "@odata\\.type" + }, modelProperties: { - connectionString: { - serializedName: "connectionString", + odatatype: { + serializedName: "@odata\\.type", + required: true, + type: { + name: "String" + } + }, + name: { + serializedName: "name", + required: true, type: { name: "String" } @@ -1995,872 +1714,1031 @@ export const DataSourceCredentials: coreHttp.CompositeMapper = { } }; -export const SearchIndexerDataContainer: coreHttp.CompositeMapper = { - serializedName: "SearchIndexerDataContainer", +export const Similarity: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "SearchIndexerDataContainer", + className: "Similarity", + uberParent: "Similarity", + polymorphicDiscriminator: { + serializedName: "@odata\\.type", + clientName: "@odata\\.type" + }, modelProperties: { - name: { + odatatype: { + serializedName: "@odata\\.type", required: true, - serializedName: "name", type: { name: "String" } - }, - query: { - serializedName: "query", + } + } + } +}; + +export const ListIndexesResult: coreHttp.CompositeMapper = { + type: { + name: "Composite", + className: "ListIndexesResult", + modelProperties: { + indexes: { + serializedName: "value", + required: true, + readOnly: true, type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "Composite", + 
className: "SearchIndex" + } + } } } } } }; -export const DataChangeDetectionPolicy: coreHttp.CompositeMapper = { - serializedName: "DataChangeDetectionPolicy", +export const GetIndexStatisticsResult: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: { - serializedName: "@odata.type", - clientName: "odatatype" - }, - uberParent: "DataChangeDetectionPolicy", - className: "DataChangeDetectionPolicy", + className: "GetIndexStatisticsResult", modelProperties: { - odatatype: { + documentCount: { + serializedName: "documentCount", + required: true, + readOnly: true, + type: { + name: "Number" + } + }, + storageSize: { + serializedName: "storageSize", required: true, - serializedName: "@odata\\.type", + readOnly: true, type: { - name: "String" + name: "Number" } } } } }; -export const HighWaterMarkChangeDetectionPolicy: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy", +export const AnalyzeRequest: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: DataChangeDetectionPolicy.type.polymorphicDiscriminator, - uberParent: "DataChangeDetectionPolicy", - className: "HighWaterMarkChangeDetectionPolicy", + className: "AnalyzeRequest", modelProperties: { - ...DataChangeDetectionPolicy.type.modelProperties, - highWaterMarkColumnName: { + text: { + serializedName: "text", required: true, - serializedName: "highWaterMarkColumnName", type: { name: "String" } + }, + analyzer: { + serializedName: "analyzer", + type: { + name: "String" + } + }, + tokenizer: { + serializedName: "tokenizer", + type: { + name: "String" + } + }, + tokenFilters: { + serializedName: "tokenFilters", + type: { + name: "Sequence", + element: { + type: { + name: "String" + } + } + } + }, + charFilters: { + serializedName: "charFilters", + type: { + name: "Sequence", + element: { + type: { + name: "String" + } + } + } } } } }; -export const SqlIntegratedChangeTrackingPolicy: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy", - type: { - name: "Composite", - polymorphicDiscriminator: DataChangeDetectionPolicy.type.polymorphicDiscriminator, - uberParent: "DataChangeDetectionPolicy", - className: "SqlIntegratedChangeTrackingPolicy", - modelProperties: { - ...DataChangeDetectionPolicy.type.modelProperties - } - } -}; - -export const DataDeletionDetectionPolicy: coreHttp.CompositeMapper = { - serializedName: "DataDeletionDetectionPolicy", +export const AnalyzeResult: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: { - serializedName: "@odata.type", - clientName: "odatatype" - }, - uberParent: "DataDeletionDetectionPolicy", - className: "DataDeletionDetectionPolicy", + className: "AnalyzeResult", modelProperties: { - odatatype: { + tokens: { + serializedName: "tokens", required: true, - serializedName: "@odata\\.type", type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "Composite", + className: "AnalyzedTokenInfo" + } + } } } } } }; -export const SoftDeleteColumnDeletionDetectionPolicy: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy", +export const AnalyzedTokenInfo: coreHttp.CompositeMapper = { type: { name: "Composite", - polymorphicDiscriminator: DataDeletionDetectionPolicy.type.polymorphicDiscriminator, - uberParent: "DataDeletionDetectionPolicy", - className: "SoftDeleteColumnDeletionDetectionPolicy", + className: "AnalyzedTokenInfo", 
modelProperties: { - ...DataDeletionDetectionPolicy.type.modelProperties, - softDeleteColumnName: { - serializedName: "softDeleteColumnName", + token: { + serializedName: "token", + required: true, + readOnly: true, type: { name: "String" } }, - softDeleteMarkerValue: { - serializedName: "softDeleteMarkerValue", + startOffset: { + serializedName: "startOffset", + required: true, + readOnly: true, type: { - name: "String" + name: "Number" + } + }, + endOffset: { + serializedName: "endOffset", + required: true, + readOnly: true, + type: { + name: "Number" + } + }, + position: { + serializedName: "position", + required: true, + readOnly: true, + type: { + name: "Number" } } } } }; -export const AzureActiveDirectoryApplicationCredentials: coreHttp.CompositeMapper = { - serializedName: "AzureActiveDirectoryApplicationCredentials", +export const ServiceStatistics: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "AzureActiveDirectoryApplicationCredentials", + className: "ServiceStatistics", modelProperties: { - applicationId: { - required: true, - serializedName: "applicationId", + counters: { + serializedName: "counters", type: { - name: "String" + name: "Composite", + className: "ServiceCounters" } }, - applicationSecret: { - serializedName: "applicationSecret", + limits: { + serializedName: "limits", type: { - name: "String" + name: "Composite", + className: "ServiceLimits" } } } } }; -export const SearchResourceEncryptionKey: coreHttp.CompositeMapper = { - serializedName: "SearchResourceEncryptionKey", +export const ServiceCounters: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "SearchResourceEncryptionKey", + className: "ServiceCounters", modelProperties: { - keyName: { - required: true, - serializedName: "keyVaultKeyName", + documentCounter: { + serializedName: "documentCount", type: { - name: "String" + name: "Composite", + className: "ResourceCounter" } }, - keyVersion: { - required: true, - serializedName: "keyVaultKeyVersion", + indexCounter: { + serializedName: "indexesCount", type: { - name: "String" + name: "Composite", + className: "ResourceCounter" } }, - vaultUri: { - required: true, - serializedName: "keyVaultUri", + indexerCounter: { + serializedName: "indexersCount", type: { - name: "String" + name: "Composite", + className: "ResourceCounter" } }, - accessCredentials: { - serializedName: "accessCredentials", + dataSourceCounter: { + serializedName: "dataSourcesCount", type: { name: "Composite", - className: "AzureActiveDirectoryApplicationCredentials" + className: "ResourceCounter" + } + }, + storageSizeCounter: { + serializedName: "storageSize", + type: { + name: "Composite", + className: "ResourceCounter" + } + }, + synonymMapCounter: { + serializedName: "synonymMaps", + type: { + name: "Composite", + className: "ResourceCounter" } } } } }; -export const SearchIndexerDataSource: coreHttp.CompositeMapper = { - serializedName: "SearchIndexerDataSource", +export const ResourceCounter: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "SearchIndexerDataSource", + className: "ResourceCounter", modelProperties: { - name: { + usage: { + serializedName: "usage", required: true, - serializedName: "name", - type: { - name: "String" - } - }, - description: { - serializedName: "description", type: { - name: "String" + name: "Number" } }, - type: { - required: true, - serializedName: "type", + quota: { + serializedName: "quota", + nullable: true, type: { - name: "String" + name: "Number" } - }, - credentials: { - required: 
true, - serializedName: "credentials", + } + } + } +}; + +export const ServiceLimits: coreHttp.CompositeMapper = { + type: { + name: "Composite", + className: "ServiceLimits", + modelProperties: { + maxFieldsPerIndex: { + serializedName: "maxFieldsPerIndex", + nullable: true, type: { - name: "Composite", - className: "DataSourceCredentials" + name: "Number" } }, - container: { - required: true, - serializedName: "container", + maxFieldNestingDepthPerIndex: { + serializedName: "maxFieldNestingDepthPerIndex", + nullable: true, type: { - name: "Composite", - className: "SearchIndexerDataContainer" + name: "Number" } }, - dataChangeDetectionPolicy: { + maxComplexCollectionFieldsPerIndex: { + serializedName: "maxComplexCollectionFieldsPerIndex", nullable: true, - serializedName: "dataChangeDetectionPolicy", type: { - name: "Composite", - className: "DataChangeDetectionPolicy" + name: "Number" } }, - dataDeletionDetectionPolicy: { + maxComplexObjectsInCollectionsPerDocument: { + serializedName: "maxComplexObjectsInCollectionsPerDocument", nullable: true, - serializedName: "dataDeletionDetectionPolicy", type: { - name: "Composite", - className: "DataDeletionDetectionPolicy" - } - }, - etag: { - serializedName: "@odata\\.etag", + name: "Number" + } + } + } + } +}; + +export const DistanceScoringParameters: coreHttp.CompositeMapper = { + type: { + name: "Composite", + className: "DistanceScoringParameters", + modelProperties: { + referencePointParameter: { + serializedName: "referencePointParameter", + required: true, type: { name: "String" } }, - encryptionKey: { - nullable: true, - serializedName: "encryptionKey", + boostingDistance: { + serializedName: "boostingDistance", + required: true, type: { - name: "Composite", - className: "SearchResourceEncryptionKey" + name: "Number" } } } } }; -export const ListDataSourcesResult: coreHttp.CompositeMapper = { - serializedName: "ListDataSourcesResult", +export const FreshnessScoringParameters: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "ListDataSourcesResult", + className: "FreshnessScoringParameters", modelProperties: { - dataSources: { + boostingDuration: { + serializedName: "boostingDuration", required: true, - readOnly: true, - serializedName: "value", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerDataSource" - } - } + name: "TimeSpan" } } } } }; -export const IndexingSchedule: coreHttp.CompositeMapper = { - serializedName: "IndexingSchedule", +export const MagnitudeScoringParameters: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "IndexingSchedule", + className: "MagnitudeScoringParameters", modelProperties: { - interval: { + boostingRangeStart: { + serializedName: "boostingRangeStart", required: true, - serializedName: "interval", type: { - name: "TimeSpan" + name: "Number" } }, - startTime: { - serializedName: "startTime", + boostingRangeEnd: { + serializedName: "boostingRangeEnd", + required: true, type: { - name: "DateTime" + name: "Number" + } + }, + shouldBoostBeyondRangeByConstant: { + serializedName: "constantBoostBeyondRange", + type: { + name: "Boolean" } } } } }; -export const IndexingParametersConfiguration: coreHttp.CompositeMapper = { - serializedName: "IndexingParametersConfiguration", +export const TagScoringParameters: coreHttp.CompositeMapper = { type: { name: "Composite", - className: "IndexingParametersConfiguration", + className: "TagScoringParameters", modelProperties: { - parsingMode: { - serializedName: "parsingMode", - 
defaultValue: 'default', + tagsParameter: { + serializedName: "tagsParameter", + required: true, + type: { + name: "String" + } + } + } + } +}; + +export const CustomEntity: coreHttp.CompositeMapper = { + type: { + name: "Composite", + className: "CustomEntity", + modelProperties: { + name: { + serializedName: "name", + required: true, type: { name: "String" } }, - excludedFileNameExtensions: { - serializedName: "excludedFileNameExtensions", - defaultValue: '', + description: { + serializedName: "description", + nullable: true, type: { name: "String" } }, - indexedFileNameExtensions: { - serializedName: "indexedFileNameExtensions", - defaultValue: '', + type: { + serializedName: "type", + nullable: true, type: { name: "String" } }, - failOnUnsupportedContentType: { - serializedName: "failOnUnsupportedContentType", - defaultValue: false, + subtype: { + serializedName: "subtype", + nullable: true, type: { - name: "Boolean" + name: "String" } }, - failOnUnprocessableDocument: { - serializedName: "failOnUnprocessableDocument", - defaultValue: false, + id: { + serializedName: "id", + nullable: true, type: { - name: "Boolean" + name: "String" } }, - indexStorageMetadataOnlyForOversizedDocuments: { - serializedName: "indexStorageMetadataOnlyForOversizedDocuments", - defaultValue: false, + caseSensitive: { + serializedName: "caseSensitive", + nullable: true, type: { name: "Boolean" } }, - delimitedTextHeaders: { - serializedName: "delimitedTextHeaders", + accentSensitive: { + serializedName: "accentSensitive", + nullable: true, type: { - name: "String" + name: "Boolean" } }, - delimitedTextDelimiter: { - serializedName: "delimitedTextDelimiter", + fuzzyEditDistance: { + serializedName: "fuzzyEditDistance", + nullable: true, type: { - name: "String" + name: "Number" } }, - firstLineContainsHeaders: { - serializedName: "firstLineContainsHeaders", - defaultValue: true, + defaultCaseSensitive: { + serializedName: "defaultCaseSensitive", + nullable: true, type: { name: "Boolean" } }, - documentRoot: { - serializedName: "documentRoot", + defaultAccentSensitive: { + serializedName: "defaultAccentSensitive", + nullable: true, type: { - name: "String" + name: "Boolean" } }, - dataToExtract: { - serializedName: "dataToExtract", - defaultValue: 'contentAndMetadata', + defaultFuzzyEditDistance: { + serializedName: "defaultFuzzyEditDistance", + nullable: true, type: { - name: "String" + name: "Number" } }, - imageAction: { - serializedName: "imageAction", - defaultValue: 'none', + aliases: { + serializedName: "aliases", + nullable: true, + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "CustomEntityAlias" + } + } + } + } + } + } +}; + +export const CustomEntityAlias: coreHttp.CompositeMapper = { + type: { + name: "Composite", + className: "CustomEntityAlias", + modelProperties: { + text: { + serializedName: "text", + required: true, type: { name: "String" } }, - allowSkillsetToReadFileData: { - serializedName: "allowSkillsetToReadFileData", - defaultValue: false, + caseSensitive: { + serializedName: "caseSensitive", + nullable: true, type: { name: "Boolean" } }, - pdfTextRotationAlgorithm: { - serializedName: "pdfTextRotationAlgorithm", - defaultValue: 'none', + accentSensitive: { + serializedName: "accentSensitive", + nullable: true, type: { - name: "String" + name: "Boolean" } }, - executionEnvironment: { - serializedName: "executionEnvironment", - defaultValue: 'standard', + fuzzyEditDistance: { + serializedName: "fuzzyEditDistance", + nullable: true, type: { - name: 
"String" + name: "Number" } - }, - queryTimeout: { - serializedName: "queryTimeout", - defaultValue: '00:05:00', + } + } + } +}; + +export const HighWaterMarkChangeDetectionPolicy: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy", + type: { + name: "Composite", + className: "HighWaterMarkChangeDetectionPolicy", + uberParent: "DataChangeDetectionPolicy", + polymorphicDiscriminator: + DataChangeDetectionPolicy.type.polymorphicDiscriminator, + modelProperties: { + ...DataChangeDetectionPolicy.type.modelProperties, + highWaterMarkColumnName: { + serializedName: "highWaterMarkColumnName", + required: true, type: { name: "String" } } - }, - additionalProperties: { - type: { - name: "Object" - } } } }; -export const IndexingParameters: coreHttp.CompositeMapper = { - serializedName: "IndexingParameters", +export const SqlIntegratedChangeTrackingPolicy: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy", type: { name: "Composite", - className: "IndexingParameters", + className: "SqlIntegratedChangeTrackingPolicy", + uberParent: "DataChangeDetectionPolicy", + polymorphicDiscriminator: + DataChangeDetectionPolicy.type.polymorphicDiscriminator, modelProperties: { - batchSize: { - nullable: true, - serializedName: "batchSize", + ...DataChangeDetectionPolicy.type.modelProperties + } + } +}; + +export const SoftDeleteColumnDeletionDetectionPolicy: coreHttp.CompositeMapper = { + serializedName: + "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy", + type: { + name: "Composite", + className: "SoftDeleteColumnDeletionDetectionPolicy", + uberParent: "DataDeletionDetectionPolicy", + polymorphicDiscriminator: + DataDeletionDetectionPolicy.type.polymorphicDiscriminator, + modelProperties: { + ...DataDeletionDetectionPolicy.type.modelProperties, + softDeleteColumnName: { + serializedName: "softDeleteColumnName", type: { - name: "Number" + name: "String" } }, - maxFailedItems: { - nullable: true, - serializedName: "maxFailedItems", - defaultValue: 0, + softDeleteMarkerValue: { + serializedName: "softDeleteMarkerValue", type: { - name: "Number" + name: "String" } - }, - maxFailedItemsPerBatch: { - nullable: true, - serializedName: "maxFailedItemsPerBatch", - defaultValue: 0, + } + } + } +}; + +export const ConditionalSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Util.ConditionalSkill", + type: { + name: "Composite", + className: "ConditionalSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerSkill.type.modelProperties + } + } +}; + +export const KeyPhraseExtractionSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill", + type: { + name: "Composite", + className: "KeyPhraseExtractionSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerSkill.type.modelProperties, + defaultLanguageCode: { + serializedName: "defaultLanguageCode", type: { - name: "Number" + name: "String" } }, - configuration: { - serializedName: "configuration", + maxKeyPhraseCount: { + serializedName: "maxKeyPhraseCount", + nullable: true, type: { - name: "Composite", - className: "IndexingParametersConfiguration", - additionalProperties: { - type: { - name: "Object" - } - } + name: "Number" } } } } }; -export const 
FieldMappingFunction: coreHttp.CompositeMapper = { - serializedName: "FieldMappingFunction", +export const OcrSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Vision.OcrSkill", type: { name: "Composite", - className: "FieldMappingFunction", + className: "OcrSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", + ...SearchIndexerSkill.type.modelProperties, + defaultLanguageCode: { + serializedName: "defaultLanguageCode", type: { name: "String" } }, - parameters: { - serializedName: "parameters", + shouldDetectOrientation: { + serializedName: "detectOrientation", type: { - name: "Dictionary", - value: { - type: { - name: "Object" - } - } + name: "Boolean" } } } } }; -export const FieldMapping: coreHttp.CompositeMapper = { - serializedName: "FieldMapping", +export const ImageAnalysisSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Vision.ImageAnalysisSkill", type: { name: "Composite", - className: "FieldMapping", + className: "ImageAnalysisSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { - sourceFieldName: { - required: true, - serializedName: "sourceFieldName", + ...SearchIndexerSkill.type.modelProperties, + defaultLanguageCode: { + serializedName: "defaultLanguageCode", type: { name: "String" } }, - targetFieldName: { - serializedName: "targetFieldName", + visualFeatures: { + serializedName: "visualFeatures", type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "String" + } + } } }, - mappingFunction: { - nullable: true, - serializedName: "mappingFunction", + details: { + serializedName: "details", type: { - name: "Composite", - className: "FieldMappingFunction" + name: "Sequence", + element: { + type: { + name: "String" + } + } } } } } }; -export const SearchIndexer: coreHttp.CompositeMapper = { - serializedName: "SearchIndexer", +export const LanguageDetectionSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.LanguageDetectionSkill", type: { name: "Composite", - className: "SearchIndexer", + className: "LanguageDetectionSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", - type: { - name: "String" - } - }, - description: { - serializedName: "description", - type: { - name: "String" - } - }, - dataSourceName: { - required: true, - serializedName: "dataSourceName", - type: { - name: "String" - } - }, - skillsetName: { - serializedName: "skillsetName", + ...SearchIndexerSkill.type.modelProperties + } + } +}; + +export const ShaperSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Util.ShaperSkill", + type: { + name: "Composite", + className: "ShaperSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerSkill.type.modelProperties + } + } +}; + +export const MergeSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.MergeSkill", + type: { + name: "Composite", + className: "MergeSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerSkill.type.modelProperties, + insertPreTag: 
{ + defaultValue: " ", + serializedName: "insertPreTag", type: { name: "String" } }, - targetIndexName: { - required: true, - serializedName: "targetIndexName", + insertPostTag: { + defaultValue: " ", + serializedName: "insertPostTag", type: { name: "String" } - }, - schedule: { - nullable: true, - serializedName: "schedule", - type: { - name: "Composite", - className: "IndexingSchedule" - } - }, - parameters: { - nullable: true, - serializedName: "parameters", - type: { - name: "Composite", - className: "IndexingParameters" - } - }, - fieldMappings: { - serializedName: "fieldMappings", + } + } + } +}; + +export const EntityRecognitionSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.EntityRecognitionSkill", + type: { + name: "Composite", + className: "EntityRecognitionSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerSkill.type.modelProperties, + categories: { + serializedName: "categories", type: { name: "Sequence", element: { type: { - name: "Composite", - className: "FieldMapping" + name: "String" } } } }, - outputFieldMappings: { - serializedName: "outputFieldMappings", + defaultLanguageCode: { + serializedName: "defaultLanguageCode", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "FieldMapping" - } - } + name: "String" } }, - isDisabled: { + includeTypelessEntities: { + serializedName: "includeTypelessEntities", nullable: true, - serializedName: "disabled", - defaultValue: false, type: { name: "Boolean" } }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String" - } - }, - encryptionKey: { + minimumPrecision: { + serializedName: "minimumPrecision", nullable: true, - serializedName: "encryptionKey", type: { - name: "Composite", - className: "SearchResourceEncryptionKey" + name: "Number" } } } } }; -export const ListIndexersResult: coreHttp.CompositeMapper = { - serializedName: "ListIndexersResult", +export const SentimentSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.SentimentSkill", type: { name: "Composite", - className: "ListIndexersResult", + className: "SentimentSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { - indexers: { - required: true, - readOnly: true, - serializedName: "value", + ...SearchIndexerSkill.type.modelProperties, + defaultLanguageCode: { + serializedName: "defaultLanguageCode", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexer" - } - } + name: "String" } } } } }; -export const SearchIndexerError: coreHttp.CompositeMapper = { - serializedName: "SearchIndexerError", +export const SplitSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.SplitSkill", type: { name: "Composite", - className: "SearchIndexerError", - modelProperties: { - key: { - readOnly: true, - serializedName: "key", - type: { - name: "String" - } - }, - errorMessage: { - required: true, - readOnly: true, - serializedName: "errorMessage", - type: { - name: "String" - } - }, - statusCode: { - required: true, - nullable: false, - readOnly: true, - serializedName: "statusCode", - type: { - name: "Number" - } - }, - name: { - readOnly: true, - serializedName: "name", + className: "SplitSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, + 
modelProperties: { + ...SearchIndexerSkill.type.modelProperties, + defaultLanguageCode: { + serializedName: "defaultLanguageCode", type: { name: "String" } }, - details: { - readOnly: true, - serializedName: "details", + textSplitMode: { + serializedName: "textSplitMode", type: { name: "String" } }, - documentationLink: { - readOnly: true, - serializedName: "documentationLink", + maxPageLength: { + serializedName: "maximumPageLength", + nullable: true, type: { - name: "String" + name: "Number" } } } } }; -export const SearchIndexerWarning: coreHttp.CompositeMapper = { - serializedName: "SearchIndexerWarning", +export const CustomEntityLookupSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.CustomEntityLookupSkill", type: { name: "Composite", - className: "SearchIndexerWarning", + className: "CustomEntityLookupSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { - key: { - readOnly: true, - serializedName: "key", + ...SearchIndexerSkill.type.modelProperties, + defaultLanguageCode: { + serializedName: "defaultLanguageCode", + nullable: true, type: { name: "String" } }, - message: { - required: true, - readOnly: true, - serializedName: "message", + entitiesDefinitionUri: { + serializedName: "entitiesDefinitionUri", + nullable: true, type: { name: "String" } }, - name: { - readOnly: true, - serializedName: "name", + inlineEntitiesDefinition: { + serializedName: "inlineEntitiesDefinition", + nullable: true, type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "Composite", + className: "CustomEntity" + } + } } }, - details: { - readOnly: true, - serializedName: "details", + globalDefaultCaseSensitive: { + serializedName: "globalDefaultCaseSensitive", + nullable: true, type: { - name: "String" + name: "Boolean" } }, - documentationLink: { - readOnly: true, - serializedName: "documentationLink", + globalDefaultAccentSensitive: { + serializedName: "globalDefaultAccentSensitive", + nullable: true, type: { - name: "String" + name: "Boolean" + } + }, + globalDefaultFuzzyEditDistance: { + serializedName: "globalDefaultFuzzyEditDistance", + nullable: true, + type: { + name: "Number" } } } } }; -export const IndexerExecutionResult: coreHttp.CompositeMapper = { - serializedName: "IndexerExecutionResult", +export const TextTranslationSkill: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.TranslationSkill", type: { name: "Composite", - className: "IndexerExecutionResult", + className: "TextTranslationSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { - status: { + ...SearchIndexerSkill.type.modelProperties, + defaultToLanguageCode: { + serializedName: "defaultToLanguageCode", required: true, - nullable: false, - readOnly: true, - serializedName: "status", type: { - name: "Enum", - allowedValues: [ - "transientFailure", - "success", - "inProgress", - "reset" - ] + name: "String" } }, - errorMessage: { - readOnly: true, - serializedName: "errorMessage", + defaultFromLanguageCode: { + serializedName: "defaultFromLanguageCode", type: { name: "String" } }, - startTime: { - readOnly: true, - serializedName: "startTime", + suggestedFrom: { + serializedName: "suggestedFrom", + nullable: true, type: { - name: "DateTime" + name: "String" } - }, - endTime: { - nullable: true, - readOnly: true, - serializedName: "endTime", + } + } + } +}; + +export const WebApiSkill: 
coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Skills.Custom.WebApiSkill", + type: { + name: "Composite", + className: "WebApiSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerSkill.type.modelProperties, + uri: { + serializedName: "uri", + required: true, type: { - name: "DateTime" + name: "String" } }, - errors: { - required: true, - readOnly: true, - serializedName: "errors", + httpHeaders: { + serializedName: "httpHeaders", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerError" - } - } + name: "Dictionary", + value: { type: { name: "String" } } } }, - warnings: { - required: true, - readOnly: true, - serializedName: "warnings", + httpMethod: { + serializedName: "httpMethod", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerWarning" - } - } + name: "String" } }, - itemCount: { - required: true, - nullable: false, - readOnly: true, - serializedName: "itemsProcessed", + timeout: { + serializedName: "timeout", type: { - name: "Number" + name: "TimeSpan" } }, - failedItemCount: { - required: true, - nullable: false, - readOnly: true, - serializedName: "itemsFailed", + batchSize: { + serializedName: "batchSize", + nullable: true, type: { name: "Number" } }, - initialTrackingState: { - readOnly: true, - serializedName: "initialTrackingState", + degreeOfParallelism: { + serializedName: "degreeOfParallelism", + nullable: true, type: { - name: "String" + name: "Number" } - }, - finalTrackingState: { - readOnly: true, - serializedName: "finalTrackingState", + } + } + } +}; + +export const DefaultCognitiveServicesAccount: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.DefaultCognitiveServices", + type: { + name: "Composite", + className: "DefaultCognitiveServicesAccount", + uberParent: "CognitiveServicesAccount", + polymorphicDiscriminator: + CognitiveServicesAccount.type.polymorphicDiscriminator, + modelProperties: { + ...CognitiveServicesAccount.type.modelProperties + } + } +}; + +export const CognitiveServicesAccountKey: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.CognitiveServicesByKey", + type: { + name: "Composite", + className: "CognitiveServicesAccountKey", + uberParent: "CognitiveServicesAccount", + polymorphicDiscriminator: + CognitiveServicesAccount.type.polymorphicDiscriminator, + modelProperties: { + ...CognitiveServicesAccount.type.modelProperties, + key: { + serializedName: "key", + required: true, type: { name: "String" } @@ -2869,168 +2747,104 @@ export const IndexerExecutionResult: coreHttp.CompositeMapper = { } }; -export const SearchIndexerLimits: coreHttp.CompositeMapper = { - serializedName: "SearchIndexerLimits", +export const DistanceScoringFunction: coreHttp.CompositeMapper = { + serializedName: "distance", type: { name: "Composite", - className: "SearchIndexerLimits", + className: "DistanceScoringFunction", + uberParent: "ScoringFunction", + polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, modelProperties: { - maxRunTime: { - readOnly: true, - serializedName: "maxRunTime", + ...ScoringFunction.type.modelProperties, + parameters: { + serializedName: "distance", type: { - name: "TimeSpan" + name: "Composite", + className: "DistanceScoringParameters" } - }, - maxDocumentExtractionSize: { - readOnly: true, - serializedName: "maxDocumentExtractionSize", + } + } + } +}; + +export 
const FreshnessScoringFunction: coreHttp.CompositeMapper = { + serializedName: "freshness", + type: { + name: "Composite", + className: "FreshnessScoringFunction", + uberParent: "ScoringFunction", + polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, + modelProperties: { + ...ScoringFunction.type.modelProperties, + parameters: { + serializedName: "freshness", type: { - name: "Number" + name: "Composite", + className: "FreshnessScoringParameters" } - }, - maxDocumentContentCharactersToExtract: { - readOnly: true, - serializedName: "maxDocumentContentCharactersToExtract", + } + } + } +}; + +export const MagnitudeScoringFunction: coreHttp.CompositeMapper = { + serializedName: "magnitude", + type: { + name: "Composite", + className: "MagnitudeScoringFunction", + uberParent: "ScoringFunction", + polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, + modelProperties: { + ...ScoringFunction.type.modelProperties, + parameters: { + serializedName: "magnitude", type: { - name: "Number" + name: "Composite", + className: "MagnitudeScoringParameters" } } } } }; -export const SearchIndexerStatus: coreHttp.CompositeMapper = { - serializedName: "SearchIndexerStatus", +export const TagScoringFunction: coreHttp.CompositeMapper = { + serializedName: "tag", type: { name: "Composite", - className: "SearchIndexerStatus", + className: "TagScoringFunction", + uberParent: "ScoringFunction", + polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, modelProperties: { - status: { - required: true, - nullable: false, - readOnly: true, - serializedName: "status", - type: { - name: "Enum", - allowedValues: [ - "unknown", - "error", - "running" - ] - } - }, - lastResult: { - readOnly: true, - serializedName: "lastResult", - type: { - name: "Composite", - className: "IndexerExecutionResult" - } - }, - executionHistory: { - required: true, - readOnly: true, - serializedName: "executionHistory", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "IndexerExecutionResult" - } - } - } - }, - limits: { - required: true, - readOnly: true, - serializedName: "limits", + ...ScoringFunction.type.modelProperties, + parameters: { + serializedName: "tag", type: { name: "Composite", - className: "SearchIndexerLimits" + className: "TagScoringParameters" } } } } }; -export const SearchField: coreHttp.CompositeMapper = { - serializedName: "SearchField", +export const CustomAnalyzer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.CustomAnalyzer", type: { name: "Composite", - className: "SearchField", + className: "CustomAnalyzer", + uberParent: "LexicalAnalyzer", + polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", - type: { - name: "String" - } - }, - type: { + ...LexicalAnalyzer.type.modelProperties, + tokenizer: { + serializedName: "tokenizer", required: true, - serializedName: "type", - type: { - name: "String" - } - }, - key: { - serializedName: "key", - type: { - name: "Boolean" - } - }, - retrievable: { - serializedName: "retrievable", - type: { - name: "Boolean" - } - }, - searchable: { - serializedName: "searchable", - type: { - name: "Boolean" - } - }, - filterable: { - serializedName: "filterable", - type: { - name: "Boolean" - } - }, - sortable: { - serializedName: "sortable", - type: { - name: "Boolean" - } - }, - facetable: { - serializedName: "facetable", - type: { - name: "Boolean" - } - }, - analyzer: { - 
serializedName: "analyzer", - type: { - name: "String" - } - }, - searchAnalyzer: { - serializedName: "searchAnalyzer", type: { name: "String" } }, - indexAnalyzer: { - serializedName: "indexAnalyzer", - type: { - name: "String" - } - }, - synonymMaps: { - serializedName: "synonymMaps", + tokenFilters: { + serializedName: "tokenFilters", type: { name: "Sequence", element: { @@ -3040,36 +2854,13 @@ export const SearchField: coreHttp.CompositeMapper = { } } }, - fields: { - serializedName: "fields", + charFilters: { + serializedName: "charFilters", type: { name: "Sequence", element: { type: { - name: "Composite", - className: "SearchField" - } - } - } - } - } - } -}; - -export const TextWeights: coreHttp.CompositeMapper = { - serializedName: "TextWeights", - type: { - name: "Composite", - className: "TextWeights", - modelProperties: { - weights: { - required: true, - serializedName: "weights", - type: { - name: "Dictionary", - value: { - type: { - name: "Number" + name: "String" } } } @@ -3078,296 +2869,478 @@ export const TextWeights: coreHttp.CompositeMapper = { } }; -export const ScoringFunction: coreHttp.CompositeMapper = { - serializedName: "ScoringFunction", +export const PatternAnalyzer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.PatternAnalyzer", type: { name: "Composite", - polymorphicDiscriminator: { - serializedName: "type", - clientName: "type" - }, - uberParent: "ScoringFunction", - className: "ScoringFunction", + className: "PatternAnalyzer", + uberParent: "LexicalAnalyzer", + polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, modelProperties: { - fieldName: { - required: true, - serializedName: "fieldName", + ...LexicalAnalyzer.type.modelProperties, + lowerCaseTerms: { + defaultValue: true, + serializedName: "lowercase", type: { - name: "String" + name: "Boolean" } }, - boost: { - required: true, - serializedName: "boost", + pattern: { + defaultValue: "W+", + serializedName: "pattern", type: { - name: "Number" + name: "String" } }, - interpolation: { - serializedName: "interpolation", + flags: { + serializedName: "flags", type: { - name: "Enum", - allowedValues: [ - "linear", - "constant", - "quadratic", - "logarithmic" - ] + name: "String" } }, - type: { - required: true, - serializedName: "type", + stopwords: { + serializedName: "stopwords", type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "String" + } + } } } } } }; -export const DistanceScoringParameters: coreHttp.CompositeMapper = { - serializedName: "DistanceScoringParameters", +export const LuceneStandardAnalyzer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.StandardAnalyzer", type: { name: "Composite", - className: "DistanceScoringParameters", + className: "LuceneStandardAnalyzer", + uberParent: "LexicalAnalyzer", + polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, modelProperties: { - referencePointParameter: { - required: true, - serializedName: "referencePointParameter", - type: { - name: "String" - } - }, - boostingDistance: { - required: true, - serializedName: "boostingDistance", + ...LexicalAnalyzer.type.modelProperties, + maxTokenLength: { + defaultValue: 255, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxTokenLength", type: { name: "Number" } - } - } - } -}; - -export const DistanceScoringFunction: coreHttp.CompositeMapper = { - serializedName: "distance", - type: { - name: "Composite", - polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, - 
uberParent: "ScoringFunction", - className: "DistanceScoringFunction", - modelProperties: { - ...ScoringFunction.type.modelProperties, - parameters: { - required: true, - serializedName: "distance", + }, + stopwords: { + serializedName: "stopwords", type: { - name: "Composite", - className: "DistanceScoringParameters" + name: "Sequence", + element: { + type: { + name: "String" + } + } } } } } }; -export const FreshnessScoringParameters: coreHttp.CompositeMapper = { - serializedName: "FreshnessScoringParameters", +export const StopAnalyzer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.StopAnalyzer", type: { name: "Composite", - className: "FreshnessScoringParameters", + className: "StopAnalyzer", + uberParent: "LexicalAnalyzer", + polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, modelProperties: { - boostingDuration: { - required: true, - serializedName: "boostingDuration", + ...LexicalAnalyzer.type.modelProperties, + stopwords: { + serializedName: "stopwords", type: { - name: "TimeSpan" + name: "Sequence", + element: { + type: { + name: "String" + } + } } } } } }; -export const FreshnessScoringFunction: coreHttp.CompositeMapper = { - serializedName: "freshness", +export const ClassicTokenizer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.ClassicTokenizer", type: { name: "Composite", - polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, - uberParent: "ScoringFunction", - className: "FreshnessScoringFunction", + className: "ClassicTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, modelProperties: { - ...ScoringFunction.type.modelProperties, - parameters: { - required: true, - serializedName: "freshness", + ...LexicalTokenizer.type.modelProperties, + maxTokenLength: { + defaultValue: 255, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxTokenLength", type: { - name: "Composite", - className: "FreshnessScoringParameters" + name: "Number" } } } } }; -export const MagnitudeScoringParameters: coreHttp.CompositeMapper = { - serializedName: "MagnitudeScoringParameters", +export const EdgeNGramTokenizer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.EdgeNGramTokenizer", type: { name: "Composite", - className: "MagnitudeScoringParameters", + className: "EdgeNGramTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, modelProperties: { - boostingRangeStart: { - required: true, - serializedName: "boostingRangeStart", + ...LexicalTokenizer.type.modelProperties, + minGram: { + defaultValue: 1, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "minGram", type: { name: "Number" } }, - boostingRangeEnd: { - required: true, - serializedName: "boostingRangeEnd", + maxGram: { + defaultValue: 2, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxGram", type: { name: "Number" } }, - shouldBoostBeyondRangeByConstant: { - serializedName: "constantBoostBeyondRange", + tokenChars: { + serializedName: "tokenChars", type: { - name: "Boolean" + name: "Sequence", + element: { + type: { + name: "Enum", + allowedValues: [ + "letter", + "digit", + "whitespace", + "punctuation", + "symbol" + ] + } + } } } } } }; -export const MagnitudeScoringFunction: coreHttp.CompositeMapper = { - serializedName: "magnitude", +export const KeywordTokenizer: coreHttp.CompositeMapper = { + serializedName: 
"#Microsoft.Azure.Search.KeywordTokenizer", type: { name: "Composite", - polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, - uberParent: "ScoringFunction", - className: "MagnitudeScoringFunction", + className: "KeywordTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, modelProperties: { - ...ScoringFunction.type.modelProperties, - parameters: { - required: true, - serializedName: "magnitude", + ...LexicalTokenizer.type.modelProperties, + bufferSize: { + defaultValue: 256, + serializedName: "bufferSize", type: { - name: "Composite", - className: "MagnitudeScoringParameters" + name: "Number" } } } } }; -export const TagScoringParameters: coreHttp.CompositeMapper = { - serializedName: "TagScoringParameters", +export const KeywordTokenizerV2: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.KeywordTokenizerV2", type: { name: "Composite", - className: "TagScoringParameters", + className: "KeywordTokenizerV2", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, modelProperties: { - tagsParameter: { - required: true, - serializedName: "tagsParameter", + ...LexicalTokenizer.type.modelProperties, + maxTokenLength: { + defaultValue: 256, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxTokenLength", type: { - name: "String" + name: "Number" } } } } }; -export const TagScoringFunction: coreHttp.CompositeMapper = { - serializedName: "tag", +export const MicrosoftLanguageTokenizer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer", type: { name: "Composite", - polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, - uberParent: "ScoringFunction", - className: "TagScoringFunction", + className: "MicrosoftLanguageTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, modelProperties: { - ...ScoringFunction.type.modelProperties, - parameters: { - required: true, - serializedName: "tag", + ...LexicalTokenizer.type.modelProperties, + maxTokenLength: { + defaultValue: 255, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxTokenLength", type: { - name: "Composite", - className: "TagScoringParameters" + name: "Number" + } + }, + isSearchTokenizer: { + serializedName: "isSearchTokenizer", + type: { + name: "Boolean" + } + }, + language: { + serializedName: "language", + type: { + name: "Enum", + allowedValues: [ + "bangla", + "bulgarian", + "catalan", + "chineseSimplified", + "chineseTraditional", + "croatian", + "czech", + "danish", + "dutch", + "english", + "french", + "german", + "greek", + "gujarati", + "hindi", + "icelandic", + "indonesian", + "italian", + "japanese", + "kannada", + "korean", + "malay", + "malayalam", + "marathi", + "norwegianBokmaal", + "polish", + "portuguese", + "portugueseBrazilian", + "punjabi", + "romanian", + "russian", + "serbianCyrillic", + "serbianLatin", + "slovenian", + "spanish", + "swedish", + "tamil", + "telugu", + "thai", + "ukrainian", + "urdu", + "vietnamese" + ] + } + } + } + } +}; + +export const MicrosoftLanguageStemmingTokenizer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer", + type: { + name: "Composite", + className: "MicrosoftLanguageStemmingTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, + 
modelProperties: { + ...LexicalTokenizer.type.modelProperties, + maxTokenLength: { + defaultValue: 255, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxTokenLength", + type: { + name: "Number" + } + }, + isSearchTokenizer: { + serializedName: "isSearchTokenizer", + type: { + name: "Boolean" + } + }, + language: { + serializedName: "language", + type: { + name: "Enum", + allowedValues: [ + "arabic", + "bangla", + "bulgarian", + "catalan", + "croatian", + "czech", + "danish", + "dutch", + "english", + "estonian", + "finnish", + "french", + "german", + "greek", + "gujarati", + "hebrew", + "hindi", + "hungarian", + "icelandic", + "indonesian", + "italian", + "kannada", + "latvian", + "lithuanian", + "malay", + "malayalam", + "marathi", + "norwegianBokmaal", + "polish", + "portuguese", + "portugueseBrazilian", + "punjabi", + "romanian", + "russian", + "serbianCyrillic", + "serbianLatin", + "slovak", + "slovenian", + "spanish", + "swedish", + "tamil", + "telugu", + "turkish", + "ukrainian", + "urdu" + ] } } } } }; -export const ScoringProfile: coreHttp.CompositeMapper = { - serializedName: "ScoringProfile", +export const NGramTokenizer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.NGramTokenizer", type: { name: "Composite", - className: "ScoringProfile", + className: "NGramTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", + ...LexicalTokenizer.type.modelProperties, + minGram: { + defaultValue: 1, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "minGram", type: { - name: "String" + name: "Number" } }, - textWeights: { - nullable: true, - serializedName: "text", + maxGram: { + defaultValue: 2, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxGram", type: { - name: "Composite", - className: "TextWeights" + name: "Number" } }, - functions: { - serializedName: "functions", + tokenChars: { + serializedName: "tokenChars", type: { name: "Sequence", element: { type: { - name: "Composite", - className: "ScoringFunction" + name: "Enum", + allowedValues: [ + "letter", + "digit", + "whitespace", + "punctuation", + "symbol" + ] } } } - }, - functionAggregation: { - serializedName: "functionAggregation", - type: { - name: "Enum", - allowedValues: [ - "sum", - "average", - "minimum", - "maximum", - "firstMatching" - ] - } } } } }; -export const CorsOptions: coreHttp.CompositeMapper = { - serializedName: "CorsOptions", +export const PathHierarchyTokenizerV2: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2", type: { name: "Composite", - className: "CorsOptions", + className: "PathHierarchyTokenizerV2", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, modelProperties: { - allowedOrigins: { - required: true, - serializedName: "allowedOrigins", + ...LexicalTokenizer.type.modelProperties, + delimiter: { + defaultValue: "/", + serializedName: "delimiter", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "String" } }, - maxAgeInSeconds: { - serializedName: "maxAgeInSeconds", + replacement: { + defaultValue: "/", + serializedName: "replacement", + type: { + name: "String" + } + }, + maxTokenLength: { + defaultValue: 300, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxTokenLength", + type: { + name: "Number" + } + }, + 
reverseTokenOrder: { + serializedName: "reverse", + type: { + name: "Boolean" + } + }, + numberOfTokensToSkip: { + serializedName: "skip", type: { name: "Number" } @@ -3376,512 +3349,490 @@ export const CorsOptions: coreHttp.CompositeMapper = { } }; -export const Suggester: coreHttp.CompositeMapper = { - serializedName: "Suggester", +export const PatternTokenizer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.PatternTokenizer", type: { name: "Composite", - className: "Suggester", + className: "PatternTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", + ...LexicalTokenizer.type.modelProperties, + pattern: { + defaultValue: "W+", + serializedName: "pattern", type: { name: "String" } }, - searchMode: { - required: true, - isConstant: true, - serializedName: "searchMode", - defaultValue: 'analyzingInfixMatching', + flags: { + serializedName: "flags", type: { name: "String" } }, - sourceFields: { - required: true, - serializedName: "sourceFields", + group: { + defaultValue: -1, + serializedName: "group", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "Number" } } } } }; -export const SearchIndex: coreHttp.CompositeMapper = { - serializedName: "SearchIndex", +export const LuceneStandardTokenizer: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.StandardTokenizer", type: { name: "Composite", - className: "SearchIndex", + className: "LuceneStandardTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", - type: { - name: "String" - } - }, - fields: { - required: true, - serializedName: "fields", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchField" - } - } - } - }, - scoringProfiles: { - serializedName: "scoringProfiles", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "ScoringProfile" - } - } - } - }, - defaultScoringProfile: { - serializedName: "defaultScoringProfile", - type: { - name: "String" - } - }, - corsOptions: { - nullable: true, - serializedName: "corsOptions", - type: { - name: "Composite", - className: "CorsOptions" - } - }, - suggesters: { - serializedName: "suggesters", + ...LexicalTokenizer.type.modelProperties, + maxTokenLength: { + defaultValue: 255, + serializedName: "maxTokenLength", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "Suggester" - } - } + name: "Number" } - }, - analyzers: { - serializedName: "analyzers", + } + } + } +}; + +export const LuceneStandardTokenizerV2: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.StandardTokenizerV2", + type: { + name: "Composite", + className: "LuceneStandardTokenizerV2", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, + modelProperties: { + ...LexicalTokenizer.type.modelProperties, + maxTokenLength: { + defaultValue: 255, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxTokenLength", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "LexicalAnalyzer" - } - } + name: "Number" } - }, - tokenizers: { - serializedName: "tokenizers", + } + } + } +}; + +export const UaxUrlEmailTokenizer: coreHttp.CompositeMapper = { + 
serializedName: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer", + type: { + name: "Composite", + className: "UaxUrlEmailTokenizer", + uberParent: "LexicalTokenizer", + polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, + modelProperties: { + ...LexicalTokenizer.type.modelProperties, + maxTokenLength: { + defaultValue: 255, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxTokenLength", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "LexicalTokenizer" - } - } + name: "Number" } - }, - tokenFilters: { - serializedName: "tokenFilters", + } + } + } +}; + +export const AsciiFoldingTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter", + type: { + name: "Composite", + className: "AsciiFoldingTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, + modelProperties: { + ...TokenFilter.type.modelProperties, + preserveOriginal: { + serializedName: "preserveOriginal", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "TokenFilter" - } - } + name: "Boolean" } - }, - charFilters: { - serializedName: "charFilters", + } + } + } +}; + +export const CjkBigramTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.CjkBigramTokenFilter", + type: { + name: "Composite", + className: "CjkBigramTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, + modelProperties: { + ...TokenFilter.type.modelProperties, + ignoreScripts: { + serializedName: "ignoreScripts", type: { name: "Sequence", element: { type: { - name: "Composite", - className: "CharFilter" + name: "Enum", + allowedValues: ["han", "hiragana", "katakana", "hangul"] } } } }, - encryptionKey: { - nullable: true, - serializedName: "encryptionKey", - type: { - name: "Composite", - className: "SearchResourceEncryptionKey" - } - }, - similarity: { - serializedName: "similarity", - type: { - name: "Composite", - className: "Similarity" - } - }, - etag: { - serializedName: "@odata\\.etag", + outputUnigrams: { + serializedName: "outputUnigrams", type: { - name: "String" + name: "Boolean" } } } } }; -export const GetIndexStatisticsResult: coreHttp.CompositeMapper = { - serializedName: "GetIndexStatisticsResult", +export const CommonGramTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.CommonGramTokenFilter", type: { name: "Composite", - className: "GetIndexStatisticsResult", + className: "CommonGramTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - documentCount: { + ...TokenFilter.type.modelProperties, + commonWords: { + serializedName: "commonWords", required: true, - nullable: false, - readOnly: true, - serializedName: "documentCount", type: { - name: "Number" + name: "Sequence", + element: { + type: { + name: "String" + } + } } }, - storageSize: { - required: true, - nullable: false, - readOnly: true, - serializedName: "storageSize", + ignoreCase: { + serializedName: "ignoreCase", type: { - name: "Number" + name: "Boolean" + } + }, + useQueryMode: { + serializedName: "queryMode", + type: { + name: "Boolean" } } } } }; -export const ListIndexesResult: coreHttp.CompositeMapper = { - serializedName: "ListIndexesResult", +export const DictionaryDecompounderTokenFilter: coreHttp.CompositeMapper = { + serializedName: 
"#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter", type: { name: "Composite", - className: "ListIndexesResult", + className: "DictionaryDecompounderTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - indexes: { + ...TokenFilter.type.modelProperties, + wordList: { + serializedName: "wordList", required: true, - readOnly: true, - serializedName: "value", type: { name: "Sequence", element: { type: { - name: "Composite", - className: "SearchIndex" + name: "String" } } } + }, + minWordSize: { + defaultValue: 5, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "minWordSize", + type: { + name: "Number" + } + }, + minSubwordSize: { + defaultValue: 2, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "minSubwordSize", + type: { + name: "Number" + } + }, + maxSubwordSize: { + defaultValue: 15, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxSubwordSize", + type: { + name: "Number" + } + }, + onlyLongestMatch: { + serializedName: "onlyLongestMatch", + type: { + name: "Boolean" + } } } } }; -export const InputFieldMappingEntry: coreHttp.CompositeMapper = { - serializedName: "InputFieldMappingEntry", +export const EdgeNGramTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.EdgeNGramTokenFilter", type: { name: "Composite", - className: "InputFieldMappingEntry", + className: "EdgeNGramTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", - type: { - name: "String" - } - }, - source: { - serializedName: "source", + ...TokenFilter.type.modelProperties, + minGram: { + defaultValue: 1, + serializedName: "minGram", type: { - name: "String" + name: "Number" } }, - sourceContext: { - serializedName: "sourceContext", + maxGram: { + defaultValue: 2, + serializedName: "maxGram", type: { - name: "String" + name: "Number" } }, - inputs: { - serializedName: "inputs", + side: { + serializedName: "side", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "InputFieldMappingEntry" - } - } + name: "Enum", + allowedValues: ["front", "back"] } } } } }; -export const OutputFieldMappingEntry: coreHttp.CompositeMapper = { - serializedName: "OutputFieldMappingEntry", +export const EdgeNGramTokenFilterV2: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2", type: { name: "Composite", - className: "OutputFieldMappingEntry", + className: "EdgeNGramTokenFilterV2", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", + ...TokenFilter.type.modelProperties, + minGram: { + defaultValue: 1, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "minGram", type: { - name: "String" + name: "Number" } }, - targetName: { - serializedName: "targetName", + maxGram: { + defaultValue: 2, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxGram", type: { - name: "String" + name: "Number" + } + }, + side: { + serializedName: "side", + type: { + name: "Enum", + allowedValues: ["front", "back"] } } } } }; -export const SearchIndexerSkill: coreHttp.CompositeMapper = { - serializedName: "SearchIndexerSkill", +export const ElisionTokenFilter: coreHttp.CompositeMapper = { + serializedName: 
"#Microsoft.Azure.Search.ElisionTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: { - serializedName: "@odata.type", - clientName: "odatatype" - }, - uberParent: "SearchIndexerSkill", - className: "SearchIndexerSkill", + className: "ElisionTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - name: { - serializedName: "name", - type: { - name: "String" - } - }, - description: { - serializedName: "description", - type: { - name: "String" - } - }, - context: { - serializedName: "context", - type: { - name: "String" - } - }, - inputs: { - required: true, - serializedName: "inputs", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "InputFieldMappingEntry" - } - } - } - }, - outputs: { - required: true, - serializedName: "outputs", + ...TokenFilter.type.modelProperties, + articles: { + serializedName: "articles", type: { name: "Sequence", element: { type: { - name: "Composite", - className: "OutputFieldMappingEntry" + name: "String" } } } - }, - odatatype: { - required: true, - serializedName: "@odata\\.type", - type: { - name: "String" - } } } } }; -export const CognitiveServicesAccount: coreHttp.CompositeMapper = { - serializedName: "CognitiveServicesAccount", +export const KeepTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.KeepTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: { - serializedName: "@odata.type", - clientName: "odatatype" - }, - uberParent: "CognitiveServicesAccount", - className: "CognitiveServicesAccount", + className: "KeepTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - description: { - serializedName: "description", + ...TokenFilter.type.modelProperties, + keepWords: { + serializedName: "keepWords", + required: true, type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "String" + } + } } }, - odatatype: { - required: true, - serializedName: "@odata\\.type", + lowerCaseKeepWords: { + serializedName: "keepWordsCase", type: { - name: "String" + name: "Boolean" } } } } }; -export const SearchIndexerSkillset: coreHttp.CompositeMapper = { - serializedName: "SearchIndexerSkillset", +export const KeywordMarkerTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter", type: { name: "Composite", - className: "SearchIndexerSkillset", + className: "KeywordMarkerTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", - type: { - name: "String" - } - }, - description: { - serializedName: "description", - type: { - name: "String" - } - }, - skills: { + ...TokenFilter.type.modelProperties, + keywords: { + serializedName: "keywords", required: true, - serializedName: "skills", type: { name: "Sequence", element: { type: { - name: "Composite", - className: "SearchIndexerSkill" + name: "String" } } } }, - cognitiveServicesAccount: { - serializedName: "cognitiveServices", - type: { - name: "Composite", - className: "CognitiveServicesAccount" - } - }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String" - } - }, - encryptionKey: { - nullable: true, - serializedName: "encryptionKey", + ignoreCase: { + serializedName: "ignoreCase", type: { - name: "Composite", - className: 
"SearchResourceEncryptionKey" + name: "Boolean" } } } } }; -export const DefaultCognitiveServicesAccount: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.DefaultCognitiveServices", - type: { - name: "Composite", - polymorphicDiscriminator: CognitiveServicesAccount.type.polymorphicDiscriminator, - uberParent: "CognitiveServicesAccount", - className: "DefaultCognitiveServicesAccount", - modelProperties: { - ...CognitiveServicesAccount.type.modelProperties - } - } -}; - -export const CognitiveServicesAccountKey: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.CognitiveServicesByKey", +export const LengthTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.LengthTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: CognitiveServicesAccount.type.polymorphicDiscriminator, - uberParent: "CognitiveServicesAccount", - className: "CognitiveServicesAccountKey", + className: "LengthTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...CognitiveServicesAccount.type.modelProperties, - key: { - required: true, - serializedName: "key", + ...TokenFilter.type.modelProperties, + minLength: { + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "min", type: { - name: "String" + name: "Number" + } + }, + maxLength: { + defaultValue: 300, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "max", + type: { + name: "Number" } } } } }; -export const ConditionalSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Util.ConditionalSkill", +export const LimitTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.LimitTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "ConditionalSkill", + className: "LimitTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties + ...TokenFilter.type.modelProperties, + maxTokenCount: { + defaultValue: 1, + serializedName: "maxTokenCount", + type: { + name: "Number" + } + }, + consumeAllTokens: { + serializedName: "consumeAllTokens", + type: { + name: "Boolean" + } + } } } }; -export const KeyPhraseExtractionSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill", +export const NGramTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.NGramTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "KeyPhraseExtractionSkill", + className: "NGramTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", + ...TokenFilter.type.modelProperties, + minGram: { + defaultValue: 1, + serializedName: "minGram", type: { - name: "String" + name: "Number" } }, - maxKeyPhraseCount: { - nullable: true, - serializedName: "maxKeyPhraseCount", + maxGram: { + defaultValue: 2, + serializedName: "maxGram", type: { name: "Number" } @@ -3890,49 +3841,51 @@ export const KeyPhraseExtractionSkill: coreHttp.CompositeMapper = { } }; -export const OcrSkill: 
coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Vision.OcrSkill", +export const NGramTokenFilterV2: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.NGramTokenFilterV2", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "OcrSkill", + className: "NGramTokenFilterV2", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", + ...TokenFilter.type.modelProperties, + minGram: { + defaultValue: 1, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "minGram", type: { - name: "String" + name: "Number" } }, - shouldDetectOrientation: { - serializedName: "detectOrientation", - defaultValue: false, + maxGram: { + defaultValue: 2, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "maxGram", type: { - name: "Boolean" + name: "Number" } } } } }; -export const ImageAnalysisSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Vision.ImageAnalysisSkill", +export const PatternCaptureTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.PatternCaptureTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "ImageAnalysisSkill", + className: "PatternCaptureTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - type: { - name: "String" - } - }, - visualFeatures: { - serializedName: "visualFeatures", + ...TokenFilter.type.modelProperties, + patterns: { + serializedName: "patterns", + required: true, type: { name: "Sequence", element: { @@ -3942,129 +3895,135 @@ export const ImageAnalysisSkill: coreHttp.CompositeMapper = { } } }, - details: { - serializedName: "details", + preserveOriginal: { + defaultValue: true, + serializedName: "preserveOriginal", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "Boolean" } } } } }; -export const LanguageDetectionSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.LanguageDetectionSkill", - type: { - name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "LanguageDetectionSkill", - modelProperties: { - ...SearchIndexerSkill.type.modelProperties - } - } -}; - -export const ShaperSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Util.ShaperSkill", +export const PatternReplaceTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.PatternReplaceTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "ShaperSkill", + className: "PatternReplaceTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties + ...TokenFilter.type.modelProperties, + pattern: { + serializedName: "pattern", + required: true, + type: { + name: "String" + } + }, + replacement: { + 
serializedName: "replacement", + required: true, + type: { + name: "String" + } + } } } }; -export const MergeSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.MergeSkill", +export const PhoneticTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.PhoneticTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "MergeSkill", + className: "PhoneticTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - insertPreTag: { - serializedName: "insertPreTag", - defaultValue: '', + ...TokenFilter.type.modelProperties, + encoder: { + serializedName: "encoder", type: { - name: "String" + name: "Enum", + allowedValues: [ + "metaphone", + "doubleMetaphone", + "soundex", + "refinedSoundex", + "caverphone1", + "caverphone2", + "cologne", + "nysiis", + "koelnerPhonetik", + "haasePhonetik", + "beiderMorse" + ] } }, - insertPostTag: { - serializedName: "insertPostTag", - defaultValue: '', + replaceOriginalTokens: { + defaultValue: true, + serializedName: "replace", type: { - name: "String" + name: "Boolean" } } } } }; -export const EntityRecognitionSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.EntityRecognitionSkill", +export const ShingleTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.ShingleTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "EntityRecognitionSkill", + className: "ShingleTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - categories: { - serializedName: "categories", + ...TokenFilter.type.modelProperties, + maxShingleSize: { + defaultValue: 2, + constraints: { + InclusiveMinimum: 2 + }, + serializedName: "maxShingleSize", type: { - name: "Sequence", - element: { - type: { - name: "String" - } - } + name: "Number" } }, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", + minShingleSize: { + defaultValue: 2, + constraints: { + InclusiveMinimum: 2 + }, + serializedName: "minShingleSize", type: { - name: "String" + name: "Number" } }, - includeTypelessEntities: { - nullable: true, - serializedName: "includeTypelessEntities", + outputUnigrams: { + defaultValue: true, + serializedName: "outputUnigrams", type: { name: "Boolean" } }, - minimumPrecision: { - nullable: true, - serializedName: "minimumPrecision", + outputUnigramsIfNoShingles: { + serializedName: "outputUnigramsIfNoShingles", type: { - name: "Number" + name: "Boolean" } - } - } - } -}; - -export const SentimentSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.SentimentSkill", - type: { - name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "SentimentSkill", - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", + }, + tokenSeparator: { + defaultValue: " ", + serializedName: "tokenSeparator", + type: { + name: "String" + } + }, + filterToken: { + defaultValue: "_", + serializedName: "filterToken", type: { name: "String" } @@ 
-4073,483 +4032,558 @@ export const SentimentSkill: coreHttp.CompositeMapper = { } }; -export const SplitSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.SplitSkill", +export const SnowballTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.SnowballTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "SplitSkill", + className: "SnowballTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - type: { - name: "String" - } - }, - textSplitMode: { - serializedName: "textSplitMode", - type: { - name: "String" - } - }, - maxPageLength: { - nullable: true, - serializedName: "maximumPageLength", + ...TokenFilter.type.modelProperties, + language: { + serializedName: "language", + required: true, type: { - name: "Number" + name: "Enum", + allowedValues: [ + "armenian", + "basque", + "catalan", + "danish", + "dutch", + "english", + "finnish", + "french", + "german", + "german2", + "hungarian", + "italian", + "kp", + "lovins", + "norwegian", + "porter", + "portuguese", + "romanian", + "russian", + "spanish", + "swedish", + "turkish" + ] } } } } }; -export const TextTranslationSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.TranslationSkill", +export const StemmerTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.StemmerTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "TextTranslationSkill", + className: "StemmerTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultToLanguageCode: { + ...TokenFilter.type.modelProperties, + language: { + serializedName: "language", required: true, - serializedName: "defaultToLanguageCode", - type: { - name: "String" - } - }, - defaultFromLanguageCode: { - serializedName: "defaultFromLanguageCode", - type: { - name: "String" - } - }, - suggestedFrom: { - serializedName: "suggestedFrom", type: { - name: "String" + name: "Enum", + allowedValues: [ + "arabic", + "armenian", + "basque", + "brazilian", + "bulgarian", + "catalan", + "czech", + "danish", + "dutch", + "dutchKp", + "english", + "lightEnglish", + "minimalEnglish", + "possessiveEnglish", + "porter2", + "lovins", + "finnish", + "lightFinnish", + "french", + "lightFrench", + "minimalFrench", + "galician", + "minimalGalician", + "german", + "german2", + "lightGerman", + "minimalGerman", + "greek", + "hindi", + "hungarian", + "lightHungarian", + "indonesian", + "irish", + "italian", + "lightItalian", + "sorani", + "latvian", + "norwegian", + "lightNorwegian", + "minimalNorwegian", + "lightNynorsk", + "minimalNynorsk", + "portuguese", + "lightPortuguese", + "minimalPortuguese", + "portugueseRslp", + "romanian", + "russian", + "lightRussian", + "spanish", + "lightSpanish", + "swedish", + "lightSwedish", + "turkish" + ] } } } } }; -export const WebApiSkill: coreHttp.CompositeMapper = { - serializedName: "#Microsoft.Skills.Custom.WebApiSkill", +export const StemmerOverrideTokenFilter: coreHttp.CompositeMapper = { + serializedName: 
"#Microsoft.Azure.Search.StemmerOverrideTokenFilter", type: { name: "Composite", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - uberParent: "SearchIndexerSkill", - className: "WebApiSkill", + className: "StemmerOverrideTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - uri: { + ...TokenFilter.type.modelProperties, + rules: { + serializedName: "rules", required: true, - serializedName: "uri", - type: { - name: "String" - } - }, - httpHeaders: { - serializedName: "httpHeaders", type: { - name: "Dictionary", - value: { + name: "Sequence", + element: { type: { name: "String" } } } - }, - httpMethod: { - serializedName: "httpMethod", - type: { - name: "String" - } - }, - timeout: { - serializedName: "timeout", - type: { - name: "TimeSpan" - } - }, - batchSize: { - nullable: true, - serializedName: "batchSize", - type: { - name: "Number" - } - }, - degreeOfParallelism: { - nullable: true, - serializedName: "degreeOfParallelism", - type: { - name: "Number" - } } } } }; -export const ListSkillsetsResult: coreHttp.CompositeMapper = { - serializedName: "ListSkillsetsResult", +export const StopwordsTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.StopwordsTokenFilter", type: { name: "Composite", - className: "ListSkillsetsResult", + className: "StopwordsTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - skillsets: { - required: true, - readOnly: true, - serializedName: "value", + ...TokenFilter.type.modelProperties, + stopwords: { + serializedName: "stopwords", type: { name: "Sequence", element: { type: { - name: "Composite", - className: "SearchIndexerSkillset" + name: "String" } } } + }, + stopwordsList: { + serializedName: "stopwordsList", + type: { + name: "Enum", + allowedValues: [ + "arabic", + "armenian", + "basque", + "brazilian", + "bulgarian", + "catalan", + "czech", + "danish", + "dutch", + "english", + "finnish", + "french", + "galician", + "german", + "greek", + "hindi", + "hungarian", + "indonesian", + "irish", + "italian", + "latvian", + "norwegian", + "persian", + "portuguese", + "romanian", + "russian", + "sorani", + "spanish", + "swedish", + "thai", + "turkish" + ] + } + }, + ignoreCase: { + serializedName: "ignoreCase", + type: { + name: "Boolean" + } + }, + removeTrailingStopWords: { + defaultValue: true, + serializedName: "removeTrailing", + type: { + name: "Boolean" + } } } } }; -export const SynonymMap: coreHttp.CompositeMapper = { - serializedName: "SynonymMap", +export const SynonymTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.SynonymTokenFilter", type: { name: "Composite", - className: "SynonymMap", + className: "SynonymTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - name: { - required: true, - serializedName: "name", - type: { - name: "String" - } - }, - format: { - required: true, - isConstant: true, - serializedName: "format", - defaultValue: 'solr', - type: { - name: "String" - } - }, + ...TokenFilter.type.modelProperties, synonyms: { - required: true, serializedName: "synonyms", + required: true, type: { - name: "String" + name: "Sequence", + element: { + type: { + name: "String" + } + } } }, - encryptionKey: { - nullable: true, - serializedName: "encryptionKey", + 
ignoreCase: { + serializedName: "ignoreCase", type: { - name: "Composite", - className: "SearchResourceEncryptionKey" + name: "Boolean" } }, - etag: { - serializedName: "@odata\\.etag", + expand: { + defaultValue: true, + serializedName: "expand", type: { - name: "String" + name: "Boolean" } } } } }; -export const ListSynonymMapsResult: coreHttp.CompositeMapper = { - serializedName: "ListSynonymMapsResult", +export const TruncateTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.TruncateTokenFilter", type: { name: "Composite", - className: "ListSynonymMapsResult", + className: "TruncateTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - synonymMaps: { - required: true, - readOnly: true, - serializedName: "value", + ...TokenFilter.type.modelProperties, + length: { + defaultValue: 300, + constraints: { + InclusiveMaximum: 300 + }, + serializedName: "length", type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SynonymMap" - } - } + name: "Number" } } } } }; -export const ResourceCounter: coreHttp.CompositeMapper = { - serializedName: "ResourceCounter", +export const UniqueTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.UniqueTokenFilter", type: { name: "Composite", - className: "ResourceCounter", + className: "UniqueTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - usage: { - required: true, - nullable: false, - serializedName: "usage", - type: { - name: "Number" - } - }, - quota: { - nullable: true, - serializedName: "quota", + ...TokenFilter.type.modelProperties, + onlyOnSamePosition: { + serializedName: "onlyOnSamePosition", type: { - name: "Number" + name: "Boolean" } } } } }; -export const ServiceCounters: coreHttp.CompositeMapper = { - serializedName: "ServiceCounters", +export const WordDelimiterTokenFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.WordDelimiterTokenFilter", type: { name: "Composite", - className: "ServiceCounters", + className: "WordDelimiterTokenFilter", + uberParent: "TokenFilter", + polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, modelProperties: { - documentCounter: { - required: true, - serializedName: "documentCount", + ...TokenFilter.type.modelProperties, + generateWordParts: { + defaultValue: true, + serializedName: "generateWordParts", type: { - name: "Composite", - className: "ResourceCounter" + name: "Boolean" } }, - indexCounter: { - required: true, - serializedName: "indexesCount", + generateNumberParts: { + defaultValue: true, + serializedName: "generateNumberParts", type: { - name: "Composite", - className: "ResourceCounter" + name: "Boolean" } }, - indexerCounter: { - required: true, - serializedName: "indexersCount", + catenateWords: { + serializedName: "catenateWords", type: { - name: "Composite", - className: "ResourceCounter" + name: "Boolean" } }, - dataSourceCounter: { - required: true, - serializedName: "dataSourcesCount", + catenateNumbers: { + serializedName: "catenateNumbers", type: { - name: "Composite", - className: "ResourceCounter" + name: "Boolean" } }, - storageSizeCounter: { - required: true, - serializedName: "storageSize", + catenateAll: { + serializedName: "catenateAll", type: { - name: "Composite", - className: "ResourceCounter" + name: "Boolean" } }, - synonymMapCounter: { - required: true, - serializedName: 
"synonymMaps", + splitOnCaseChange: { + defaultValue: true, + serializedName: "splitOnCaseChange", type: { - name: "Composite", - className: "ResourceCounter" + name: "Boolean" } - } - } - } -}; - -export const ServiceLimits: coreHttp.CompositeMapper = { - serializedName: "ServiceLimits", - type: { - name: "Composite", - className: "ServiceLimits", - modelProperties: { - maxFieldsPerIndex: { - nullable: true, - serializedName: "maxFieldsPerIndex", + }, + preserveOriginal: { + serializedName: "preserveOriginal", type: { - name: "Number" + name: "Boolean" } }, - maxFieldNestingDepthPerIndex: { - nullable: true, - serializedName: "maxFieldNestingDepthPerIndex", + splitOnNumerics: { + defaultValue: true, + serializedName: "splitOnNumerics", type: { - name: "Number" + name: "Boolean" } }, - maxComplexCollectionFieldsPerIndex: { - nullable: true, - serializedName: "maxComplexCollectionFieldsPerIndex", + stemEnglishPossessive: { + defaultValue: true, + serializedName: "stemEnglishPossessive", type: { - name: "Number" + name: "Boolean" } }, - maxComplexObjectsInCollectionsPerDocument: { - nullable: true, - serializedName: "maxComplexObjectsInCollectionsPerDocument", + protectedWords: { + serializedName: "protectedWords", type: { - name: "Number" + name: "Sequence", + element: { + type: { + name: "String" + } + } } } } } }; -export const ServiceStatistics: coreHttp.CompositeMapper = { - serializedName: "ServiceStatistics", +export const MappingCharFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.MappingCharFilter", type: { name: "Composite", - className: "ServiceStatistics", + className: "MappingCharFilter", + uberParent: "CharFilter", + polymorphicDiscriminator: CharFilter.type.polymorphicDiscriminator, modelProperties: { - counters: { - required: true, - serializedName: "counters", - type: { - name: "Composite", - className: "ServiceCounters" - } - }, - limits: { + ...CharFilter.type.modelProperties, + mappings: { + serializedName: "mappings", required: true, - serializedName: "limits", type: { - name: "Composite", - className: "ServiceLimits" + name: "Sequence", + element: { + type: { + name: "String" + } + } } } } } }; -export const SearchError: coreHttp.CompositeMapper = { - serializedName: "SearchError", +export const PatternReplaceCharFilter: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.PatternReplaceCharFilter", type: { name: "Composite", - className: "SearchError", + className: "PatternReplaceCharFilter", + uberParent: "CharFilter", + polymorphicDiscriminator: CharFilter.type.polymorphicDiscriminator, modelProperties: { - code: { - readOnly: true, - serializedName: "code", + ...CharFilter.type.modelProperties, + pattern: { + serializedName: "pattern", + required: true, type: { name: "String" } }, - message: { + replacement: { + serializedName: "replacement", required: true, - readOnly: true, - serializedName: "message", type: { name: "String" } + } + } + } +}; + +export const ClassicSimilarity: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.ClassicSimilarity", + type: { + name: "Composite", + className: "ClassicSimilarity", + uberParent: "Similarity", + polymorphicDiscriminator: Similarity.type.polymorphicDiscriminator, + modelProperties: { + ...Similarity.type.modelProperties + } + } +}; + +export const BM25Similarity: coreHttp.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.BM25Similarity", + type: { + name: "Composite", + className: "BM25Similarity", + uberParent: "Similarity", + 
polymorphicDiscriminator: Similarity.type.polymorphicDiscriminator, + modelProperties: { + ...Similarity.type.modelProperties, + k1: { + serializedName: "k1", + nullable: true, + type: { + name: "Number" + } }, - details: { - readOnly: true, - serializedName: "details", + b: { + serializedName: "b", + nullable: true, type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchError" - } - } + name: "Number" } } } } }; -export const discriminators = { - 'LexicalAnalyzer' : LexicalAnalyzer, - 'LexicalAnalyzer.#Microsoft.Azure.Search.CustomAnalyzer' : CustomAnalyzer, - 'LexicalAnalyzer.#Microsoft.Azure.Search.PatternAnalyzer' : PatternAnalyzer, - 'LexicalAnalyzer.#Microsoft.Azure.Search.StandardAnalyzer' : LuceneStandardAnalyzer, - 'LexicalAnalyzer.#Microsoft.Azure.Search.StopAnalyzer' : StopAnalyzer, - 'LexicalTokenizer' : LexicalTokenizer, - 'LexicalTokenizer.#Microsoft.Azure.Search.ClassicTokenizer' : ClassicTokenizer, - 'LexicalTokenizer.#Microsoft.Azure.Search.EdgeNGramTokenizer' : EdgeNGramTokenizer, - 'LexicalTokenizer.#Microsoft.Azure.Search.KeywordTokenizer' : KeywordTokenizer, - 'LexicalTokenizer.#Microsoft.Azure.Search.KeywordTokenizerV2' : KeywordTokenizerV2, - 'LexicalTokenizer.#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' : MicrosoftLanguageTokenizer, - 'LexicalTokenizer.#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' : MicrosoftLanguageStemmingTokenizer, - 'LexicalTokenizer.#Microsoft.Azure.Search.NGramTokenizer' : NGramTokenizer, - 'LexicalTokenizer.#Microsoft.Azure.Search.PathHierarchyTokenizerV2' : PathHierarchyTokenizerV2, - 'LexicalTokenizer.#Microsoft.Azure.Search.PatternTokenizer' : PatternTokenizer, - 'LexicalTokenizer.#Microsoft.Azure.Search.StandardTokenizer' : LuceneStandardTokenizer, - 'LexicalTokenizer.#Microsoft.Azure.Search.StandardTokenizerV2' : LuceneStandardTokenizerV2, - 'LexicalTokenizer.#Microsoft.Azure.Search.UaxUrlEmailTokenizer' : UaxUrlEmailTokenizer, - 'TokenFilter' : TokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.AsciiFoldingTokenFilter' : AsciiFoldingTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.CjkBigramTokenFilter' : CjkBigramTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.CommonGramTokenFilter' : CommonGramTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' : DictionaryDecompounderTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.EdgeNGramTokenFilter' : EdgeNGramTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' : EdgeNGramTokenFilterV2, - 'TokenFilter.#Microsoft.Azure.Search.ElisionTokenFilter' : ElisionTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.KeepTokenFilter' : KeepTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.KeywordMarkerTokenFilter' : KeywordMarkerTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.LengthTokenFilter' : LengthTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.LimitTokenFilter' : LimitTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.NGramTokenFilter' : NGramTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.NGramTokenFilterV2' : NGramTokenFilterV2, - 'TokenFilter.#Microsoft.Azure.Search.PatternCaptureTokenFilter' : PatternCaptureTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.PatternReplaceTokenFilter' : PatternReplaceTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.PhoneticTokenFilter' : PhoneticTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.ShingleTokenFilter' : ShingleTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.SnowballTokenFilter' : SnowballTokenFilter, - 
'TokenFilter.#Microsoft.Azure.Search.StemmerTokenFilter' : StemmerTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.StemmerOverrideTokenFilter' : StemmerOverrideTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.StopwordsTokenFilter' : StopwordsTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.SynonymTokenFilter' : SynonymTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.TruncateTokenFilter' : TruncateTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.UniqueTokenFilter' : UniqueTokenFilter, - 'TokenFilter.#Microsoft.Azure.Search.WordDelimiterTokenFilter' : WordDelimiterTokenFilter, - 'CharFilter' : CharFilter, - 'CharFilter.#Microsoft.Azure.Search.MappingCharFilter' : MappingCharFilter, - 'CharFilter.#Microsoft.Azure.Search.PatternReplaceCharFilter' : PatternReplaceCharFilter, - 'Similarity' : Similarity, - 'Similarity.#Microsoft.Azure.Search.ClassicSimilarity' : ClassicSimilarity, - 'Similarity.#Microsoft.Azure.Search.BM25Similarity' : BM25Similarity, - 'DataChangeDetectionPolicy' : DataChangeDetectionPolicy, - 'DataChangeDetectionPolicy.#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' : HighWaterMarkChangeDetectionPolicy, - 'DataChangeDetectionPolicy.#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' : SqlIntegratedChangeTrackingPolicy, - 'DataDeletionDetectionPolicy' : DataDeletionDetectionPolicy, - 'DataDeletionDetectionPolicy.#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' : SoftDeleteColumnDeletionDetectionPolicy, - 'ScoringFunction' : ScoringFunction, - 'ScoringFunction.distance' : DistanceScoringFunction, - 'ScoringFunction.freshness' : FreshnessScoringFunction, - 'ScoringFunction.magnitude' : MagnitudeScoringFunction, - 'ScoringFunction.tag' : TagScoringFunction, - 'SearchIndexerSkill' : SearchIndexerSkill, - 'CognitiveServicesAccount' : CognitiveServicesAccount, - 'CognitiveServicesAccount.#Microsoft.Azure.Search.DefaultCognitiveServices' : DefaultCognitiveServicesAccount, - 'CognitiveServicesAccount.#Microsoft.Azure.Search.CognitiveServicesByKey' : CognitiveServicesAccountKey, - 'SearchIndexerSkill.#Microsoft.Skills.Util.ConditionalSkill' : ConditionalSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Text.KeyPhraseExtractionSkill' : KeyPhraseExtractionSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Vision.OcrSkill' : OcrSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Vision.ImageAnalysisSkill' : ImageAnalysisSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Text.LanguageDetectionSkill' : LanguageDetectionSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Util.ShaperSkill' : ShaperSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Text.MergeSkill' : MergeSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Text.EntityRecognitionSkill' : EntityRecognitionSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Text.SentimentSkill' : SentimentSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Text.SplitSkill' : SplitSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Text.TranslationSkill' : TextTranslationSkill, - 'SearchIndexerSkill.#Microsoft.Skills.Custom.WebApiSkill' : WebApiSkill - +export let discriminators = { + DataChangeDetectionPolicy: DataChangeDetectionPolicy, + DataDeletionDetectionPolicy: DataDeletionDetectionPolicy, + SearchIndexerSkill: SearchIndexerSkill, + CognitiveServicesAccount: CognitiveServicesAccount, + ScoringFunction: ScoringFunction, + LexicalAnalyzer: LexicalAnalyzer, + LexicalTokenizer: LexicalTokenizer, + TokenFilter: TokenFilter, + CharFilter: CharFilter, + Similarity: Similarity, + 
"DataChangeDetectionPolicy.#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy": HighWaterMarkChangeDetectionPolicy, + "DataChangeDetectionPolicy.#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy": SqlIntegratedChangeTrackingPolicy, + "DataDeletionDetectionPolicy.#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy": SoftDeleteColumnDeletionDetectionPolicy, + "SearchIndexerSkill.#Microsoft.Skills.Util.ConditionalSkill": ConditionalSkill, + "SearchIndexerSkill.#Microsoft.Skills.Text.KeyPhraseExtractionSkill": KeyPhraseExtractionSkill, + "SearchIndexerSkill.#Microsoft.Skills.Vision.OcrSkill": OcrSkill, + "SearchIndexerSkill.#Microsoft.Skills.Vision.ImageAnalysisSkill": ImageAnalysisSkill, + "SearchIndexerSkill.#Microsoft.Skills.Text.LanguageDetectionSkill": LanguageDetectionSkill, + "SearchIndexerSkill.#Microsoft.Skills.Util.ShaperSkill": ShaperSkill, + "SearchIndexerSkill.#Microsoft.Skills.Text.MergeSkill": MergeSkill, + "SearchIndexerSkill.#Microsoft.Skills.Text.EntityRecognitionSkill": EntityRecognitionSkill, + "SearchIndexerSkill.#Microsoft.Skills.Text.SentimentSkill": SentimentSkill, + "SearchIndexerSkill.#Microsoft.Skills.Text.SplitSkill": SplitSkill, + "SearchIndexerSkill.#Microsoft.Skills.Text.CustomEntityLookupSkill": CustomEntityLookupSkill, + "SearchIndexerSkill.#Microsoft.Skills.Text.TranslationSkill": TextTranslationSkill, + "SearchIndexerSkill.#Microsoft.Skills.Custom.WebApiSkill": WebApiSkill, + "CognitiveServicesAccount.#Microsoft.Azure.Search.DefaultCognitiveServices": DefaultCognitiveServicesAccount, + "CognitiveServicesAccount.#Microsoft.Azure.Search.CognitiveServicesByKey": CognitiveServicesAccountKey, + "ScoringFunction.distance": DistanceScoringFunction, + "ScoringFunction.freshness": FreshnessScoringFunction, + "ScoringFunction.magnitude": MagnitudeScoringFunction, + "ScoringFunction.tag": TagScoringFunction, + "LexicalAnalyzer.#Microsoft.Azure.Search.CustomAnalyzer": CustomAnalyzer, + "LexicalAnalyzer.#Microsoft.Azure.Search.PatternAnalyzer": PatternAnalyzer, + "LexicalAnalyzer.#Microsoft.Azure.Search.StandardAnalyzer": LuceneStandardAnalyzer, + "LexicalAnalyzer.#Microsoft.Azure.Search.StopAnalyzer": StopAnalyzer, + "LexicalTokenizer.#Microsoft.Azure.Search.ClassicTokenizer": ClassicTokenizer, + "LexicalTokenizer.#Microsoft.Azure.Search.EdgeNGramTokenizer": EdgeNGramTokenizer, + "LexicalTokenizer.#Microsoft.Azure.Search.KeywordTokenizer": KeywordTokenizer, + "LexicalTokenizer.#Microsoft.Azure.Search.KeywordTokenizerV2": KeywordTokenizerV2, + "LexicalTokenizer.#Microsoft.Azure.Search.MicrosoftLanguageTokenizer": MicrosoftLanguageTokenizer, + "LexicalTokenizer.#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer": MicrosoftLanguageStemmingTokenizer, + "LexicalTokenizer.#Microsoft.Azure.Search.NGramTokenizer": NGramTokenizer, + "LexicalTokenizer.#Microsoft.Azure.Search.PathHierarchyTokenizerV2": PathHierarchyTokenizerV2, + "LexicalTokenizer.#Microsoft.Azure.Search.PatternTokenizer": PatternTokenizer, + "LexicalTokenizer.#Microsoft.Azure.Search.StandardTokenizer": LuceneStandardTokenizer, + "LexicalTokenizer.#Microsoft.Azure.Search.StandardTokenizerV2": LuceneStandardTokenizerV2, + "LexicalTokenizer.#Microsoft.Azure.Search.UaxUrlEmailTokenizer": UaxUrlEmailTokenizer, + "TokenFilter.#Microsoft.Azure.Search.AsciiFoldingTokenFilter": AsciiFoldingTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.CjkBigramTokenFilter": CjkBigramTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.CommonGramTokenFilter": CommonGramTokenFilter, + 
"TokenFilter.#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter": DictionaryDecompounderTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.EdgeNGramTokenFilter": EdgeNGramTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.EdgeNGramTokenFilterV2": EdgeNGramTokenFilterV2, + "TokenFilter.#Microsoft.Azure.Search.ElisionTokenFilter": ElisionTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.KeepTokenFilter": KeepTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.KeywordMarkerTokenFilter": KeywordMarkerTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.LengthTokenFilter": LengthTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.LimitTokenFilter": LimitTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.NGramTokenFilter": NGramTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.NGramTokenFilterV2": NGramTokenFilterV2, + "TokenFilter.#Microsoft.Azure.Search.PatternCaptureTokenFilter": PatternCaptureTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.PatternReplaceTokenFilter": PatternReplaceTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.PhoneticTokenFilter": PhoneticTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.ShingleTokenFilter": ShingleTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.SnowballTokenFilter": SnowballTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.StemmerTokenFilter": StemmerTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.StemmerOverrideTokenFilter": StemmerOverrideTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.StopwordsTokenFilter": StopwordsTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.SynonymTokenFilter": SynonymTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.TruncateTokenFilter": TruncateTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.UniqueTokenFilter": UniqueTokenFilter, + "TokenFilter.#Microsoft.Azure.Search.WordDelimiterTokenFilter": WordDelimiterTokenFilter, + "CharFilter.#Microsoft.Azure.Search.MappingCharFilter": MappingCharFilter, + "CharFilter.#Microsoft.Azure.Search.PatternReplaceCharFilter": PatternReplaceCharFilter, + "Similarity.#Microsoft.Azure.Search.ClassicSimilarity": ClassicSimilarity, + "Similarity.#Microsoft.Azure.Search.BM25Similarity": BM25Similarity }; diff --git a/sdk/search/search-documents/src/generated/service/models/parameters.ts b/sdk/search/search-documents/src/generated/service/models/parameters.ts index 7235dd302ccb..9fae96fc7552 100644 --- a/sdk/search/search-documents/src/generated/service/models/parameters.ts +++ b/sdk/search/search-documents/src/generated/service/models/parameters.ts @@ -1,64 +1,89 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ -import * as coreHttp from "@azure/core-http"; +import { + OperationParameter, + OperationURLParameter, + OperationQueryParameter +} from "@azure/core-http"; +import { + SearchIndexerDataSource as SearchIndexerDataSourceMapper, + SearchIndexer as SearchIndexerMapper, + SearchIndexerSkillset as SearchIndexerSkillsetMapper, + SynonymMap as SynonymMapMapper, + SearchIndex as SearchIndexMapper, + AnalyzeRequest as AnalyzeRequestMapper +} from "../models/mappers"; -export const allowIndexDowntime: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "allowIndexDowntime" - ], +export const contentType: OperationParameter = { + parameterPath: ["options", "contentType"], mapper: { - serializedName: "allowIndexDowntime", + defaultValue: "application/json", + isConstant: true, + serializedName: "Content-Type", type: { - name: "Boolean" + name: "String" } } }; -export const apiVersion: coreHttp.OperationQueryParameter = { - parameterPath: "apiVersion", + +export const dataSource: OperationParameter = { + parameterPath: "dataSource", + mapper: SearchIndexerDataSourceMapper +}; + +export const accept: OperationParameter = { + parameterPath: "accept", mapper: { - required: true, - serializedName: "api-version", + defaultValue: "application/json", + isConstant: true, + serializedName: "Accept", type: { name: "String" } } }; -export const dataSourceName: coreHttp.OperationURLParameter = { - parameterPath: "dataSourceName", + +export const endpoint: OperationURLParameter = { + parameterPath: "endpoint", mapper: { + serializedName: "endpoint", required: true, - serializedName: "dataSourceName", type: { name: "String" } - } + }, + skipEncoding: true }; -export const endpoint: coreHttp.OperationURLParameter = { - parameterPath: "endpoint", + +export const dataSourceName: OperationURLParameter = { + parameterPath: "dataSourceName", mapper: { + serializedName: "dataSourceName", required: true, - serializedName: "endpoint", - defaultValue: '', type: { name: "String" } - }, - skipEncoding: true + } +}; + +export const xMsClientRequestId: OperationParameter = { + parameterPath: ["options", "requestOptionsParam", "xMsClientRequestId"], + mapper: { + serializedName: "x-ms-client-request-id", + type: { + name: "Uuid" + } + } }; -export const ifMatch: coreHttp.OperationParameter = { - parameterPath: [ - "options", - "ifMatch" - ], + +export const ifMatch: OperationParameter = { + parameterPath: ["options", "ifMatch"], mapper: { serializedName: "If-Match", type: { @@ -66,11 +91,9 @@ export const ifMatch: coreHttp.OperationParameter = { } } }; -export const ifNoneMatch: coreHttp.OperationParameter = { - parameterPath: [ - "options", - "ifNoneMatch" - ], + +export const ifNoneMatch: OperationParameter = { + parameterPath: ["options", "ifNoneMatch"], mapper: { serializedName: "If-None-Match", type: { @@ -78,67 +101,115 @@ export const ifNoneMatch: coreHttp.OperationParameter = { } } }; -export const indexerName: coreHttp.OperationURLParameter = { - parameterPath: "indexerName", + +export const prefer: OperationParameter = { + parameterPath: "prefer", mapper: { - required: true, - serializedName: "indexerName", + defaultValue: "return=representation", + isConstant: true, + serializedName: "Prefer", type: { name: "String" } } }; -export const indexName: coreHttp.OperationURLParameter = { - parameterPath: "indexName", + +export const apiVersion: OperationQueryParameter = { + parameterPath: "apiVersion", mapper: { + serializedName: "api-version", required: true, - serializedName: "indexName", type: { 
name: "String" } } }; -export const prefer: coreHttp.OperationParameter = { - parameterPath: "prefer", + +export const select: OperationQueryParameter = { + parameterPath: ["options", "select"], mapper: { - required: true, - isConstant: true, - serializedName: "Prefer", - defaultValue: 'return=representation', + serializedName: "$select", type: { name: "String" } } }; -export const select: coreHttp.OperationQueryParameter = { - parameterPath: [ - "options", - "select" - ], + +export const indexerName: OperationURLParameter = { + parameterPath: "indexerName", mapper: { - serializedName: "$select", + serializedName: "indexerName", + required: true, type: { name: "String" } } }; -export const skillsetName: coreHttp.OperationURLParameter = { + +export const indexer: OperationParameter = { + parameterPath: "indexer", + mapper: SearchIndexerMapper +}; + +export const skillset: OperationParameter = { + parameterPath: "skillset", + mapper: SearchIndexerSkillsetMapper +}; + +export const skillsetName: OperationURLParameter = { parameterPath: "skillsetName", mapper: { - required: true, serializedName: "skillsetName", + required: true, type: { name: "String" } } }; -export const synonymMapName: coreHttp.OperationURLParameter = { + +export const synonymMap: OperationParameter = { + parameterPath: "synonymMap", + mapper: SynonymMapMapper +}; + +export const synonymMapName: OperationURLParameter = { parameterPath: "synonymMapName", mapper: { - required: true, serializedName: "synonymMapName", + required: true, type: { name: "String" } } }; + +export const index: OperationParameter = { + parameterPath: "index", + mapper: SearchIndexMapper +}; + +export const indexName: OperationURLParameter = { + parameterPath: "indexName", + mapper: { + serializedName: "indexName", + required: true, + type: { + name: "String" + } + } +}; + +export const allowIndexDowntime: OperationQueryParameter = { + parameterPath: ["options", "allowIndexDowntime"], + mapper: { + serializedName: "allowIndexDowntime", + type: { + name: "Boolean" + } + } +}; + +export const request: OperationParameter = { + parameterPath: "request", + mapper: AnalyzeRequestMapper +}; diff --git a/sdk/search/search-documents/src/generated/service/operations/dataSources.ts b/sdk/search/search-documents/src/generated/service/operations/dataSources.ts index d48caa9ad894..b4918df4734e 100644 --- a/sdk/search/search-documents/src/generated/service/operations/dataSources.ts +++ b/sdk/search/search-documents/src/generated/service/operations/dataSources.ts @@ -1,28 +1,37 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ import * as coreHttp from "@azure/core-http"; -import * as Models from "../models"; -import * as Mappers from "../models/dataSourcesMappers"; +import * as Mappers from "../models/mappers"; import * as Parameters from "../models/parameters"; -import { SearchServiceClientContext } from "../searchServiceClientContext"; +import { SearchServiceClient } from "../searchServiceClient"; +import { + SearchIndexerDataSource, + DataSourcesCreateOrUpdateOptionalParams, + DataSourcesCreateOrUpdateResponse, + DataSourcesDeleteOptionalParams, + DataSourcesGetOptionalParams, + DataSourcesGetResponse, + DataSourcesListOptionalParams, + DataSourcesListResponse, + DataSourcesCreateOptionalParams, + DataSourcesCreateResponse +} from "../models"; /** Class representing a DataSources. */ export class DataSources { - private readonly client: SearchServiceClientContext; + private readonly client: SearchServiceClient; /** - * Create a DataSources. - * @param {SearchServiceClientContext} client Reference to the service client. + * Initialize a new instance of the class DataSources class. + * @param client Reference to the service client */ - constructor(client: SearchServiceClientContext) { + constructor(client: SearchServiceClient) { this.client = client; } @@ -30,167 +39,103 @@ export class DataSources { * Creates a new datasource or updates a datasource if it already exists. * @param dataSourceName The name of the datasource to create or update. * @param dataSource The definition of the datasource to create or update. - * @param [options] The optional parameters - * @returns Promise - */ - createOrUpdate(dataSourceName: string, dataSource: Models.SearchIndexerDataSource, options?: Models.DataSourcesCreateOrUpdateOptionalParams): Promise; - /** - * @param dataSourceName The name of the datasource to create or update. - * @param dataSource The definition of the datasource to create or update. - * @param callback The callback - */ - createOrUpdate(dataSourceName: string, dataSource: Models.SearchIndexerDataSource, callback: coreHttp.ServiceCallback): void; - /** - * @param dataSourceName The name of the datasource to create or update. - * @param dataSource The definition of the datasource to create or update. - * @param options The optional parameters - * @param callback The callback - */ - createOrUpdate(dataSourceName: string, dataSource: Models.SearchIndexerDataSource, options: Models.DataSourcesCreateOrUpdateOptionalParams, callback: coreHttp.ServiceCallback): void; - createOrUpdate(dataSourceName: string, dataSource: Models.SearchIndexerDataSource, options?: Models.DataSourcesCreateOrUpdateOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + createOrUpdate( + dataSourceName: string, + dataSource: SearchIndexerDataSource, + options?: DataSourcesCreateOrUpdateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + dataSourceName, + dataSource, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - dataSourceName, - dataSource, - options - }, - createOrUpdateOperationSpec, - callback) as Promise; + operationArguments, + createOrUpdateOperationSpec + ) as Promise; } /** * Deletes a datasource. * @param dataSourceName The name of the datasource to delete. 
- * @param [options] The optional parameters - * @returns Promise - */ - deleteMethod(dataSourceName: string, options?: Models.DataSourcesDeleteMethodOptionalParams): Promise; - /** - * @param dataSourceName The name of the datasource to delete. - * @param callback The callback - */ - deleteMethod(dataSourceName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param dataSourceName The name of the datasource to delete. - * @param options The optional parameters - * @param callback The callback - */ - deleteMethod(dataSourceName: string, options: Models.DataSourcesDeleteMethodOptionalParams, callback: coreHttp.ServiceCallback): void; - deleteMethod(dataSourceName: string, options?: Models.DataSourcesDeleteMethodOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + delete( + dataSourceName: string, + options?: DataSourcesDeleteOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + dataSourceName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - dataSourceName, - options - }, - deleteMethodOperationSpec, - callback); + operationArguments, + deleteOperationSpec + ) as Promise; } /** * Retrieves a datasource definition. * @param dataSourceName The name of the datasource to retrieve. - * @param [options] The optional parameters - * @returns Promise - */ - get(dataSourceName: string, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param dataSourceName The name of the datasource to retrieve. - * @param callback The callback - */ - get(dataSourceName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param dataSourceName The name of the datasource to retrieve. - * @param options The optional parameters - * @param callback The callback - */ - get(dataSourceName: string, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - get(dataSourceName: string, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + get( + dataSourceName: string, + options?: DataSourcesGetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + dataSourceName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - dataSourceName, - options - }, - getOperationSpec, - callback) as Promise; + operationArguments, + getOperationSpec + ) as Promise; } /** * Lists all datasources available for a search service. - * @param [options] The optional parameters - * @returns Promise - */ - list(options?: Models.DataSourcesListOptionalParams): Promise; - /** - * @param callback The callback - */ - list(callback: coreHttp.ServiceCallback): void; - /** - * @param options The optional parameters - * @param callback The callback - */ - list(options: Models.DataSourcesListOptionalParams, callback: coreHttp.ServiceCallback): void; - list(options?: Models.DataSourcesListOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. 
+ */ + list( + options?: DataSourcesListOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - options - }, - listOperationSpec, - callback) as Promise; + operationArguments, + listOperationSpec + ) as Promise; } /** * Creates a new datasource. * @param dataSource The definition of the datasource to create. - * @param [options] The optional parameters - * @returns Promise - */ - create(dataSource: Models.SearchIndexerDataSource, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param dataSource The definition of the datasource to create. - * @param callback The callback - */ - create(dataSource: Models.SearchIndexerDataSource, callback: coreHttp.ServiceCallback): void; - /** - * @param dataSource The definition of the datasource to create. - * @param options The optional parameters - * @param callback The callback - */ - create(dataSource: Models.SearchIndexerDataSource, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - create(dataSource: Models.SearchIndexerDataSource, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + create( + dataSource: SearchIndexerDataSource, + options?: DataSourcesCreateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + dataSource, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - dataSource, - options - }, - createOperationSpec, - callback) as Promise; + operationArguments, + createOperationSpec + ) as Promise; } } - // Operation Specifications -const serializer = new coreHttp.Serializer(Mappers); +const serializer = new coreHttp.Serializer(Mappers, /* isXml */ false); + const createOrUpdateOperationSpec: coreHttp.OperationSpec = { + path: "/datasources('{dataSourceName}')", httpMethod: "PUT", - path: "datasources('{dataSourceName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.dataSourceName - ], - queryParameters: [ - Parameters.apiVersion - ], - headerParameters: [ - Parameters.ifMatch, - Parameters.ifNoneMatch, - Parameters.prefer - ], - requestBody: { - parameterPath: "dataSource", - mapper: { - ...Mappers.SearchIndexerDataSource, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.SearchIndexerDataSource @@ -202,23 +147,23 @@ const createOrUpdateOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, - serializer -}; - -const deleteMethodOperationSpec: coreHttp.OperationSpec = { - httpMethod: "DELETE", - path: "datasources('{dataSourceName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.dataSourceName - ], - queryParameters: [ - Parameters.apiVersion - ], + requestBody: Parameters.dataSource, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.dataSourceName], headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId, Parameters.ifMatch, - Parameters.ifNoneMatch + Parameters.ifNoneMatch, + Parameters.prefer ], + mediaType: "json", + serializer +}; +const deleteOperationSpec: coreHttp.OperationSpec = { + path: "/datasources('{dataSourceName}')", + httpMethod: "DELETE", responses: { 204: {}, 404: {}, @@ -226,19 +171,19 @@ const deleteMethodOperationSpec: coreHttp.OperationSpec = { bodyMapper: 
Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.dataSourceName], + headerParameters: [ + Parameters.accept, + Parameters.xMsClientRequestId, + Parameters.ifMatch, + Parameters.ifNoneMatch + ], serializer }; - const getOperationSpec: coreHttp.OperationSpec = { + path: "/datasources('{dataSourceName}')", httpMethod: "GET", - path: "datasources('{dataSourceName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.dataSourceName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.SearchIndexerDataSource @@ -247,19 +192,14 @@ const getOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.dataSourceName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const listOperationSpec: coreHttp.OperationSpec = { + path: "/datasources", httpMethod: "GET", - path: "datasources", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.select, - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.ListDataSourcesResult @@ -268,25 +208,14 @@ const listOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion, Parameters.select], + urlParameters: [Parameters.endpoint], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const createOperationSpec: coreHttp.OperationSpec = { + path: "/datasources", httpMethod: "POST", - path: "datasources", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "dataSource", - mapper: { - ...Mappers.SearchIndexerDataSource, - required: true - } - }, responses: { 201: { bodyMapper: Mappers.SearchIndexerDataSource @@ -295,5 +224,14 @@ const createOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.dataSource, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint], + headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId + ], + mediaType: "json", serializer }; diff --git a/sdk/search/search-documents/src/generated/service/operations/index.ts b/sdk/search/search-documents/src/generated/service/operations/index.ts index 66889cb1e482..896ae33eded4 100644 --- a/sdk/search/search-documents/src/generated/service/operations/index.ts +++ b/sdk/search/search-documents/src/generated/service/operations/index.ts @@ -1,11 +1,9 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ export * from "./dataSources"; diff --git a/sdk/search/search-documents/src/generated/service/operations/indexers.ts b/sdk/search/search-documents/src/generated/service/operations/indexers.ts index a200bf8759d4..9e588e39b832 100644 --- a/sdk/search/search-documents/src/generated/service/operations/indexers.ts +++ b/sdk/search/search-documents/src/generated/service/operations/indexers.ts @@ -1,318 +1,228 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ import * as coreHttp from "@azure/core-http"; -import * as Models from "../models"; -import * as Mappers from "../models/indexersMappers"; +import * as Mappers from "../models/mappers"; import * as Parameters from "../models/parameters"; -import { SearchServiceClientContext } from "../searchServiceClientContext"; +import { SearchServiceClient } from "../searchServiceClient"; +import { + IndexersResetOptionalParams, + IndexersRunOptionalParams, + SearchIndexer, + IndexersCreateOrUpdateOptionalParams, + IndexersCreateOrUpdateResponse, + IndexersDeleteOptionalParams, + IndexersGetOptionalParams, + IndexersGetResponse, + IndexersListOptionalParams, + IndexersListResponse, + IndexersCreateOptionalParams, + IndexersCreateResponse, + IndexersGetStatusOptionalParams, + IndexersGetStatusResponse +} from "../models"; /** Class representing a Indexers. */ export class Indexers { - private readonly client: SearchServiceClientContext; + private readonly client: SearchServiceClient; /** - * Create a Indexers. - * @param {SearchServiceClientContext} client Reference to the service client. + * Initialize a new instance of the class Indexers class. + * @param client Reference to the service client */ - constructor(client: SearchServiceClientContext) { + constructor(client: SearchServiceClient) { this.client = client; } /** * Resets the change tracking state associated with an indexer. * @param indexerName The name of the indexer to reset. - * @param [options] The optional parameters - * @returns Promise + * @param options The options parameters. */ - reset(indexerName: string, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param indexerName The name of the indexer to reset. - * @param callback The callback - */ - reset(indexerName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param indexerName The name of the indexer to reset. - * @param options The optional parameters - * @param callback The callback - */ - reset(indexerName: string, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - reset(indexerName: string, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + reset( + indexerName: string, + options?: IndexersResetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexerName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexerName, - options - }, - resetOperationSpec, - callback); + operationArguments, + resetOperationSpec + ) as Promise; } /** * Runs an indexer on-demand. 
* @param indexerName The name of the indexer to run. - * @param [options] The optional parameters - * @returns Promise + * @param options The options parameters. */ - run(indexerName: string, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param indexerName The name of the indexer to run. - * @param callback The callback - */ - run(indexerName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param indexerName The name of the indexer to run. - * @param options The optional parameters - * @param callback The callback - */ - run(indexerName: string, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - run(indexerName: string, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + run( + indexerName: string, + options?: IndexersRunOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexerName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexerName, - options - }, - runOperationSpec, - callback); + operationArguments, + runOperationSpec + ) as Promise; } /** * Creates a new indexer or updates an indexer if it already exists. * @param indexerName The name of the indexer to create or update. * @param indexer The definition of the indexer to create or update. - * @param [options] The optional parameters - * @returns Promise + * @param options The options parameters. */ - createOrUpdate(indexerName: string, indexer: Models.SearchIndexer, options?: Models.IndexersCreateOrUpdateOptionalParams): Promise; - /** - * @param indexerName The name of the indexer to create or update. - * @param indexer The definition of the indexer to create or update. - * @param callback The callback - */ - createOrUpdate(indexerName: string, indexer: Models.SearchIndexer, callback: coreHttp.ServiceCallback): void; - /** - * @param indexerName The name of the indexer to create or update. - * @param indexer The definition of the indexer to create or update. - * @param options The optional parameters - * @param callback The callback - */ - createOrUpdate(indexerName: string, indexer: Models.SearchIndexer, options: Models.IndexersCreateOrUpdateOptionalParams, callback: coreHttp.ServiceCallback): void; - createOrUpdate(indexerName: string, indexer: Models.SearchIndexer, options?: Models.IndexersCreateOrUpdateOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + createOrUpdate( + indexerName: string, + indexer: SearchIndexer, + options?: IndexersCreateOrUpdateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexerName, + indexer, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexerName, - indexer, - options - }, - createOrUpdateOperationSpec, - callback) as Promise; + operationArguments, + createOrUpdateOperationSpec + ) as Promise; } /** * Deletes an indexer. * @param indexerName The name of the indexer to delete. - * @param [options] The optional parameters - * @returns Promise - */ - deleteMethod(indexerName: string, options?: Models.IndexersDeleteMethodOptionalParams): Promise; - /** - * @param indexerName The name of the indexer to delete. - * @param callback The callback + * @param options The options parameters. 
*/ - deleteMethod(indexerName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param indexerName The name of the indexer to delete. - * @param options The optional parameters - * @param callback The callback - */ - deleteMethod(indexerName: string, options: Models.IndexersDeleteMethodOptionalParams, callback: coreHttp.ServiceCallback): void; - deleteMethod(indexerName: string, options?: Models.IndexersDeleteMethodOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + delete( + indexerName: string, + options?: IndexersDeleteOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexerName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexerName, - options - }, - deleteMethodOperationSpec, - callback); + operationArguments, + deleteOperationSpec + ) as Promise; } /** * Retrieves an indexer definition. * @param indexerName The name of the indexer to retrieve. - * @param [options] The optional parameters - * @returns Promise - */ - get(indexerName: string, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param indexerName The name of the indexer to retrieve. - * @param callback The callback - */ - get(indexerName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param indexerName The name of the indexer to retrieve. - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. */ - get(indexerName: string, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - get(indexerName: string, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + get( + indexerName: string, + options?: IndexersGetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexerName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexerName, - options - }, - getOperationSpec, - callback) as Promise; + operationArguments, + getOperationSpec + ) as Promise; } /** * Lists all indexers available for a search service. - * @param [options] The optional parameters - * @returns Promise - */ - list(options?: Models.IndexersListOptionalParams): Promise; - /** - * @param callback The callback + * @param options The options parameters. */ - list(callback: coreHttp.ServiceCallback): void; - /** - * @param options The optional parameters - * @param callback The callback - */ - list(options: Models.IndexersListOptionalParams, callback: coreHttp.ServiceCallback): void; - list(options?: Models.IndexersListOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + list(options?: IndexersListOptionalParams): Promise { + const operationArguments: coreHttp.OperationArguments = { + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - options - }, - listOperationSpec, - callback) as Promise; + operationArguments, + listOperationSpec + ) as Promise; } /** * Creates a new indexer. * @param indexer The definition of the indexer to create. - * @param [options] The optional parameters - * @returns Promise - */ - create(indexer: Models.SearchIndexer, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param indexer The definition of the indexer to create. 
- * @param callback The callback - */ - create(indexer: Models.SearchIndexer, callback: coreHttp.ServiceCallback): void; - /** - * @param indexer The definition of the indexer to create. - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. */ - create(indexer: Models.SearchIndexer, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - create(indexer: Models.SearchIndexer, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + create( + indexer: SearchIndexer, + options?: IndexersCreateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexer, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexer, - options - }, - createOperationSpec, - callback) as Promise; + operationArguments, + createOperationSpec + ) as Promise; } /** * Returns the current status and execution history of an indexer. * @param indexerName The name of the indexer for which to retrieve status. - * @param [options] The optional parameters - * @returns Promise - */ - getStatus(indexerName: string, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param indexerName The name of the indexer for which to retrieve status. - * @param callback The callback - */ - getStatus(indexerName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param indexerName The name of the indexer for which to retrieve status. - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. */ - getStatus(indexerName: string, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - getStatus(indexerName: string, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + getStatus( + indexerName: string, + options?: IndexersGetStatusOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexerName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexerName, - options - }, - getStatusOperationSpec, - callback) as Promise; + operationArguments, + getStatusOperationSpec + ) as Promise; } } - // Operation Specifications -const serializer = new coreHttp.Serializer(Mappers); +const serializer = new coreHttp.Serializer(Mappers, /* isXml */ false); + const resetOperationSpec: coreHttp.OperationSpec = { + path: "/indexers('{indexerName}')/search.reset", httpMethod: "POST", - path: "indexers('{indexerName}')/search.reset", - urlParameters: [ - Parameters.endpoint, - Parameters.indexerName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 204: {}, default: { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexerName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const runOperationSpec: coreHttp.OperationSpec = { + path: "/indexers('{indexerName}')/search.run", httpMethod: "POST", - path: "indexers('{indexerName}')/search.run", - urlParameters: [ - Parameters.endpoint, - Parameters.indexerName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 202: {}, default: { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: 
[Parameters.endpoint, Parameters.indexerName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const createOrUpdateOperationSpec: coreHttp.OperationSpec = { + path: "/indexers('{indexerName}')", httpMethod: "PUT", - path: "indexers('{indexerName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.indexerName - ], - queryParameters: [ - Parameters.apiVersion - ], - headerParameters: [ - Parameters.ifMatch, - Parameters.ifNoneMatch, - Parameters.prefer - ], - requestBody: { - parameterPath: "indexer", - mapper: { - ...Mappers.SearchIndexer, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.SearchIndexer @@ -324,23 +234,23 @@ const createOrUpdateOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, - serializer -}; - -const deleteMethodOperationSpec: coreHttp.OperationSpec = { - httpMethod: "DELETE", - path: "indexers('{indexerName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.indexerName - ], - queryParameters: [ - Parameters.apiVersion - ], + requestBody: Parameters.indexer, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexerName], headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId, Parameters.ifMatch, - Parameters.ifNoneMatch + Parameters.ifNoneMatch, + Parameters.prefer ], + mediaType: "json", + serializer +}; +const deleteOperationSpec: coreHttp.OperationSpec = { + path: "/indexers('{indexerName}')", + httpMethod: "DELETE", responses: { 204: {}, 404: {}, @@ -348,19 +258,19 @@ const deleteMethodOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexerName], + headerParameters: [ + Parameters.accept, + Parameters.xMsClientRequestId, + Parameters.ifMatch, + Parameters.ifNoneMatch + ], serializer }; - const getOperationSpec: coreHttp.OperationSpec = { + path: "/indexers('{indexerName}')", httpMethod: "GET", - path: "indexers('{indexerName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.indexerName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.SearchIndexer @@ -369,19 +279,14 @@ const getOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexerName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const listOperationSpec: coreHttp.OperationSpec = { + path: "/indexers", httpMethod: "GET", - path: "indexers", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.select, - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.ListIndexersResult @@ -390,25 +295,14 @@ const listOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion, Parameters.select], + urlParameters: [Parameters.endpoint], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const createOperationSpec: coreHttp.OperationSpec = { + path: "/indexers", httpMethod: "POST", - path: "indexers", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "indexer", - mapper: { - ...Mappers.SearchIndexer, - required: true - } - }, responses: { 201: { bodyMapper: Mappers.SearchIndexer @@ -417,19 
+311,20 @@ const createOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.indexer, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint], + headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId + ], + mediaType: "json", serializer }; - const getStatusOperationSpec: coreHttp.OperationSpec = { + path: "/indexers('{indexerName}')/search.status", httpMethod: "GET", - path: "indexers('{indexerName}')/search.status", - urlParameters: [ - Parameters.endpoint, - Parameters.indexerName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.SearchIndexerStatus @@ -438,5 +333,8 @@ const getStatusOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexerName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; diff --git a/sdk/search/search-documents/src/generated/service/operations/indexes.ts b/sdk/search/search-documents/src/generated/service/operations/indexes.ts index 333d5cc92984..208398b5cb98 100644 --- a/sdk/search/search-documents/src/generated/service/operations/indexes.ts +++ b/sdk/search/search-documents/src/generated/service/operations/indexes.ts @@ -1,252 +1,187 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ import * as coreHttp from "@azure/core-http"; -import * as Models from "../models"; -import * as Mappers from "../models/indexesMappers"; +import * as Mappers from "../models/mappers"; import * as Parameters from "../models/parameters"; -import { SearchServiceClientContext } from "../searchServiceClientContext"; +import { SearchServiceClient } from "../searchServiceClient"; +import { + SearchIndex, + IndexesCreateOptionalParams, + IndexesCreateResponse, + IndexesListOptionalParams, + IndexesListResponse, + IndexesCreateOrUpdateOptionalParams, + IndexesCreateOrUpdateResponse, + IndexesDeleteOptionalParams, + IndexesGetOptionalParams, + IndexesGetResponse, + IndexesGetStatisticsOptionalParams, + IndexesGetStatisticsResponse, + AnalyzeRequest, + IndexesAnalyzeOptionalParams, + IndexesAnalyzeResponse +} from "../models"; /** Class representing a Indexes. */ export class Indexes { - private readonly client: SearchServiceClientContext; + private readonly client: SearchServiceClient; /** - * Create a Indexes. - * @param {SearchServiceClientContext} client Reference to the service client. + * Initialize a new instance of the class Indexes class. + * @param client Reference to the service client */ - constructor(client: SearchServiceClientContext) { + constructor(client: SearchServiceClient) { this.client = client; } /** * Creates a new search index. * @param index The definition of the index to create. - * @param [options] The optional parameters - * @returns Promise + * @param options The options parameters. 
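For orientation, this is roughly how the regenerated Indexers group is called after the migration: the callback overloads disappear and every method takes (args, options?) and returns a promise of a typed response. This is a minimal sketch only; the endpoint, the "2020-06-30" api-version literal, the indexer name, and the import path are placeholders, and credential/pipeline setup is omitted.

    import { SearchServiceClient } from "./generated/service/searchServiceClient"; // illustrative path

    async function resetAndInspectIndexer(): Promise<void> {
      // Placeholder endpoint; "2020-06-30" is assumed to satisfy the ApiVersion20200630 literal type.
      const client = new SearchServiceClient(
        "https://myservice.search.windows.net",
        "2020-06-30"
      );

      // Promise-based calls replace the old (args, options, callback) overloads.
      await client.indexers.reset("hotel-indexer");
      await client.indexers.run("hotel-indexer");

      const status = await client.indexers.getStatus("hotel-indexer");
      console.log(status.status, status.lastResult?.status);
    }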
*/ - create(index: Models.SearchIndex, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param index The definition of the index to create. - * @param callback The callback - */ - create(index: Models.SearchIndex, callback: coreHttp.ServiceCallback): void; - /** - * @param index The definition of the index to create. - * @param options The optional parameters - * @param callback The callback - */ - create(index: Models.SearchIndex, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - create(index: Models.SearchIndex, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + create( + index: SearchIndex, + options?: IndexesCreateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + index, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - index, - options - }, - createOperationSpec, - callback) as Promise; + operationArguments, + createOperationSpec + ) as Promise; } /** * Lists all indexes available for a search service. - * @param [options] The optional parameters - * @returns Promise + * @param options The options parameters. */ - list(options?: Models.IndexesListOptionalParams): Promise; - /** - * @param callback The callback - */ - list(callback: coreHttp.ServiceCallback): void; - /** - * @param options The optional parameters - * @param callback The callback - */ - list(options: Models.IndexesListOptionalParams, callback: coreHttp.ServiceCallback): void; - list(options?: Models.IndexesListOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + list(options?: IndexesListOptionalParams): Promise { + const operationArguments: coreHttp.OperationArguments = { + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - options - }, - listOperationSpec, - callback) as Promise; + operationArguments, + listOperationSpec + ) as Promise; } /** * Creates a new search index or updates an index if it already exists. * @param indexName The definition of the index to create or update. * @param index The definition of the index to create or update. - * @param [options] The optional parameters - * @returns Promise - */ - createOrUpdate(indexName: string, index: Models.SearchIndex, options?: Models.IndexesCreateOrUpdateOptionalParams): Promise; - /** - * @param indexName The definition of the index to create or update. - * @param index The definition of the index to create or update. - * @param callback The callback - */ - createOrUpdate(indexName: string, index: Models.SearchIndex, callback: coreHttp.ServiceCallback): void; - /** - * @param indexName The definition of the index to create or update. - * @param index The definition of the index to create or update. - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. 
*/ - createOrUpdate(indexName: string, index: Models.SearchIndex, options: Models.IndexesCreateOrUpdateOptionalParams, callback: coreHttp.ServiceCallback): void; - createOrUpdate(indexName: string, index: Models.SearchIndex, options?: Models.IndexesCreateOrUpdateOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + createOrUpdate( + indexName: string, + index: SearchIndex, + options?: IndexesCreateOrUpdateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexName, + index, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexName, - index, - options - }, - createOrUpdateOperationSpec, - callback) as Promise; + operationArguments, + createOrUpdateOperationSpec + ) as Promise; } /** * Deletes a search index and all the documents it contains. This operation is permanent, with no - * recovery option. Make sure you have a master copy of your index definition, data ingestion code, - * and a backup of the primary data source in case you need to re-build the index. - * @param indexName The name of the index to delete. - * @param [options] The optional parameters - * @returns Promise - */ - deleteMethod(indexName: string, options?: Models.IndexesDeleteMethodOptionalParams): Promise; - /** - * @param indexName The name of the index to delete. - * @param callback The callback - */ - deleteMethod(indexName: string, callback: coreHttp.ServiceCallback): void; - /** + * recovery option. Make sure you have a master copy of your index definition, data ingestion code, and + * a backup of the primary data source in case you need to re-build the index. * @param indexName The name of the index to delete. - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. */ - deleteMethod(indexName: string, options: Models.IndexesDeleteMethodOptionalParams, callback: coreHttp.ServiceCallback): void; - deleteMethod(indexName: string, options?: Models.IndexesDeleteMethodOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + delete( + indexName: string, + options?: IndexesDeleteOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexName, - options - }, - deleteMethodOperationSpec, - callback); + operationArguments, + deleteOperationSpec + ) as Promise; } /** * Retrieves an index definition. * @param indexName The name of the index to retrieve. - * @param [options] The optional parameters - * @returns Promise + * @param options The options parameters. */ - get(indexName: string, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param indexName The name of the index to retrieve. - * @param callback The callback - */ - get(indexName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param indexName The name of the index to retrieve. 
- * @param options The optional parameters - * @param callback The callback - */ - get(indexName: string, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - get(indexName: string, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + get( + indexName: string, + options?: IndexesGetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexName, - options - }, - getOperationSpec, - callback) as Promise; + operationArguments, + getOperationSpec + ) as Promise; } /** * Returns statistics for the given index, including a document count and storage usage. * @param indexName The name of the index for which to retrieve statistics. - * @param [options] The optional parameters - * @returns Promise - */ - getStatistics(indexName: string, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param indexName The name of the index for which to retrieve statistics. - * @param callback The callback - */ - getStatistics(indexName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param indexName The name of the index for which to retrieve statistics. - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. */ - getStatistics(indexName: string, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - getStatistics(indexName: string, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + getStatistics( + indexName: string, + options?: IndexesGetStatisticsOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexName, - options - }, - getStatisticsOperationSpec, - callback) as Promise; + operationArguments, + getStatisticsOperationSpec + ) as Promise; } /** * Shows how an analyzer breaks text into tokens. * @param indexName The name of the index for which to test an analyzer. * @param request The text and analyzer or analysis components to test. - * @param [options] The optional parameters - * @returns Promise + * @param options The options parameters. */ - analyze(indexName: string, request: Models.AnalyzeRequest, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param indexName The name of the index for which to test an analyzer. - * @param request The text and analyzer or analysis components to test. - * @param callback The callback - */ - analyze(indexName: string, request: Models.AnalyzeRequest, callback: coreHttp.ServiceCallback): void; - /** - * @param indexName The name of the index for which to test an analyzer. - * @param request The text and analyzer or analysis components to test. 
- * @param options The optional parameters - * @param callback The callback - */ - analyze(indexName: string, request: Models.AnalyzeRequest, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - analyze(indexName: string, request: Models.AnalyzeRequest, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + analyze( + indexName: string, + request: AnalyzeRequest, + options?: IndexesAnalyzeOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + indexName, + request, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - indexName, - request, - options - }, - analyzeOperationSpec, - callback) as Promise; + operationArguments, + analyzeOperationSpec + ) as Promise; } } - // Operation Specifications -const serializer = new coreHttp.Serializer(Mappers); +const serializer = new coreHttp.Serializer(Mappers, /* isXml */ false); + const createOperationSpec: coreHttp.OperationSpec = { + path: "/indexes", httpMethod: "POST", - path: "indexes", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "index", - mapper: { - ...Mappers.SearchIndex, - required: true - } - }, responses: { 201: { bodyMapper: Mappers.SearchIndex @@ -255,19 +190,20 @@ const createOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.index, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint], + headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId + ], + mediaType: "json", serializer }; - const listOperationSpec: coreHttp.OperationSpec = { + path: "/indexes", httpMethod: "GET", - path: "indexes", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.select, - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.ListIndexesResult @@ -276,32 +212,14 @@ const listOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion, Parameters.select], + urlParameters: [Parameters.endpoint], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const createOrUpdateOperationSpec: coreHttp.OperationSpec = { + path: "/indexes('{indexName}')", httpMethod: "PUT", - path: "indexes('{indexName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.allowIndexDowntime, - Parameters.apiVersion - ], - headerParameters: [ - Parameters.ifMatch, - Parameters.ifNoneMatch, - Parameters.prefer - ], - requestBody: { - parameterPath: "index", - mapper: { - ...Mappers.SearchIndex, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.SearchIndex @@ -313,23 +231,23 @@ const createOrUpdateOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, - serializer -}; - -const deleteMethodOperationSpec: coreHttp.OperationSpec = { - httpMethod: "DELETE", - path: "indexes('{indexName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.apiVersion - ], + requestBody: Parameters.index, + queryParameters: [Parameters.apiVersion, Parameters.allowIndexDowntime], + urlParameters: [Parameters.endpoint, Parameters.indexName], headerParameters: [ + Parameters.contentType, + Parameters.accept, + 
Parameters.xMsClientRequestId, Parameters.ifMatch, - Parameters.ifNoneMatch + Parameters.ifNoneMatch, + Parameters.prefer ], + mediaType: "json", + serializer +}; +const deleteOperationSpec: coreHttp.OperationSpec = { + path: "/indexes('{indexName}')", + httpMethod: "DELETE", responses: { 204: {}, 404: {}, @@ -337,19 +255,19 @@ const deleteMethodOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [ + Parameters.accept, + Parameters.xMsClientRequestId, + Parameters.ifMatch, + Parameters.ifNoneMatch + ], serializer }; - const getOperationSpec: coreHttp.OperationSpec = { + path: "/indexes('{indexName}')", httpMethod: "GET", - path: "indexes('{indexName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.SearchIndex @@ -358,19 +276,14 @@ const getOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const getStatisticsOperationSpec: coreHttp.OperationSpec = { + path: "/indexes('{indexName}')/search.stats", httpMethod: "GET", - path: "indexes('{indexName}')/search.stats", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.GetIndexStatisticsResult @@ -379,26 +292,14 @@ const getStatisticsOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const analyzeOperationSpec: coreHttp.OperationSpec = { + path: "/indexes('{indexName}')/search.analyze", httpMethod: "POST", - path: "indexes('{indexName}')/search.analyze", - urlParameters: [ - Parameters.endpoint, - Parameters.indexName - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "request", - mapper: { - ...Mappers.AnalyzeRequest, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.AnalyzeResult @@ -407,5 +308,14 @@ const analyzeOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.request, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.indexName], + headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId + ], + mediaType: "json", serializer }; diff --git a/sdk/search/search-documents/src/generated/service/operations/skillsets.ts b/sdk/search/search-documents/src/generated/service/operations/skillsets.ts index 1137f497c89f..64bbc7f4f5cd 100644 --- a/sdk/search/search-documents/src/generated/service/operations/skillsets.ts +++ b/sdk/search/search-documents/src/generated/service/operations/skillsets.ts @@ -1,199 +1,139 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. 
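The Indexes group gets the same treatment, including the deleteMethod-to-delete rename. A rough usage sketch, assuming a client instance constructed as above; index names are placeholders and the statistics property names follow the generated GetIndexStatisticsResult model.

    import { SearchServiceClient } from "./generated/service/searchServiceClient"; // illustrative path

    async function inspectIndexes(client: SearchServiceClient): Promise<void> {
      const stats = await client.indexes.getStatistics("hotel-live-test1");
      console.log(`documents: ${stats.documentCount}, bytes: ${stats.storageSize}`);

      // Previously deleteMethod(...); the regenerated client exposes delete(...).
      await client.indexes.delete("obsolete-index");
    }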
- * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ import * as coreHttp from "@azure/core-http"; -import * as Models from "../models"; -import * as Mappers from "../models/skillsetsMappers"; +import * as Mappers from "../models/mappers"; import * as Parameters from "../models/parameters"; -import { SearchServiceClientContext } from "../searchServiceClientContext"; +import { SearchServiceClient } from "../searchServiceClient"; +import { + SearchIndexerSkillset, + SkillsetsCreateOrUpdateOptionalParams, + SkillsetsCreateOrUpdateResponse, + SkillsetsDeleteOptionalParams, + SkillsetsGetOptionalParams, + SkillsetsGetResponse, + SkillsetsListOptionalParams, + SkillsetsListResponse, + SkillsetsCreateOptionalParams, + SkillsetsCreateResponse +} from "../models"; /** Class representing a Skillsets. */ export class Skillsets { - private readonly client: SearchServiceClientContext; + private readonly client: SearchServiceClient; /** - * Create a Skillsets. - * @param {SearchServiceClientContext} client Reference to the service client. + * Initialize a new instance of the class Skillsets class. + * @param client Reference to the service client */ - constructor(client: SearchServiceClientContext) { + constructor(client: SearchServiceClient) { this.client = client; } /** * Creates a new skillset in a search service or updates the skillset if it already exists. * @param skillsetName The name of the skillset to create or update. - * @param skillset The skillset containing one or more skills to create or update in a search - * service. - * @param [options] The optional parameters - * @returns Promise - */ - createOrUpdate(skillsetName: string, skillset: Models.SearchIndexerSkillset, options?: Models.SkillsetsCreateOrUpdateOptionalParams): Promise; - /** - * @param skillsetName The name of the skillset to create or update. - * @param skillset The skillset containing one or more skills to create or update in a search - * service. - * @param callback The callback - */ - createOrUpdate(skillsetName: string, skillset: Models.SearchIndexerSkillset, callback: coreHttp.ServiceCallback): void; - /** - * @param skillsetName The name of the skillset to create or update. - * @param skillset The skillset containing one or more skills to create or update in a search - * service. - * @param options The optional parameters - * @param callback The callback - */ - createOrUpdate(skillsetName: string, skillset: Models.SearchIndexerSkillset, options: Models.SkillsetsCreateOrUpdateOptionalParams, callback: coreHttp.ServiceCallback): void; - createOrUpdate(skillsetName: string, skillset: Models.SearchIndexerSkillset, options?: Models.SkillsetsCreateOrUpdateOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param skillset The skillset containing one or more skills to create or update in a search service. + * @param options The options parameters. 
+ */ + createOrUpdate( + skillsetName: string, + skillset: SearchIndexerSkillset, + options?: SkillsetsCreateOrUpdateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + skillsetName, + skillset, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - skillsetName, - skillset, - options - }, - createOrUpdateOperationSpec, - callback) as Promise; + operationArguments, + createOrUpdateOperationSpec + ) as Promise; } /** * Deletes a skillset in a search service. * @param skillsetName The name of the skillset to delete. - * @param [options] The optional parameters - * @returns Promise - */ - deleteMethod(skillsetName: string, options?: Models.SkillsetsDeleteMethodOptionalParams): Promise; - /** - * @param skillsetName The name of the skillset to delete. - * @param callback The callback - */ - deleteMethod(skillsetName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param skillsetName The name of the skillset to delete. - * @param options The optional parameters - * @param callback The callback - */ - deleteMethod(skillsetName: string, options: Models.SkillsetsDeleteMethodOptionalParams, callback: coreHttp.ServiceCallback): void; - deleteMethod(skillsetName: string, options?: Models.SkillsetsDeleteMethodOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + delete( + skillsetName: string, + options?: SkillsetsDeleteOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + skillsetName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - skillsetName, - options - }, - deleteMethodOperationSpec, - callback); + operationArguments, + deleteOperationSpec + ) as Promise; } /** * Retrieves a skillset in a search service. * @param skillsetName The name of the skillset to retrieve. - * @param [options] The optional parameters - * @returns Promise - */ - get(skillsetName: string, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param skillsetName The name of the skillset to retrieve. - * @param callback The callback - */ - get(skillsetName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param skillsetName The name of the skillset to retrieve. - * @param options The optional parameters - * @param callback The callback - */ - get(skillsetName: string, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - get(skillsetName: string, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + get( + skillsetName: string, + options?: SkillsetsGetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + skillsetName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - skillsetName, - options - }, - getOperationSpec, - callback) as Promise; + operationArguments, + getOperationSpec + ) as Promise; } /** * List all skillsets in a search service. - * @param [options] The optional parameters - * @returns Promise - */ - list(options?: Models.SkillsetsListOptionalParams): Promise; - /** - * @param callback The callback + * @param options The options parameters. 
*/ - list(callback: coreHttp.ServiceCallback): void; - /** - * @param options The optional parameters - * @param callback The callback - */ - list(options: Models.SkillsetsListOptionalParams, callback: coreHttp.ServiceCallback): void; - list(options?: Models.SkillsetsListOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + list(options?: SkillsetsListOptionalParams): Promise { + const operationArguments: coreHttp.OperationArguments = { + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - options - }, - listOperationSpec, - callback) as Promise; + operationArguments, + listOperationSpec + ) as Promise; } /** * Creates a new skillset in a search service. * @param skillset The skillset containing one or more skills to create in a search service. - * @param [options] The optional parameters - * @returns Promise - */ - create(skillset: Models.SearchIndexerSkillset, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param skillset The skillset containing one or more skills to create in a search service. - * @param callback The callback - */ - create(skillset: Models.SearchIndexerSkillset, callback: coreHttp.ServiceCallback): void; - /** - * @param skillset The skillset containing one or more skills to create in a search service. - * @param options The optional parameters - * @param callback The callback - */ - create(skillset: Models.SearchIndexerSkillset, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - create(skillset: Models.SearchIndexerSkillset, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. 
+ */ + create( + skillset: SearchIndexerSkillset, + options?: SkillsetsCreateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + skillset, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - skillset, - options - }, - createOperationSpec, - callback) as Promise; + operationArguments, + createOperationSpec + ) as Promise; } } - // Operation Specifications -const serializer = new coreHttp.Serializer(Mappers); +const serializer = new coreHttp.Serializer(Mappers, /* isXml */ false); + const createOrUpdateOperationSpec: coreHttp.OperationSpec = { + path: "/skillsets('{skillsetName}')", httpMethod: "PUT", - path: "skillsets('{skillsetName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.skillsetName - ], - queryParameters: [ - Parameters.apiVersion - ], - headerParameters: [ - Parameters.ifMatch, - Parameters.ifNoneMatch, - Parameters.prefer - ], - requestBody: { - parameterPath: "skillset", - mapper: { - ...Mappers.SearchIndexerSkillset, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.SearchIndexerSkillset @@ -205,23 +145,23 @@ const createOrUpdateOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, - serializer -}; - -const deleteMethodOperationSpec: coreHttp.OperationSpec = { - httpMethod: "DELETE", - path: "skillsets('{skillsetName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.skillsetName - ], - queryParameters: [ - Parameters.apiVersion - ], + requestBody: Parameters.skillset, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.skillsetName], headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId, Parameters.ifMatch, - Parameters.ifNoneMatch + Parameters.ifNoneMatch, + Parameters.prefer ], + mediaType: "json", + serializer +}; +const deleteOperationSpec: coreHttp.OperationSpec = { + path: "/skillsets('{skillsetName}')", + httpMethod: "DELETE", responses: { 204: {}, 404: {}, @@ -229,19 +169,19 @@ const deleteMethodOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.skillsetName], + headerParameters: [ + Parameters.accept, + Parameters.xMsClientRequestId, + Parameters.ifMatch, + Parameters.ifNoneMatch + ], serializer }; - const getOperationSpec: coreHttp.OperationSpec = { + path: "/skillsets('{skillsetName}')", httpMethod: "GET", - path: "skillsets('{skillsetName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.skillsetName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.SearchIndexerSkillset @@ -250,19 +190,14 @@ const getOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.skillsetName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const listOperationSpec: coreHttp.OperationSpec = { + path: "/skillsets", httpMethod: "GET", - path: "skillsets", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.select, - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.ListSkillsetsResult @@ -271,25 +206,14 @@ const listOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion, Parameters.select], + 
urlParameters: [Parameters.endpoint], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const createOperationSpec: coreHttp.OperationSpec = { + path: "/skillsets", httpMethod: "POST", - path: "skillsets", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "skillset", - mapper: { - ...Mappers.SearchIndexerSkillset, - required: true - } - }, responses: { 201: { bodyMapper: Mappers.SearchIndexerSkillset @@ -298,5 +222,14 @@ const createOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.skillset, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint], + headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId + ], + mediaType: "json", serializer }; diff --git a/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts b/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts index 31e6f33f84a5..6ed1fe8863de 100644 --- a/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts +++ b/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts @@ -1,28 +1,37 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ import * as coreHttp from "@azure/core-http"; -import * as Models from "../models"; -import * as Mappers from "../models/synonymMapsMappers"; +import * as Mappers from "../models/mappers"; import * as Parameters from "../models/parameters"; -import { SearchServiceClientContext } from "../searchServiceClientContext"; +import { SearchServiceClient } from "../searchServiceClient"; +import { + SynonymMap, + SynonymMapsCreateOrUpdateOptionalParams, + SynonymMapsCreateOrUpdateResponse, + SynonymMapsDeleteOptionalParams, + SynonymMapsGetOptionalParams, + SynonymMapsGetResponse, + SynonymMapsListOptionalParams, + SynonymMapsListResponse, + SynonymMapsCreateOptionalParams, + SynonymMapsCreateResponse +} from "../models"; /** Class representing a SynonymMaps. */ export class SynonymMaps { - private readonly client: SearchServiceClientContext; + private readonly client: SearchServiceClient; /** - * Create a SynonymMaps. - * @param {SearchServiceClientContext} client Reference to the service client. + * Initialize a new instance of the class SynonymMaps class. + * @param client Reference to the service client */ - constructor(client: SearchServiceClientContext) { + constructor(client: SearchServiceClient) { this.client = client; } @@ -30,167 +39,103 @@ export class SynonymMaps { * Creates a new synonym map or updates a synonym map if it already exists. * @param synonymMapName The name of the synonym map to create or update. * @param synonymMap The definition of the synonym map to create or update. 
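Skillsets follow suit. A small sketch assuming a client instance and placeholder skillset names:

    import { SearchServiceClient } from "./generated/service/searchServiceClient"; // illustrative path

    async function tidySkillsets(client: SearchServiceClient): Promise<void> {
      const skillset = await client.skillsets.get("hotel-skillset");
      console.log(`${skillset.name} has ${skillset.skills.length} skill(s)`);

      // deleteMethod(...) from the previous generation is now delete(...).
      await client.skillsets.delete("obsolete-skillset");
    }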
- * @param [options] The optional parameters - * @returns Promise - */ - createOrUpdate(synonymMapName: string, synonymMap: Models.SynonymMap, options?: Models.SynonymMapsCreateOrUpdateOptionalParams): Promise; - /** - * @param synonymMapName The name of the synonym map to create or update. - * @param synonymMap The definition of the synonym map to create or update. - * @param callback The callback - */ - createOrUpdate(synonymMapName: string, synonymMap: Models.SynonymMap, callback: coreHttp.ServiceCallback): void; - /** - * @param synonymMapName The name of the synonym map to create or update. - * @param synonymMap The definition of the synonym map to create or update. - * @param options The optional parameters - * @param callback The callback - */ - createOrUpdate(synonymMapName: string, synonymMap: Models.SynonymMap, options: Models.SynonymMapsCreateOrUpdateOptionalParams, callback: coreHttp.ServiceCallback): void; - createOrUpdate(synonymMapName: string, synonymMap: Models.SynonymMap, options?: Models.SynonymMapsCreateOrUpdateOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + createOrUpdate( + synonymMapName: string, + synonymMap: SynonymMap, + options?: SynonymMapsCreateOrUpdateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + synonymMapName, + synonymMap, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - synonymMapName, - synonymMap, - options - }, - createOrUpdateOperationSpec, - callback) as Promise; + operationArguments, + createOrUpdateOperationSpec + ) as Promise; } /** * Deletes a synonym map. * @param synonymMapName The name of the synonym map to delete. - * @param [options] The optional parameters - * @returns Promise - */ - deleteMethod(synonymMapName: string, options?: Models.SynonymMapsDeleteMethodOptionalParams): Promise; - /** - * @param synonymMapName The name of the synonym map to delete. - * @param callback The callback - */ - deleteMethod(synonymMapName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param synonymMapName The name of the synonym map to delete. - * @param options The optional parameters - * @param callback The callback - */ - deleteMethod(synonymMapName: string, options: Models.SynonymMapsDeleteMethodOptionalParams, callback: coreHttp.ServiceCallback): void; - deleteMethod(synonymMapName: string, options?: Models.SynonymMapsDeleteMethodOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + delete( + synonymMapName: string, + options?: SynonymMapsDeleteOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + synonymMapName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - synonymMapName, - options - }, - deleteMethodOperationSpec, - callback); + operationArguments, + deleteOperationSpec + ) as Promise; } /** * Retrieves a synonym map definition. * @param synonymMapName The name of the synonym map to retrieve. - * @param [options] The optional parameters - * @returns Promise - */ - get(synonymMapName: string, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param synonymMapName The name of the synonym map to retrieve. 
- * @param callback The callback - */ - get(synonymMapName: string, callback: coreHttp.ServiceCallback): void; - /** - * @param synonymMapName The name of the synonym map to retrieve. - * @param options The optional parameters - * @param callback The callback - */ - get(synonymMapName: string, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - get(synonymMapName: string, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + get( + synonymMapName: string, + options?: SynonymMapsGetOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + synonymMapName, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - synonymMapName, - options - }, - getOperationSpec, - callback) as Promise; + operationArguments, + getOperationSpec + ) as Promise; } /** * Lists all synonym maps available for a search service. - * @param [options] The optional parameters - * @returns Promise - */ - list(options?: Models.SynonymMapsListOptionalParams): Promise; - /** - * @param callback The callback - */ - list(callback: coreHttp.ServiceCallback): void; - /** - * @param options The optional parameters - * @param callback The callback - */ - list(options: Models.SynonymMapsListOptionalParams, callback: coreHttp.ServiceCallback): void; - list(options?: Models.SynonymMapsListOptionalParams | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. + */ + list( + options?: SynonymMapsListOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - options - }, - listOperationSpec, - callback) as Promise; + operationArguments, + listOperationSpec + ) as Promise; } /** * Creates a new synonym map. * @param synonymMap The definition of the synonym map to create. - * @param [options] The optional parameters - * @returns Promise - */ - create(synonymMap: Models.SynonymMap, options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param synonymMap The definition of the synonym map to create. - * @param callback The callback - */ - create(synonymMap: Models.SynonymMap, callback: coreHttp.ServiceCallback): void; - /** - * @param synonymMap The definition of the synonym map to create. - * @param options The optional parameters - * @param callback The callback - */ - create(synonymMap: Models.SynonymMap, options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - create(synonymMap: Models.SynonymMap, options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + * @param options The options parameters. 
+ */ + create( + synonymMap: SynonymMap, + options?: SynonymMapsCreateOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + synonymMap, + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.client.sendOperationRequest( - { - synonymMap, - options - }, - createOperationSpec, - callback) as Promise; + operationArguments, + createOperationSpec + ) as Promise; } } - // Operation Specifications -const serializer = new coreHttp.Serializer(Mappers); +const serializer = new coreHttp.Serializer(Mappers, /* isXml */ false); + const createOrUpdateOperationSpec: coreHttp.OperationSpec = { + path: "/synonymmaps('{synonymMapName}')", httpMethod: "PUT", - path: "synonymmaps('{synonymMapName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.synonymMapName - ], - queryParameters: [ - Parameters.apiVersion - ], - headerParameters: [ - Parameters.ifMatch, - Parameters.ifNoneMatch, - Parameters.prefer - ], - requestBody: { - parameterPath: "synonymMap", - mapper: { - ...Mappers.SynonymMap, - required: true - } - }, responses: { 200: { bodyMapper: Mappers.SynonymMap @@ -202,23 +147,23 @@ const createOrUpdateOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, - serializer -}; - -const deleteMethodOperationSpec: coreHttp.OperationSpec = { - httpMethod: "DELETE", - path: "synonymmaps('{synonymMapName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.synonymMapName - ], - queryParameters: [ - Parameters.apiVersion - ], + requestBody: Parameters.synonymMap, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.synonymMapName], headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId, Parameters.ifMatch, - Parameters.ifNoneMatch + Parameters.ifNoneMatch, + Parameters.prefer ], + mediaType: "json", + serializer +}; +const deleteOperationSpec: coreHttp.OperationSpec = { + path: "/synonymmaps('{synonymMapName}')", + httpMethod: "DELETE", responses: { 204: {}, 404: {}, @@ -226,19 +171,19 @@ const deleteMethodOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.synonymMapName], + headerParameters: [ + Parameters.accept, + Parameters.xMsClientRequestId, + Parameters.ifMatch, + Parameters.ifNoneMatch + ], serializer }; - const getOperationSpec: coreHttp.OperationSpec = { + path: "/synonymmaps('{synonymMapName}')", httpMethod: "GET", - path: "synonymmaps('{synonymMapName}')", - urlParameters: [ - Parameters.endpoint, - Parameters.synonymMapName - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.SynonymMap @@ -247,19 +192,14 @@ const getOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint, Parameters.synonymMapName], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const listOperationSpec: coreHttp.OperationSpec = { + path: "/synonymmaps", httpMethod: "GET", - path: "synonymmaps", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.select, - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.ListSynonymMapsResult @@ -268,25 +208,14 @@ const listOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion, 
Parameters.select], + urlParameters: [Parameters.endpoint], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - const createOperationSpec: coreHttp.OperationSpec = { + path: "/synonymmaps", httpMethod: "POST", - path: "synonymmaps", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.apiVersion - ], - requestBody: { - parameterPath: "synonymMap", - mapper: { - ...Mappers.SynonymMap, - required: true - } - }, responses: { 201: { bodyMapper: Mappers.SynonymMap @@ -295,5 +224,14 @@ const createOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + requestBody: Parameters.synonymMap, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint], + headerParameters: [ + Parameters.contentType, + Parameters.accept, + Parameters.xMsClientRequestId + ], + mediaType: "json", serializer }; diff --git a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts b/sdk/search/search-documents/src/generated/service/searchServiceClient.ts index 853bf63cbcc4..6c59a30c72a8 100644 --- a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts +++ b/sdk/search/search-documents/src/generated/service/searchServiceClient.ts @@ -1,79 +1,78 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ import * as coreHttp from "@azure/core-http"; -import * as Models from "./models"; -import * as Mappers from "./models/mappers"; +import { + DataSources, + Indexers, + Skillsets, + SynonymMaps, + Indexes +} from "./operations"; import * as Parameters from "./models/parameters"; -import * as operations from "./operations"; +import * as Mappers from "./models/mappers"; import { SearchServiceClientContext } from "./searchServiceClientContext"; +import { + SearchServiceClientOptionalParams, + ApiVersion20200630, + SearchServiceClientGetServiceStatisticsOptionalParams, + SearchServiceClientGetServiceStatisticsResponse +} from "./models"; -class SearchServiceClient extends SearchServiceClientContext { - // Operation groups - dataSources: operations.DataSources; - indexers: operations.Indexers; - skillsets: operations.Skillsets; - synonymMaps: operations.SynonymMaps; - indexes: operations.Indexes; - +/** @hidden */ +export class SearchServiceClient extends SearchServiceClientContext { /** * Initializes a new instance of the SearchServiceClient class. - * @param apiVersion Client Api Version. * @param endpoint The endpoint URL of the search service. 
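And the SynonymMaps group. The definition below is a guess at a minimal generated SynonymMap shape (name, solr format, and a synonyms string), not something this patch prescribes:

    import { SearchServiceClient } from "./generated/service/searchServiceClient"; // illustrative path

    async function upsertSynonymMap(client: SearchServiceClient): Promise<void> {
      const map = await client.synonymMaps.createOrUpdate("hotel-synonyms", {
        name: "hotel-synonyms",
        format: "solr", // assumed constant format value
        synonyms: "USA, United States, United States of America"
      });
      console.log(map.etag);
    }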
- * @param [options] The parameter options + * @param apiVersion Api Version + * @param options The parameter options */ - constructor(apiVersion: string, endpoint: string, options?: coreHttp.ServiceClientOptions) { - super(apiVersion, endpoint, options); - this.dataSources = new operations.DataSources(this); - this.indexers = new operations.Indexers(this); - this.skillsets = new operations.Skillsets(this); - this.synonymMaps = new operations.SynonymMaps(this); - this.indexes = new operations.Indexes(this); + constructor( + endpoint: string, + apiVersion: ApiVersion20200630, + options?: SearchServiceClientOptionalParams + ) { + super(endpoint, apiVersion, options); + this.dataSources = new DataSources(this); + this.indexers = new Indexers(this); + this.skillsets = new Skillsets(this); + this.synonymMaps = new SynonymMaps(this); + this.indexes = new Indexes(this); } /** * Gets service level statistics for a search service. - * @param [options] The optional parameters - * @returns Promise - */ - getServiceStatistics(options?: coreHttp.RequestOptionsBase): Promise; - /** - * @param callback The callback - */ - getServiceStatistics(callback: coreHttp.ServiceCallback): void; - /** - * @param options The optional parameters - * @param callback The callback + * @param options The options parameters. */ - getServiceStatistics(options: coreHttp.RequestOptionsBase, callback: coreHttp.ServiceCallback): void; - getServiceStatistics(options?: coreHttp.RequestOptionsBase | coreHttp.ServiceCallback, callback?: coreHttp.ServiceCallback): Promise { + getServiceStatistics( + options?: SearchServiceClientGetServiceStatisticsOptionalParams + ): Promise { + const operationArguments: coreHttp.OperationArguments = { + options: coreHttp.operationOptionsToRequestOptionsBase(options || {}) + }; return this.sendOperationRequest( - { - options - }, - getServiceStatisticsOperationSpec, - callback) as Promise; + operationArguments, + getServiceStatisticsOperationSpec + ) as Promise; } -} + dataSources: DataSources; + indexers: Indexers; + skillsets: Skillsets; + synonymMaps: SynonymMaps; + indexes: Indexes; +} // Operation Specifications -const serializer = new coreHttp.Serializer(Mappers); +const serializer = new coreHttp.Serializer(Mappers, /* isXml */ false); + const getServiceStatisticsOperationSpec: coreHttp.OperationSpec = { + path: "/servicestats", httpMethod: "GET", - path: "servicestats", - urlParameters: [ - Parameters.endpoint - ], - queryParameters: [ - Parameters.apiVersion - ], responses: { 200: { bodyMapper: Mappers.ServiceStatistics @@ -82,13 +81,8 @@ const getServiceStatisticsOperationSpec: coreHttp.OperationSpec = { bodyMapper: Mappers.SearchError } }, + queryParameters: [Parameters.apiVersion], + urlParameters: [Parameters.endpoint], + headerParameters: [Parameters.accept, Parameters.xMsClientRequestId], serializer }; - -export { - SearchServiceClient, - SearchServiceClientContext, - Models as SearchServiceModels, - Mappers as SearchServiceMappers -}; -export * from "./operations"; diff --git a/sdk/search/search-documents/src/generated/service/searchServiceClientContext.ts b/sdk/search/search-documents/src/generated/service/searchServiceClientContext.ts index ab8dec20d108..520b5420088b 100644 --- a/sdk/search/search-documents/src/generated/service/searchServiceClientContext.ts +++ b/sdk/search/search-documents/src/generated/service/searchServiceClientContext.ts @@ -1,36 +1,44 @@ /* - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. 
See License.txt in the project root for - * license information. + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is - * regenerated. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ import * as coreHttp from "@azure/core-http"; +import { + ApiVersion20200630, + SearchServiceClientOptionalParams +} from "./models"; const packageName = "@azure/search-documents"; const packageVersion = "11.1.0-beta.2"; +/** @hidden */ export class SearchServiceClientContext extends coreHttp.ServiceClient { - apiVersion: string; endpoint: string; + apiVersion: ApiVersion20200630; /** * Initializes a new instance of the SearchServiceClientContext class. - * @param apiVersion Client Api Version. * @param endpoint The endpoint URL of the search service. - * @param [options] The parameter options + * @param apiVersion Api Version + * @param options The parameter options */ - constructor(apiVersion: string, endpoint: string, options?: coreHttp.ServiceClientOptions) { - if (apiVersion == undefined) { - throw new Error("'apiVersion' cannot be null."); + constructor( + endpoint: string, + apiVersion: ApiVersion20200630, + options?: SearchServiceClientOptionalParams + ) { + if (endpoint === undefined) { + throw new Error("'endpoint' cannot be null"); } - if (endpoint == undefined) { - throw new Error("'endpoint' cannot be null."); + if (apiVersion === undefined) { + throw new Error("'apiVersion' cannot be null"); } + // Initializing default values for options if (!options) { options = {}; } @@ -42,9 +50,12 @@ export class SearchServiceClientContext extends coreHttp.ServiceClient { super(undefined, options); - this.baseUri = "{endpoint}"; this.requestContentType = "application/json; charset=utf-8"; - this.apiVersion = apiVersion; + + this.baseUri = options.endpoint || "{endpoint}"; + + // Parameter assignments this.endpoint = endpoint; + this.apiVersion = apiVersion; } } diff --git a/sdk/search/search-documents/src/index.ts b/sdk/search/search-documents/src/index.ts index 271876fb2b8a..9d584a37c038 100644 --- a/sdk/search/search-documents/src/index.ts +++ b/sdk/search/search-documents/src/index.ts @@ -129,6 +129,7 @@ export { } from "./generated/data/models"; export { RegexFlags, + KnownRegexFlags, LuceneStandardAnalyzer, StopAnalyzer, MappingCharFilter, @@ -187,6 +188,7 @@ export { AnalyzeResult, AnalyzedTokenInfo, ConditionalSkill, + CustomEntityLookupSkill, KeyPhraseExtractionSkill, OcrSkill, ImageAnalysisSkill, @@ -199,20 +201,31 @@ export { TextTranslationSkill, WebApiSkill, SentimentSkillLanguage, + KnownSentimentSkillLanguage, SplitSkillLanguage, + KnownSplitSkillLanguage, TextSplitMode, + KnownTextSplitMode, TextTranslationSkillLanguage, + KnownTextTranslationSkillLanguage, DefaultCognitiveServicesAccount, CognitiveServicesAccountKey, InputFieldMappingEntry, OutputFieldMappingEntry, EntityCategory, + KnownEntityCategory, EntityRecognitionSkillLanguage, + KnownEntityRecognitionSkillLanguage, ImageAnalysisSkillLanguage, + KnownImageAnalysisSkillLanguage, ImageDetail, + KnownImageDetail, VisualFeature, + KnownVisualFeature, KeyPhraseExtractionSkillLanguage, + KnownKeyPhraseExtractionSkillLanguage, OcrSkillLanguage, + KnownOcrSkillLanguage, FieldMapping, IndexingParameters, IndexingSchedule, @@ -226,6 +239,7 @@ export { SearchIndexerWarning, SearchIndexerDataContainer, SearchIndexerDataSourceType, + 
KnownSearchIndexerDataSourceType, SoftDeleteColumnDeletionDetectionPolicy, SqlIntegratedChangeTrackingPolicy, HighWaterMarkChangeDetectionPolicy, @@ -233,13 +247,32 @@ export { ServiceLimits, ResourceCounter, LexicalAnalyzerName, + KnownLexicalAnalyzerName, ClassicSimilarity, BM25Similarity, IndexingParametersConfiguration, BlobIndexerDataToExtract, + KnownBlobIndexerDataToExtract, IndexerExecutionEnvironment, BlobIndexerImageAction, + KnownBlobIndexerImageAction, BlobIndexerParsingMode, - BlobIndexerPDFTextRotationAlgorithm + KnownBlobIndexerParsingMode, + BlobIndexerPDFTextRotationAlgorithm, + KnownBlobIndexerPDFTextRotationAlgorithm, + TokenFilter as BaseTokenFilter, + Similarity, + LexicalTokenizer as BaseLexicalTokenizer, + CognitiveServicesAccount as BaseCognitiveServicesAccount, + SearchIndexerSkill as BaseSearchIndexerSkill, + ScoringFunction as BaseScoringFunction, + DataChangeDetectionPolicy as BaseDataChangeDetectionPolicy, + LexicalAnalyzer as BaseLexicalAnalyzer, + CharFilter as BaseCharFilter, + DataDeletionDetectionPolicy as BaseDataDeletionDetectionPolicy, + CustomEntityLookupSkillLanguage, + KnownCustomEntityLookupSkillLanguage, + CustomEntity, + CustomEntityAlias } from "./generated/service/models"; export { AzureKeyCredential } from "@azure/core-auth"; diff --git a/sdk/search/search-documents/src/searchClient.ts b/sdk/search/search-documents/src/searchClient.ts index 9e49b506fea8..88b222dddfd8 100644 --- a/sdk/search/search-documents/src/searchClient.ts +++ b/sdk/search/search-documents/src/searchClient.ts @@ -148,7 +148,7 @@ export class SearchClient { pipeline.requestPolicyFactories.unshift(odataMetadataPolicy("none")); } - this.client = new GeneratedClient(this.apiVersion, this.endpoint, this.indexName, pipeline); + this.client = new GeneratedClient(this.endpoint, this.indexName, this.apiVersion, pipeline); } /** diff --git a/sdk/search/search-documents/src/searchIndexClient.ts b/sdk/search/search-documents/src/searchIndexClient.ts index e5034b84c56d..e58ccaf86d37 100644 --- a/sdk/search/search-documents/src/searchIndexClient.ts +++ b/sdk/search/search-documents/src/searchIndexClient.ts @@ -137,7 +137,7 @@ export class SearchIndexClient { pipeline.requestPolicyFactories.unshift(odataMetadataPolicy("minimal")); } - this.client = new GeneratedClient(this.apiVersion, this.endpoint, pipeline); + this.client = new GeneratedClient(this.endpoint, this.apiVersion, pipeline); } private async *listIndexesPage( @@ -473,7 +473,7 @@ export class SearchIndexClient { const etag = typeof index === "string" ? undefined : options.onlyIfUnchanged ? index.etag : undefined; - await this.client.indexes.deleteMethod(indexName, { + await this.client.indexes.delete(indexName, { ...operationOptionsToRequestOptionsBase(updatedOptions), ifMatch: etag }); @@ -507,7 +507,7 @@ export class SearchIndexClient { ? 
synonymMap.etag : undefined; - await this.client.synonymMaps.deleteMethod(synonymMapName, { + await this.client.synonymMaps.delete(synonymMapName, { ...operationOptionsToRequestOptionsBase(updatedOptions), ifMatch: etag }); diff --git a/sdk/search/search-documents/src/searchIndexerClient.ts b/sdk/search/search-documents/src/searchIndexerClient.ts index c18421a95862..cc7f46eba1e1 100644 --- a/sdk/search/search-documents/src/searchIndexerClient.ts +++ b/sdk/search/search-documents/src/searchIndexerClient.ts @@ -128,7 +128,7 @@ export class SearchIndexerClient { pipeline.requestPolicyFactories.unshift(odataMetadataPolicy("minimal")); } - this.client = new GeneratedClient(this.apiVersion, this.endpoint, pipeline); + this.client = new GeneratedClient(this.endpoint, this.apiVersion, pipeline); } /** @@ -574,7 +574,7 @@ export class SearchIndexerClient { ? indexer.etag : undefined; - await this.client.indexers.deleteMethod(indexerName, { + await this.client.indexers.delete(indexerName, { ...operationOptionsToRequestOptionsBase(updatedOptions), ifMatch: etag }); @@ -612,7 +612,7 @@ export class SearchIndexerClient { ? dataSourceConnection.etag : undefined; - await this.client.dataSources.deleteMethod(dataSourceConnectionName, { + await this.client.dataSources.delete(dataSourceConnectionName, { ...operationOptionsToRequestOptionsBase(updatedOptions), ifMatch: etag }); @@ -646,7 +646,7 @@ export class SearchIndexerClient { ? skillset.etag : undefined; - await this.client.skillsets.deleteMethod(skillsetName, { + await this.client.skillsets.delete(skillsetName, { ...operationOptionsToRequestOptionsBase(updatedOptions), ifMatch: etag }); diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index 0fce983cb574..c3a1c97103eb 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -71,7 +71,8 @@ import { ServiceLimits, FieldMapping, IndexingParameters, - IndexingSchedule + IndexingSchedule, + CustomEntityLookupSkill } from "./generated/service/models"; import { PagedAsyncIterableIterator } from "@azure/core-paging"; @@ -415,7 +416,7 @@ export interface CustomAnalyzer { * The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as * breaking a sentence into words. KnownTokenizerNames is an enum containing known values. */ - tokenizerName: string; + tokenizer: string; /** * A list of token filters used to filter out or modify the tokens generated by a tokenizer. For * example, you can specify a lowercase filter that converts all characters to lowercase. The @@ -454,7 +455,8 @@ export type SearchIndexerSkill = | SentimentSkill | SplitSkill | TextTranslationSkill - | WebApiSkill; + | WebApiSkill + | CustomEntityLookupSkill; /** * Contains the possible cases for CognitiveServicesAccount. @@ -850,7 +852,7 @@ export interface SynonymMap { * keys is not available for free search services, and is only available for paid services * created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; /** * The ETag of the synonym map. */ @@ -899,7 +901,7 @@ export interface SearchIndex { /** * Options to control Cross-Origin Resource Sharing (CORS) for the index. */ - corsOptions?: CorsOptions; + corsOptions?: CorsOptions | null; /** * The suggesters for the index. 
*/ @@ -930,7 +932,7 @@ export interface SearchIndex { * keys is not available for free search services, and is only available for paid services * created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; /** * The type of similarity algorithm to be used when scoring and ranking the documents matching a * search query. The similarity algorithm can only be defined at index creation time and cannot @@ -970,11 +972,11 @@ export interface SearchIndexer { /** * The schedule for this indexer. */ - schedule?: IndexingSchedule; + schedule?: IndexingSchedule | null; /** * Parameters for indexer execution. */ - parameters?: IndexingParameters; + parameters?: IndexingParameters | null; /** * Defines mappings between fields in the data source and corresponding target fields in the * index. @@ -987,7 +989,7 @@ export interface SearchIndexer { /** * A value indicating whether the indexer is disabled. Default is false. Default value: false. */ - isDisabled?: boolean; + isDisabled?: boolean | null; /** * The ETag of the indexer. */ @@ -1003,7 +1005,7 @@ export interface SearchIndexer { * customer-managed keys is not available for free search services, and is only available for * paid services created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; } /** @@ -1072,7 +1074,7 @@ export interface SearchIndexerSkillset { * definition will be unaffected. Encryption with customer-managed keys is not available for free * search services, and is only available for paid services created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; } /** @@ -1786,11 +1788,11 @@ export interface SearchIndexerDataSourceConnection { /** * The data change detection policy for the datasource. */ - dataChangeDetectionPolicy?: DataChangeDetectionPolicy; + dataChangeDetectionPolicy?: DataChangeDetectionPolicy | null; /** * The data deletion detection policy for the datasource. */ - dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy; + dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy | null; /** * The ETag of the DataSource. */ @@ -1806,6 +1808,6 @@ export interface SearchIndexerDataSourceConnection { * available for free search services, and is only available for paid services created on or * after January 1, 2019. 
*/ - encryptionKey?: SearchResourceEncryptionKey; + encryptionKey?: SearchResourceEncryptionKey | null; } // END manually modified generated interfaces diff --git a/sdk/search/search-documents/src/serviceUtils.ts b/sdk/search/search-documents/src/serviceUtils.ts index dd1c61962b94..0699aed659a9 100644 --- a/sdk/search/search-documents/src/serviceUtils.ts +++ b/sdk/search/search-documents/src/serviceUtils.ts @@ -26,7 +26,25 @@ import { BM25Similarity, ClassicSimilarity, TokenFilterUnion, - SearchResourceEncryptionKey as GeneratedSearchResourceEncryptionKey + SearchResourceEncryptionKey as GeneratedSearchResourceEncryptionKey, + ConditionalSkill, + KeyPhraseExtractionSkill, + OcrSkill, + ImageAnalysisSkill, + LanguageDetectionSkill, + ShaperSkill, + MergeSkill, + EntityRecognitionSkill, + SentimentSkill, + SplitSkill, + CustomEntityLookupSkill, + TextTranslationSkill, + WebApiSkill, + LuceneStandardAnalyzer, + StopAnalyzer, + PatternAnalyzer as GeneratedPatternAnalyzer, + CustomAnalyzer, + PatternTokenizer } from "./generated/service/models"; import { LexicalAnalyzer, @@ -48,7 +66,8 @@ import { DataChangeDetectionPolicy, DataDeletionDetectionPolicy, SimilarityAlgorithm, - SearchResourceEncryptionKey + SearchResourceEncryptionKey, + PatternAnalyzer } from "./serviceModels"; import { SuggestDocumentsResult, SuggestResult, SearchResult } from "./indexModels"; import { @@ -63,8 +82,46 @@ export function convertSkillsToPublic(skills: SearchIndexerSkillUnion[]): Search const result: SearchIndexerSkill[] = []; for (const skill of skills) { - if (skill.odatatype !== "SearchIndexerSkill") { - result.push(skill); + switch (skill.odatatype) { + case "#Microsoft.Skills.Util.ConditionalSkill": + result.push(skill as ConditionalSkill); + break; + case "#Microsoft.Skills.Text.KeyPhraseExtractionSkill": + result.push(skill as KeyPhraseExtractionSkill); + break; + case "#Microsoft.Skills.Vision.OcrSkill": + result.push(skill as OcrSkill); + break; + case "#Microsoft.Skills.Vision.ImageAnalysisSkill": + result.push(skill as ImageAnalysisSkill); + break; + case "#Microsoft.Skills.Text.LanguageDetectionSkill": + result.push(skill as LanguageDetectionSkill); + break; + case "#Microsoft.Skills.Util.ShaperSkill": + result.push(skill as ShaperSkill); + break; + case "#Microsoft.Skills.Text.MergeSkill": + result.push(skill as MergeSkill); + break; + case "#Microsoft.Skills.Text.EntityRecognitionSkill": + result.push(skill as EntityRecognitionSkill); + break; + case "#Microsoft.Skills.Text.SentimentSkill": + result.push(skill as SentimentSkill); + break; + case "#Microsoft.Skills.Text.SplitSkill": + result.push(skill as SplitSkill); + break; + case "#Microsoft.Skills.Text.CustomEntityLookupSkill": + result.push(skill as CustomEntityLookupSkill); + break; + case "#Microsoft.Skills.Text.TranslationSkill": + result.push(skill as TextTranslationSkill); + break; + case "#Microsoft.Skills.Custom.WebApiSkill": + result.push(skill as WebApiSkill); + break; } } return result; @@ -132,7 +189,7 @@ export function convertAnalyzersToGenerated( case "#Microsoft.Azure.Search.CustomAnalyzer": result.push({ ...analyzer, - tokenizer: analyzer.tokenizerName + tokenizer: analyzer.tokenizer }); break; } @@ -151,20 +208,24 @@ export function convertAnalyzersToPublic( for (const analyzer of analyzers) { switch (analyzer.odatatype) { case "#Microsoft.Azure.Search.StandardAnalyzer": + result.push(analyzer as LuceneStandardAnalyzer); + break; case "#Microsoft.Azure.Search.StopAnalyzer": - result.push(analyzer); + result.push(analyzer as 
StopAnalyzer); break; case "#Microsoft.Azure.Search.PatternAnalyzer": result.push({ ...analyzer, - flags: analyzer.flags ? (analyzer.flags.split("|") as RegexFlags[]) : undefined - }); + flags: (analyzer as GeneratedPatternAnalyzer).flags + ? ((analyzer as GeneratedPatternAnalyzer).flags!.split("|") as RegexFlags[]) + : undefined + } as PatternAnalyzer); break; case "#Microsoft.Azure.Search.CustomAnalyzer": result.push({ ...analyzer, - tokenizerName: analyzer.tokenizer - }); + tokenizer: (analyzer as CustomAnalyzer).tokenizer + } as CustomAnalyzer); break; } } @@ -181,9 +242,9 @@ export function convertFieldsToPublic(fields: GeneratedSearchField[]): SearchFie if (field.type === "Collection(Edm.ComplexType)" || field.type === "Edm.ComplexType") { result = field as ComplexField; } else { - const anayzerName: LexicalAnalyzerName | undefined = field.analyzer; - const searchAnalyzerName: LexicalAnalyzerName | undefined = field.searchAnalyzer; - const indexAnalyzerName: LexicalAnalyzerName | undefined = field.indexAnalyzer; + const anayzerName: LexicalAnalyzerName | undefined | null = field.analyzer; + const searchAnalyzerName: LexicalAnalyzerName | undefined | null = field.searchAnalyzer; + const indexAnalyzerName: LexicalAnalyzerName | undefined | null = field.indexAnalyzer; const synonymMapNames: string[] | undefined = field.synonymMaps; const { retrievable, ...restField } = field; @@ -259,9 +320,11 @@ export function convertTokenizersToPublic( if (tokenizer.odatatype === "#Microsoft.Azure.Search.PatternTokenizer") { result.push({ ...tokenizer, - flags: tokenizer.flags ? (tokenizer.flags.split("|") as RegexFlags[]) : undefined + flags: (tokenizer as PatternTokenizer).flags + ? ((tokenizer as PatternTokenizer).flags!.split("|") as RegexFlags[]) + : undefined }); - } else if (tokenizer.odatatype !== "LexicalTokenizer") { + } else { result.push(tokenizer); } } @@ -311,8 +374,8 @@ export function extractOperationOptions( } export function convertEncryptionKeyToPublic( - encryptionKey?: GeneratedSearchResourceEncryptionKey -): SearchResourceEncryptionKey | undefined { + encryptionKey?: GeneratedSearchResourceEncryptionKey | null +): SearchResourceEncryptionKey | undefined | null { if (!encryptionKey) { return encryptionKey; } @@ -332,8 +395,8 @@ export function convertEncryptionKeyToPublic( } export function convertEncryptionKeyToGenerated( - encryptionKey?: SearchResourceEncryptionKey -): GeneratedSearchResourceEncryptionKey | undefined { + encryptionKey?: SearchResourceEncryptionKey | null +): GeneratedSearchResourceEncryptionKey | undefined | null { if (!encryptionKey) { return encryptionKey; } @@ -482,6 +545,7 @@ export function generatedSynonymMapToPublicSynonymMap(synonymMap: GeneratedSynon export function publicSynonymMapToGeneratedSynonymMap(synonymMap: SynonymMap): GeneratedSynonymMap { const result: GeneratedSynonymMap = { name: synonymMap.name, + format: "solr", encryptionKey: convertEncryptionKeyToGenerated(synonymMap.encryptionKey), etag: synonymMap.etag, synonyms: synonymMap.synonyms.join("\n") @@ -549,8 +613,8 @@ export function generatedDataSourceToPublicDataSource( } export function convertDataChangeDetectionPolicyToPublic( - dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion -): DataChangeDetectionPolicy | undefined { + dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion | null +): DataChangeDetectionPolicy | undefined | null { if (!dataChangeDetectionPolicy) { return dataChangeDetectionPolicy; } @@ -566,8 +630,8 @@ export function 
convertDataChangeDetectionPolicyToPublic( } export function convertDataDeletionDetectionPolicyToPublic( - dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion -): DataDeletionDetectionPolicy | undefined { + dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion | null +): DataDeletionDetectionPolicy | undefined | null { if (!dataDeletionDetectionPolicy) { return dataDeletionDetectionPolicy; } diff --git a/sdk/search/search-documents/swagger/Data.md b/sdk/search/search-documents/swagger/Data.md index 397224a8d6a5..ccb94a53f39f 100644 --- a/sdk/search/search-documents/swagger/Data.md +++ b/sdk/search/search-documents/swagger/Data.md @@ -14,7 +14,11 @@ input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/ add-credentials: false title: SearchClient use-extension: - "@microsoft.azure/autorest.typescript": "5.0.1" + "@autorest/typescript": "6.0.0-dev.20210121.1" +disable-async-iterators: true +api-version-parameter: choice +v3: true +hide-clients: true ``` ## Customizations for Track 2 Generator @@ -55,28 +59,29 @@ directive: - from: swagger-document where: $.definitions.IndexAction transform: > - $.properties['@search.action']['x-ms-client-name'] = '__actionType'; $.required = ['@search.action']; -``` +modelerfour: + naming: + override: + ActionType: $DO_NOT_NORMALIZE$__actionType +``` -### Change text to _text in SuggestResult +### Change text to \_text in SuggestResult ```yaml -directive: - - from: swagger-document - where: $.definitions.SuggestResult.properties['@search.text'] - transform: > - $['x-ms-client-name'] = '_text' +modelerfour: + naming: + override: + Text: $DO_NOT_NORMALIZE$_text ``` -### Change score to _score & highlights to _highlights in SuggestResult +### Change score to \_score & highlights to \_highlights in SuggestResult ```yaml -directive: - - from: swagger-document - where: $.definitions.SearchResult - transform: > - $.properties['@search.score']['x-ms-client-name'] = '_score'; - $.properties['@search.highlights']['x-ms-client-name'] = '_highlights'; +modelerfour: + naming: + override: + Score: $DO_NOT_NORMALIZE$_score + Highlights: $DO_NOT_NORMALIZE$_highlights ``` diff --git a/sdk/search/search-documents/swagger/Service.md b/sdk/search/search-documents/swagger/Service.md index 2877544b7d20..b0d9ee34fbbf 100644 --- a/sdk/search/search-documents/swagger/Service.md +++ b/sdk/search/search-documents/swagger/Service.md @@ -13,7 +13,11 @@ source-code-folder-path: ./src/generated/service input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/specification/search/data-plane/Azure.Search/preview/2020-06-30/searchservice.json add-credentials: false use-extension: - "@microsoft.azure/autorest.typescript": "5.0.1" + "@autorest/typescript": "6.0.0-dev.20210121.1" +disable-async-iterators: true +api-version-parameter: choice +v3: true +hide-clients: true ``` ## Customizations for Track 2 Generator @@ -118,6 +122,7 @@ directive: $.enum = newValues; } ``` + ### Make AnalyzerName a string ```yaml @@ -253,3 +258,13 @@ directive: transform: > $["x-ms-client-name"] = "maxPageLength" ``` + +### Change odataType to odatatype + +```yaml +directive: + - from: swagger-document + where: $.definitions..properties["@odata.type"] + transform: > + $["x-ms-client-name"] = "odatatype" +``` diff --git a/sdk/search/search-documents/test/public/utils/setup.ts b/sdk/search/search-documents/test/public/utils/setup.ts index a6c7c3a62a12..ef0a6beac731 100644 --- a/sdk/search/search-documents/test/public/utils/setup.ts +++ 
b/sdk/search/search-documents/test/public/utils/setup.ts @@ -199,6 +199,7 @@ export async function createIndex(client: SearchIndexClient, name: string): Prom suggesters: [ { name: "sg", + searchMode: "analyzingInfixMatching", sourceFields: ["description", "hotelName"] } ],
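
Illustrative sketch (not part of the patch above): the last hunk makes the test's suggester pass `searchMode` explicitly, and the regenerated public model accepts it on the object handed to `SearchIndexClient.createIndex`. The snippet below shows that call shape end to end; the endpoint, API key, and field list are placeholder assumptions, not values taken from this PR.

// Minimal sketch, assuming the @azure/search-documents surface shown in this patch.
// Endpoint, key, and fields are placeholders.
import { SearchIndexClient, AzureKeyCredential, SearchIndex } from "@azure/search-documents";

async function main(): Promise<void> {
  const client = new SearchIndexClient(
    "https://<service-name>.search.windows.net",
    new AzureKeyCredential("<admin-api-key>")
  );

  const index: SearchIndex = {
    name: "hotel-live-test1",
    fields: [
      { type: "Edm.String", name: "hotelId", key: true },
      { type: "Edm.String", name: "hotelName", searchable: true },
      { type: "Edm.String", name: "description", searchable: true }
    ],
    suggesters: [
      {
        name: "sg",
        // Passed explicitly, mirroring the change to createIndex() in setup.ts;
        // "analyzingInfixMatching" is the only supported suggester search mode.
        searchMode: "analyzingInfixMatching",
        sourceFields: ["description", "hotelName"]
      }
    ]
  };

  await client.createIndex(index);
}

main().catch(console.error);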