diff --git a/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml b/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml index d43aa914c7517..07f9e8ff671c0 100755 --- a/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml +++ b/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml @@ -471,7 +471,7 @@ the main ServiceBusClientBuilder. --> - + diff --git a/eng/jacoco-test-coverage/pom.xml b/eng/jacoco-test-coverage/pom.xml index 3bd1f3f041906..7ce6479abfa02 100644 --- a/eng/jacoco-test-coverage/pom.xml +++ b/eng/jacoco-test-coverage/pom.xml @@ -58,7 +58,7 @@ com.azure azure-ai-textanalytics - 5.1.4 + 5.1.5 com.azure diff --git a/eng/versioning/version_client.txt b/eng/versioning/version_client.txt index ee35a4a89cf57..870a950ef0540 100644 --- a/eng/versioning/version_client.txt +++ b/eng/versioning/version_client.txt @@ -42,7 +42,7 @@ com.azure:azure-ai-formrecognizer-perf;1.0.0-beta.1;1.0.0-beta.1 com.azure:azure-ai-documenttranslator;1.0.0-beta.1;1.0.0-beta.2 com.azure:azure-ai-metricsadvisor;1.0.4;1.1.0-beta.1 com.azure:azure-ai-metricsadvisor-perf;1.0.0-beta.1;1.0.0-beta.1 -com.azure:azure-ai-textanalytics;5.1.4;5.2.0-beta.3 +com.azure:azure-ai-textanalytics;5.1.4;5.1.5 com.azure:azure-ai-textanalytics-perf;1.0.0-beta.1;1.0.0-beta.1 com.azure:azure-analytics-purview-catalog;1.0.0-beta.2;1.0.0-beta.3 com.azure:azure-analytics-purview-scanning;1.0.0-beta.2;1.0.0-beta.3 diff --git a/sdk/textanalytics/azure-ai-textanalytics-perf/pom.xml b/sdk/textanalytics/azure-ai-textanalytics-perf/pom.xml index 629a9bf30bee5..6dc0896cfba67 100644 --- a/sdk/textanalytics/azure-ai-textanalytics-perf/pom.xml +++ b/sdk/textanalytics/azure-ai-textanalytics-perf/pom.xml @@ -27,7 +27,7 @@ com.azure azure-ai-textanalytics - 5.1.4 + 5.1.5 diff --git a/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/DetectLanguageTest.java 
b/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/DetectLanguageTest.java index c0bfc9c3b18f5..ece2b839f51d5 100644 --- a/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/DetectLanguageTest.java +++ b/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/DetectLanguageTest.java @@ -14,7 +14,7 @@ * Performs custom model recognition operations. */ public class DetectLanguageTest extends ServiceTest { - List documents = new ArrayList<>(); + final List documents = new ArrayList<>(); /** * The DetectLanguageTest class. diff --git a/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/core/package-info.java b/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/core/package-info.java new file mode 100644 index 0000000000000..a226416f50211 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/core/package-info.java @@ -0,0 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/** + * Package contains the core class shared in the Text Analytics performance tests. + */ +package com.azure.ai.textanalytics.perf.core; diff --git a/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/package-info.java b/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/package-info.java new file mode 100644 index 0000000000000..acbfd5185e1d3 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics-perf/src/main/java/com/azure/ai/textanalytics/perf/package-info.java @@ -0,0 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/** + * Package contains Text Analytics performance tests. 
+ */ +package com.azure.ai.textanalytics.perf; diff --git a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md index 4e56675a37711..0de8ef21e7e4f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md +++ b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md @@ -1,4 +1,12 @@ # Release History + +## 5.1.5 (2022-01-14) +### Other Changes + +#### Dependency Updates +- Updated `azure-core` from `1.22.0` to `1.24.1`. +- Updated `azure-core-http-netty` from `1.11.2` to `1.11.6`. + ## 5.1.4 (2021-11-11) ### Other Changes diff --git a/sdk/textanalytics/azure-ai-textanalytics/README.md b/sdk/textanalytics/azure-ai-textanalytics/README.md index 0403c6f15832f..145598bd8b161 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/README.md +++ b/sdk/textanalytics/azure-ai-textanalytics/README.md @@ -27,7 +27,7 @@ and includes six main functions: com.azure azure-ai-textanalytics - 5.1.4 + 5.1.5 ``` [//]: # ({x-version-update-end}) @@ -96,8 +96,7 @@ az cognitiveservices account keys list --resource-group -```java +```java readme-sample-createTextAnalyticsClientWithKeyCredential TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") @@ -105,8 +104,7 @@ TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() ``` The Azure Text Analytics client library provides a way to **rotate the existing key**. 
- -```java +```java readme-sample-rotatingAzureKeyCredential AzureKeyCredential credential = new AzureKeyCredential("{key}"); TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() .credential(credential) @@ -115,6 +113,7 @@ TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() credential.update("{new_key}"); ``` + #### Create a Text Analytics client with Azure Active Directory credential Azure SDK for Java supports an Azure Identity package, making it easy to get credentials from Microsoft identity platform. @@ -127,7 +126,7 @@ Authentication with AAD requires some initial setup: com.azure azure-identity - 1.4.1 + 1.4.3 ``` [//]: # ({x-version-update-end}) @@ -143,10 +142,9 @@ Authorization is easiest using [DefaultAzureCredential][wiki_identity]. It finds running environment. For more information about using Azure Active Directory authorization with Text Analytics, please refer to [the associated documentation][aad_authorization]. - -```java +```java readme-sample-createTextAnalyticsAsyncClientWithAAD TokenCredential defaultCredential = new DefaultAzureCredentialBuilder().build(); -TextAnalyticsAsyncClient textAnalyticsClient = new TextAnalyticsClientBuilder() +TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .endpoint("{endpoint}") .credential(defaultCredential) .buildAsyncClient(); @@ -206,33 +204,33 @@ The following sections provide several code snippets covering some of the most c Text analytics support both synchronous and asynchronous client creation by using `TextAnalyticsClientBuilder`, - -``` java +``` java readme-sample-createTextAnalyticsClientWithKeyCredential TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); ``` - -``` java -TextAnalyticsAsyncClient textAnalyticsClient = new TextAnalyticsClientBuilder() + +or + +``` java 
readme-sample-createTextAnalyticsAsyncClientWithKeyCredential +TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildAsyncClient(); ``` ### Analyze sentiment -Run a Text Analytics predictive model to identify the positive, negative, neutral or mixed sentiment contained in the +Run a Text Analytics predictive model to identify the positive, negative, neutral or mixed sentiment contained in the provided document or batch of documents. - - -```java +``` java readme-sample-analyzeSentiment String document = "The hotel was dark and unclean. I like microsoft."; DocumentSentiment documentSentiment = textAnalyticsClient.analyzeSentiment(document); System.out.printf("Analyzed document sentiment: %s.%n", documentSentiment.getSentiment()); documentSentiment.getSentences().forEach(sentenceSentiment -> System.out.printf("Analyzed sentence sentiment: %s.%n", sentenceSentiment.getSentiment())); ``` + For samples on using the production recommended option `AnalyzeSentimentBatch` see [here][analyze_sentiment_sample]. To get more granular information about the opinions related to aspects of a product/service, also knows as Aspect-based @@ -244,8 +242,7 @@ Please refer to the service documentation for a conceptual discussion of [sentim ### Detect language Run a Text Analytics predictive model to determine the language that the provided document or batch of documents are written in. - -```java +```java readme-sample-detectLanguages String document = "Bonjour tout le monde"; DetectedLanguage detectedLanguage = textAnalyticsClient.detectLanguage(document); System.out.printf("Detected language name: %s, ISO 6391 name: %s, confidence score: %f.%n", @@ -257,8 +254,7 @@ Please refer to the service documentation for a conceptual discussion of [langua ### Extract key phrases Run a model to identify a collection of significant phrases found in the provided document or batch of documents. 
- -```java +```java readme-sample-extractKeyPhrases String document = "My cat might need to see a veterinarian."; System.out.println("Extracted phrases:"); textAnalyticsClient.extractKeyPhrases(document).forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase)); @@ -271,8 +267,7 @@ Run a predictive model to identify a collection of named entities in the provide categorize those entities into categories such as person, location, or organization. For more information on available categories, see [Text Analytics Named Entity Categories][named_entities_categories]. - -```java +```java readme-sample-recognizeEntity String document = "Satya Nadella is the CEO of Microsoft"; textAnalyticsClient.recognizeEntities(document).forEach(entity -> System.out.printf("Recognized entity: %s, category: %s, subcategory: %s, confidence score: %f.%n", @@ -287,8 +282,7 @@ document. It recognizes and categorizes PII entities in its input text, such as Social Security Numbers, bank account information, credit card numbers, and more. This endpoint is only supported for API versions v3.1-preview.1 and above. - -```java +```java readme-sample-recognizePiiEntity String document = "My SSN is 859-98-0987"; PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities(document); System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); @@ -305,9 +299,7 @@ Please refer to the service documentation for [supported PII entity types][pii_e Run a predictive model to identify a collection of entities found in the provided document or batch of documents, and include information linking the entities to their corresponding entries in a well-known knowledge base. 
- - -```java +```java readme-sample-recognizeLinkedEntity String document = "Old Faithful is a geyser at Yellowstone Park."; textAnalyticsClient.recognizeLinkedEntities(document).forEach(linkedEntity -> { System.out.println("Linked Entities:"); @@ -324,8 +316,7 @@ Please refer to the service documentation for a conceptual discussion of [entity Text Analytics for health is a containerized service that extracts and labels relevant medical information from unstructured texts such as doctor's notes, discharge summaries, clinical documents, and electronic health records. For more information see [How to: Use Text Analytics for health][healthcare]. - -```java +```java readme-sample-recognizeHealthcareEntities List documents = Arrays.asList(new TextDocumentInput("0", "RECORD #333582770390100 | MH | 85986313 | | 054351 | 2/14/2001 12:00:00 AM | " + "CORONARY ARTERY DISEASE | Signed | DIS | Admission Date: 5/22/2001 " @@ -377,60 +368,70 @@ syncPoller.getFinalResult().forEach( ``` ### Analyze multiple actions -The `Analyze` functionality allows to choose which of the supported Text Analytics features to execute in the same -set of documents. Currently, the supported features are: `entity recognition`, `linked entity recognition`, -`Personally Identifiable Information (PII) entity recognition`, `key phrase extraction`, and `sentiment analysis`. - -```java -List documents = Arrays.asList( - new TextDocumentInput("0", - "We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore" - + " the spot! They provide marvelous food and they have a great menu. The chief cook happens to be" - + " the owner (I think his name is John Doe) and he is super nice, coming out of the kitchen and " - + "greeted us all. We enjoyed very much dining in the place! The Sirloin steak I ordered was tender" - + " and juicy, and the place was impeccably clean. 
You can even pre-order from their online menu at" - + " www.contososteakhouse.com, call 312-555-0176 or send email to order@contososteakhouse.com! The" - + " only complaint I have is the food didn't come fast enough. Overall I highly recommend it!") -); +The `Analyze` functionality allows choosing which of the supported Text Analytics features to execute in the same +set of documents. Currently, the supported features are: -SyncPoller syncPoller = - textAnalyticsClient.beginAnalyzeActions(documents, - new TextAnalyticsActions().setDisplayName("{tasks_display_name}") - .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()) - .setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction()), - new AnalyzeActionsOptions().setIncludeStatistics(false), - Context.NONE); -syncPoller.waitForCompletion(); -syncPoller.getFinalResult().forEach(analyzeActionsResult -> { - System.out.println("Key phrases extraction action results:"); - analyzeActionsResult.getExtractKeyPhrasesResults().forEach(actionResult -> { - AtomicInteger counter = new AtomicInteger(); - if (!actionResult.isError()) { - for (ExtractKeyPhraseResult extractKeyPhraseResult : actionResult.getDocumentsResults()) { - System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); - System.out.println("Extracted phrases:"); - extractKeyPhraseResult.getKeyPhrases() - .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases)); +- Entities Recognition +- PII Entities Recognition +- Linked Entity Recognition +- Key Phrase Extraction +- Sentiment Analysis +- Extractive Summarization (see sample [here][extractive_summarization_sample]) +- Custom Entity Recognition (see sample [here][custom_entities_sample]) +- Custom Single Category Classification (see sample [here][custom_single_classification_sample]) +- Custom Multi Category Classification (see sample [here][custom_multi_classification_sample]) + +```java readme-sample-analyzeActions + List documents = Arrays.asList( + new TextDocumentInput("0", + 
"We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore" + + " the spot! They provide marvelous food and they have a great menu. The chief cook happens to be" + + " the owner (I think his name is John Doe) and he is super nice, coming out of the kitchen and " + + "greeted us all. We enjoyed very much dining in the place! The Sirloin steak I ordered was tender" + + " and juicy, and the place was impeccably clean. You can even pre-order from their online menu at" + + " www.contososteakhouse.com, call 312-555-0176 or send email to order@contososteakhouse.com! The" + + " only complaint I have is the food didn't come fast enough. Overall I highly recommend it!") + ); + + SyncPoller syncPoller = + textAnalyticsClient.beginAnalyzeActions(documents, + new TextAnalyticsActions().setDisplayName("{tasks_display_name}") + .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()) + .setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction()), + new AnalyzeActionsOptions().setIncludeStatistics(false), + Context.NONE); + syncPoller.waitForCompletion(); + syncPoller.getFinalResult().forEach(analyzeActionsResult -> { + System.out.println("Key phrases extraction action results:"); + analyzeActionsResult.getExtractKeyPhrasesResults().forEach(actionResult -> { + AtomicInteger counter = new AtomicInteger(); + if (!actionResult.isError()) { + for (ExtractKeyPhraseResult extractKeyPhraseResult : actionResult.getDocumentsResults()) { + System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); + System.out.println("Extracted phrases:"); + extractKeyPhraseResult.getKeyPhrases() + .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases)); + } } - } - }); - System.out.println("PII entities recognition action results:"); - analyzeActionsResult.getRecognizePiiEntitiesResults().forEach(actionResult -> { - AtomicInteger counter = new AtomicInteger(); - if (!actionResult.isError()) { - for (RecognizePiiEntitiesResult 
entitiesResult : actionResult.getDocumentsResults()) { - System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); - PiiEntityCollection piiEntityCollection = entitiesResult.getEntities(); - System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); - piiEntityCollection.forEach(entity -> System.out.printf( - "Recognized Personally Identifiable Information entity: %s, entity category: %s, " - + "entity subcategory: %s, offset: %s, confidence score: %f.%n", - entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getOffset(), - entity.getConfidenceScore())); + }); + System.out.println("PII entities recognition action results:"); + analyzeActionsResult.getRecognizePiiEntitiesResults().forEach(actionResult -> { + AtomicInteger counter = new AtomicInteger(); + if (!actionResult.isError()) { + for (RecognizePiiEntitiesResult entitiesResult : actionResult.getDocumentsResults()) { + System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); + PiiEntityCollection piiEntityCollection = entitiesResult.getEntities(); + System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); + piiEntityCollection.forEach(entity -> System.out.printf( + "Recognized Personally Identifiable Information entity: %s, entity category: %s, " + + "entity subcategory: %s, offset: %s, confidence score: %f.%n", + entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getOffset(), + entity.getConfidenceScore())); + } } - } + }); }); -}); +} ``` For more examples, such as asynchronous samples, refer to [here][samples_readme]. @@ -440,8 +441,7 @@ Text Analytics clients raise exceptions. For example, if you try to detect the l document IDs, `400` error is return that indicating bad request. In the following code snippet, the error is handled gracefully by catching the exception and display the additional information about the error. 
- -```java +```java readme-sample-handlingException List documents = Arrays.asList( new DetectLanguageInput("1", "This is written in English.", "us"), new DetectLanguageInput("1", "Este es un documento escrito en Español.", "es") diff --git a/sdk/textanalytics/azure-ai-textanalytics/pom.xml b/sdk/textanalytics/azure-ai-textanalytics/pom.xml index b1d01935d7bf5..f85001545aac6 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/pom.xml +++ b/sdk/textanalytics/azure-ai-textanalytics/pom.xml @@ -13,7 +13,7 @@ com.azure azure-ai-textanalytics - 5.1.4 + 5.1.5 Microsoft Azure client library for Text Analytics This package contains the Microsoft Azure Cognitive Services Text Analytics SDK. @@ -32,16 +32,28 @@ HEAD + + + + --add-exports com.azure.core/com.azure.core.implementation.http=ALL-UNNAMED + --add-opens com.azure.ai.textanalytics/com.azure.ai.textanalytics=ALL-UNNAMED + --add-exports com.azure.core/com.azure.core.implementation.http=ALL-UNNAMED + + false + + + + com.azure azure-core - 1.22.0 + 1.24.1 com.azure azure-core-http-netty - 1.11.2 + 1.11.6 + 1.7.7 test com.azure azure-core-http-okhttp - 1.7.5 + 1.7.8 test org.junit.jupiter junit-jupiter-api - 5.7.2 + 5.8.2 test org.junit.jupiter junit-jupiter-engine - 5.7.2 + 5.8.2 test org.junit.jupiter junit-jupiter-params - 5.7.2 + 5.8.2 test com.azure azure-identity - 1.4.1 + 1.4.3 test org.mockito mockito-core - 3.12.4 + 4.0.0 test diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java index 738956414e637..2284fbb1eb90f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java @@ -58,7 +58,14 @@ * key phrases extraction, and sentiment analysis of a 
document or a list of documents. * *

Instantiating an asynchronous Text Analytics Client

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.instantiation} + * + *
+ * TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder()
+ *     .credential(new AzureKeyCredential("{key}"))
+ *     .endpoint("{endpoint}")
+ *     .buildAsyncClient();
+ * 
+ * * *

View {@link TextAnalyticsClientBuilder} for additional ways to construct the client.

* @@ -72,7 +79,7 @@ public final class TextAnalyticsAsyncClient { private final String defaultCountryHint; private final String defaultLanguage; - // Please see here + // Please see here // for more information on Azure resource provider namespaces. static final String COGNITIVE_TRACING_NAMESPACE_VALUE = "Microsoft.CognitiveServices"; final DetectLanguageAsyncClient detectLanguageAsyncClient; @@ -139,7 +146,14 @@ public String getDefaultLanguage() { *

Detects language in a document. Subscribes to the call asynchronously and prints out the detected language * details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguage#string} + * + *
+     * String document = "Bonjour tout le monde";
+     * textAnalyticsAsyncClient.detectLanguage(document).subscribe(detectedLanguage ->
+     *     System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n",
+     *         detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore()));
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -163,12 +177,20 @@ public Mono detectLanguage(String document) { *

Detects language with http response in a document with a provided country hint. Subscribes to the call * asynchronously and prints out the detected language details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguage#string-string} + * + *
+     * String document = "This text is in English";
+     * String countryHint = "US";
+     * textAnalyticsAsyncClient.detectLanguage(document, countryHint).subscribe(detectedLanguage ->
+     *     System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n",
+     *         detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore()));
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see * data limits. - * @param countryHint Accepts two letter country codes specified by ISO 3166-1 alpha-2. Defaults to "US" if not + * @param countryHint Accepts 2-letter country codes specified by ISO 3166-1 alpha-2. Defaults to "US" if not * specified. To remove this behavior you can reset this parameter by setting this value to empty string * {@code countryHint} = "" or "none". * @@ -207,7 +229,28 @@ public Mono detectLanguage(String document, String countryHint * Subscribes to the call asynchronously and prints out the detected language details when a response is received. *

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "This is written in English",
+     *     "Este es un documento  escrito en Español."
+     * );
+     * textAnalyticsAsyncClient.detectLanguageBatch(documents, "US", null).subscribe(
+     *     batchResult -> {
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *         // Batch result of languages
+     *         for (DetectLanguageResult detectLanguageResult : batchResult) {
+     *             DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
+     *             System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n",
+     *                 detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
+     *                 detectedLanguage.getConfidenceScore());
+     *         }
+     *     });
+     * 
+ * * * @param documents The list of documents to detect languages for. * For text length limits, maximum batch size, and supported text encoding, see @@ -226,7 +269,7 @@ public Mono detectLanguage(String document, String countryHint public Mono detectLanguageBatch( Iterable documents, String countryHint, TextAnalyticsRequestOptions options) { - if (countryHint != null && countryHint.equalsIgnoreCase("none")) { + if (countryHint != null && "none".equalsIgnoreCase(countryHint)) { countryHint = ""; } final String finalCountryHint = countryHint; @@ -246,7 +289,35 @@ public Mono detectLanguageBatch( *

Detects language in a batch of {@link DetectLanguageInput document} with provided request options. Subscribes * to the call asynchronously and prints out the detected language details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions} + * + *
+     * List<DetectLanguageInput> detectLanguageInputs1 = Arrays.asList(
+     *     new DetectLanguageInput("1", "This is written in English.", "US"),
+     *     new DetectLanguageInput("2", "Este es un documento  escrito en Español.", "ES")
+     * );
+     *
+     * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+     *
+     * textAnalyticsAsyncClient.detectLanguageBatchWithResponse(detectLanguageInputs1, requestOptions)
+     *     .subscribe(response -> {
+     *         // Response's status code
+     *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     *
+     *         DetectLanguageResultCollection resultCollection = response.getValue();
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *         // Batch result of languages
+     *         for (DetectLanguageResult detectLanguageResult : resultCollection) {
+     *             DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
+     *             System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n",
+     *                 detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
+     *                 detectedLanguage.getConfidenceScore());
+     *         }
+     *     });
+     * 
+ * * * @param documents The list of {@link DetectLanguageInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -280,7 +351,17 @@ public Mono> detectLanguageBatchWithRes *

Recognize entities in a document. Subscribes to the call asynchronously and prints out the recognized entity * details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeEntities#string} + * + *
+     * String document = "Satya Nadella is the CEO of Microsoft";
+     * textAnalyticsAsyncClient.recognizeEntities(document)
+     *     .subscribe(entityCollection -> entityCollection.forEach(entity ->
+     *         System.out.printf("Recognized categorized entity: %s, category: %s, confidence score: %f.%n",
+     *         entity.getText(),
+     *         entity.getCategory(),
+     *         entity.getConfidenceScore())));
+     * 
+ * * * @param document The document to recognize entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -306,7 +387,17 @@ public Mono recognizeEntities(String document) { *

Recognize entities in a document with provided language code. Subscribes to the call asynchronously and prints * out the entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeEntities#string-string} + * + *
+     * String document = "Satya Nadella is the CEO of Microsoft";
+     * textAnalyticsAsyncClient.recognizeEntities(document, "en")
+     *     .subscribe(entityCollection -> entityCollection.forEach(entity ->
+     *         System.out.printf("Recognized categorized entity: %s, category: %s, confidence score: %f.%n",
+     *         entity.getText(),
+     *         entity.getCategory(),
+     *         entity.getConfidenceScore())));
+     * 
+ * * * @param document the text to recognize entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -332,7 +423,25 @@ public Mono recognizeEntities(String document, Stri *

Recognize entities in a document with the provided language code. Subscribes to the call asynchronously and * prints out the entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "I had a wonderful trip to Seattle last week.", "I work at Microsoft.");
+     *
+     * textAnalyticsAsyncClient.recognizeEntitiesBatch(documents, "en", null)
+     *     .subscribe(batchResult -> {
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *         // Batch Result of entities
+     *         batchResult.forEach(recognizeEntitiesResult ->
+     *             recognizeEntitiesResult.getEntities().forEach(entity -> System.out.printf(
+     *                 "Recognized categorized entity: %s, category: %s, confidence score: %f.%n",
+     *                     entity.getText(), entity.getCategory(), entity.getConfidenceScore())));
+     *     });
+     * 
+ * * * @param documents A list of documents to recognize entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -369,7 +478,34 @@ public Mono recognizeEntitiesBatch( *

Recognize entities in a list of {@link TextDocumentInput document}. Subscribes to the call asynchronously * and prints out the entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-TextAnalyticsRequestOptions} + * + *
+     * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+     *     new TextDocumentInput("0", "I had a wonderful trip to Seattle last week.").setLanguage("en"),
+     *     new TextDocumentInput("1", "I work at Microsoft.").setLanguage("en"));
+     *
+     * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+     *
+     * textAnalyticsAsyncClient.recognizeEntitiesBatchWithResponse(textDocumentInputs1, requestOptions)
+     *     .subscribe(response -> {
+     *         // Response's status code
+     *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     *         RecognizeEntitiesResultCollection resultCollection = response.getValue();
+     *
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     *         resultCollection.forEach(recognizeEntitiesResult ->
+     *             recognizeEntitiesResult.getEntities().forEach(entity -> System.out.printf(
+     *                 "Recognized categorized entity: %s, category: %s, confidence score: %f.%n",
+     *                 entity.getText(),
+     *                 entity.getCategory(),
+     *                 entity.getConfidenceScore())));
+     *     });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to recognize entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -402,7 +538,18 @@ public Mono> recognizeEntitiesBatchW * Subscribes to the call asynchronously and prints out the recognized entity details when a response is * received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntities#string} + * + *
+     * String document = "My SSN is 859-98-0987";
+     * textAnalyticsAsyncClient.recognizePiiEntities(document).subscribe(piiEntityCollection -> {
+     *     System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     *     piiEntityCollection.forEach(entity -> System.out.printf(
+     *         "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *             + " entity subcategory: %s, confidence score: %f.%n",
+     *         entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+     * });
+     * 
+ * * * @param document The document to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see @@ -429,7 +576,19 @@ public Mono recognizePiiEntities(String document) { *

Recognize the PII entities details in a document with provided language code. * Subscribes to the call asynchronously and prints out the entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntities#string-string} + * + *
+     * String document = "My SSN is 859-98-0987";
+     * textAnalyticsAsyncClient.recognizePiiEntities(document, "en")
+     *     .subscribe(piiEntityCollection -> {
+     *         System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     *         piiEntityCollection.forEach(entity -> System.out.printf(
+     *             "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *                 + " entity subcategory: %s, confidence score: %f.%n",
+     *             entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+     *     });
+     * 
+ * * * @param document the text to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see @@ -458,12 +617,25 @@ public Mono recognizePiiEntities(String document, String la * {@link RecognizePiiEntitiesOptions}. * Subscribes to the call asynchronously and prints out the entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntities#string-string-RecognizePiiEntitiesOptions} + * + *
+     * String document = "My SSN is 859-98-0987";
+     * textAnalyticsAsyncClient.recognizePiiEntities(document, "en",
+     *     new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION))
+     *     .subscribe(piiEntityCollection -> {
+     *         System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     *         piiEntityCollection.forEach(entity -> System.out.printf(
+     *             "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *                 + " entity subcategory: %s, confidence score: %f.%n",
+     *             entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+     *     });
+     * 
+ * * * @param document the text to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see * data limits. - * @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as default. + * @param language The 2-letter ISO 639-1 representation of language. If not set, uses "en" for English as default. * @param options The additional configurable {@link RecognizePiiEntitiesOptions options} that may be passed when * recognizing PII entities. * @@ -486,7 +658,35 @@ public Mono recognizePiiEntities(String document, String la *

Recognize Personally Identifiable Information entities in a document with the provided language code. * Subscribes to the call asynchronously and prints out the entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntitiesBatch#Iterable-String-RecognizePiiEntitiesOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "My SSN is 859-98-0987.",
+     *     "Visa card 0111 1111 1111 1111."
+     * );
+     *
+     * // Show statistics and model version
+     * RecognizePiiEntitiesOptions requestOptions = new RecognizePiiEntitiesOptions().setIncludeStatistics(true)
+     *     .setModelVersion("latest");
+     *
+     * textAnalyticsAsyncClient.recognizePiiEntitiesBatch(documents, "en", requestOptions)
+     *     .subscribe(piiEntitiesResults -> {
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = piiEntitiesResults.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     *         piiEntitiesResults.forEach(recognizePiiEntitiesResult -> {
+     *             PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities();
+     *             System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     *             piiEntityCollection.forEach(entity -> System.out.printf(
+     *                 "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *                     + " entity subcategory: %s, confidence score: %f.%n",
+     *                 entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+     *         });
+     *     });
+     * 
+ * * * @param documents A list of documents to recognize PII entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -525,7 +725,35 @@ public Mono recognizePiiEntitiesBatch( * with provided request options. * Subscribes to the call asynchronously and prints out the entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntitiesBatch#Iterable-RecognizePiiEntitiesOptions} + * + *
+     * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+     *     new TextDocumentInput("0", "My SSN is 859-98-0987."),
+     *     new TextDocumentInput("1", "Visa card 0111 1111 1111 1111."));
+     *
+     * // Show statistics and model version
+     * RecognizePiiEntitiesOptions requestOptions = new RecognizePiiEntitiesOptions().setIncludeStatistics(true)
+     *     .setModelVersion("latest");
+     *
+     * textAnalyticsAsyncClient.recognizePiiEntitiesBatchWithResponse(textDocumentInputs1, requestOptions)
+     *     .subscribe(response -> {
+     *         RecognizePiiEntitiesResultCollection piiEntitiesResults = response.getValue();
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = piiEntitiesResults.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     *         piiEntitiesResults.forEach(recognizePiiEntitiesResult -> {
+     *             PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities();
+     *             System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     *             piiEntityCollection.forEach(entity -> System.out.printf(
+     *                 "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *                     + " entity subcategory: %s, confidence score: %f.%n",
+     *                 entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+     *         });
+     *     });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to recognize PII entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -557,7 +785,21 @@ public Mono> recognizePiiEntities *

Recognize linked entities in a document. Subscribes to the call asynchronously and prints out the * entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntities#string} + * + *
+     * String document = "Old Faithful is a geyser at Yellowstone Park.";
+     * textAnalyticsAsyncClient.recognizeLinkedEntities(document).subscribe(
+     *     linkedEntityCollection -> linkedEntityCollection.forEach(linkedEntity -> {
+     *         System.out.println("Linked Entities:");
+     *         System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+     *             linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+     *             linkedEntity.getDataSource());
+     *         linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+     *             "Matched entity: %s, confidence score: %f.%n",
+     *             entityMatch.getText(), entityMatch.getConfidenceScore()));
+     *     }));
+     * 
+ * * * @param document The document to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -580,7 +822,21 @@ public Mono recognizeLinkedEntities(String document) { *

Recognize linked entities in a text with provided language code. Subscribes to the call asynchronously * and prints out the entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntities#string-string} + * + *
+     * String document = "Old Faithful is a geyser at Yellowstone Park.";
+     * textAnalyticsAsyncClient.recognizeLinkedEntities(document, "en").subscribe(
+     *     linkedEntityCollection -> linkedEntityCollection.forEach(linkedEntity -> {
+     *         System.out.println("Linked Entities:");
+     *         System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+     *             linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+     *             linkedEntity.getDataSource());
+     *         linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+     *             "Matched entity: %s, confidence score: %f.%n",
+     *             entityMatch.getText(), entityMatch.getConfidenceScore()));
+     *     }));
+     * 
+ * * * @param document The document to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -607,7 +863,33 @@ public Mono recognizeLinkedEntities(String document, Str *

Recognize linked entities in a list of documents with provided language code. Subscribes to the call * asynchronously and prints out the entity details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "Old Faithful is a geyser at Yellowstone Park.",
+     *     "Mount Shasta has lenticular clouds."
+     * );
+     *
+     * textAnalyticsAsyncClient.recognizeLinkedEntitiesBatch(documents, "en", null)
+     *     .subscribe(batchResult -> {
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     *         batchResult.forEach(recognizeLinkedEntitiesResult ->
+     *             recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> {
+     *                 System.out.println("Linked Entities:");
+     *                 System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+     *                     linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+     *                     linkedEntity.getDataSource());
+     *                 linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+     *                     "Matched entity: %s, confidence score: %f.%n",
+     *                     entityMatch.getText(), entityMatch.getConfidenceScore()));
+     *             }));
+     *     });
+     * 
+ * * * @param documents A list of documents to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -645,7 +927,38 @@ public Mono recognizeLinkedEntitiesBatc * show statistics. Subscribes to the call asynchronously and prints out the entity details when a response is * received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions} + * + *
+     * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+     *     new TextDocumentInput("0", "Old Faithful is a geyser at Yellowstone Park.").setLanguage("en"),
+     *     new TextDocumentInput("1", "Mount Shasta has lenticular clouds.").setLanguage("en"));
+     *
+     * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+     *
+     * textAnalyticsAsyncClient.recognizeLinkedEntitiesBatchWithResponse(textDocumentInputs1, requestOptions)
+     *     .subscribe(response -> {
+     *         // Response's status code
+     *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     *         RecognizeLinkedEntitiesResultCollection resultCollection = response.getValue();
+     *
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     *         resultCollection.forEach(recognizeLinkedEntitiesResult ->
+     *             recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> {
+     *                 System.out.println("Linked Entities:");
+     *                 System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+     *                     linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+     *                     linkedEntity.getDataSource());
+     *                 linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+     *                     "Matched entity: %s, confidence score: %.2f.%n",
+     *                     entityMatch.getText(), entityMatch.getConfidenceScore()));
+     *             }));
+     *     });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -676,7 +989,13 @@ public Mono> recognizeLinkedEn *

Extract key phrases in a document. Subscribes to the call asynchronously and prints out the * key phrases when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrases#string} + * + *
+     * System.out.println("Extracted phrases:");
+     * textAnalyticsAsyncClient.extractKeyPhrases("Bonjour tout le monde").subscribe(keyPhrase ->
+     *     System.out.printf("%s.%n", keyPhrase));
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -700,7 +1019,13 @@ public Mono extractKeyPhrases(String document) { *

Extract key phrases in a document with a provided language code. Subscribes to the call asynchronously and * prints out the key phrases when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrases#string-string} + * + *
+     * System.out.println("Extracted phrases:");
+     * textAnalyticsAsyncClient.extractKeyPhrases("Bonjour tout le monde", "fr")
+     *     .subscribe(keyPhrase -> System.out.printf("%s.%n", keyPhrase));
+     * 
+ * * * @param document The document to be analyzed. For text length limits, maximum batch size, and supported text * encoding, see @@ -727,7 +1052,26 @@ public Mono extractKeyPhrases(String document, String lang *

Extract key phrases in a list of documents with a provided language and request options. Subscribes to the * call asynchronously and prints out the key phrases when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "Hello world. This is some input text that I love.",
+     *     "Bonjour tout le monde");
+     *
+     * textAnalyticsAsyncClient.extractKeyPhrasesBatch(documents, "en", null).subscribe(
+     *     extractKeyPhraseResults -> {
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = extractKeyPhraseResults.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     *         extractKeyPhraseResults.forEach(extractKeyPhraseResult -> {
+     *             System.out.println("Extracted phrases:");
+     *             extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase));
+     *         });
+     *     });
+     * 
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -764,7 +1108,34 @@ public Mono extractKeyPhrasesBatch( *

Extract key phrases in a list of {@link TextDocumentInput document} with provided request options. * Subscribes to the call asynchronously and prints out the key phrases when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions} + * + *
+     * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+     *     new TextDocumentInput("0", "I had a wonderful trip to Seattle last week.").setLanguage("en"),
+     *     new TextDocumentInput("1", "I work at Microsoft.").setLanguage("en"));
+     *
+     * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+     *
+     * textAnalyticsAsyncClient.extractKeyPhrasesBatchWithResponse(textDocumentInputs1, requestOptions)
+     *     .subscribe(response -> {
+     *         // Response's status code
+     *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     *         ExtractKeyPhrasesResultCollection resultCollection = response.getValue();
+     *
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     *         for (ExtractKeyPhraseResult extractKeyPhraseResult : resultCollection) {
+     *             System.out.println("Extracted phrases:");
+     *             for (String keyPhrase : extractKeyPhraseResult.getKeyPhrases()) {
+     *                 System.out.printf("%s.%n", keyPhrase);
+     *             }
+     *         }
+     *     });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -796,7 +1167,24 @@ public Mono> extractKeyPhrasesBatchW *

Analyze the sentiment in a document. Subscribes to the call asynchronously and prints out the * sentiment details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#string} + * + *
+     * String document = "The hotel was dark and unclean.";
+     * textAnalyticsAsyncClient.analyzeSentiment(document).subscribe(documentSentiment -> {
+     *     System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment());
+     *
+     *     for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
+     *         System.out.printf(
+     *             "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, "
+     *                 + "negative score: %.2f.%n",
+     *             sentenceSentiment.getSentiment(),
+     *             sentenceSentiment.getConfidenceScores().getPositive(),
+     *             sentenceSentiment.getConfidenceScores().getNeutral(),
+     *             sentenceSentiment.getConfidenceScores().getNegative());
+     *     }
+     * });
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -820,7 +1208,23 @@ public Mono analyzeSentiment(String document) { *

Analyze the sentiments in a document with a provided language representation. Subscribes to the call * asynchronously and prints out the sentiment details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#String-String} + * + *
+     * String document = "The hotel was dark and unclean.";
+     * textAnalyticsAsyncClient.analyzeSentiment(document, "en")
+     *     .subscribe(documentSentiment -> {
+     *         System.out.printf("Recognized sentiment label: %s.%n", documentSentiment.getSentiment());
+     *         for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
+     *             System.out.printf("Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, "
+     *                     + "negative score: %.2f.%n",
+     *                 sentenceSentiment.getSentiment(),
+     *                 sentenceSentiment.getConfidenceScores().getPositive(),
+     *                 sentenceSentiment.getConfidenceScores().getNeutral(),
+     *                 sentenceSentiment.getConfidenceScores().getNegative());
+     *         }
+     *     });
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -850,7 +1254,27 @@ public Mono analyzeSentiment(String document, String language * representation and {@link AnalyzeSentimentOptions} options. Subscribes to the call asynchronously and prints * out the sentiment and sentence opinions details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#String-String-AnalyzeSentimentOptions} + * + *
+     * textAnalyticsAsyncClient.analyzeSentiment("The hotel was dark and unclean.", "en",
+     *     new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
+     *     .subscribe(documentSentiment -> {
+     *         for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
+     *             System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
+     *             sentenceSentiment.getOpinions().forEach(opinion -> {
+     *                 TargetSentiment targetSentiment = opinion.getTarget();
+     *                 System.out.printf("\tTarget sentiment: %s, target text: %s%n",
+     *                     targetSentiment.getSentiment(), targetSentiment.getText());
+     *                 for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
+     *                     System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
+     *                         assessmentSentiment.getSentiment(), assessmentSentiment.getText(),
+     *                         assessmentSentiment.isNegated());
+     *                 }
+     *             });
+     *         }
+     *     });
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -894,7 +1318,36 @@ public Mono analyzeSentiment(String document, String language *

Analyze sentiment in a list of documents with provided language code and request options. Subscribes to the * call asynchronously and prints out the sentiment details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "The hotel was dark and unclean.",
+     *     "The restaurant had amazing gnocchi."
+     * );
+     *
+     * textAnalyticsAsyncClient.analyzeSentimentBatch(documents, "en",
+     *     new TextAnalyticsRequestOptions().setIncludeStatistics(true)).subscribe(
+     *         response -> {
+     *             // Batch statistics
+     *             TextDocumentBatchStatistics batchStatistics = response.getStatistics();
+     *             System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *                 batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     *             response.forEach(analyzeSentimentResult -> {
+     *                 System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+     *                 DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+     *                 System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment());
+     *                 documentSentiment.getSentences().forEach(sentenceSentiment ->
+     *                     System.out.printf("Recognized sentence sentiment: %s, positive score: %.2f, "
+     *                             + "neutral score: %.2f, negative score: %.2f.%n",
+     *                         sentenceSentiment.getSentiment(),
+     *                         sentenceSentiment.getConfidenceScores().getPositive(),
+     *                         sentenceSentiment.getConfidenceScores().getNeutral(),
+     *                         sentenceSentiment.getConfidenceScores().getNegative()));
+     *             });
+     *         });
+     * 
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -932,7 +1385,51 @@ public Mono analyzeSentimentBatch( * representation and {@link AnalyzeSentimentOptions} options. Subscribes to the call asynchronously and prints out * the sentiment and sentence opinions details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-String-AnalyzeSentimentOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "The hotel was dark and unclean.",
+     *     "The restaurant had amazing gnocchi."
+     * );
+     *
+     * textAnalyticsAsyncClient.analyzeSentimentBatch(documents, "en",
+     *     new AnalyzeSentimentOptions().setIncludeOpinionMining(true)).subscribe(
+     *         response -> {
+     *             response.forEach(analyzeSentimentResult -> {
+     *                 System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+     *                 DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+     *                 documentSentiment.getSentences().forEach(sentenceSentiment -> {
+     *                     System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
+     *                     sentenceSentiment.getOpinions().forEach(opinion -> {
+     *                         TargetSentiment targetSentiment = opinion.getTarget();
+     *                         System.out.printf("\t\tTarget sentiment: %s, target text: %s%n",
+     *                             targetSentiment.getSentiment(), targetSentiment.getText());
+     *                         for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
+     *                             System.out.printf(
+     *                                 "\t\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
+     *                                 assessmentSentiment.getSentiment(), assessmentSentiment.getText(),
+     *                                 assessmentSentiment.isNegated());
+     *                         }
+     *                     });
+     *                 });
+     *             });
+     *         });
+     * 
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -969,7 +1466,41 @@ public Mono analyzeSentimentBatch(IterableAnalyze sentiment in a list of {@link TextDocumentInput document} with provided request options. Subscribes * to the call asynchronously and prints out the sentiment details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions} + * + *
+     * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+     *     new TextDocumentInput("0", "The hotel was dark and unclean.").setLanguage("en"),
+     *     new TextDocumentInput("1", "The restaurant had amazing gnocchi.").setLanguage("en"));
+     *
+     * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+     *
+     * textAnalyticsAsyncClient.analyzeSentimentBatchWithResponse(textDocumentInputs1, requestOptions)
+     *     .subscribe(response -> {
+     *         // Response's status code
+     *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     *         AnalyzeSentimentResultCollection resultCollection = response.getValue();
+     *
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(),
+     *             batchStatistics.getValidDocumentCount());
+     *
+     *         resultCollection.forEach(analyzeSentimentResult -> {
+     *             System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+     *             DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+     *             System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment());
+     *             documentSentiment.getSentences().forEach(sentenceSentiment ->
+     *                 System.out.printf("Recognized sentence sentiment: %s, positive score: %.2f, "
+     *                         + "neutral score: %.2f, negative score: %.2f.%n",
+     *                     sentenceSentiment.getSentiment(),
+     *                     sentenceSentiment.getConfidenceScores().getPositive(),
+     *                     sentenceSentiment.getConfidenceScores().getNeutral(),
+     *                     sentenceSentiment.getConfidenceScores().getNegative()));
+     *         });
+     *     });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -1005,7 +1536,47 @@ public Mono> analyzeSentimentBatchWit * {@link TextDocumentInput document} with provided {@link AnalyzeSentimentOptions} options. Subscribes to the call * asynchronously and prints out the sentiment and sentence opinions details when a response is received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-AnalyzeSentimentOptions} + * + *
+     * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+     *     new TextDocumentInput("0", "The hotel was dark and unclean.").setLanguage("en"),
+     *     new TextDocumentInput("1", "The restaurant had amazing gnocchi.").setLanguage("en"));
+     *
+     * AnalyzeSentimentOptions options = new AnalyzeSentimentOptions()
+     *     .setIncludeOpinionMining(true).setIncludeStatistics(true);
+     * textAnalyticsAsyncClient.analyzeSentimentBatchWithResponse(textDocumentInputs1, options)
+     *     .subscribe(response -> {
+     *         // Response's status code
+     *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     *         AnalyzeSentimentResultCollection resultCollection = response.getValue();
+     *
+     *         // Batch statistics
+     *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+     *             batchStatistics.getTransactionCount(),
+     *             batchStatistics.getValidDocumentCount());
+     *
+     *         resultCollection.forEach(analyzeSentimentResult -> {
+     *             System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+     *             DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+     *             documentSentiment.getSentences().forEach(sentenceSentiment -> {
+     *                 System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
+     *                 sentenceSentiment.getOpinions().forEach(opinion -> {
+     *                     TargetSentiment targetSentiment = opinion.getTarget();
+     *                     System.out.printf("\t\tTarget sentiment: %s, target text: %s%n",
+     *                         targetSentiment.getSentiment(), targetSentiment.getText());
+     *                     for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
+     *                         System.out.printf(
+     *                             "\t\t\t'%s' assessment sentiment because of \"%s\". Is the assessment negated: %s.%n",
+     *                             assessmentSentiment.getSentiment(), assessmentSentiment.getText(),
+     *                             assessmentSentiment.isNegated());
+     *                     }
+     *                 });
+     *             });
+     *         });
+     *     });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -1037,7 +1608,7 @@ public Mono> analyzeSentimentBatchWit * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see * data limits. - * @param language The 2 letter ISO 639-1 representation of language for the documents. If not set, uses "en" for + * @param language The 2-letter ISO 639-1 representation of language for the documents. If not set, uses "en" for * English as default. * @param options The additional configurable {@link AnalyzeHealthcareEntitiesOptions options} that may be passed * when analyzing healthcare entities. @@ -1077,7 +1648,74 @@ public Mono> analyzeSentimentBatchWit * show statistics. Subscribes to the call asynchronously and prints out the entity details when a response is * received.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeHealthcareEntities#Iterable-AnalyzeHealthcareEntitiesOptions} + * + *
+     * List<TextDocumentInput> documents = new ArrayList<>();
+     * for (int i = 0; i < 3; i++) {
+     *     documents.add(new TextDocumentInput(Integer.toString(i),
+     *         "The patient is a 54-year-old gentleman with a history of progressive angina "
+     *             + "over the past several months."));
+     * }
+     *
+     * AnalyzeHealthcareEntitiesOptions options = new AnalyzeHealthcareEntitiesOptions()
+     *     .setIncludeStatistics(true);
+     *
+     * textAnalyticsAsyncClient.beginAnalyzeHealthcareEntities(documents, options)
+     *     .flatMap(pollResult -> {
+     *         AnalyzeHealthcareEntitiesOperationDetail operationResult = pollResult.getValue();
+     *         System.out.printf("Operation created time: %s, expiration time: %s.%n",
+     *             operationResult.getCreatedAt(), operationResult.getExpiresAt());
+     *         return pollResult.getFinalResult();
+     *     })
+     *     .flatMap(analyzeActionsResultPagedFlux -> analyzeActionsResultPagedFlux.byPage())
+     *     .subscribe(
+     *         pagedResponse -> pagedResponse.getElements().forEach(
+     *             analyzeHealthcareEntitiesResultCollection -> {
+     *                 // Model version
+     *                 System.out.printf("Results of Azure Text Analytics \"Analyze Healthcare\" Model, version: %s%n",
+     *                     analyzeHealthcareEntitiesResultCollection.getModelVersion());
+     *
+     *                 TextDocumentBatchStatistics healthcareTaskStatistics =
+     *                     analyzeHealthcareEntitiesResultCollection.getStatistics();
+     *                 // Batch statistics
+     *                 System.out.printf("Documents statistics: document count = %s, erroneous document count = %s,"
+     *                                       + " transaction count = %s, valid document count = %s.%n",
+     *                     healthcareTaskStatistics.getDocumentCount(),
+     *                     healthcareTaskStatistics.getInvalidDocumentCount(),
+     *                     healthcareTaskStatistics.getTransactionCount(),
+     *                     healthcareTaskStatistics.getValidDocumentCount());
+     *
+     *                 analyzeHealthcareEntitiesResultCollection.forEach(healthcareEntitiesResult -> {
+     *                     System.out.println("document id = " + healthcareEntitiesResult.getId());
+     *                     System.out.println("Document entities: ");
+     *                     AtomicInteger ct = new AtomicInteger();
+     *                     healthcareEntitiesResult.getEntities().forEach(healthcareEntity -> {
+     *                         System.out.printf(
+     *                             "\ti = %d, Text: %s, category: %s, confidence score: %f.%n",
+     *                             ct.getAndIncrement(), healthcareEntity.getText(), healthcareEntity.getCategory(),
+     *                             healthcareEntity.getConfidenceScore());
+     *
+     *                         IterableStream<EntityDataSource> healthcareEntityDataSources =
+     *                             healthcareEntity.getDataSources();
+     *                         if (healthcareEntityDataSources != null) {
+     *                             healthcareEntityDataSources.forEach(healthcareEntityLink -> System.out.printf(
+     *                                 "\t\tEntity ID in data source: %s, data source: %s.%n",
+     *                                 healthcareEntityLink.getEntityId(), healthcareEntityLink.getName()));
+     *                         }
+     *                     });
+     *                     // Healthcare entity relation groups
+     *                     healthcareEntitiesResult.getEntityRelations().forEach(entityRelation -> {
+     *                         System.out.printf("\tRelation type: %s.%n", entityRelation.getRelationType());
+     *                         entityRelation.getRoles().forEach(role -> {
+     *                             final HealthcareEntity entity = role.getEntity();
+     *                             System.out.printf("\t\tEntity text: %s, category: %s, role: %s.%n",
+     *                                 entity.getText(), entity.getCategory(), role.getName());
+     *                         });
+     *                     });
+     *                 });
+     *             }));
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * @param options The additional configurable {@link AnalyzeHealthcareEntitiesOptions options} that may be passed @@ -1105,7 +1743,48 @@ public Mono> analyzeSentimentBatchWit * See this supported languages in Text Analytics API. * *

Code Sample

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "Elon Musk is the CEO of SpaceX and Tesla.",
+     *     "My SSN is 859-98-0987"
+     * );
+     * textAnalyticsAsyncClient.beginAnalyzeActions(documents,
+     *     new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
+     *         .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
+     *         .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
+     *     "en",
+     *     new AnalyzeActionsOptions().setIncludeStatistics(false))
+     *     .flatMap(AsyncPollResponse::getFinalResult)
+     *     .flatMap(analyzeActionsResultPagedFlux -> analyzeActionsResultPagedFlux.byPage())
+     *     .subscribe(
+     *         pagedResponse -> pagedResponse.getElements().forEach(
+     *             analyzeActionsResult -> {
+     *                 analyzeActionsResult.getRecognizeEntitiesResults().forEach(
+     *                     actionResult -> {
+     *                         if (!actionResult.isError()) {
+     *                             actionResult.getDocumentsResults().forEach(
+     *                                 entitiesResult -> entitiesResult.getEntities().forEach(
+     *                                     entity -> System.out.printf(
+     *                                         "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
+     *                                             + " confidence score: %f.%n",
+     *                                         entity.getText(), entity.getCategory(), entity.getSubcategory(),
+     *                                         entity.getConfidenceScore())));
+     *                         }
+     *                     });
+     *                 analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
+     *                     actionResult -> {
+     *                         if (!actionResult.isError()) {
+     *                             actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
+     *                                 System.out.println("Extracted phrases:");
+     *                                 extractKeyPhraseResult.getKeyPhrases()
+     *                                     .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
+     *                             });
+     *                         }
+     *                     });
+     *             }));
+     * 
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -1143,7 +1822,49 @@ public PollerFlux * See this supported languages in Text Analytics API. * *

Code Sample

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-AnalyzeActionsOptions} + * + *
+     * List<TextDocumentInput> documents = Arrays.asList(
+     *     new TextDocumentInput("0", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en"),
+     *     new TextDocumentInput("1", "My SSN is 859-98-0987").setLanguage("en")
+     * );
+     * textAnalyticsAsyncClient.beginAnalyzeActions(documents,
+     *     new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
+     *         .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
+     *         .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
+     *     new AnalyzeActionsOptions().setIncludeStatistics(false))
+     *     .flatMap(AsyncPollResponse::getFinalResult)
+     *     .flatMap(analyzeActionsResultPagedFlux -> analyzeActionsResultPagedFlux.byPage())
+     *     .subscribe(
+     *         pagedResponse -> pagedResponse.getElements().forEach(
+     *             analyzeActionsResult -> {
+     *                 System.out.println("Entities recognition action results:");
+     *                 analyzeActionsResult.getRecognizeEntitiesResults().forEach(
+     *                     actionResult -> {
+     *                         if (!actionResult.isError()) {
+     *                             actionResult.getDocumentsResults().forEach(
+     *                                 entitiesResult -> entitiesResult.getEntities().forEach(
+     *                                     entity -> System.out.printf(
+     *                                         "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
+     *                                             + " confidence score: %f.%n",
+     *                                         entity.getText(), entity.getCategory(), entity.getSubcategory(),
+     *                                         entity.getConfidenceScore())));
+     *                         }
+     *                     });
+     *                 System.out.println("Key phrases extraction action results:");
+     *                 analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
+     *                     actionResult -> {
+     *                         if (!actionResult.isError()) {
+     *                             actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
+     *                                 System.out.println("Extracted phrases:");
+     *                                 extractKeyPhraseResult.getKeyPhrases()
+     *                                     .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
+     *                             });
+     *                         }
+     *                     });
+     *             }));
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * @param actions The {@link TextAnalyticsActions actions} that contains all actions to be executed. diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java index f7f6a1ceadf7c..0d92134c70e7d 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java @@ -51,8 +51,51 @@ * key phrases extraction, and sentiment analysis of a document or a list of documents. * *

Instantiating a synchronous Text Analytics Client

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.instantiation} + * + *
+ * List<String> documents = Arrays.asList(
+ *     "Elon Musk is the CEO of SpaceX and Tesla.",
+ *     "My SSN is 859-98-0987"
+ * );
  *
+ * SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
+ *     textAnalyticsClient.beginAnalyzeActions(
+ *         documents,
+ *         new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
+ *             .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
+ *             .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
+ *         "en",
+ *         new AnalyzeActionsOptions().setIncludeStatistics(false));
+ * syncPoller.waitForCompletion();
+ * AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
+ * result.forEach(analyzeActionsResult -> {
+ *     System.out.println("Entities recognition action results:");
+ *     analyzeActionsResult.getRecognizeEntitiesResults().forEach(
+ *         actionResult -> {
+ *             if (!actionResult.isError()) {
+ *                 actionResult.getDocumentsResults().forEach(
+ *                     entitiesResult -> entitiesResult.getEntities().forEach(
+ *                         entity -> System.out.printf(
+ *                             "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
+ *                                 + " confidence score: %f.%n",
+ *                             entity.getText(), entity.getCategory(), entity.getSubcategory(),
+ *                             entity.getConfidenceScore())));
+ *             }
+ *         });
+ *     System.out.println("Key phrases extraction action results:");
+ *     analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
+ *         actionResult -> {
+ *             if (!actionResult.isError()) {
+ *                 actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
+ *                     System.out.println("Extracted phrases:");
+ *                     extractKeyPhraseResult.getKeyPhrases()
+ *                         .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
+ *                 });
+ *             }
+ *         });
+ * });
+ * 
+ * *

View {@link TextAnalyticsClientBuilder this} for additional ways to construct the client.

* * @see TextAnalyticsClientBuilder @@ -99,7 +142,13 @@ public String getDefaultLanguage() { * *

Code Sample

*

Detects the language of single document.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String} + * + *
+     * DetectedLanguage detectedLanguage = textAnalyticsClient.detectLanguage("Bonjour tout le monde");
+     * System.out.printf("Detected language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
+     *     detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore());
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -120,7 +169,14 @@ public DetectedLanguage detectLanguage(String document) { * *

Code Sample

*

Detects the language of documents with a provided country hint.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String-String} + * + *
+     * DetectedLanguage detectedLanguage = textAnalyticsClient.detectLanguage(
+     *     "This text is in English", "US");
+     * System.out.printf("Detected language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
+     *     detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore());
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -143,7 +199,31 @@ public DetectedLanguage detectLanguage(String document, String countryHint) { * *

Code Sample

*

Detects the language in a list of documents with a provided country hint and request options.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "This is written in English",
+     *     "Este es un documento  escrito en Español."
+     * );
+     *
+     * DetectLanguageResultCollection resultCollection =
+     *     textAnalyticsClient.detectLanguageBatch(documents, "US", null);
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * // Batch result of languages
+     * resultCollection.forEach(detectLanguageResult -> {
+     *     System.out.printf("Document ID: %s%n", detectLanguageResult.getId());
+     *     DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
+     *     System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
+     *         detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
+     *         detectedLanguage.getConfidenceScore());
+     * });
+     * 
+ * * * @param documents The list of documents to detect languages for. * For text length limits, maximum batch size, and supported text encoding, see @@ -172,7 +252,39 @@ public DetectLanguageResultCollection detectLanguageBatch( *

Code Sample

*

Detects the languages with http response in a list of {@link DetectLanguageInput document} with provided * request options.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions-Context} + * + *
+     * List<DetectLanguageInput> detectLanguageInputs = Arrays.asList(
+     *     new DetectLanguageInput("1", "This is written in English.", "US"),
+     *     new DetectLanguageInput("2", "Este es un documento  escrito en Español.", "es")
+     * );
+     *
+     * Response<DetectLanguageResultCollection> response =
+     *     textAnalyticsClient.detectLanguageBatchWithResponse(detectLanguageInputs,
+     *         new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
+     *
+     * // Response's status code
+     * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     * DetectLanguageResultCollection detectedLanguageResultCollection = response.getValue();
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = detectedLanguageResultCollection.getStatistics();
+     * System.out.printf(
+     *     "Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s,"
+     *         + " valid document count = %s.%n",
+     *     batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(),
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * // Batch result of languages
+     * detectedLanguageResultCollection.forEach(detectLanguageResult -> {
+     *     System.out.printf("Document ID: %s%n", detectLanguageResult.getId());
+     *     DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
+     *     System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
+     *         detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
+     *         detectedLanguage.getConfidenceScore());
+     * });
+     * 
+ * * * @param documents The list of {@link DetectLanguageInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -205,7 +317,16 @@ public Response detectLanguageBatchWithResponse( * *

Code Sample

*

Recognize the entities of documents

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String} + * + *
+     * final CategorizedEntityCollection recognizeEntitiesResult =
+     *     textAnalyticsClient.recognizeEntities("Satya Nadella is the CEO of Microsoft");
+     * for (CategorizedEntity entity : recognizeEntitiesResult) {
+     *     System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
+     *         entity.getText(), entity.getCategory(), entity.getConfidenceScore());
+     * }
+     * 
+ * * * @param document The document to recognize entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -230,7 +351,17 @@ public CategorizedEntityCollection recognizeEntities(String document) { * *

Code Sample

*

Recognizes the entities in a document with a provided language code.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String-String} + * + *
+     * final CategorizedEntityCollection recognizeEntitiesResult =
+     *     textAnalyticsClient.recognizeEntities("Satya Nadella is the CEO of Microsoft", "en");
+     *
+     * for (CategorizedEntity entity : recognizeEntitiesResult) {
+     *     System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
+     *         entity.getText(), entity.getCategory(), entity.getConfidenceScore());
+     * }
+     * 
+ * * * @param document The document to recognize entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -254,7 +385,27 @@ public CategorizedEntityCollection recognizeEntities(String document, String lan * *

Code Sample

*

Recognizes the entities in a list of documents with a provided language code and request options.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "I had a wonderful trip to Seattle last week.",
+     *     "I work at Microsoft.");
+     *
+     * RecognizeEntitiesResultCollection resultCollection =
+     *     textAnalyticsClient.recognizeEntitiesBatch(documents, "en", null);
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf(
+     *     "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * resultCollection.forEach(recognizeEntitiesResult ->
+     *     recognizeEntitiesResult.getEntities().forEach(entity ->
+     *         System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
+     *             entity.getText(), entity.getCategory(), entity.getConfidenceScore())));
+     * 
+ * * * @param documents A list of documents to recognize entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -282,7 +433,33 @@ public RecognizeEntitiesResultCollection recognizeEntitiesBatch( *

Code Sample

*

Recognizes the entities with http response in a list of {@link TextDocumentInput document} with provided * request options.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context} + * + *
+     * List<TextDocumentInput> textDocumentInputs = Arrays.asList(
+     *     new TextDocumentInput("0", "I had a wonderful trip to Seattle last week.").setLanguage("en"),
+     *     new TextDocumentInput("1", "I work at Microsoft.").setLanguage("en")
+     * );
+     *
+     * Response<RecognizeEntitiesResultCollection> response =
+     *     textAnalyticsClient.recognizeEntitiesBatchWithResponse(textDocumentInputs,
+     *         new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
+     *
+     * // Response's status code
+     * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     * RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = response.getValue();
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResultCollection.getStatistics();
+     * System.out.printf(
+     *     "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * recognizeEntitiesResultCollection.forEach(recognizeEntitiesResult ->
+     *     recognizeEntitiesResult.getEntities().forEach(entity ->
+     *         System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
+     *             entity.getText(), entity.getCategory(), entity.getConfidenceScore())));
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to recognize entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -315,7 +492,18 @@ public Response recognizeEntitiesBatchWithRes *

Code Sample

*

Recognize the PII entities details in a document.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String} + * + *
+     * PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities("My SSN is 859-98-0987");
+     * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     * for (PiiEntity entity : piiEntityCollection) {
+     *     System.out.printf(
+     *         "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *             + " entity subcategory: %s, confidence score: %f.%n",
+     *         entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore());
+     * }
+     * 
+ * * * @param document The document to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see @@ -341,7 +529,17 @@ public PiiEntityCollection recognizePiiEntities(String document) { *

Code Sample

*

Recognizes the PII entities details in a document with a provided language code.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String-String} + * + *
+     * PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities(
+     *     "My SSN is 859-98-0987", "en");
+     * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     * piiEntityCollection.forEach(entity -> System.out.printf(
+     *         "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *             + " entity subcategory: %s, confidence score: %f.%n",
+     *         entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+     * 
+ * * * @param document The document to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see @@ -369,7 +567,18 @@ public PiiEntityCollection recognizePiiEntities(String document, String language *

Recognizes the PII entities details in a document with a provided language code and * {@link RecognizePiiEntitiesOptions}.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String-String-RecognizePiiEntitiesOptions} + * + *
+     * PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities(
+     *     "My SSN is 859-98-0987", "en",
+     *     new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION));
+     * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     * piiEntityCollection.forEach(entity -> System.out.printf(
+     *     "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *         + " entity subcategory: %s, confidence score: %f.%n",
+     *     entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+     * 
+ * * * @param document The document to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see @@ -397,7 +606,31 @@ public PiiEntityCollection recognizePiiEntities(String document, String language *

Recognizes the PII entities details in a list of documents with a provided language code * and request options.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntitiesBatch#Iterable-String-RecognizePiiEntitiesOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "My SSN is 859-98-0987",
+     *     "Visa card 4111 1111 1111 1111"
+     * );
+     *
+     * RecognizePiiEntitiesResultCollection resultCollection = textAnalyticsClient.recognizePiiEntitiesBatch(
+     *     documents, "en", new RecognizePiiEntitiesOptions().setIncludeStatistics(true));
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * resultCollection.forEach(recognizePiiEntitiesResult -> {
+     *     PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities();
+     *     System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     *     piiEntityCollection.forEach(entity -> System.out.printf(
+     *         "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *             + " entity subcategory: %s, confidence score: %f.%n",
+     *         entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+     * });
+     * 
+ * * * @param documents A list of documents to recognize PII entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -425,7 +658,34 @@ public RecognizePiiEntitiesResultCollection recognizePiiEntitiesBatch( *

Recognizes the PII entities details with http response in a list of {@link TextDocumentInput document} * with provided request options.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntitiesBatch#Iterable-RecognizePiiEntitiesOptions-Context} + * + *
+     * List<TextDocumentInput> textDocumentInputs = Arrays.asList(
+     *     new TextDocumentInput("0", "My SSN is 859-98-0987"),
+     *     new TextDocumentInput("1", "Visa card 4111 1111 1111 1111")
+     * );
+     *
+     * Response<RecognizePiiEntitiesResultCollection> response =
+     *     textAnalyticsClient.recognizePiiEntitiesBatchWithResponse(textDocumentInputs,
+     *         new RecognizePiiEntitiesOptions().setIncludeStatistics(true), Context.NONE);
+     *
+     * RecognizePiiEntitiesResultCollection resultCollection = response.getValue();
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * resultCollection.forEach(recognizePiiEntitiesResult -> {
+     *     PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities();
+     *     System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+     *     piiEntityCollection.forEach(entity -> System.out.printf(
+     *         "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+     *             + " entity subcategory: %s, confidence score: %f.%n",
+     *         entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+     * });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to recognize PII entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -457,7 +717,20 @@ public Response recognizePiiEntitiesBatchW * *

Code Sample

*

Recognize the linked entities of documents

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String} + * + *
+     * final String document = "Old Faithful is a geyser at Yellowstone Park.";
+     * System.out.println("Linked Entities:");
+     * textAnalyticsClient.recognizeLinkedEntities(document).forEach(linkedEntity -> {
+     *     System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+     *         linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+     *         linkedEntity.getDataSource());
+     *     linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+     *         "Matched entity: %s, confidence score: %f.%n",
+     *         entityMatch.getText(), entityMatch.getConfidenceScore()));
+     * });
+     * 
+ * * * @param document The document to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -481,7 +754,19 @@ public LinkedEntityCollection recognizeLinkedEntities(String document) { * *

Code Sample

*

Recognizes the linked entities in a document with a provided language code.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String-String} + * + *
+     * String document = "Old Faithful is a geyser at Yellowstone Park.";
+     * textAnalyticsClient.recognizeLinkedEntities(document, "en").forEach(linkedEntity -> {
+     *     System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+     *         linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+     *         linkedEntity.getDataSource());
+     *     linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+     *         "Matched entity: %s, confidence score: %f.%n",
+     *         entityMatch.getText(), entityMatch.getConfidenceScore()));
+     * });
+     * 
+ * * * @param document The document to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -509,7 +794,33 @@ public LinkedEntityCollection recognizeLinkedEntities(String document, String la *

Code Sample

*

Recognizes the linked entities in a list of documents with a provided language code and request options. *

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "Old Faithful is a geyser at Yellowstone Park.",
+     *     "Mount Shasta has lenticular clouds."
+     * );
+     *
+     * RecognizeLinkedEntitiesResultCollection resultCollection =
+     *     textAnalyticsClient.recognizeLinkedEntitiesBatch(documents, "en", null);
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * resultCollection.forEach(recognizeLinkedEntitiesResult ->
+     *     recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> {
+     *         System.out.println("Linked Entities:");
+     *         System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+     *             linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+     *             linkedEntity.getDataSource());
+     *         linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+     *             "Matched entity: %s, confidence score: %f.%n",
+     *             entityMatch.getText(), entityMatch.getConfidenceScore()));
+     *     }));
+     * 
+ * * * @param documents A list of documents to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -540,7 +851,39 @@ public RecognizeLinkedEntitiesResultCollection recognizeLinkedEntitiesBatch( *

Code Sample

*

Recognizes the linked entities with http response in a list of {@link TextDocumentInput} with request options. *

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context} + * + *
+     * List<TextDocumentInput> textDocumentInputs = Arrays.asList(
+     *     new TextDocumentInput("1", "Old Faithful is a geyser at Yellowstone Park.").setLanguage("en"),
+     *     new TextDocumentInput("2", "Mount Shasta has lenticular clouds.").setLanguage("en")
+     * );
+     *
+     * Response<RecognizeLinkedEntitiesResultCollection> response =
+     *     textAnalyticsClient.recognizeLinkedEntitiesBatchWithResponse(textDocumentInputs,
+     *         new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
+     *
+     * // Response's status code
+     * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     * RecognizeLinkedEntitiesResultCollection resultCollection = response.getValue();
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf(
+     *     "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * resultCollection.forEach(recognizeLinkedEntitiesResult ->
+     *     recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> {
+     *         System.out.println("Linked Entities:");
+     *         System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+     *             linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+     *             linkedEntity.getDataSource());
+     *         linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+     *             "Matched entity: %s, confidence score: %.2f.%n",
+     *             entityMatch.getText(), entityMatch.getConfidenceScore()));
+     *     }));
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -572,7 +915,14 @@ public Response recognizeLinkedEntities * *

Code Sample

*

Extracts key phrases of documents

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrases#String} + * + *
+     * System.out.println("Extracted phrases:");
+     * for (String keyPhrase : textAnalyticsClient.extractKeyPhrases("My cat might need to see a veterinarian.")) {
+     *     System.out.printf("%s.%n", keyPhrase);
+     * }
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -594,7 +944,13 @@ public KeyPhrasesCollection extractKeyPhrases(String document) { * *

Code Sample

*

Extracts key phrases in a document with a provided language representation.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrases#String-String-Context} + * + *
+     * System.out.println("Extracted phrases:");
+     * textAnalyticsClient.extractKeyPhrases(&quot;My cat might need to see a veterinarian.&quot;, &quot;en&quot;)
+     *     .forEach&#40;keyPhrase -&gt; System.out.printf&#40;&quot;%s.%n&quot;, keyPhrase&#41;&#41;;
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -621,7 +977,32 @@ public KeyPhrasesCollection extractKeyPhrases(String document, String language) * *

Code Sample

*

Extracts key phrases in a list of documents with a provided language code and request options.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "My cat might need to see a veterinarian.",
+     *     "The pitot tube is used to measure airspeed."
+     * );
+     *
+     * // Extracting batch key phrases
+     * ExtractKeyPhrasesResultCollection resultCollection =
+     *     textAnalyticsClient.extractKeyPhrasesBatch(documents, "en", null);
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf(
+     *     "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * // Extracted key phrase for each of documents from a batch of documents
+     * resultCollection.forEach(extractKeyPhraseResult -> {
+     *     System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId());
+     *     // Valid document
+     *     System.out.println("Extracted phrases:");
+     *     extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase));
+     * });
+     * 
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -651,7 +1032,39 @@ public ExtractKeyPhrasesResultCollection extractKeyPhrasesBatch( * *

Code Sample

*

Extracts key phrases with http response in a list of {@link TextDocumentInput} with request options.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions-Context} + * + *
+     * List<TextDocumentInput> textDocumentInputs = Arrays.asList(
+     *     new TextDocumentInput("1", "My cat might need to see a veterinarian.").setLanguage("en"),
+     *     new TextDocumentInput("2", "The pitot tube is used to measure airspeed.").setLanguage("en")
+     * );
+     *
+     * // Extracting batch key phrases
+     * Response<ExtractKeyPhrasesResultCollection> response =
+     *     textAnalyticsClient.extractKeyPhrasesBatchWithResponse(textDocumentInputs,
+     *         new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
+     *
+     *
+     * // Response's status code
+     * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     * ExtractKeyPhrasesResultCollection resultCollection = response.getValue();
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf(
+     *     "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * // Extracted key phrase for each of documents from a batch of documents
+     * resultCollection.forEach(extractKeyPhraseResult -> {
+     *     System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId());
+     *     // Valid document
+     *     System.out.println("Extracted phrases:");
+     *     extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase ->
+     *         System.out.printf("%s.%n", keyPhrase));
+     * });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -685,7 +1098,28 @@ public Response extractKeyPhrasesBatchWithRes *

Code Sample

*

Analyze the sentiments of documents

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String} + * + *
+     * final DocumentSentiment documentSentiment =
+     *     textAnalyticsClient.analyzeSentiment("The hotel was dark and unclean.");
+     *
+     * System.out.printf(
+     *     "Recognized sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n",
+     *     documentSentiment.getSentiment(),
+     *     documentSentiment.getConfidenceScores().getPositive(),
+     *     documentSentiment.getConfidenceScores().getNeutral(),
+     *     documentSentiment.getConfidenceScores().getNegative());
+     *
+     * for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
+     *     System.out.printf(
+     *         "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n",
+     *         sentenceSentiment.getSentiment(),
+     *         sentenceSentiment.getConfidenceScores().getPositive(),
+     *         sentenceSentiment.getConfidenceScores().getNeutral(),
+     *         sentenceSentiment.getConfidenceScores().getNegative());
+     * }
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -708,7 +1142,28 @@ public DocumentSentiment analyzeSentiment(String document) { *

Code Sample

*

Analyze the sentiments in a document with a provided language representation.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String-String} + * + *
+     * final DocumentSentiment documentSentiment = textAnalyticsClient.analyzeSentiment(
+     *     "The hotel was dark and unclean.", "en");
+     *
+     * System.out.printf(
+     *     "Recognized sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n",
+     *     documentSentiment.getSentiment(),
+     *     documentSentiment.getConfidenceScores().getPositive(),
+     *     documentSentiment.getConfidenceScores().getNeutral(),
+     *     documentSentiment.getConfidenceScores().getNegative());
+     *
+     * for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
+     *     System.out.printf(
+     *         "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n",
+     *         sentenceSentiment.getSentiment(),
+     *         sentenceSentiment.getConfidenceScores().getPositive(),
+     *         sentenceSentiment.getConfidenceScores().getNeutral(),
+     *         sentenceSentiment.getConfidenceScores().getNegative());
+     * }
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -737,7 +1192,25 @@ public DocumentSentiment analyzeSentiment(String document, String language) { *

Analyze the sentiment and mine the opinions for each sentence in a document with a provided language * representation and {@link AnalyzeSentimentOptions} options.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String-String-AnalyzeSentimentOptions} + * + *
+     * final DocumentSentiment documentSentiment = textAnalyticsClient.analyzeSentiment(
+     *     "The hotel was dark and unclean.", "en",
+     *     new AnalyzeSentimentOptions().setIncludeOpinionMining(true));
+     * for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
+     *     System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
+     *     sentenceSentiment.getOpinions().forEach(opinion -> {
+     *         TargetSentiment targetSentiment = opinion.getTarget();
+     *         System.out.printf("\tTarget sentiment: %s, target text: %s%n", targetSentiment.getSentiment(),
+     *             targetSentiment.getText());
+     *         for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
+     *             System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
+     *                 assessmentSentiment.getSentiment(), assessmentSentiment.getText(), assessmentSentiment.isNegated());
+     *         }
+     *     });
+     * }
+     * 
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -763,7 +1236,44 @@ public DocumentSentiment analyzeSentiment(String document, String language, Anal * *

Code Sample

*

Analyze the sentiments in a list of documents with a provided language representation and request options.

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "The hotel was dark and unclean. The restaurant had amazing gnocchi.",
+     *     "The restaurant had amazing gnocchi. The hotel was dark and unclean."
+     * );
+     *
+     * // Analyzing batch sentiments
+     * AnalyzeSentimentResultCollection resultCollection = textAnalyticsClient.analyzeSentimentBatch(
+     *     documents, "en", new TextAnalyticsRequestOptions().setIncludeStatistics(true));
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * // Analyzed sentiment for each of documents from a batch of documents
+     * resultCollection.forEach(analyzeSentimentResult -> {
+     *     System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+     *     // Valid document
+     *     DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+     *     System.out.printf(
+     *         "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f,"
+     *             + " negative score: %.2f.%n",
+     *         documentSentiment.getSentiment(),
+     *         documentSentiment.getConfidenceScores().getPositive(),
+     *         documentSentiment.getConfidenceScores().getNeutral(),
+     *         documentSentiment.getConfidenceScores().getNegative());
+     *     documentSentiment.getSentences().forEach(sentenceSentiment -> System.out.printf(
+     *         "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f,"
+     *             + " negative score: %.2f.%n",
+     *         sentenceSentiment.getSentiment(),
+     *         sentenceSentiment.getConfidenceScores().getPositive(),
+     *         sentenceSentiment.getConfidenceScores().getNeutral(),
+     *         sentenceSentiment.getConfidenceScores().getNegative()));
+     * });
+     * 
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -798,7 +1308,36 @@ public AnalyzeSentimentResultCollection analyzeSentimentBatch( *

Analyze the sentiments and mine the opinions for each sentence in a list of documents with a provided language * representation and {@link AnalyzeSentimentOptions} options.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String-AnalyzeSentimentOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "The hotel was dark and unclean. The restaurant had amazing gnocchi.",
+     *     "The restaurant had amazing gnocchi. The hotel was dark and unclean."
+     * );
+     *
+     * // Analyzing batch sentiments
+     * AnalyzeSentimentResultCollection resultCollection = textAnalyticsClient.analyzeSentimentBatch(
+     *     documents, "en", new AnalyzeSentimentOptions().setIncludeOpinionMining(true));
+     *
+     * // Analyzed sentiment for each of documents from a batch of documents
+     * resultCollection.forEach(analyzeSentimentResult -> {
+     *     System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+     *     DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+     *     documentSentiment.getSentences().forEach(sentenceSentiment -> {
+     *         System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
+     *         sentenceSentiment.getOpinions().forEach(opinion -> {
+     *             TargetSentiment targetSentiment = opinion.getTarget();
+     *             System.out.printf("\tTarget sentiment: %s, target text: %s%n", targetSentiment.getSentiment(),
+     *                 targetSentiment.getText());
+     *             for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
+     *                 System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
+     *                     assessmentSentiment.getSentiment(), assessmentSentiment.getText(), assessmentSentiment.isNegated());
+     *             }
+     *         });
+     *     });
+     * });
+     * 
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -826,7 +1365,53 @@ public AnalyzeSentimentResultCollection analyzeSentimentBatch(Iterable d *

Code Sample

*

Analyze sentiment in a list of {@link TextDocumentInput document} with provided request options.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions-Context} + * + *
+     * List<TextDocumentInput> textDocumentInputs = Arrays.asList(
+     *     new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.")
+     *         .setLanguage("en"),
+     *     new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.")
+     *         .setLanguage("en")
+     * );
+     *
+     * // Analyzing batch sentiments
+     * Response<AnalyzeSentimentResultCollection> response =
+     *     textAnalyticsClient.analyzeSentimentBatchWithResponse(textDocumentInputs,
+     *         new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
+     *
+     * // Response's status code
+     * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     * AnalyzeSentimentResultCollection resultCollection = response.getValue();
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * // Analyzed sentiment for each of documents from a batch of documents
+     * resultCollection.forEach(analyzeSentimentResult -> {
+     *     System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+     *     // Valid document
+     *     DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+     *     System.out.printf(
+     *         "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f, "
+     *             + "negative score: %.2f.%n",
+     *         documentSentiment.getSentiment(),
+     *         documentSentiment.getConfidenceScores().getPositive(),
+     *         documentSentiment.getConfidenceScores().getNeutral(),
+     *         documentSentiment.getConfidenceScores().getNegative());
+     *     documentSentiment.getSentences().forEach(sentenceSentiment -> {
+     *         System.out.printf(
+     *             "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f,"
+     *                 + " negative score: %.2f.%n",
+     *             sentenceSentiment.getSentiment(),
+     *             sentenceSentiment.getConfidenceScores().getPositive(),
+     *             sentenceSentiment.getConfidenceScores().getNeutral(),
+     *             sentenceSentiment.getConfidenceScores().getNegative());
+     *     });
+     * });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -864,7 +1449,51 @@ public Response analyzeSentimentBatchWithRespo *

Analyze sentiment and mine the opinions for each sentence in a list of * {@link TextDocumentInput document} with provided {@link AnalyzeSentimentOptions} options.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-AnalyzeSentimentOptions-Context} + * + *
+     * List<TextDocumentInput> textDocumentInputs = Arrays.asList(
+     *     new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.")
+     *         .setLanguage("en"),
+     *     new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.")
+     *         .setLanguage("en")
+     * );
+     *
+     * AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true)
+     *     .setIncludeStatistics(true);
+     *
+     * // Analyzing batch sentiments
+     * Response<AnalyzeSentimentResultCollection> response =
+     *     textAnalyticsClient.analyzeSentimentBatchWithResponse(textDocumentInputs, options, Context.NONE);
+     *
+     * // Response's status code
+     * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+     * AnalyzeSentimentResultCollection resultCollection = response.getValue();
+     *
+     * // Batch statistics
+     * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+     * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+     *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+     *
+     * // Analyzed sentiment for each of documents from a batch of documents
+     * resultCollection.forEach(analyzeSentimentResult -> {
+     *     System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+     *     DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+     *     documentSentiment.getSentences().forEach(sentenceSentiment -> {
+     *         System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
+     *         sentenceSentiment.getOpinions().forEach(opinion -> {
+     *             TargetSentiment targetSentiment = opinion.getTarget();
+     *             System.out.printf("\tTarget sentiment: %s, target text: %s%n", targetSentiment.getSentiment(),
+     *                 targetSentiment.getText());
+     *             for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
+     *                 System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
+     *                     assessmentSentiment.getSentiment(), assessmentSentiment.getText(),
+     *                     assessmentSentiment.isNegated());
+     *             }
+     *         });
+     *     });
+     * });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -897,7 +1526,7 @@ public Response analyzeSentimentBatchWithRespo * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see * data limits. - * @param language The 2 letter ISO 639-1 representation of language for the documents. If not set, uses "en" for + * @param language The 2-letter ISO 639-1 representation of language for the documents. If not set, uses "en" for * English as default. * @param options The additional configurable {@link AnalyzeHealthcareEntitiesOptions options} that may be passed * when analyzing healthcare entities. @@ -936,7 +1565,73 @@ public Response analyzeSentimentBatchWithRespo * {@link TextDocumentInput document} and provided request options to * show statistics.

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeHealthcareEntities#Iterable-AnalyzeHealthcareEntitiesOptions-Context} + * + *
+     * List<TextDocumentInput> documents = new ArrayList<>();
+     * for (int i = 0; i < 3; i++) {
+     *     documents.add(new TextDocumentInput(Integer.toString(i),
+     *         "The patient is a 54-year-old gentleman with a history of progressive angina over "
+     *             + "the past several months."));
+     * }
+     *
+     * // Request options: show statistics
+     * AnalyzeHealthcareEntitiesOptions options = new AnalyzeHealthcareEntitiesOptions()
+     *     .setIncludeStatistics(true);
+     *
+     * SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
+     *     syncPoller = textAnalyticsClient.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
+     *
+     * syncPoller.waitForCompletion();
+     * AnalyzeHealthcareEntitiesPagedIterable result = syncPoller.getFinalResult();
+     *
+     * // Task operation statistics
+     * final AnalyzeHealthcareEntitiesOperationDetail operationResult = syncPoller.poll().getValue();
+     * System.out.printf("Operation created time: %s, expiration time: %s.%n",
+     *     operationResult.getCreatedAt(), operationResult.getExpiresAt());
+     *
+     * result.forEach(analyzeHealthcareEntitiesResultCollection -> {
+     *     // Model version
+     *     System.out.printf("Results of Azure Text Analytics \"Analyze Healthcare\" Model, version: %s%n",
+     *         analyzeHealthcareEntitiesResultCollection.getModelVersion());
+     *
+     *     TextDocumentBatchStatistics healthcareTaskStatistics =
+     *         analyzeHealthcareEntitiesResultCollection.getStatistics();
+     *     // Batch statistics
+     *     System.out.printf("Documents statistics: document count = %s, erroneous document count = %s,"
+     *             + " transaction count = %s, valid document count = %s.%n",
+     *         healthcareTaskStatistics.getDocumentCount(), healthcareTaskStatistics.getInvalidDocumentCount(),
+     *         healthcareTaskStatistics.getTransactionCount(), healthcareTaskStatistics.getValidDocumentCount());
+     *
+     *     analyzeHealthcareEntitiesResultCollection.forEach(healthcareEntitiesResult -> {
+     *         System.out.println("document id = " + healthcareEntitiesResult.getId());
+     *         System.out.println("Document entities: ");
+     *         AtomicInteger ct = new AtomicInteger();
+     *         healthcareEntitiesResult.getEntities().forEach(healthcareEntity -> {
+     *             System.out.printf("\ti = %d, Text: %s, category: %s, confidence score: %f.%n",
+     *                 ct.getAndIncrement(), healthcareEntity.getText(), healthcareEntity.getCategory(),
+     *                 healthcareEntity.getConfidenceScore());
+     *
+     *             IterableStream<EntityDataSource> healthcareEntityDataSources =
+     *                 healthcareEntity.getDataSources();
+     *             if (healthcareEntityDataSources != null) {
+     *                 healthcareEntityDataSources.forEach(healthcareEntityLink -> System.out.printf(
+     *                     "\t\tEntity ID in data source: %s, data source: %s.%n",
+     *                     healthcareEntityLink.getEntityId(), healthcareEntityLink.getName()));
+     *             }
+     *         });
+     *         // Healthcare entity relation groups
+     *         healthcareEntitiesResult.getEntityRelations().forEach(entityRelation -> {
+     *             System.out.printf("\tRelation type: %s.%n", entityRelation.getRelationType());
+     *             entityRelation.getRoles().forEach(role -> {
+     *                 final HealthcareEntity entity = role.getEntity();
+     *                 System.out.printf("\t\tEntity text: %s, category: %s, role: %s.%n",
+     *                     entity.getText(), entity.getCategory(), role.getName());
+     *             });
+     *         });
+     *     });
+     * });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * @param options The additional configurable {@link AnalyzeHealthcareEntitiesOptions options} that may be passed @@ -966,7 +1661,51 @@ public Response analyzeSentimentBatchWithRespo * See this supported languages in Text Analytics API. * *

Code Sample

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions} + * + *
+     * List<String> documents = Arrays.asList(
+     *     "Elon Musk is the CEO of SpaceX and Tesla.",
+     *     "My SSN is 859-98-0987"
+     * );
+     *
+     * SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
+     *     textAnalyticsClient.beginAnalyzeActions(
+     *         documents,
+     *         new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
+     *             .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
+     *             .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
+     *         "en",
+     *         new AnalyzeActionsOptions().setIncludeStatistics(false));
+     * syncPoller.waitForCompletion();
+     * AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
+     * result.forEach(analyzeActionsResult -> {
+     *     System.out.println("Entities recognition action results:");
+     *     analyzeActionsResult.getRecognizeEntitiesResults().forEach(
+     *         actionResult -> {
+     *             if (!actionResult.isError()) {
+     *                 actionResult.getDocumentsResults().forEach(
+     *                     entitiesResult -> entitiesResult.getEntities().forEach(
+     *                         entity -> System.out.printf(
+     *                             "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
+     *                                 + " confidence score: %f.%n",
+     *                             entity.getText(), entity.getCategory(), entity.getSubcategory(),
+     *                             entity.getConfidenceScore())));
+     *             }
+     *         });
+     *     System.out.println("Key phrases extraction action results:");
+     *     analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
+     *         actionResult -> {
+     *             if (!actionResult.isError()) {
+     *                 actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
+     *                     System.out.println("Extracted phrases:");
+     *                     extractKeyPhraseResult.getKeyPhrases()
+     *                         .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
+     *                 });
+     *             }
+     *         });
+     * });
+     * 
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -1004,7 +1743,51 @@ public SyncPollerthis supported languages in Text Analytics API. * *

Code Sample

- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-AnalyzeActionsOptions-Context} + * + *
+     * List<TextDocumentInput> documents = Arrays.asList(
+     *     new TextDocumentInput("0", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en"),
+     *     new TextDocumentInput("1", "My SSN is 859-98-0987").setLanguage("en")
+     * );
+     *
+     * SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
+     *     textAnalyticsClient.beginAnalyzeActions(
+     *         documents,
+     *         new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
+     *            .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
+     *            .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
+     *         new AnalyzeActionsOptions().setIncludeStatistics(false),
+     *         Context.NONE);
+     * syncPoller.waitForCompletion();
+     * AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
+     * result.forEach(analyzeActionsResult -> {
+     *     System.out.println("Entities recognition action results:");
+     *     analyzeActionsResult.getRecognizeEntitiesResults().forEach(
+     *         actionResult -> {
+     *             if (!actionResult.isError()) {
+     *                 actionResult.getDocumentsResults().forEach(
+     *                     entitiesResult -> entitiesResult.getEntities().forEach(
+     *                         entity -> System.out.printf(
+     *                             "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
+     *                                 + " confidence score: %f.%n",
+     *                             entity.getText(), entity.getCategory(), entity.getSubcategory(),
+     *                             entity.getConfidenceScore())));
+     *             }
+     *         });
+     *     System.out.println("Key phrases extraction action results:");
+     *     analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
+     *         actionResult -> {
+     *             if (!actionResult.isError()) {
+     *                 actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
+     *                     System.out.println("Extracted phrases:");
+     *                     extractKeyPhraseResult.getKeyPhrases()
+     *                         .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
+     *                 });
+     *             }
+     *         });
+     * });
+     * 
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * @param actions The {@link TextAnalyticsActions actions} that contains all actions to be executed. diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilder.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilder.java index 0d14ba586e7c5..a1542eac17764 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilder.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilder.java @@ -52,11 +52,25 @@ * *

Instantiating an asynchronous Text Analytics Client

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.instantiation} + * + *
+ * TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder()
+ *     .credential(new AzureKeyCredential("{key}"))
+ *     .endpoint("{endpoint}")
+ *     .buildAsyncClient();
+ * 
+ * * *

Instantiating a synchronous Text Analytics Client

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.instantiation} + * + *
+ * TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder()
+ *     .credential(new AzureKeyCredential("{key}"))
+ *     .endpoint("{endpoint}")
+ *     .buildClient();
+ * 
+ * * *

* Another way to construct the client is using a {@link HttpPipeline}. The pipeline gives the client an authenticated @@ -65,7 +79,19 @@ * on how the {@link TextAnalyticsClient} and {@link TextAnalyticsAsyncClient} is built. *

* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.pipeline.instantiation} + * + *
+ * HttpPipeline pipeline = new HttpPipelineBuilder()
+ *     .policies(/* add policies */)
+ *     .build();
+ *
+ * TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder()
+ *     .credential(new AzureKeyCredential("{key}"))
+ *     .endpoint("{endpoint}")
+ *     .pipeline(pipeline)
+ *     .buildClient();
+ * 
+ * * * @see TextAnalyticsAsyncClient * @see TextAnalyticsClient diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java index c58ddc77a1927..05dd5809d1d79 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java @@ -49,47 +49,56 @@ public class ReadmeSamples { * Code snippet for configuring http client. */ public void configureHttpClient() { + // BEGIN: readme-sample-configureHttpClient HttpClient client = new NettyAsyncHttpClientBuilder() .port(8080) .wiretap(true) .build(); + // END: readme-sample-configureHttpClient } /** * Code snippet for getting sync client using the AzureKeyCredential authentication. */ public void useAzureKeyCredentialSyncClient() { + // BEGIN: readme-sample-createTextAnalyticsClientWithKeyCredential TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); + // END: readme-sample-createTextAnalyticsClientWithKeyCredential } /** * Code snippet for getting async client using AzureKeyCredential authentication. */ public void useAzureKeyCredentialAsyncClient() { - TextAnalyticsAsyncClient textAnalyticsClient = new TextAnalyticsClientBuilder() + // BEGIN: readme-sample-createTextAnalyticsAsyncClientWithKeyCredential + TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildAsyncClient(); + // END: readme-sample-createTextAnalyticsAsyncClientWithKeyCredential } /** * Code snippet for getting async client using AAD authentication. 
*/ public void useAadAsyncClient() { + // BEGIN: readme-sample-createTextAnalyticsAsyncClientWithAAD TokenCredential defaultCredential = new DefaultAzureCredentialBuilder().build(); - TextAnalyticsAsyncClient textAnalyticsClient = new TextAnalyticsClientBuilder() + TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .endpoint("{endpoint}") .credential(defaultCredential) .buildAsyncClient(); + // END: readme-sample-createTextAnalyticsAsyncClientWithAAD } /** * Code snippet for rotating AzureKeyCredential of the client */ public void rotatingAzureKeyCredential() { + // BEGIN: readme-sample-rotatingAzureKeyCredential AzureKeyCredential credential = new AzureKeyCredential("{key}"); TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() .credential(credential) @@ -97,12 +106,14 @@ public void rotatingAzureKeyCredential() { .buildClient(); credential.update("{new_key}"); + // END: readme-sample-rotatingAzureKeyCredential } /** * Code snippet for handling exception */ public void handlingException() { + // BEGIN: readme-sample-handlingException List documents = Arrays.asList( new DetectLanguageInput("1", "This is written in English.", "us"), new DetectLanguageInput("1", "Este es un documento escrito en Español.", "es") @@ -113,43 +124,51 @@ public void handlingException() { } catch (HttpResponseException e) { System.out.println(e.getMessage()); } + // END: readme-sample-handlingException } /** * Code snippet for analyzing sentiment of a document. */ public void analyzeSentiment() { + // BEGIN: readme-sample-analyzeSentiment String document = "The hotel was dark and unclean. 
I like microsoft."; DocumentSentiment documentSentiment = textAnalyticsClient.analyzeSentiment(document); System.out.printf("Analyzed document sentiment: %s.%n", documentSentiment.getSentiment()); documentSentiment.getSentences().forEach(sentenceSentiment -> System.out.printf("Analyzed sentence sentiment: %s.%n", sentenceSentiment.getSentiment())); + // END: readme-sample-analyzeSentiment } /** * Code snippet for detecting language in a document. */ public void detectLanguages() { + // BEGIN: readme-sample-detectLanguages String document = "Bonjour tout le monde"; DetectedLanguage detectedLanguage = textAnalyticsClient.detectLanguage(document); System.out.printf("Detected language name: %s, ISO 6391 name: %s, confidence score: %f.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore()); + // END: readme-sample-detectLanguages } /** * Code snippet for recognizing category entity in a document. */ public void recognizeEntity() { + // BEGIN: readme-sample-recognizeEntity String document = "Satya Nadella is the CEO of Microsoft"; textAnalyticsClient.recognizeEntities(document).forEach(entity -> System.out.printf("Recognized entity: %s, category: %s, subcategory: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + // END: readme-sample-recognizeEntity } /** * Code snippet for recognizing linked entity in a document. 
*/ public void recognizeLinkedEntity() { + // BEGIN: readme-sample-recognizeLinkedEntity String document = "Old Faithful is a geyser at Yellowstone Park."; textAnalyticsClient.recognizeLinkedEntities(document).forEach(linkedEntity -> { System.out.println("Linked Entities:"); @@ -158,21 +177,25 @@ public void recognizeLinkedEntity() { linkedEntity.getMatches().forEach(match -> System.out.printf("Text: %s, confidence score: %f.%n", match.getText(), match.getConfidenceScore())); }); + // END: readme-sample-recognizeLinkedEntity } /** * Code snippet for extracting key phrases in a document. */ public void extractKeyPhrases() { + // BEGIN: readme-sample-extractKeyPhrases String document = "My cat might need to see a veterinarian."; System.out.println("Extracted phrases:"); textAnalyticsClient.extractKeyPhrases(document).forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase)); + // END: readme-sample-extractKeyPhrases } /** * Code snippet for recognizing Personally Identifiable Information entity in a document. */ public void recognizePiiEntity() { + // BEGIN: readme-sample-recognizePiiEntity String document = "My SSN is 859-98-0987"; PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities(document); System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); @@ -180,12 +203,14 @@ public void recognizePiiEntity() { "Recognized Personally Identifiable Information entity: %s, entity category: %s, entity subcategory: %s," + " confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + // END: readme-sample-recognizePiiEntity } /** * Code snippet for recognizing healthcare entities in documents. 
*/ public void recognizeHealthcareEntities() { + // BEGIN: readme-sample-recognizeHealthcareEntities List documents = Arrays.asList(new TextDocumentInput("0", "RECORD #333582770390100 | MH | 85986313 | | 054351 | 2/14/2001 12:00:00 AM | " + "CORONARY ARTERY DISEASE | Signed | DIS | Admission Date: 5/22/2001 " @@ -234,12 +259,14 @@ public void recognizeHealthcareEntities() { }); }); })); + // END: readme-sample-recognizeHealthcareEntities } /** * Code snippet for executing actions in a batch of documents. */ public void analyzeActions() { + // BEGIN: readme-sample-analyzeActions List documents = Arrays.asList( new TextDocumentInput("0", "We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore" @@ -290,4 +317,5 @@ public void analyzeActions() { }); }); } + // END: readme-sample-analyzeActions } diff --git a/sdk/textanalytics/pom.xml b/sdk/textanalytics/pom.xml index 4f56bc7bdbb4a..839a0465eb2e7 100644 --- a/sdk/textanalytics/pom.xml +++ b/sdk/textanalytics/pom.xml @@ -20,7 +20,7 @@ com.azure azure-ai-textanalytics - 5.1.4 + 5.1.5