diff --git a/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml b/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml
index d43aa914c7517..07f9e8ff671c0 100755
--- a/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml
+++ b/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml
@@ -471,7 +471,7 @@ the main ServiceBusClientBuilder. -->
diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java
--- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java
+++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java
 * <p><strong>Instantiating an asynchronous Text Analytics Client</strong></p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.instantiation}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.instantiation -->
+ * <pre>
+ * TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder()
+ *     .credential(new AzureKeyCredential("{key}"))
+ *     .endpoint("{endpoint}")
+ *     .buildAsyncClient();
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.instantiation -->
 *
 * <p>View {@link TextAnalyticsClientBuilder} for additional ways to construct the client.</p>
 *
@@ -72,7 +79,7 @@ public final class TextAnalyticsAsyncClient {
     private final String defaultCountryHint;
     private final String defaultLanguage;
-    // Please see here
+    // Please see here
     // for more information on Azure resource provider namespaces.
     static final String COGNITIVE_TRACING_NAMESPACE_VALUE = "Microsoft.CognitiveServices";
     final DetectLanguageAsyncClient detectLanguageAsyncClient;
@@ -139,7 +146,14 @@ public String getDefaultLanguage() {
 * <p>Detects language in a document. Subscribes to the call asynchronously and prints out the detected language
 * details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguage#string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguage#string -->
+ * <pre>
+ * String document = "Bonjour tout le monde";
+ * textAnalyticsAsyncClient.detectLanguage(document).subscribe(detectedLanguage ->
+ *     System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n",
+ *         detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore()));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguage#string -->
 *
 * @param document The document to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -163,12 +177,20 @@ public Mono
 * <p>Detects language in a document with a provided country hint. Subscribes to the call
 * asynchronously and prints out the detected language details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguage#string-string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguage#string-string -->
+ * <pre>
+ * String document = "This text is in English";
+ * String countryHint = "US";
+ * textAnalyticsAsyncClient.detectLanguage(document, countryHint).subscribe(detectedLanguage ->
+ *     System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n",
+ *         detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore()));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguage#string-string -->
 *
 * @param document The document to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
 * data limits.
- * @param countryHint Accepts two letter country codes specified by ISO 3166-1 alpha-2. Defaults to "US" if not
+ * @param countryHint Accepts 2-letter country codes specified by ISO 3166-1 alpha-2. Defaults to "US" if not
 * specified. To remove this behavior you can reset this parameter by setting this value to empty string
 * {@code countryHint} = "" or "none".
 *
@@ -207,7 +229,28 @@ public Mono
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<String> documents = Arrays.asList(
+ *     "This is written in English",
+ *     "Este es un documento escrito en Español."
+ * );
+ * textAnalyticsAsyncClient.detectLanguageBatch(documents, "US", null).subscribe(
+ *     batchResult -> {
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *         // Batch result of languages
+ *         for (DetectLanguageResult detectLanguageResult : batchResult) {
+ *             DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
+ *             System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n",
+ *                 detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
+ *                 detectedLanguage.getConfidenceScore());
+ *         }
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions -->
 *
 * @param documents The list of documents to detect languages for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -226,7 +269,7 @@ public Mono
 * <p>Detects language in a batch of {@link DetectLanguageInput document} with provided request options. Subscribes
 * to the call asynchronously and prints out the detected language details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<DetectLanguageInput> detectLanguageInputs1 = Arrays.asList(
+ *     new DetectLanguageInput("1", "This is written in English.", "US"),
+ *     new DetectLanguageInput("2", "Este es un documento escrito en Español.", "ES")
+ * );
+ *
+ * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+ *
+ * textAnalyticsAsyncClient.detectLanguageBatchWithResponse(detectLanguageInputs1, requestOptions)
+ *     .subscribe(response -> {
+ *         // Response's status code
+ *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ *
+ *         DetectLanguageResultCollection resultCollection = response.getValue();
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *         // Batch result of languages
+ *         for (DetectLanguageResult detectLanguageResult : resultCollection) {
+ *             DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
+ *             System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n",
+ *                 detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
+ *                 detectedLanguage.getConfidenceScore());
+ *         }
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions -->
 *
 * @param documents The list of {@link DetectLanguageInput documents} to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -280,7 +351,17 @@ public Mono
 * <p>Recognize entities in a document. Subscribes to the call asynchronously and prints out the recognized entity
 * details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeEntities#string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeEntities#string -->
+ * <pre>
+ * String document = "Satya Nadella is the CEO of Microsoft";
+ * textAnalyticsAsyncClient.recognizeEntities(document)
+ *     .subscribe(entityCollection -> entityCollection.forEach(entity ->
+ *         System.out.printf("Recognized categorized entity: %s, category: %s, confidence score: %f.%n",
+ *             entity.getText(),
+ *             entity.getCategory(),
+ *             entity.getConfidenceScore())));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeEntities#string -->
 *
 * @param document The document to recognize entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -306,7 +387,17 @@ public Mono
 * <p>Recognize entities in a document with provided language code. Subscribes to the call asynchronously and prints
 * out the entity details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeEntities#string-string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeEntities#string-string -->
+ * <pre>
+ * String document = "Satya Nadella is the CEO of Microsoft";
+ * textAnalyticsAsyncClient.recognizeEntities(document, "en")
+ *     .subscribe(entityCollection -> entityCollection.forEach(entity ->
+ *         System.out.printf("Recognized categorized entity: %s, category: %s, confidence score: %f.%n",
+ *             entity.getText(),
+ *             entity.getCategory(),
+ *             entity.getConfidenceScore())));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeEntities#string-string -->
 *
 * @param document the text to recognize entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -332,7 +423,25 @@ public Mono
 * <p>Recognize entities in a list of documents with the provided language code. Subscribes to the call asynchronously and
 * prints out the entity details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<String> documents = Arrays.asList(
+ *     "I had a wonderful trip to Seattle last week.", "I work at Microsoft.");
+ *
+ * textAnalyticsAsyncClient.recognizeEntitiesBatch(documents, "en", null)
+ *     .subscribe(batchResult -> {
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *         // Batch Result of entities
+ *         batchResult.forEach(recognizeEntitiesResult ->
+ *             recognizeEntitiesResult.getEntities().forEach(entity -> System.out.printf(
+ *                 "Recognized categorized entity: %s, category: %s, confidence score: %f.%n",
+ *                 entity.getText(), entity.getCategory(), entity.getConfidenceScore())));
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
 *
 * @param documents A list of documents to recognize entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -369,7 +478,34 @@ public Mono
 * <p>Recognize entities in a list of {@link TextDocumentInput document}. Subscribes to the call asynchronously
 * and prints out the entity details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+ *     new TextDocumentInput("0", "I had a wonderful trip to Seattle last week.").setLanguage("en"),
+ *     new TextDocumentInput("1", "I work at Microsoft.").setLanguage("en"));
+ *
+ * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+ *
+ * textAnalyticsAsyncClient.recognizeEntitiesBatchWithResponse(textDocumentInputs1, requestOptions)
+ *     .subscribe(response -> {
+ *         // Response's status code
+ *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ *         RecognizeEntitiesResultCollection resultCollection = response.getValue();
+ *
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ *         resultCollection.forEach(recognizeEntitiesResult ->
+ *             recognizeEntitiesResult.getEntities().forEach(entity -> System.out.printf(
+ *                 "Recognized categorized entity: %s, category: %s, confidence score: %f.%n",
+ *                 entity.getText(),
+ *                 entity.getCategory(),
+ *                 entity.getConfidenceScore())));
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-TextAnalyticsRequestOptions -->
 *
 * @param documents A list of {@link TextDocumentInput documents} to recognize entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -402,7 +538,18 @@ public Mono
+ * String document = "My SSN is 859-98-0987"; + * textAnalyticsAsyncClient.recognizePiiEntities(document).subscribe(piiEntityCollection -> { + * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); + * piiEntityCollection.forEach(entity -> System.out.printf( + * "Recognized Personally Identifiable Information entity: %s, entity category: %s," + * + " entity subcategory: %s, confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + * }); + *+ * * * @param document The document to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see @@ -429,7 +576,19 @@ public Mono
 * <p>Recognize the PII entities details in a document with provided language code.
 * Subscribes to the call asynchronously and prints out the entity details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntities#string-string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntities#string-string -->
+ * <pre>
+ * String document = "My SSN is 859-98-0987";
+ * textAnalyticsAsyncClient.recognizePiiEntities(document, "en")
+ *     .subscribe(piiEntityCollection -> {
+ *         System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+ *         piiEntityCollection.forEach(entity -> System.out.printf(
+ *             "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+ *                 + " entity subcategory: %s, confidence score: %f.%n",
+ *             entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntities#string-string -->
 *
 * @param document the text to recognize PII entities details for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -458,12 +617,25 @@ public Mono
+ * String document = "My SSN is 859-98-0987"; + * textAnalyticsAsyncClient.recognizePiiEntities(document, "en", + * new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)) + * .subscribe(piiEntityCollection -> { + * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); + * piiEntityCollection.forEach(entity -> System.out.printf( + * "Recognized Personally Identifiable Information entity: %s, entity category: %s," + * + " entity subcategory: %s, confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + * }); + *+ * * * @param document the text to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see * data limits. - * @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as default. + * @param language The 2-letter ISO 639-1 representation of language. If not set, uses "en" for English as default. * @param options The additional configurable {@link RecognizePiiEntitiesOptions options} that may be passed when * recognizing PII entities. * @@ -486,7 +658,35 @@ public Mono
 * <p>Recognize Personally Identifiable Information entities in a document with the provided language code.
 * Subscribes to the call asynchronously and prints out the entity details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntitiesBatch#Iterable-String-RecognizePiiEntitiesOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntitiesBatch#Iterable-String-RecognizePiiEntitiesOptions -->
+ * <pre>
+ * List<String> documents = Arrays.asList(
+ *     "My SSN is 859-98-0987.",
+ *     "Visa card 0111 1111 1111 1111."
+ * );
+ *
+ * // Show statistics and model version
+ * RecognizePiiEntitiesOptions requestOptions = new RecognizePiiEntitiesOptions().setIncludeStatistics(true)
+ *     .setModelVersion("latest");
+ *
+ * textAnalyticsAsyncClient.recognizePiiEntitiesBatch(documents, "en", requestOptions)
+ *     .subscribe(piiEntitiesResults -> {
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = piiEntitiesResults.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ *         piiEntitiesResults.forEach(recognizePiiEntitiesResult -> {
+ *             PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities();
+ *             System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+ *             piiEntityCollection.forEach(entity -> System.out.printf(
+ *                 "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+ *                     + " entity subcategory: %s, confidence score: %f.%n",
+ *                 entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+ *         });
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntitiesBatch#Iterable-String-RecognizePiiEntitiesOptions -->
 *
 * @param documents A list of documents to recognize PII entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -525,7 +725,35 @@ public Mono
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntitiesBatch#Iterable-RecognizePiiEntitiesOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntitiesBatch#Iterable-RecognizePiiEntitiesOptions -->
+ * <pre>
+ * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+ *     new TextDocumentInput("0", "My SSN is 859-98-0987."),
+ *     new TextDocumentInput("1", "Visa card 0111 1111 1111 1111."));
+ *
+ * // Show statistics and model version
+ * RecognizePiiEntitiesOptions requestOptions = new RecognizePiiEntitiesOptions().setIncludeStatistics(true)
+ *     .setModelVersion("latest");
+ *
+ * textAnalyticsAsyncClient.recognizePiiEntitiesBatchWithResponse(textDocumentInputs1, requestOptions)
+ *     .subscribe(response -> {
+ *         RecognizePiiEntitiesResultCollection piiEntitiesResults = response.getValue();
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = piiEntitiesResults.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ *         piiEntitiesResults.forEach(recognizePiiEntitiesResult -> {
+ *             PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities();
+ *             System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
+ *             piiEntityCollection.forEach(entity -> System.out.printf(
+ *                 "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
+ *                     + " entity subcategory: %s, confidence score: %f.%n",
+ *                 entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
+ *         });
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizePiiEntitiesBatch#Iterable-RecognizePiiEntitiesOptions -->
 *
 * @param documents A list of {@link TextDocumentInput documents} to recognize PII entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -557,7 +785,21 @@ public Mono
 * <p>Recognize linked entities in a document. Subscribes to the call asynchronously and prints out the
 * entity details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntities#string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntities#string -->
+ * <pre>
+ * String document = "Old Faithful is a geyser at Yellowstone Park.";
+ * textAnalyticsAsyncClient.recognizeLinkedEntities(document).subscribe(
+ *     linkedEntityCollection -> linkedEntityCollection.forEach(linkedEntity -> {
+ *         System.out.println("Linked Entities:");
+ *         System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+ *             linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+ *             linkedEntity.getDataSource());
+ *         linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+ *             "Matched entity: %s, confidence score: %f.%n",
+ *             entityMatch.getText(), entityMatch.getConfidenceScore()));
+ *     }));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntities#string -->
 *
 * @param document The document to recognize linked entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -580,7 +822,21 @@ public Mono
 * <p>Recognize linked entities in a text with provided language code. Subscribes to the call asynchronously
 * and prints out the entity details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntities#string-string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntities#string-string -->
+ * <pre>
+ * String document = "Old Faithful is a geyser at Yellowstone Park.";
+ * textAnalyticsAsyncClient.recognizeLinkedEntities(document, "en").subscribe(
+ *     linkedEntityCollection -> linkedEntityCollection.forEach(linkedEntity -> {
+ *         System.out.println("Linked Entities:");
+ *         System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+ *             linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+ *             linkedEntity.getDataSource());
+ *         linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+ *             "Matched entity: %s, confidence score: %f.%n",
+ *             entityMatch.getText(), entityMatch.getConfidenceScore()));
+ *     }));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntities#string-string -->
 *
 * @param document The document to recognize linked entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -607,7 +863,33 @@ public Mono
 * <p>Recognize linked entities in a list of documents with provided language code. Subscribes to the call
 * asynchronously and prints out the entity details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<String> documents = Arrays.asList(
+ *     "Old Faithful is a geyser at Yellowstone Park.",
+ *     "Mount Shasta has lenticular clouds."
+ * );
+ *
+ * textAnalyticsAsyncClient.recognizeLinkedEntitiesBatch(documents, "en", null)
+ *     .subscribe(batchResult -> {
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ *         batchResult.forEach(recognizeLinkedEntitiesResult ->
+ *             recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> {
+ *                 System.out.println("Linked Entities:");
+ *                 System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+ *                     linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+ *                     linkedEntity.getDataSource());
+ *                 linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+ *                     "Matched entity: %s, confidence score: %f.%n",
+ *                     entityMatch.getText(), entityMatch.getConfidenceScore()));
+ *             }));
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
 *
 * @param documents A list of documents to recognize linked entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -645,7 +927,38 @@ public Mono
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+ *     new TextDocumentInput("0", "Old Faithful is a geyser at Yellowstone Park.").setLanguage("en"),
+ *     new TextDocumentInput("1", "Mount Shasta has lenticular clouds.").setLanguage("en"));
+ *
+ * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+ *
+ * textAnalyticsAsyncClient.recognizeLinkedEntitiesBatchWithResponse(textDocumentInputs1, requestOptions)
+ *     .subscribe(response -> {
+ *         // Response's status code
+ *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ *         RecognizeLinkedEntitiesResultCollection resultCollection = response.getValue();
+ *
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ *         resultCollection.forEach(recognizeLinkedEntitiesResult ->
+ *             recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> {
+ *                 System.out.println("Linked Entities:");
+ *                 System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
+ *                     linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
+ *                     linkedEntity.getDataSource());
+ *                 linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
+ *                     "Matched entity: %s, confidence score: %.2f.%n",
+ *                     entityMatch.getText(), entityMatch.getConfidenceScore()));
+ *             }));
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions -->
 *
 * @param documents A list of {@link TextDocumentInput documents} to recognize linked entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -676,7 +989,13 @@ public Mono
 * <p>Extract key phrases in a document. Subscribes to the call asynchronously and prints out the
 * key phrases when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrases#string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrases#string -->
+ * <pre>
+ * System.out.println("Extracted phrases:");
+ * textAnalyticsAsyncClient.extractKeyPhrases("Bonjour tout le monde").subscribe(keyPhrase ->
+ *     System.out.printf("%s.%n", keyPhrase));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrases#string -->
 *
 * @param document The document to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -700,7 +1019,13 @@ public Mono
 * <p>Extract key phrases in a document with a provided language code. Subscribes to the call asynchronously and
 * prints out the key phrases when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrases#string-string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrases#string-string -->
+ * <pre>
+ * System.out.println("Extracted phrases:");
+ * textAnalyticsAsyncClient.extractKeyPhrases("Bonjour tout le monde", "fr")
+ *     .subscribe(keyPhrase -> System.out.printf("%s.%n", keyPhrase));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrases#string-string -->
 *
 * @param document The document to be analyzed. For text length limits, maximum batch size, and supported text
 * encoding, see
@@ -727,7 +1052,26 @@ public Mono
 * <p>Extract key phrases in a list of documents with a provided language and request options. Subscribes to the
 * call asynchronously and prints out the key phrases when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-String-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-String-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<String> documents = Arrays.asList(
+ *     "Hello world. This is some input text that I love.",
+ *     "Bonjour tout le monde");
+ *
+ * textAnalyticsAsyncClient.extractKeyPhrasesBatch(documents, "en", null).subscribe(
+ *     extractKeyPhraseResults -> {
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = extractKeyPhraseResults.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ *         extractKeyPhraseResults.forEach(extractKeyPhraseResult -> {
+ *             System.out.println("Extracted phrases:");
+ *             extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase));
+ *         });
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-String-TextAnalyticsRequestOptions -->
 *
 * @param documents A list of documents to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -764,7 +1108,34 @@ public Mono
 * <p>Extract key phrases in a list of {@link TextDocumentInput document} with provided request options.
 * Subscribes to the call asynchronously and prints out the key phrases when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+ *     new TextDocumentInput("0", "I had a wonderful trip to Seattle last week.").setLanguage("en"),
+ *     new TextDocumentInput("1", "I work at Microsoft.").setLanguage("en"));
+ *
+ * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+ *
+ * textAnalyticsAsyncClient.extractKeyPhrasesBatchWithResponse(textDocumentInputs1, requestOptions)
+ *     .subscribe(response -> {
+ *         // Response's status code
+ *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ *         ExtractKeyPhrasesResultCollection resultCollection = response.getValue();
+ *
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ *         for (ExtractKeyPhraseResult extractKeyPhraseResult : resultCollection) {
+ *             System.out.println("Extracted phrases:");
+ *             for (String keyPhrase : extractKeyPhraseResult.getKeyPhrases()) {
+ *                 System.out.printf("%s.%n", keyPhrase);
+ *             }
+ *         }
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions -->
 *
 * @param documents A list of {@link TextDocumentInput documents} to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -796,7 +1167,24 public Mono
 * <p>Analyze the sentiment in a document. Subscribes to the call asynchronously and prints out the
 * sentiment details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#string}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#string -->
+ * <pre>
+ * String document = "The hotel was dark and unclean.";
+ * textAnalyticsAsyncClient.analyzeSentiment(document).subscribe(documentSentiment -> {
+ *     System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment());
+ *
+ *     for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
+ *         System.out.printf(
+ *             "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, "
+ *                 + "negative score: %.2f.%n",
+ *             sentenceSentiment.getSentiment(),
+ *             sentenceSentiment.getConfidenceScores().getPositive(),
+ *             sentenceSentiment.getConfidenceScores().getNeutral(),
+ *             sentenceSentiment.getConfidenceScores().getNegative());
+ *     }
+ * });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#string -->
 *
 * @param document The document to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -820,7 +1208,23 @@ public Mono
 * <p>Analyze the sentiments in a document with a provided language representation. Subscribes to the call
 * asynchronously and prints out the sentiment details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#String-String}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#String-String -->
+ * <pre>
+ * String document = "The hotel was dark and unclean.";
+ * textAnalyticsAsyncClient.analyzeSentiment(document, "en")
+ *     .subscribe(documentSentiment -> {
+ *         System.out.printf("Recognized sentiment label: %s.%n", documentSentiment.getSentiment());
+ *         for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
+ *             System.out.printf("Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, "
+ *                     + "negative score: %.2f.%n",
+ *                 sentenceSentiment.getSentiment(),
+ *                 sentenceSentiment.getConfidenceScores().getPositive(),
+ *                 sentenceSentiment.getConfidenceScores().getNeutral(),
+ *                 sentenceSentiment.getConfidenceScores().getNegative());
+ *         }
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#String-String -->
 *
 * @param document The document to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -850,7 +1254,27 @@ public Mono
+ * textAnalyticsAsyncClient.analyzeSentiment("The hotel was dark and unclean.", "en", + * new AnalyzeSentimentOptions().setIncludeOpinionMining(true)) + * .subscribe(documentSentiment -> { + * for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) { + * System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment()); + * sentenceSentiment.getOpinions().forEach(opinion -> { + * TargetSentiment targetSentiment = opinion.getTarget(); + * System.out.printf("\tTarget sentiment: %s, target text: %s%n", + * targetSentiment.getSentiment(), targetSentiment.getText()); + * for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) { + * System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n", + * assessmentSentiment.getSentiment(), assessmentSentiment.getText(), + * assessmentSentiment.isNegated()); + * } + * }); + * } + * }); + *+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -894,7 +1318,36 @@ public Mono
 * <p>Analyze sentiment in a list of documents with provided language code and request options. Subscribes to the
 * call asynchronously and prints out the sentiment details when a response is received.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-String-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-String-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<String> documents = Arrays.asList(
+ *     "The hotel was dark and unclean.",
+ *     "The restaurant had amazing gnocchi."
+ * );
+ *
+ * textAnalyticsAsyncClient.analyzeSentimentBatch(documents, "en",
+ *     new TextAnalyticsRequestOptions().setIncludeStatistics(true)).subscribe(
+ *         response -> {
+ *             // Batch statistics
+ *             TextDocumentBatchStatistics batchStatistics = response.getStatistics();
+ *             System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *                 batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ *             response.forEach(analyzeSentimentResult -> {
+ *                 System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+ *                 DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+ *                 System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment());
+ *                 documentSentiment.getSentences().forEach(sentenceSentiment ->
+ *                     System.out.printf("Recognized sentence sentiment: %s, positive score: %.2f, "
+ *                             + "neutral score: %.2f, negative score: %.2f.%n",
+ *                         sentenceSentiment.getSentiment(),
+ *                         sentenceSentiment.getConfidenceScores().getPositive(),
+ *                         sentenceSentiment.getConfidenceScores().getNeutral(),
+ *                         sentenceSentiment.getConfidenceScores().getNegative()));
+ *             });
+ *         });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-String-TextAnalyticsRequestOptions -->
 *
 * @param documents A list of documents to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -932,7 +1385,51 @@ public Mono
+ * <pre>
+ * List<TextDocumentInput> documents = Arrays.asList(
+ *     new TextDocumentInput("0", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en"),
+ *     new TextDocumentInput("1", "My SSN is 859-98-0987").setLanguage("en")
+ * );
+ *
+ * SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
+ *     textAnalyticsClient.beginAnalyzeActions(
+ *         documents,
+ *         new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
+ *             .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
+ *             .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
+ *         new AnalyzeActionsOptions().setIncludeStatistics(false),
+ *         Context.NONE);
+ * syncPoller.waitForCompletion();
+ * AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
+ * result.forEach(analyzeActionsResult -> {
+ *     System.out.println("Entities recognition action results:");
+ *     analyzeActionsResult.getRecognizeEntitiesResults().forEach(
+ *         actionResult -> {
+ *             if (!actionResult.isError()) {
+ *                 actionResult.getDocumentsResults().forEach(
+ *                     entitiesResult -> entitiesResult.getEntities().forEach(
+ *                         entity -> System.out.printf(
+ *                             "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
+ *                                 + " confidence score: %f.%n",
+ *                             entity.getText(), entity.getCategory(), entity.getSubcategory(),
+ *                             entity.getConfidenceScore())));
+ *             }
+ *         });
+ *     System.out.println("Key phrases extraction action results:");
+ *     analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
+ *         actionResult -> {
+ *             if (!actionResult.isError()) {
+ *                 actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
+ *                     System.out.println("Extracted phrases:");
+ *                     extractKeyPhraseResult.getKeyPhrases()
+ *                         .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
+ *                 });
+ *             }
+ *         });
+ * });
+ * </pre>
 *
 * @param documents A list of documents to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -969,7 +1466,41 @@ public Mono
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+ *     new TextDocumentInput("0", "The hotel was dark and unclean.").setLanguage("en"),
+ *     new TextDocumentInput("1", "The restaurant had amazing gnocchi.").setLanguage("en"));
+ *
+ * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
+ *
+ * textAnalyticsAsyncClient.analyzeSentimentBatchWithResponse(textDocumentInputs1, requestOptions)
+ *     .subscribe(response -> {
+ *         // Response's status code
+ *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ *         AnalyzeSentimentResultCollection resultCollection = response.getValue();
+ *
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(),
+ *             batchStatistics.getValidDocumentCount());
+ *
+ *         resultCollection.forEach(analyzeSentimentResult -> {
+ *             System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+ *             DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+ *             System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment());
+ *             documentSentiment.getSentences().forEach(sentenceSentiment ->
+ *                 System.out.printf("Recognized sentence sentiment: %s, positive score: %.2f, "
+ *                         + "neutral score: %.2f, negative score: %.2f.%n",
+ *                     sentenceSentiment.getSentiment(),
+ *                     sentenceSentiment.getConfidenceScores().getPositive(),
+ *                     sentenceSentiment.getConfidenceScores().getNeutral(),
+ *                     sentenceSentiment.getConfidenceScores().getNegative()));
+ *         });
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions -->
 *
 * @param documents A list of {@link TextDocumentInput documents} to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -1005,7 +1536,47 @@ public Mono
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-AnalyzeSentimentOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-AnalyzeSentimentOptions -->
+ * <pre>
+ * List<TextDocumentInput> textDocumentInputs1 = Arrays.asList(
+ *     new TextDocumentInput("0", "The hotel was dark and unclean.").setLanguage("en"),
+ *     new TextDocumentInput("1", "The restaurant had amazing gnocchi.").setLanguage("en"));
+ *
+ * AnalyzeSentimentOptions options = new AnalyzeSentimentOptions()
+ *     .setIncludeOpinionMining(true).setIncludeStatistics(true);
+ * textAnalyticsAsyncClient.analyzeSentimentBatchWithResponse(textDocumentInputs1, options)
+ *     .subscribe(response -> {
+ *         // Response's status code
+ *         System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ *         AnalyzeSentimentResultCollection resultCollection = response.getValue();
+ *
+ *         // Batch statistics
+ *         TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ *         System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n",
+ *             batchStatistics.getTransactionCount(),
+ *             batchStatistics.getValidDocumentCount());
+ *
+ *         resultCollection.forEach(analyzeSentimentResult -> {
+ *             System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+ *             DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+ *             documentSentiment.getSentences().forEach(sentenceSentiment -> {
+ *                 System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
+ *                 sentenceSentiment.getOpinions().forEach(opinion -> {
+ *                     TargetSentiment targetSentiment = opinion.getTarget();
+ *                     System.out.printf("\t\tTarget sentiment: %s, target text: %s%n",
+ *                         targetSentiment.getSentiment(), targetSentiment.getText());
+ *                     for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
+ *                         System.out.printf(
+ *                             "\t\t\t'%s' assessment sentiment because of \"%s\". Is the assessment negated: %s.%n",
+ *                             assessmentSentiment.getSentiment(), assessmentSentiment.getText(),
+ *                             assessmentSentiment.isNegated());
+ *                     }
+ *                 });
+ *             });
+ *         });
+ *     });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-AnalyzeSentimentOptions -->
 *
 * @param documents A list of {@link TextDocumentInput documents} to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -1037,7 +1608,7 @@ public Mono
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeHealthcareEntities#Iterable-AnalyzeHealthcareEntitiesOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeHealthcareEntities#Iterable-AnalyzeHealthcareEntitiesOptions -->
+ * <pre>
+ * List<TextDocumentInput> documents = new ArrayList<>();
+ * for (int i = 0; i < 3; i++) {
+ *     documents.add(new TextDocumentInput(Integer.toString(i),
+ *         "The patient is a 54-year-old gentleman with a history of progressive angina "
+ *             + "over the past several months."));
+ * }
+ *
+ * AnalyzeHealthcareEntitiesOptions options = new AnalyzeHealthcareEntitiesOptions()
+ *     .setIncludeStatistics(true);
+ *
+ * textAnalyticsAsyncClient.beginAnalyzeHealthcareEntities(documents, options)
+ *     .flatMap(pollResult -> {
+ *         AnalyzeHealthcareEntitiesOperationDetail operationResult = pollResult.getValue();
+ *         System.out.printf("Operation created time: %s, expiration time: %s.%n",
+ *             operationResult.getCreatedAt(), operationResult.getExpiresAt());
+ *         return pollResult.getFinalResult();
+ *     })
+ *     .flatMap(analyzeActionsResultPagedFlux -> analyzeActionsResultPagedFlux.byPage())
+ *     .subscribe(
+ *         pagedResponse -> pagedResponse.getElements().forEach(
+ *             analyzeHealthcareEntitiesResultCollection -> {
+ *                 // Model version
+ *                 System.out.printf("Results of Azure Text Analytics \"Analyze Healthcare\" Model, version: %s%n",
+ *                     analyzeHealthcareEntitiesResultCollection.getModelVersion());
+ *
+ *                 TextDocumentBatchStatistics healthcareTaskStatistics =
+ *                     analyzeHealthcareEntitiesResultCollection.getStatistics();
+ *                 // Batch statistics
+ *                 System.out.printf("Documents statistics: document count = %s, erroneous document count = %s,"
+ *                         + " transaction count = %s, valid document count = %s.%n",
+ *                     healthcareTaskStatistics.getDocumentCount(),
+ *                     healthcareTaskStatistics.getInvalidDocumentCount(),
+ *                     healthcareTaskStatistics.getTransactionCount(),
+ *                     healthcareTaskStatistics.getValidDocumentCount());
+ *
+ *                 analyzeHealthcareEntitiesResultCollection.forEach(healthcareEntitiesResult -> {
+ *                     System.out.println("document id = " + healthcareEntitiesResult.getId());
+ *                     System.out.println("Document entities: ");
+ *                     AtomicInteger ct = new AtomicInteger();
+ *                     healthcareEntitiesResult.getEntities().forEach(healthcareEntity -> {
+ *                         System.out.printf(
+ *                             "\ti = %d, Text: %s, category: %s, confidence score: %f.%n",
+ *                             ct.getAndIncrement(), healthcareEntity.getText(), healthcareEntity.getCategory(),
+ *                             healthcareEntity.getConfidenceScore());
+ *
+ *                         IterableStream<EntityDataSource> healthcareEntityDataSources =
+ *                             healthcareEntity.getDataSources();
+ *                         if (healthcareEntityDataSources != null) {
+ *                             healthcareEntityDataSources.forEach(healthcareEntityLink -> System.out.printf(
+ *                                 "\t\tEntity ID in data source: %s, data source: %s.%n",
+ *                                 healthcareEntityLink.getEntityId(), healthcareEntityLink.getName()));
+ *                         }
+ *                     });
+ *                     // Healthcare entity relation groups
+ *                     healthcareEntitiesResult.getEntityRelations().forEach(entityRelation -> {
+ *                         System.out.printf("\tRelation type: %s.%n", entityRelation.getRelationType());
+ *                         entityRelation.getRoles().forEach(role -> {
+ *                             final HealthcareEntity entity = role.getEntity();
+ *                             System.out.printf("\t\tEntity text: %s, category: %s, role: %s.%n",
+ *                                 entity.getText(), entity.getCategory(), role.getName());
+ *                         });
+ *                     });
+ *                 });
+ *             }));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeHealthcareEntities#Iterable-AnalyzeHealthcareEntitiesOptions -->
 *
 * @param documents A list of {@link TextDocumentInput documents} to be analyzed.
 * @param options The additional configurable {@link AnalyzeHealthcareEntitiesOptions options} that may be passed
@@ -1105,7 +1743,48 @@ public Mono
 * <p><strong>Code Sample</strong></p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions -->
+ * <pre>
+ * List<String> documents = Arrays.asList(
+ *     "Elon Musk is the CEO of SpaceX and Tesla.",
+ *     "My SSN is 859-98-0987"
+ * );
+ * textAnalyticsAsyncClient.beginAnalyzeActions(documents,
+ *     new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
+ *         .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
+ *         .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
+ *     "en",
+ *     new AnalyzeActionsOptions().setIncludeStatistics(false))
+ *     .flatMap(AsyncPollResponse::getFinalResult)
+ *     .flatMap(analyzeActionsResultPagedFlux -> analyzeActionsResultPagedFlux.byPage())
+ *     .subscribe(
+ *         pagedResponse -> pagedResponse.getElements().forEach(
+ *             analyzeActionsResult -> {
+ *                 analyzeActionsResult.getRecognizeEntitiesResults().forEach(
+ *                     actionResult -> {
+ *                         if (!actionResult.isError()) {
+ *                             actionResult.getDocumentsResults().forEach(
+ *                                 entitiesResult -> entitiesResult.getEntities().forEach(
+ *                                     entity -> System.out.printf(
+ *                                         "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
+ *                                             + " confidence score: %f.%n",
+ *                                         entity.getText(), entity.getCategory(), entity.getSubcategory(),
+ *                                         entity.getConfidenceScore())));
+ *                         }
+ *                     });
+ *                 analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
+ *                     actionResult -> {
+ *                         if (!actionResult.isError()) {
+ *                             actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
+ *                                 System.out.println("Extracted phrases:");
+ *                                 extractKeyPhraseResult.getKeyPhrases()
+ *                                     .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
+ *                             });
+ *                         }
+ *                     });
+ *             }));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions -->
 *
 * @param documents A list of documents to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -1143,7 +1822,49 @@ public PollerFlux
 * <p><strong>Code Sample</strong></p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-AnalyzeActionsOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-AnalyzeActionsOptions -->
+ * <pre>
+ * List<TextDocumentInput> documents = Arrays.asList(
+ *     new TextDocumentInput("0", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en"),
+ *     new TextDocumentInput("1", "My SSN is 859-98-0987").setLanguage("en")
+ * );
+ * textAnalyticsAsyncClient.beginAnalyzeActions(documents,
+ *     new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
+ *         .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
+ *         .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
+ *     new AnalyzeActionsOptions().setIncludeStatistics(false))
+ *     .flatMap(AsyncPollResponse::getFinalResult)
+ *     .flatMap(analyzeActionsResultPagedFlux -> analyzeActionsResultPagedFlux.byPage())
+ *     .subscribe(
+ *         pagedResponse -> pagedResponse.getElements().forEach(
+ *             analyzeActionsResult -> {
+ *                 System.out.println("Entities recognition action results:");
+ *                 analyzeActionsResult.getRecognizeEntitiesResults().forEach(
+ *                     actionResult -> {
+ *                         if (!actionResult.isError()) {
+ *                             actionResult.getDocumentsResults().forEach(
+ *                                 entitiesResult -> entitiesResult.getEntities().forEach(
+ *                                     entity -> System.out.printf(
+ *                                         "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
+ *                                             + " confidence score: %f.%n",
+ *                                         entity.getText(), entity.getCategory(), entity.getSubcategory(),
+ *                                         entity.getConfidenceScore())));
+ *                         }
+ *                     });
+ *                 System.out.println("Key phrases extraction action results:");
+ *                 analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
+ *                     actionResult -> {
+ *                         if (!actionResult.isError()) {
+ *                             actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
+ *                                 System.out.println("Extracted phrases:");
+ *                                 extractKeyPhraseResult.getKeyPhrases()
+ *                                     .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
+ *                             });
+ *                         }
+ *                     });
+ *             }));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsAsyncClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-AnalyzeActionsOptions -->
 *
 * @param documents A list of {@link TextDocumentInput documents} to be analyzed.
 * @param actions The {@link TextAnalyticsActions actions} that contains all actions to be executed.
diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java
index f7f6a1ceadf7c..0d92134c70e7d 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java
+++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java
@@ -51,8 +51,51 @@
 * key phrases extraction, and sentiment analysis of a document or a list of documents.
 *
 * <p><strong>Instantiating a synchronous Text Analytics Client</strong></p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.instantiation}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.instantiation -->
+ * <pre>
+ * TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder()
+ *     .credential(new AzureKeyCredential("{key}"))
+ *     .endpoint("{endpoint}")
+ *     .buildClient();
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.instantiation -->
 *
 * <p>View {@link TextAnalyticsClientBuilder this} for additional ways to construct the client.</p>
 *
 * @see TextAnalyticsClientBuilder
@@ -99,7 +142,13 @@ public String getDefaultLanguage() {
 *
 * <p><strong>Code Sample</strong></p>
 * <p>Detects the language of a single document.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String -->
+ * <pre>
+ * DetectedLanguage detectedLanguage = textAnalyticsClient.detectLanguage("Bonjour tout le monde");
+ * System.out.printf("Detected language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
+ *     detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore());
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String -->
 *
 * @param document The document to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -120,7 +169,14 @@ public DetectedLanguage detectLanguage(String document) {
 *
 * <p><strong>Code Sample</strong></p>
 * <p>Detects the language of documents with a provided country hint.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String-String}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String-String -->
+ * <pre>
+ * DetectedLanguage detectedLanguage = textAnalyticsClient.detectLanguage(
+ *     "This text is in English", "US");
+ * System.out.printf("Detected language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
+ *     detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore());
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String-String -->
 *
 * @param document The document to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -143,7 +199,31 @@ public DetectedLanguage detectLanguage(String document, String countryHint) {
 *
 * <p><strong>Code Sample</strong></p>
 * <p>Detects the language in a list of documents with a provided country hint and request options.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<String> documents = Arrays.asList(
+ *     "This is written in English",
+ *     "Este es un documento escrito en Español."
+ * );
+ *
+ * DetectLanguageResultCollection resultCollection =
+ *     textAnalyticsClient.detectLanguageBatch(documents, "US", null);
+ *
+ * // Batch statistics
+ * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+ *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ * // Batch result of languages
+ * resultCollection.forEach(detectLanguageResult -> {
+ *     System.out.printf("Document ID: %s%n", detectLanguageResult.getId());
+ *     DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
+ *     System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
+ *         detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
+ *         detectedLanguage.getConfidenceScore());
+ * });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions -->
 *
 * @param documents The list of documents to detect languages for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -172,7 +252,39 @@ public DetectLanguageResultCollection detectLanguageBatch(
 *
 * <p><strong>Code Sample</strong></p>
 * <p>Detects the languages with http response in a list of {@link DetectLanguageInput document} with provided
 * request options.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions-Context}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions-Context -->
+ * <pre>
+ * List<DetectLanguageInput> detectLanguageInputs = Arrays.asList(
+ *     new DetectLanguageInput("1", "This is written in English.", "US"),
+ *     new DetectLanguageInput("2", "Este es un documento escrito en Español.", "es")
+ * );
+ *
+ * Response<DetectLanguageResultCollection> response =
+ *     textAnalyticsClient.detectLanguageBatchWithResponse(detectLanguageInputs,
+ *         new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
+ *
+ * // Response's status code
+ * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ * DetectLanguageResultCollection detectedLanguageResultCollection = response.getValue();
+ *
+ * // Batch statistics
+ * TextDocumentBatchStatistics batchStatistics = detectedLanguageResultCollection.getStatistics();
+ * System.out.printf(
+ *     "Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s,"
+ *         + " valid document count = %s.%n",
+ *     batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(),
+ *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ * // Batch result of languages
+ * detectedLanguageResultCollection.forEach(detectLanguageResult -> {
+ *     System.out.printf("Document ID: %s%n", detectLanguageResult.getId());
+ *     DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
+ *     System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
+ *         detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
+ *         detectedLanguage.getConfidenceScore());
+ * });
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions-Context -->
 *
 * @param documents The list of {@link DetectLanguageInput documents} to be analyzed.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -205,7 +317,16 @@ public Response
 * <p><strong>Code Sample</strong></p>
 * <p>Recognize the entities of documents</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String -->
+ * <pre>
+ * final CategorizedEntityCollection recognizeEntitiesResult =
+ *     textAnalyticsClient.recognizeEntities("Satya Nadella is the CEO of Microsoft");
+ * for (CategorizedEntity entity : recognizeEntitiesResult) {
+ *     System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
+ *         entity.getText(), entity.getCategory(), entity.getConfidenceScore());
+ * }
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String -->
 *
 * @param document The document to recognize entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -230,7 +351,17 @@ public CategorizedEntityCollection recognizeEntities(String document) {
 *
 * <p><strong>Code Sample</strong></p>
 * <p>Recognizes the entities in a document with a provided language code.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String-String}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String-String -->
+ * <pre>
+ * final CategorizedEntityCollection recognizeEntitiesResult =
+ *     textAnalyticsClient.recognizeEntities("Satya Nadella is the CEO of Microsoft", "en");
+ *
+ * for (CategorizedEntity entity : recognizeEntitiesResult) {
+ *     System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
+ *         entity.getText(), entity.getCategory(), entity.getConfidenceScore());
+ * }
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String-String -->
 *
 * @param document The document to recognize entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -254,7 +385,27 @@ public CategorizedEntityCollection recognizeEntities(String document, String lan
 *
 * <p><strong>Code Sample</strong></p>
 * <p>Recognizes the entities in a list of documents with a provided language code and request options.</p>
 *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions}
+ * <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
+ * <pre>
+ * List<String> documents = Arrays.asList(
+ *     "I had a wonderful trip to Seattle last week.",
+ *     "I work at Microsoft.");
+ *
+ * RecognizeEntitiesResultCollection resultCollection =
+ *     textAnalyticsClient.recognizeEntitiesBatch(documents, "en", null);
+ *
+ * // Batch statistics
+ * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ * System.out.printf(
+ *     "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+ *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ * resultCollection.forEach(recognizeEntitiesResult ->
+ *     recognizeEntitiesResult.getEntities().forEach(entity ->
+ *         System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
+ *             entity.getText(), entity.getCategory(), entity.getConfidenceScore())));
+ * </pre>
+ * <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
 *
 * @param documents A list of documents to recognize entities for.
 * For text length limits, maximum batch size, and supported text encoding, see
@@ -282,7 +433,33 @@ public RecognizeEntitiesResultCollection recognizeEntitiesBatch(
 *
Code Sample
*Recognizes the entities with http response in a list of {@link TextDocumentInput documents} with provided * request options.
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context} + * + *+ * List<TextDocumentInput> textDocumentInputs = Arrays.asList( + * new TextDocumentInput("0", "I had a wonderful trip to Seattle last week.").setLanguage("en"), + * new TextDocumentInput("1", "I work at Microsoft.").setLanguage("en") + * ); + * + * Response<RecognizeEntitiesResultCollection> response = + * textAnalyticsClient.recognizeEntitiesBatchWithResponse(textDocumentInputs, + * new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE); + * + * // Response's status code + * System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + * RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = response.getValue(); + * + * // Batch statistics + * TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResultCollection.getStatistics(); + * System.out.printf( + * "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + * batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + * + * recognizeEntitiesResultCollection.forEach(recognizeEntitiesResult -> + * recognizeEntitiesResult.getEntities().forEach(entity -> + * System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getConfidenceScore()))); + *+ * * * @param documents A list of {@link TextDocumentInput documents} to recognize entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -315,7 +492,18 @@ public Response
Code Sample
*Recognize the PII entities details in a document.
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String} + * + *+ * PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities("My SSN is 859-98-0987"); + * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); + * for (PiiEntity entity : piiEntityCollection) { + * System.out.printf( + * "Recognized Personally Identifiable Information entity: %s, entity category: %s," + * + " entity subcategory: %s, confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()); + * } + *+ * * * @param document The document to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see @@ -341,7 +529,17 @@ public PiiEntityCollection recognizePiiEntities(String document) { *
Code Sample
*Recognizes the PII entities details in a document with a provided language code.
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String-String} + * + *+ * PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities( + * "My SSN is 859-98-0987", "en"); + * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); + * piiEntityCollection.forEach(entity -> System.out.printf( + * "Recognized Personally Identifiable Information entity: %s, entity category: %s," + * + " entity subcategory: %s, confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + *+ * * * @param document The document to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see @@ -369,7 +567,18 @@ public PiiEntityCollection recognizePiiEntities(String document, String language *
Recognizes the PII entities details in a document with a provided language code and * {@link RecognizePiiEntitiesOptions}.
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String-String-RecognizePiiEntitiesOptions} + * + *+ * PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities( + * "My SSN is 859-98-0987", "en", + * new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)); + * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); + * piiEntityCollection.forEach(entity -> System.out.printf( + * "Recognized Personally Identifiable Information entity: %s, entity category: %s," + * + " entity subcategory: %s, confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + *+ * * * @param document The document to recognize PII entities details for. * For text length limits, maximum batch size, and supported text encoding, see @@ -397,7 +606,31 @@ public PiiEntityCollection recognizePiiEntities(String document, String language *
Recognizes the PII entities details in a list of documents with a provided language code * and request options.
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntitiesBatch#Iterable-String-RecognizePiiEntitiesOptions} + * + *+ * List<String> documents = Arrays.asList( + * "My SSN is 859-98-0987", + * "Visa card 4111 1111 1111 1111" + * ); + * + * RecognizePiiEntitiesResultCollection resultCollection = textAnalyticsClient.recognizePiiEntitiesBatch( + * documents, "en", new RecognizePiiEntitiesOptions().setIncludeStatistics(true)); + * + * // Batch statistics + * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + * batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + * + * resultCollection.forEach(recognizePiiEntitiesResult -> { + * PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities(); + * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); + * piiEntityCollection.forEach(entity -> System.out.printf( + * "Recognized Personally Identifiable Information entity: %s, entity category: %s," + * + " entity subcategory: %s, confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + * }); + *+ * * * @param documents A list of documents to recognize PII entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -425,7 +658,34 @@ public RecognizePiiEntitiesResultCollection recognizePiiEntitiesBatch( *
Recognizes the PII entities details with http response in a list of {@link TextDocumentInput documents} * with provided request options.
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntitiesBatch#Iterable-RecognizePiiEntitiesOptions-Context} + * + *+ * List<TextDocumentInput> textDocumentInputs = Arrays.asList( + * new TextDocumentInput("0", "My SSN is 859-98-0987"), + * new TextDocumentInput("1", "Visa card 4111 1111 1111 1111") + * ); + * + * Response<RecognizePiiEntitiesResultCollection> response = + * textAnalyticsClient.recognizePiiEntitiesBatchWithResponse(textDocumentInputs, + * new RecognizePiiEntitiesOptions().setIncludeStatistics(true), Context.NONE); + * + * RecognizePiiEntitiesResultCollection resultCollection = response.getValue(); + * + * // Batch statistics + * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + * batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + * + * resultCollection.forEach(recognizePiiEntitiesResult -> { + * PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities(); + * System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText()); + * piiEntityCollection.forEach(entity -> System.out.printf( + * "Recognized Personally Identifiable Information entity: %s, entity category: %s," + * + " entity subcategory: %s, confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + * }); + *+ * * * @param documents A list of {@link TextDocumentInput documents} to recognize PII entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -457,7 +717,20 @@ public Response
Code Sample
*Recognize the linked entities in a document
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String} + * + *+ * final String document = "Old Faithful is a geyser at Yellowstone Park."; + * System.out.println("Linked Entities:"); + * textAnalyticsClient.recognizeLinkedEntities(document).forEach(linkedEntity -> { + * System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", + * linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), + * linkedEntity.getDataSource()); + * linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( + * "Matched entity: %s, confidence score: %f.%n", + * entityMatch.getText(), entityMatch.getConfidenceScore())); + * }); + *+ * * * @param document The document to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -481,7 +754,19 @@ public LinkedEntityCollection recognizeLinkedEntities(String document) { * *
Code Sample
*Recognizes the linked entities in a document with a provided language code.
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String-String} + * + *+ * String document = "Old Faithful is a geyser at Yellowstone Park."; + * textAnalyticsClient.recognizeLinkedEntities(document, "en").forEach(linkedEntity -> { + * System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", + * linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), + * linkedEntity.getDataSource()); + * linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( + * "Matched entity: %s, confidence score: %f.%n", + * entityMatch.getText(), entityMatch.getConfidenceScore())); + * }); + *+ * * * @param document The document to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -509,7 +794,33 @@ public LinkedEntityCollection recognizeLinkedEntities(String document, String la *
Code Sample
*Recognizes the linked entities in a list of documents with a provided language code and request options. *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *+ * List<String> documents = Arrays.asList( + * "Old Faithful is a geyser at Yellowstone Park.", + * "Mount Shasta has lenticular clouds." + * ); + * + * RecognizeLinkedEntitiesResultCollection resultCollection = + * textAnalyticsClient.recognizeLinkedEntitiesBatch(documents, "en", null); + * + * // Batch statistics + * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + * batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + * + * resultCollection.forEach(recognizeLinkedEntitiesResult -> + * recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { + * System.out.println("Linked Entities:"); + * System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", + * linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), + * linkedEntity.getDataSource()); + * linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( + * "Matched entity: %s, confidence score: %f.%n", + * entityMatch.getText(), entityMatch.getConfidenceScore())); + * })); + *+ * * * @param documents A list of documents to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -540,7 +851,39 @@ public RecognizeLinkedEntitiesResultCollection recognizeLinkedEntitiesBatch( *
Code Sample
*Recognizes the linked entities with http response in a list of {@link TextDocumentInput} with request options. *
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context} + * + *+ * List<TextDocumentInput> textDocumentInputs = Arrays.asList( + * new TextDocumentInput("1", "Old Faithful is a geyser at Yellowstone Park.").setLanguage("en"), + * new TextDocumentInput("2", "Mount Shasta has lenticular clouds.").setLanguage("en") + * ); + * + * Response<RecognizeLinkedEntitiesResultCollection> response = + * textAnalyticsClient.recognizeLinkedEntitiesBatchWithResponse(textDocumentInputs, + * new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE); + * + * // Response's status code + * System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + * RecognizeLinkedEntitiesResultCollection resultCollection = response.getValue(); + * + * // Batch statistics + * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + * System.out.printf( + * "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + * batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + * + * resultCollection.forEach(recognizeLinkedEntitiesResult -> + * recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { + * System.out.println("Linked Entities:"); + * System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", + * linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), + * linkedEntity.getDataSource()); + * linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( + * "Matched entity: %s, confidence score: %.2f.%n", + * entityMatch.getText(), entityMatch.getConfidenceScore())); + * })); + *+ * * * @param documents A list of {@link TextDocumentInput documents} to recognize linked entities for. * For text length limits, maximum batch size, and supported text encoding, see @@ -572,7 +915,14 @@ public Response
Code Sample
*Extracts key phrases in a document
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrases#String} + * + *+ * System.out.println("Extracted phrases:"); + * for (String keyPhrase : textAnalyticsClient.extractKeyPhrases("My cat might need to see a veterinarian.")) { + * System.out.printf("%s.%n", keyPhrase); + * } + *+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -594,7 +944,13 @@ public KeyPhrasesCollection extractKeyPhrases(String document) { * *
Code Sample
*Extracts key phrases in a document with a provided language representation.
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrases#String-String-Context} + * + *
+ * System.out.println("Extracted phrases:");
+ * textAnalyticsClient.extractKeyPhrases("My cat might need to see a veterinarian.", "en")
+ *     .forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase));
+ *
+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -621,7 +977,32 @@ public KeyPhrasesCollection extractKeyPhrases(String document, String language) * *
Code Sample
*Extracts key phrases in a list of documents with a provided language code and request options.
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+ * List&lt;String&gt; documents = Arrays.asList(
+ *     "My cat might need to see a veterinarian.",
+ *     "The pitot tube is used to measure airspeed."
+ * );
+ *
+ * // Extracting batch key phrases
+ * ExtractKeyPhrasesResultCollection resultCollection =
+ *     textAnalyticsClient.extractKeyPhrasesBatch(documents, "en", null);
+ *
+ * // Batch statistics
+ * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ * System.out.printf(
+ *     "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+ *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ * // Extracted key phrases for each document in the batch
+ * resultCollection.forEach(extractKeyPhraseResult -> {
+ *     System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId());
+ *     // Valid document
+ *     System.out.println("Extracted phrases:");
+ *     extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase));
+ * });
+ *
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -651,7 +1032,39 @@ public ExtractKeyPhrasesResultCollection extractKeyPhrasesBatch( * *
Code Sample
*Extracts key phrases with http response in a list of {@link TextDocumentInput} with request options.
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions-Context} + * + *
+ * List&lt;TextDocumentInput&gt; textDocumentInputs = Arrays.asList(
+ *     new TextDocumentInput("1", "My cat might need to see a veterinarian.").setLanguage("en"),
+ *     new TextDocumentInput("2", "The pitot tube is used to measure airspeed.").setLanguage("en")
+ * );
+ *
+ * // Extracting batch key phrases
+ * Response&lt;ExtractKeyPhrasesResultCollection&gt; response =
+ *     textAnalyticsClient.extractKeyPhrasesBatchWithResponse(textDocumentInputs,
+ *         new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
+ *
+ * // Response's status code
+ * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ * ExtractKeyPhrasesResultCollection resultCollection = response.getValue();
+ *
+ * // Batch statistics
+ * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ * System.out.printf(
+ *     "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+ *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ * // Extracted key phrases for each document in the batch
+ * resultCollection.forEach(extractKeyPhraseResult -> {
+ *     System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId());
+ *     // Valid document
+ *     System.out.println("Extracted phrases:");
+ *     extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase ->
+ *         System.out.printf("%s.%n", keyPhrase));
+ * });
+ *
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -685,7 +1098,28 @@ public Response
Code Sample
*Analyze the sentiments in a document
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String} + * + *+ * final DocumentSentiment documentSentiment = + * textAnalyticsClient.analyzeSentiment("The hotel was dark and unclean."); + * + * System.out.printf( + * "Recognized sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n", + * documentSentiment.getSentiment(), + * documentSentiment.getConfidenceScores().getPositive(), + * documentSentiment.getConfidenceScores().getNeutral(), + * documentSentiment.getConfidenceScores().getNegative()); + * + * for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) { + * System.out.printf( + * "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n", + * sentenceSentiment.getSentiment(), + * sentenceSentiment.getConfidenceScores().getPositive(), + * sentenceSentiment.getConfidenceScores().getNeutral(), + * sentenceSentiment.getConfidenceScores().getNegative()); + * } + *+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -708,7 +1142,28 @@ public DocumentSentiment analyzeSentiment(String document) { *
Code Sample
*Analyze the sentiments in a document with a provided language representation.
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String-String} + * + *+ * final DocumentSentiment documentSentiment = textAnalyticsClient.analyzeSentiment( + * "The hotel was dark and unclean.", "en"); + * + * System.out.printf( + * "Recognized sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n", + * documentSentiment.getSentiment(), + * documentSentiment.getConfidenceScores().getPositive(), + * documentSentiment.getConfidenceScores().getNeutral(), + * documentSentiment.getConfidenceScores().getNegative()); + * + * for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) { + * System.out.printf( + * "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n", + * sentenceSentiment.getSentiment(), + * sentenceSentiment.getConfidenceScores().getPositive(), + * sentenceSentiment.getConfidenceScores().getNeutral(), + * sentenceSentiment.getConfidenceScores().getNegative()); + * } + *+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -737,7 +1192,25 @@ public DocumentSentiment analyzeSentiment(String document, String language) { *
Analyze the sentiment and mine the opinions for each sentence in a document with a provided language * representation and {@link AnalyzeSentimentOptions} options.
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String-String-AnalyzeSentimentOptions} + * + *+ * final DocumentSentiment documentSentiment = textAnalyticsClient.analyzeSentiment( + * "The hotel was dark and unclean.", "en", + * new AnalyzeSentimentOptions().setIncludeOpinionMining(true)); + * for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) { + * System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment()); + * sentenceSentiment.getOpinions().forEach(opinion -> { + * TargetSentiment targetSentiment = opinion.getTarget(); + * System.out.printf("\tTarget sentiment: %s, target text: %s%n", targetSentiment.getSentiment(), + * targetSentiment.getText()); + * for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) { + * System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n", + * assessmentSentiment.getSentiment(), assessmentSentiment.getText(), assessmentSentiment.isNegated()); + * } + * }); + * } + *+ * * * @param document The document to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -763,7 +1236,44 @@ public DocumentSentiment analyzeSentiment(String document, String language, Anal * *
Code Sample
*Analyze the sentiments in a list of documents with a provided language representation and request options.
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String-TextAnalyticsRequestOptions} + * + *
+ * List&lt;String&gt; documents = Arrays.asList(
+ *     "The hotel was dark and unclean. The restaurant had amazing gnocchi.",
+ *     "The restaurant had amazing gnocchi. The hotel was dark and unclean."
+ * );
+ *
+ * // Analyzing batch sentiments
+ * AnalyzeSentimentResultCollection resultCollection = textAnalyticsClient.analyzeSentimentBatch(
+ *     documents, "en", new TextAnalyticsRequestOptions().setIncludeStatistics(true));
+ *
+ * // Batch statistics
+ * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+ *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ * // Analyzed sentiment for each document in the batch
+ * resultCollection.forEach(analyzeSentimentResult -> {
+ *     System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+ *     // Valid document
+ *     DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+ *     System.out.printf(
+ *         "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f,"
+ *             + " negative score: %.2f.%n",
+ *         documentSentiment.getSentiment(),
+ *         documentSentiment.getConfidenceScores().getPositive(),
+ *         documentSentiment.getConfidenceScores().getNeutral(),
+ *         documentSentiment.getConfidenceScores().getNegative());
+ *     documentSentiment.getSentences().forEach(sentenceSentiment -> System.out.printf(
+ *         "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f,"
+ *             + " negative score: %.2f.%n",
+ *         sentenceSentiment.getSentiment(),
+ *         sentenceSentiment.getConfidenceScores().getPositive(),
+ *         sentenceSentiment.getConfidenceScores().getNeutral(),
+ *         sentenceSentiment.getConfidenceScores().getNegative()));
+ * });
+ *
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -798,7 +1308,36 @@ public AnalyzeSentimentResultCollection analyzeSentimentBatch( *
Analyze the sentiments and mine the opinions for each sentence in a list of documents with a provided language * representation and {@link AnalyzeSentimentOptions} options.
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String-AnalyzeSentimentOptions} + * + *
+ * List&lt;String&gt; documents = Arrays.asList(
+ *     "The hotel was dark and unclean. The restaurant had amazing gnocchi.",
+ *     "The restaurant had amazing gnocchi. The hotel was dark and unclean."
+ * );
+ *
+ * // Analyzing batch sentiments
+ * AnalyzeSentimentResultCollection resultCollection = textAnalyticsClient.analyzeSentimentBatch(
+ *     documents, "en", new AnalyzeSentimentOptions().setIncludeOpinionMining(true));
+ *
+ * // Analyzed sentiment for each document in the batch
+ * resultCollection.forEach(analyzeSentimentResult -> {
+ *     System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+ *     DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+ *     documentSentiment.getSentences().forEach(sentenceSentiment -> {
+ *         System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
+ *         sentenceSentiment.getOpinions().forEach(opinion -> {
+ *             TargetSentiment targetSentiment = opinion.getTarget();
+ *             System.out.printf("\tTarget sentiment: %s, target text: %s%n", targetSentiment.getSentiment(),
+ *                 targetSentiment.getText());
+ *             for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
+ *                 System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
+ *                     assessmentSentiment.getSentiment(), assessmentSentiment.getText(), assessmentSentiment.isNegated());
+ *             }
+ *         });
+ *     });
+ * });
+ *
+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -826,7 +1365,53 @@ public AnalyzeSentimentResultCollection analyzeSentimentBatch(Iterable
Code Sample
*Analyze sentiment in a list of {@link TextDocumentInput documents} with provided request options.
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions-Context} + * + *
+ * List&lt;TextDocumentInput&gt; textDocumentInputs = Arrays.asList(
+ *     new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.")
+ *         .setLanguage("en"),
+ *     new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.")
+ *         .setLanguage("en")
+ * );
+ *
+ * // Analyzing batch sentiments
+ * Response&lt;AnalyzeSentimentResultCollection&gt; response =
+ *     textAnalyticsClient.analyzeSentimentBatchWithResponse(textDocumentInputs,
+ *         new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
+ *
+ * // Response's status code
+ * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ * AnalyzeSentimentResultCollection resultCollection = response.getValue();
+ *
+ * // Batch statistics
+ * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+ *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ * // Analyzed sentiment for each document in the batch
+ * resultCollection.forEach(analyzeSentimentResult -> {
+ *     System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+ *     // Valid document
+ *     DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+ *     System.out.printf(
+ *         "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f, "
+ *             + "negative score: %.2f.%n",
+ *         documentSentiment.getSentiment(),
+ *         documentSentiment.getConfidenceScores().getPositive(),
+ *         documentSentiment.getConfidenceScores().getNeutral(),
+ *         documentSentiment.getConfidenceScores().getNegative());
+ *     documentSentiment.getSentences().forEach(sentenceSentiment -> {
+ *         System.out.printf(
+ *             "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f,"
+ *                 + " negative score: %.2f.%n",
+ *             sentenceSentiment.getSentiment(),
+ *             sentenceSentiment.getConfidenceScores().getPositive(),
+ *             sentenceSentiment.getConfidenceScores().getNeutral(),
+ *             sentenceSentiment.getConfidenceScores().getNegative());
+ *     });
+ * });
+ *
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -864,7 +1449,51 @@ public Response
Analyze sentiment and mine the opinions for each sentence in a list of * {@link TextDocumentInput documents} with provided {@link AnalyzeSentimentOptions} options.
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-AnalyzeSentimentOptions-Context} + * + *
+ * List&lt;TextDocumentInput&gt; textDocumentInputs = Arrays.asList(
+ *     new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.")
+ *         .setLanguage("en"),
+ *     new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.")
+ *         .setLanguage("en")
+ * );
+ *
+ * AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true)
+ *     .setIncludeStatistics(true);
+ *
+ * // Analyzing batch sentiments
+ * Response&lt;AnalyzeSentimentResultCollection&gt; response =
+ *     textAnalyticsClient.analyzeSentimentBatchWithResponse(textDocumentInputs, options, Context.NONE);
+ *
+ * // Response's status code
+ * System.out.printf("Status code of request response: %d%n", response.getStatusCode());
+ * AnalyzeSentimentResultCollection resultCollection = response.getValue();
+ *
+ * // Batch statistics
+ * TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
+ * System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
+ *     batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
+ *
+ * // Analyzed sentiment for each document in the batch
+ * resultCollection.forEach(analyzeSentimentResult -> {
+ *     System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
+ *     DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
+ *     documentSentiment.getSentences().forEach(sentenceSentiment -> {
+ *         System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
+ *         sentenceSentiment.getOpinions().forEach(opinion -> {
+ *             TargetSentiment targetSentiment = opinion.getTarget();
+ *             System.out.printf("\tTarget sentiment: %s, target text: %s%n", targetSentiment.getSentiment(),
+ *                 targetSentiment.getText());
+ *             for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
+ *                 System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
+ *                     assessmentSentiment.getSentiment(), assessmentSentiment.getText(),
+ *                     assessmentSentiment.isNegated());
+ *             }
+ *         });
+ *     });
+ * });
+ *
+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -897,7 +1526,7 @@ public Response
+ * List<TextDocumentInput> documents = new ArrayList<>(); + * for (int i = 0; i < 3; i++) { + * documents.add(new TextDocumentInput(Integer.toString(i), + * "The patient is a 54-year-old gentleman with a history of progressive angina over " + * + "the past several months.")); + * } + * + * // Request options: show statistics and model version + * AnalyzeHealthcareEntitiesOptions options = new AnalyzeHealthcareEntitiesOptions() + * .setIncludeStatistics(true); + * + * SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable> + * syncPoller = textAnalyticsClient.beginAnalyzeHealthcareEntities(documents, options, Context.NONE); + * + * syncPoller.waitForCompletion(); + * AnalyzeHealthcareEntitiesPagedIterable result = syncPoller.getFinalResult(); + * + * // Task operation statistics + * final AnalyzeHealthcareEntitiesOperationDetail operationResult = syncPoller.poll().getValue(); + * System.out.printf("Operation created time: %s, expiration time: %s.%n", + * operationResult.getCreatedAt(), operationResult.getExpiresAt()); + * + * result.forEach(analyzeHealthcareEntitiesResultCollection -> { + * // Model version + * System.out.printf("Results of Azure Text Analytics \"Analyze Healthcare\" Model, version: %s%n", + * analyzeHealthcareEntitiesResultCollection.getModelVersion()); + * + * TextDocumentBatchStatistics healthcareTaskStatistics = + * analyzeHealthcareEntitiesResultCollection.getStatistics(); + * // Batch statistics + * System.out.printf("Documents statistics: document count = %s, erroneous document count = %s," + * + " transaction count = %s, valid document count = %s.%n", + * healthcareTaskStatistics.getDocumentCount(), healthcareTaskStatistics.getInvalidDocumentCount(), + * healthcareTaskStatistics.getTransactionCount(), healthcareTaskStatistics.getValidDocumentCount()); + * + * analyzeHealthcareEntitiesResultCollection.forEach(healthcareEntitiesResult -> { + * System.out.println("document id = " + healthcareEntitiesResult.getId()); + * System.out.println("Document entities: "); + * AtomicInteger ct = new AtomicInteger(); + * healthcareEntitiesResult.getEntities().forEach(healthcareEntity -> { + * System.out.printf("\ti = %d, Text: %s, category: %s, confidence score: %f.%n", + * ct.getAndIncrement(), healthcareEntity.getText(), healthcareEntity.getCategory(), + * healthcareEntity.getConfidenceScore()); + * + * IterableStream<EntityDataSource> healthcareEntityDataSources = + * healthcareEntity.getDataSources(); + * if (healthcareEntityDataSources != null) { + * healthcareEntityDataSources.forEach(healthcareEntityLink -> System.out.printf( + * "\t\tEntity ID in data source: %s, data source: %s.%n", + * healthcareEntityLink.getEntityId(), healthcareEntityLink.getName())); + * } + * }); + * // Healthcare entity relation groups + * healthcareEntitiesResult.getEntityRelations().forEach(entityRelation -> { + * System.out.printf("\tRelation type: %s.%n", entityRelation.getRelationType()); + * entityRelation.getRoles().forEach(role -> { + * final HealthcareEntity entity = role.getEntity(); + * System.out.printf("\t\tEntity text: %s, category: %s, role: %s.%n", + * entity.getText(), entity.getCategory(), role.getName()); + * }); + * }); + * }); + * }); + *+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * @param options The additional configurable {@link AnalyzeHealthcareEntitiesOptions options} that may be passed @@ -966,7 +1661,51 @@ public Response
Code Sample
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions} + * + *+ * List<String> documents = Arrays.asList( + * "Elon Musk is the CEO of SpaceX and Tesla.", + * "My SSN is 859-98-0987" + * ); + * + * SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = + * textAnalyticsClient.beginAnalyzeActions( + * documents, + * new TextAnalyticsActions().setDisplayName("{tasks_display_name}") + * .setRecognizeEntitiesActions(new RecognizeEntitiesAction()) + * .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()), + * "en", + * new AnalyzeActionsOptions().setIncludeStatistics(false)); + * syncPoller.waitForCompletion(); + * AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult(); + * result.forEach(analyzeActionsResult -> { + * System.out.println("Entities recognition action results:"); + * analyzeActionsResult.getRecognizeEntitiesResults().forEach( + * actionResult -> { + * if (!actionResult.isError()) { + * actionResult.getDocumentsResults().forEach( + * entitiesResult -> entitiesResult.getEntities().forEach( + * entity -> System.out.printf( + * "Recognized entity: %s, entity category: %s, entity subcategory: %s," + * + " confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getSubcategory(), + * entity.getConfidenceScore()))); + * } + * }); + * System.out.println("Key phrases extraction action results:"); + * analyzeActionsResult.getExtractKeyPhrasesResults().forEach( + * actionResult -> { + * if (!actionResult.isError()) { + * actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> { + * System.out.println("Extracted phrases:"); + * extractKeyPhraseResult.getKeyPhrases() + * .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases)); + * }); + * } + * }); + * }); + *+ * * * @param documents A list of documents to be analyzed. * For text length limits, maximum batch size, and supported text encoding, see @@ -1004,7 +1743,51 @@ public SyncPoller
Code Sample
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-AnalyzeActionsOptions-Context} + * + *+ * List<TextDocumentInput> documents = Arrays.asList( + * new TextDocumentInput("0", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en"), + * new TextDocumentInput("1", "My SSN is 859-98-0987").setLanguage("en") + * ); + * + * SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = + * textAnalyticsClient.beginAnalyzeActions( + * documents, + * new TextAnalyticsActions().setDisplayName("{tasks_display_name}") + * .setRecognizeEntitiesActions(new RecognizeEntitiesAction()) + * .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()), + * new AnalyzeActionsOptions().setIncludeStatistics(false), + * Context.NONE); + * syncPoller.waitForCompletion(); + * AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult(); + * result.forEach(analyzeActionsResult -> { + * System.out.println("Entities recognition action results:"); + * analyzeActionsResult.getRecognizeEntitiesResults().forEach( + * actionResult -> { + * if (!actionResult.isError()) { + * actionResult.getDocumentsResults().forEach( + * entitiesResult -> entitiesResult.getEntities().forEach( + * entity -> System.out.printf( + * "Recognized entity: %s, entity category: %s, entity subcategory: %s," + * + " confidence score: %f.%n", + * entity.getText(), entity.getCategory(), entity.getSubcategory(), + * entity.getConfidenceScore()))); + * } + * }); + * System.out.println("Key phrases extraction action results:"); + * analyzeActionsResult.getExtractKeyPhrasesResults().forEach( + * actionResult -> { + * if (!actionResult.isError()) { + * actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> { + * System.out.println("Extracted phrases:"); + * extractKeyPhraseResult.getKeyPhrases() + * .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases)); + * }); + * } + * }); + * }); + *+ * * * @param documents A list of {@link TextDocumentInput documents} to be analyzed. * @param actions The {@link TextAnalyticsActions actions} that contains all actions to be executed. diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilder.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilder.java index 0d14ba586e7c5..a1542eac17764 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilder.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilder.java @@ -52,11 +52,25 @@ * *
Instantiating an asynchronous Text Analytics Client
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.instantiation} + * + *+ * TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() + * .credential(new AzureKeyCredential("{key}")) + * .endpoint("{endpoint}") + * .buildAsyncClient(); + *+ * * *
Instantiating a synchronous Text Analytics Client
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.instantiation} + * + *+ * TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() + * .credential(new AzureKeyCredential("{key}")) + * .endpoint("{endpoint}") + * .buildClient(); + *+ * * *
* Another way to construct the client is by using a {@link HttpPipeline}. The pipeline gives the client an authenticated @@ -65,7 +79,19 @@ * on how the {@link TextAnalyticsClient} and {@link TextAnalyticsAsyncClient} are built. *
* - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.pipeline.instantiation} + * + *+ * HttpPipeline pipeline = new HttpPipelineBuilder() + * .policies(/* add policies */) + * .build(); + * + * TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() + * .credential(new AzureKeyCredential("{key}")) + * .endpoint("{endpoint}") + * .pipeline(pipeline) + * .buildClient(); + *+ * * * @see TextAnalyticsAsyncClient * @see TextAnalyticsClient diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java index c58ddc77a1927..05dd5809d1d79 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java @@ -49,47 +49,56 @@ public class ReadmeSamples { * Code snippet for configuring http client. */ public void configureHttpClient() { + // BEGIN: readme-sample-configureHttpClient HttpClient client = new NettyAsyncHttpClientBuilder() .port(8080) .wiretap(true) .build(); + // END: readme-sample-configureHttpClient } /** * Code snippet for getting sync client using the AzureKeyCredential authentication. */ public void useAzureKeyCredentialSyncClient() { + // BEGIN: readme-sample-createTextAnalyticsClientWithKeyCredential TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); + // END: readme-sample-createTextAnalyticsClientWithKeyCredential } /** * Code snippet for getting async client using AzureKeyCredential authentication. */ public void useAzureKeyCredentialAsyncClient() { - TextAnalyticsAsyncClient textAnalyticsClient = new TextAnalyticsClientBuilder() + // BEGIN: readme-sample-createTextAnalyticsAsyncClientWithKeyCredential + TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildAsyncClient(); + // END: readme-sample-createTextAnalyticsAsyncClientWithKeyCredential } /** * Code snippet for getting async client using AAD authentication. */ public void useAadAsyncClient() { + // BEGIN: readme-sample-createTextAnalyticsAsyncClientWithAAD TokenCredential defaultCredential = new DefaultAzureCredentialBuilder().build(); - TextAnalyticsAsyncClient textAnalyticsClient = new TextAnalyticsClientBuilder() + TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .endpoint("{endpoint}") .credential(defaultCredential) .buildAsyncClient(); + // END: readme-sample-createTextAnalyticsAsyncClientWithAAD } /** * Code snippet for rotating AzureKeyCredential of the client */ public void rotatingAzureKeyCredential() { + // BEGIN: readme-sample-rotatingAzureKeyCredential AzureKeyCredential credential = new AzureKeyCredential("{key}"); TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder() .credential(credential) @@ -97,12 +106,14 @@ public void rotatingAzureKeyCredential() { .buildClient(); credential.update("{new_key}"); + // END: readme-sample-rotatingAzureKeyCredential } /** * Code snippet for handling exception */ public void handlingException() { + // BEGIN: readme-sample-handlingException List