// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.ai.textanalytics;
import com.azure.ai.textanalytics.models.AnalyzeActionsOperationDetail;
import com.azure.ai.textanalytics.models.AnalyzeActionsOptions;
import com.azure.ai.textanalytics.models.AnalyzeHealthcareEntitiesOperationDetail;
import com.azure.ai.textanalytics.models.AnalyzeHealthcareEntitiesOptions;
import com.azure.ai.textanalytics.models.AnalyzeSentimentOptions;
import com.azure.ai.textanalytics.models.CategorizedEntity;
import com.azure.ai.textanalytics.models.CategorizedEntityCollection;
import com.azure.ai.textanalytics.models.DetectLanguageInput;
import com.azure.ai.textanalytics.models.DetectedLanguage;
import com.azure.ai.textanalytics.models.DocumentSentiment;
import com.azure.ai.textanalytics.models.KeyPhrasesCollection;
import com.azure.ai.textanalytics.models.LinkedEntity;
import com.azure.ai.textanalytics.models.LinkedEntityCollection;
import com.azure.ai.textanalytics.models.PiiEntityCollection;
import com.azure.ai.textanalytics.models.RecognizePiiEntitiesOptions;
import com.azure.ai.textanalytics.models.TextAnalyticsActions;
import com.azure.ai.textanalytics.models.TextAnalyticsError;
import com.azure.ai.textanalytics.models.TextAnalyticsException;
import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions;
import com.azure.ai.textanalytics.models.TextDocumentInput;
import com.azure.ai.textanalytics.util.AnalyzeActionsResultPagedIterable;
import com.azure.ai.textanalytics.util.AnalyzeHealthcareEntitiesPagedIterable;
import com.azure.ai.textanalytics.util.AnalyzeHealthcareEntitiesResultCollection;
import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection;
import com.azure.ai.textanalytics.util.DetectLanguageResultCollection;
import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection;
import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection;
import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection;
import com.azure.ai.textanalytics.util.RecognizePiiEntitiesResultCollection;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceClient;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.http.rest.PagedIterable;
import com.azure.core.http.rest.Response;
import com.azure.core.util.Context;
import com.azure.core.util.polling.SyncPoller;
import java.util.Objects;
import static com.azure.ai.textanalytics.implementation.Utility.inputDocumentsValidation;
import static com.azure.ai.textanalytics.implementation.Utility.mapByIndex;
/**
* This class provides a synchronous client that contains all the operations that apply to Azure Text Analytics.
* Operations allowed by the client are language detection, entities recognition, linked entities recognition,
* key phrases extraction, and sentiment analysis of a document or a list of documents.
*
 * <p><strong>Analyzing multiple actions with a synchronous Text Analytics Client</strong></p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions -->
* <pre>
* List<String> documents = Arrays.asList(
* "Elon Musk is the CEO of SpaceX and Tesla.",
* "My SSN is 859-98-0987"
* );
*
* SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
* textAnalyticsClient.beginAnalyzeActions(
* documents,
* new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
* .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
* .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
* "en",
* new AnalyzeActionsOptions().setIncludeStatistics(false));
* syncPoller.waitForCompletion();
* AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
* result.forEach(analyzeActionsResult -> {
* System.out.println("Entities recognition action results:");
* analyzeActionsResult.getRecognizeEntitiesResults().forEach(
* actionResult -> {
* if (!actionResult.isError()) {
* actionResult.getDocumentsResults().forEach(
* entitiesResult -> entitiesResult.getEntities().forEach(
* entity -> System.out.printf(
* "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
* + " confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getSubcategory(),
* entity.getConfidenceScore())));
* }
* });
* System.out.println("Key phrases extraction action results:");
* analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
* actionResult -> {
* if (!actionResult.isError()) {
* actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
* System.out.println("Extracted phrases:");
* extractKeyPhraseResult.getKeyPhrases()
* .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
* });
* }
* });
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions -->
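 *
 * <p>A minimal construction sketch (not an official snippet; assumes an {@code AzureKeyCredential} and a service
 * endpoint):</p>
 * <pre>
 * TextAnalyticsClient textAnalyticsClient = new TextAnalyticsClientBuilder()
 *     .credential(new AzureKeyCredential("{key}"))
 *     .endpoint("{endpoint}")
 *     .buildClient();
 * </pre>
 *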
* <p>View {@link TextAnalyticsClientBuilder this} for additional ways to construct the client.</p>
*
* @see TextAnalyticsClientBuilder
*/
@ServiceClient(builder = TextAnalyticsClientBuilder.class)
public final class TextAnalyticsClient {
private final TextAnalyticsAsyncClient client;
/**
 * Creates a {@code TextAnalyticsClient} that sends requests to the Text Analytics service's endpoint.
* Each service call goes through the {@link TextAnalyticsClientBuilder#pipeline http pipeline}.
*
 * @param client The {@link TextAnalyticsAsyncClient} that the client routes its requests through.
*/
TextAnalyticsClient(TextAnalyticsAsyncClient client) {
this.client = client;
}
/**
 * Gets the default country hint code.
*
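 * <p>A minimal usage sketch (assumes {@code textAnalyticsClient} has already been built):</p>
 * <pre>
 * String countryHint = textAnalyticsClient.getDefaultCountryHint();
 * System.out.printf("Default country hint: %s%n", countryHint);
 * </pre>
 *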
* @return The default country hint code
*/
public String getDefaultCountryHint() {
return client.getDefaultCountryHint();
}
/**
 * Gets the default language that was set when the client was built.
*
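 * <p>A minimal usage sketch (assumes {@code textAnalyticsClient} has already been built):</p>
 * <pre>
 * String language = textAnalyticsClient.getDefaultLanguage();
 * System.out.printf("Default language: %s%n", language);
 * </pre>
 *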
* @return The default language
*/
public String getDefaultLanguage() {
return client.getDefaultLanguage();
}
/**
* Returns the detected language and a confidence score between zero and one. Scores close to one indicate 100%
* certainty that the identified language is true.
*
 * This method uses the default country hint that is set by using
 * {@link TextAnalyticsClientBuilder#defaultCountryHint(String)}. If none is specified, the service uses 'US' as
 * the country hint.
*
* <p><strong>Code Sample</strong></p>
 * <p>Detects the language of a single document.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String -->
* <pre>
* DetectedLanguage detectedLanguage = textAnalyticsClient.detectLanguage("Bonjour tout le monde");
* System.out.printf("Detected language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
* detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore());
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String -->
*
* @param document The document to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
*
* @return The {@link DetectedLanguage detected language} of the document.
*
* @throws NullPointerException if {@code document} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DetectedLanguage detectLanguage(String document) {
return detectLanguage(document, client.getDefaultCountryHint());
}
/**
* Returns the detected language and a confidence score between zero and one.
* Scores close to one indicate 100% certainty that the identified language is true.
*
* <p><strong>Code Sample</strong></p>
 * <p>Detects the language of a document with a provided country hint.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String-String -->
* <pre>
* DetectedLanguage detectedLanguage = textAnalyticsClient.detectLanguage(
* "This text is in English", "US");
* System.out.printf("Detected language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
* detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore());
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String-String -->
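 *
 * <p>A minimal sketch (not an official snippet) showing how the country-hint behavior described below can be
 * disabled by passing {@code "none"}:</p>
 * <pre>
 * DetectedLanguage unhinted = textAnalyticsClient.detectLanguage("Bonjour tout le monde", "none");
 * System.out.printf("Detected language without a country hint: %s%n", unhinted.getName());
 * </pre>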
*
* @param document The document to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
 * @param countryHint Accepts two-letter country codes specified by ISO 3166-1 alpha-2. Defaults to "US" if not
 * specified. To remove this behavior, reset this parameter by setting it to the empty string
 * {@code countryHint} = "" or "none".
*
* @return The {@link DetectedLanguage detected language} of the document.
*
* @throws NullPointerException if {@code document} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DetectedLanguage detectLanguage(String document, String countryHint) {
return client.detectLanguage(document, countryHint).block();
}
/**
 * Detects the language for a batch of documents with the provided country hint and request options.
*
* <p><strong>Code Sample</strong></p>
* <p>Detects the language in a list of documents with a provided country hint and request options.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions -->
* <pre>
* List<String> documents = Arrays.asList(
* "This is written in English",
* "Este es un documento escrito en Español."
* );
*
* DetectLanguageResultCollection resultCollection =
* textAnalyticsClient.detectLanguageBatch(documents, "US", null);
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* // Batch result of languages
* resultCollection.forEach(detectLanguageResult -> {
* System.out.printf("Document ID: %s%n", detectLanguageResult.getId());
* DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
* System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
* detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
* detectedLanguage.getConfidenceScore());
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions -->
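 *
 * <p>A hedged sketch of configuring the {@link TextAnalyticsRequestOptions} described below (not an official
 * snippet; the model version value is illustrative):</p>
 * <pre>
 * TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions()
 *     .setModelVersion("latest")
 *     .setIncludeStatistics(true);
 * DetectLanguageResultCollection results =
 *     textAnalyticsClient.detectLanguageBatch(documents, "US", requestOptions);
 * </pre>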
*
* @param documents The list of documents to detect languages for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
 * @param countryHint Accepts two-letter country codes specified by ISO 3166-1 alpha-2. Defaults to "US" if not
 * specified. To remove this behavior, reset this parameter by setting it to the empty string
 * {@code countryHint} = "" or "none".
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
*
* @return A {@link DetectLanguageResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DetectLanguageResultCollection detectLanguageBatch(
Iterable<String> documents, String countryHint, TextAnalyticsRequestOptions options) {
inputDocumentsValidation(documents);
return client.detectLanguageBatch(documents, countryHint, options).block();
}
/**
 * Detects the language for a batch of {@link DetectLanguageInput documents} with provided request options.
*
* <p><strong>Code Sample</strong></p>
 * <p>Detects the language, with an HTTP response, in a list of {@link DetectLanguageInput documents} with
 * provided request options.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions-Context -->
* <pre>
* List<DetectLanguageInput> detectLanguageInputs = Arrays.asList(
* new DetectLanguageInput("1", "This is written in English.", "US"),
* new DetectLanguageInput("2", "Este es un documento escrito en Español.", "es")
* );
*
* Response<DetectLanguageResultCollection> response =
* textAnalyticsClient.detectLanguageBatchWithResponse(detectLanguageInputs,
* new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
*
* // Response's status code
* System.out.printf("Status code of request response: %d%n", response.getStatusCode());
* DetectLanguageResultCollection detectedLanguageResultCollection = response.getValue();
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = detectedLanguageResultCollection.getStatistics();
* System.out.printf(
* "Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s,"
* + " valid document count = %s.%n",
* batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(),
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* // Batch result of languages
* detectedLanguageResultCollection.forEach(detectLanguageResult -> {
* System.out.printf("Document ID: %s%n", detectLanguageResult.getId());
* DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage();
* System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n",
* detectedLanguage.getName(), detectedLanguage.getIso6391Name(),
* detectedLanguage.getConfidenceScore());
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions-Context -->
*
* @param documents The list of {@link DetectLanguageInput documents} to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} that contains a {@link DetectLanguageResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DetectLanguageResultCollection> detectLanguageBatchWithResponse(
Iterable<DetectLanguageInput> documents, TextAnalyticsRequestOptions options, Context context) {
inputDocumentsValidation(documents);
return client.detectLanguageAsyncClient.detectLanguageBatchWithContext(documents, options, context).block();
}
// Categorized Entity
/**
* Returns a list of general categorized entities in the provided document.
*
* For a list of supported entity types, check: <a href="https://aka.ms/taner">this</a>
*
 * This method uses the default language that can be set by using
 * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, the service uses 'en' as
 * the language.
*
* <p><strong>Code Sample</strong></p>
 * <p>Recognizes the entities in a document.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String -->
* <pre>
* final CategorizedEntityCollection recognizeEntitiesResult =
* textAnalyticsClient.recognizeEntities("Satya Nadella is the CEO of Microsoft");
* for (CategorizedEntity entity : recognizeEntitiesResult) {
* System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getConfidenceScore());
* }
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String -->
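 *
 * <p>The default language mentioned above is configured on the builder. A minimal construction sketch (not an
 * official snippet; assumes an {@code AzureKeyCredential} and a service endpoint):</p>
 * <pre>
 * TextAnalyticsClient clientWithDefaultLanguage = new TextAnalyticsClientBuilder()
 *     .credential(new AzureKeyCredential("{key}"))
 *     .endpoint("{endpoint}")
 *     .defaultLanguage("es")
 *     .buildClient();
 * </pre>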
*
* @param document The document to recognize entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
*
* @return A {@link CategorizedEntityCollection} contains a list of
* {@link CategorizedEntity recognized categorized entities} and warnings.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CategorizedEntityCollection recognizeEntities(String document) {
return recognizeEntities(document, client.getDefaultLanguage());
}
/**
* Returns a list of general categorized entities in the provided document with provided language code.
*
* For a list of supported entity types, check: <a href="https://aka.ms/taner">this</a>
* For a list of enabled languages, check: <a href="https://aka.ms/talangs">this</a>
*
* <p><strong>Code Sample</strong></p>
* <p>Recognizes the entities in a document with a provided language code.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String-String -->
* <pre>
* final CategorizedEntityCollection recognizeEntitiesResult =
* textAnalyticsClient.recognizeEntities("Satya Nadella is the CEO of Microsoft", "en");
*
* for (CategorizedEntity entity : recognizeEntitiesResult) {
* System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getConfidenceScore());
* }
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String-String -->
*
* @param document The document to recognize entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as default.
*
* @return The {@link CategorizedEntityCollection} contains a list of
* {@link CategorizedEntity recognized categorized entities} and warnings.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CategorizedEntityCollection recognizeEntities(String document, String language) {
return client.recognizeEntities(document, language).block();
}
/**
* Returns a list of general categorized entities for the provided list of documents with provided language code
* and request options.
*
* <p><strong>Code Sample</strong></p>
* <p>Recognizes the entities in a list of documents with a provided language code and request options.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
* <pre>
* List<String> documents = Arrays.asList(
* "I had a wonderful trip to Seattle last week.",
* "I work at Microsoft.");
*
* RecognizeEntitiesResultCollection resultCollection =
* textAnalyticsClient.recognizeEntitiesBatch(documents, "en", null);
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf(
* "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* resultCollection.forEach(recognizeEntitiesResult ->
* recognizeEntitiesResult.getEntities().forEach(entity ->
* System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getConfidenceScore())));
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
*
* @param documents A list of documents to recognize entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as default.
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
*
* @return A {@link RecognizeEntitiesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public RecognizeEntitiesResultCollection recognizeEntitiesBatch(
Iterable<String> documents, String language, TextAnalyticsRequestOptions options) {
inputDocumentsValidation(documents);
return client.recognizeEntitiesBatch(documents, language, options).block();
}
/**
 * Returns a list of general categorized entities for the provided list of {@link TextDocumentInput documents}
 * with provided request options.
*
* <p><strong>Code Sample</strong></p>
 * <p>Recognizes the entities, with an HTTP response, in a list of {@link TextDocumentInput documents} with
 * provided request options.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context -->
* <pre>
* List<TextDocumentInput> textDocumentInputs = Arrays.asList(
* new TextDocumentInput("0", "I had a wonderful trip to Seattle last week.").setLanguage("en"),
* new TextDocumentInput("1", "I work at Microsoft.").setLanguage("en")
* );
*
* Response<RecognizeEntitiesResultCollection> response =
* textAnalyticsClient.recognizeEntitiesBatchWithResponse(textDocumentInputs,
* new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
*
* // Response's status code
* System.out.printf("Status code of request response: %d%n", response.getStatusCode());
* RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = response.getValue();
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResultCollection.getStatistics();
* System.out.printf(
* "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* recognizeEntitiesResultCollection.forEach(recognizeEntitiesResult ->
* recognizeEntitiesResult.getEntities().forEach(entity ->
* System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getConfidenceScore())));
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context -->
*
* @param documents A list of {@link TextDocumentInput documents} to recognize entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} that contains a {@link RecognizeEntitiesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<RecognizeEntitiesResultCollection> recognizeEntitiesBatchWithResponse(
Iterable<TextDocumentInput> documents, TextAnalyticsRequestOptions options, Context context) {
inputDocumentsValidation(documents);
return client.recognizeEntityAsyncClient.recognizeEntitiesBatchWithContext(documents, options, context).block();
}
// PII Entity
/**
 * Returns a list of Personally Identifiable Information (PII) entities in the provided document.
*
* For a list of supported entity types, check: <a href="https://aka.ms/tanerpii">this</a>
 * For a list of enabled languages, check: <a href="https://aka.ms/talangs">this</a>. This method uses the
 * default language that is set using {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is
 * specified, the service uses 'en' as the language.
*
* <p><strong>Code Sample</strong></p>
 * <p>Recognizes the PII entities details in a document.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String -->
* <pre>
* PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities("My SSN is 859-98-0987");
* System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
* for (PiiEntity entity : piiEntityCollection) {
* System.out.printf(
* "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
* + " entity subcategory: %s, confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore());
* }
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String -->
*
* @param document The document to recognize PII entities details for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
*
* @return A {@link PiiEntityCollection recognized PII entities collection}.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PiiEntityCollection recognizePiiEntities(String document) {
return recognizePiiEntities(document, client.getDefaultLanguage());
}
/**
 * Returns a list of Personally Identifiable Information (PII) entities in the provided document
* with provided language code.
*
* For a list of supported entity types, check: <a href="https://aka.ms/tanerpii">this</a>
* For a list of enabled languages, check: <a href="https://aka.ms/talangs">this</a>
*
* <p><strong>Code Sample</strong></p>
* <p>Recognizes the PII entities details in a document with a provided language code.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String-String -->
* <pre>
* PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities(
* "My SSN is 859-98-0987", "en");
* System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
* piiEntityCollection.forEach(entity -> System.out.printf(
* "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
* + " entity subcategory: %s, confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String-String -->
*
* @param document The document to recognize PII entities details for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as default.
*
* @return The {@link PiiEntityCollection recognized PII entities collection}.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PiiEntityCollection recognizePiiEntities(String document, String language) {
return client.recognizePiiEntities(document, language).block();
}
/**
 * Returns a list of Personally Identifiable Information (PII) entities in the provided document
* with provided language code.
*
* For a list of supported entity types, check: <a href="https://aka.ms/tanerpii">this</a>
* For a list of enabled languages, check: <a href="https://aka.ms/talangs">this</a>
*
* <p><strong>Code Sample</strong></p>
* <p>Recognizes the PII entities details in a document with a provided language code and
* {@link RecognizePiiEntitiesOptions}.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String-String-RecognizePiiEntitiesOptions -->
* <pre>
* PiiEntityCollection piiEntityCollection = textAnalyticsClient.recognizePiiEntities(
* "My SSN is 859-98-0987", "en",
* new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION));
* System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
* piiEntityCollection.forEach(entity -> System.out.printf(
* "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
* + " entity subcategory: %s, confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntities#String-String-RecognizePiiEntitiesOptions -->
*
* @param document The document to recognize PII entities details for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as default.
* @param options The additional configurable {@link RecognizePiiEntitiesOptions options} that may be passed when
* recognizing PII entities.
*
* @return The {@link PiiEntityCollection recognized PII entities collection}.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PiiEntityCollection recognizePiiEntities(String document, String language,
RecognizePiiEntitiesOptions options) {
return client.recognizePiiEntities(document, language, options).block();
}
/**
 * Returns a list of Personally Identifiable Information (PII) entities for the provided list of documents with
* provided language code and request options.
*
* <p><strong>Code Sample</strong></p>
* <p>Recognizes the PII entities details in a list of documents with a provided language code
* and request options.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntitiesBatch#Iterable-String-RecognizePiiEntitiesOptions -->
* <pre>
* List<String> documents = Arrays.asList(
* "My SSN is 859-98-0987",
* "Visa card 4111 1111 1111 1111"
* );
*
* RecognizePiiEntitiesResultCollection resultCollection = textAnalyticsClient.recognizePiiEntitiesBatch(
* documents, "en", new RecognizePiiEntitiesOptions().setIncludeStatistics(true));
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* resultCollection.forEach(recognizePiiEntitiesResult -> {
* PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities();
* System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
* piiEntityCollection.forEach(entity -> System.out.printf(
* "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
* + " entity subcategory: %s, confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntitiesBatch#Iterable-String-RecognizePiiEntitiesOptions -->
*
* @param documents A list of documents to recognize PII entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as default.
* @param options The additional configurable {@link RecognizePiiEntitiesOptions options} that may be passed when
* recognizing PII entities.
*
* @return A {@link RecognizePiiEntitiesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public RecognizePiiEntitiesResultCollection recognizePiiEntitiesBatch(
Iterable<String> documents, String language, RecognizePiiEntitiesOptions options) {
return client.recognizePiiEntitiesBatch(documents, language, options).block();
}
/**
 * Returns a list of Personally Identifiable Information (PII) entities for the provided list of
 * {@link TextDocumentInput documents} with provided request options.
*
* <p><strong>Code Sample</strong></p>
 * <p>Recognizes the PII entities details, with an HTTP response, in a list of {@link TextDocumentInput documents}
 * with provided request options.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntitiesBatch#Iterable-RecognizePiiEntitiesOptions-Context -->
* <pre>
* List<TextDocumentInput> textDocumentInputs = Arrays.asList(
* new TextDocumentInput("0", "My SSN is 859-98-0987"),
* new TextDocumentInput("1", "Visa card 4111 1111 1111 1111")
* );
*
* Response<RecognizePiiEntitiesResultCollection> response =
* textAnalyticsClient.recognizePiiEntitiesBatchWithResponse(textDocumentInputs,
* new RecognizePiiEntitiesOptions().setIncludeStatistics(true), Context.NONE);
*
* RecognizePiiEntitiesResultCollection resultCollection = response.getValue();
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* resultCollection.forEach(recognizePiiEntitiesResult -> {
* PiiEntityCollection piiEntityCollection = recognizePiiEntitiesResult.getEntities();
* System.out.printf("Redacted Text: %s%n", piiEntityCollection.getRedactedText());
* piiEntityCollection.forEach(entity -> System.out.printf(
* "Recognized Personally Identifiable Information entity: %s, entity category: %s,"
* + " entity subcategory: %s, confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()));
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizePiiEntitiesBatch#Iterable-RecognizePiiEntitiesOptions-Context -->
*
* @param documents A list of {@link TextDocumentInput documents} to recognize PII entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param options The additional configurable {@link RecognizePiiEntitiesOptions options} that may be passed when
* recognizing PII entities.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} that contains a {@link RecognizePiiEntitiesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<RecognizePiiEntitiesResultCollection> recognizePiiEntitiesBatchWithResponse(
Iterable<TextDocumentInput> documents, RecognizePiiEntitiesOptions options, Context context) {
return client.recognizePiiEntityAsyncClient.recognizePiiEntitiesBatchWithContext(documents, options,
context).block();
}
// Linked Entities
/**
* Returns a list of recognized entities with links to a well-known knowledge base for the provided document.
* See <a href="https://aka.ms/talangs">this</a> for supported languages in Text Analytics API.
*
 * This method uses the default language that can be set by using
 * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, the service uses 'en' as
 * the language.
*
* <p><strong>Code Sample</strong></p>
 * <p>Recognizes the linked entities in a document.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String -->
* <pre>
* final String document = "Old Faithful is a geyser at Yellowstone Park.";
* System.out.println("Linked Entities:");
* textAnalyticsClient.recognizeLinkedEntities(document).forEach(linkedEntity -> {
* System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
* linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
* linkedEntity.getDataSource());
* linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
* "Matched entity: %s, confidence score: %f.%n",
* entityMatch.getText(), entityMatch.getConfidenceScore()));
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String -->
*
* @param document The document to recognize linked entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
*
* @return A {@link LinkedEntityCollection} contains a list of {@link LinkedEntity recognized linked entities}.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public LinkedEntityCollection recognizeLinkedEntities(String document) {
return recognizeLinkedEntities(document, client.getDefaultLanguage());
}
/**
* Returns a list of recognized entities with links to a well-known knowledge base for the provided document with
* language code.
*
* See <a href="https://aka.ms/talangs">this</a> for supported languages in Text Analytics API.
*
* <p><strong>Code Sample</strong></p>
* <p>Recognizes the linked entities in a document with a provided language code.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String-String -->
* <pre>
* String document = "Old Faithful is a geyser at Yellowstone Park.";
* textAnalyticsClient.recognizeLinkedEntities(document, "en").forEach(linkedEntity -> {
* System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
* linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
* linkedEntity.getDataSource());
* linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
* "Matched entity: %s, confidence score: %f.%n",
* entityMatch.getText(), entityMatch.getConfidenceScore()));
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String-String -->
*
* @param document The document to recognize linked entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2 letter ISO 639-1 representation of language for the document. If not set, uses "en" for
* English as default.
*
* @return A {@link LinkedEntityCollection} contains a list of {@link LinkedEntity recognized linked entities}.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public LinkedEntityCollection recognizeLinkedEntities(String document, String language) {
Objects.requireNonNull(document, "'document' cannot be null.");
return client.recognizeLinkedEntities(document, language).block();
}
/**
* Returns a list of recognized entities with links to a well-known knowledge base for the list of documents with
* provided language code and request options.
*
* See <a href="https://aka.ms/talangs">this</a> for supported languages in Text Analytics API.
*
* <p><strong>Code Sample</strong></p>
* <p>Recognizes the linked entities in a list of documents with a provided language code and request options.
* </p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
* <pre>
* List<String> documents = Arrays.asList(
* "Old Faithful is a geyser at Yellowstone Park.",
* "Mount Shasta has lenticular clouds."
* );
*
* RecognizeLinkedEntitiesResultCollection resultCollection =
* textAnalyticsClient.recognizeLinkedEntitiesBatch(documents, "en", null);
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* resultCollection.forEach(recognizeLinkedEntitiesResult ->
* recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> {
* System.out.println("Linked Entities:");
* System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
* linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
* linkedEntity.getDataSource());
* linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
* "Matched entity: %s, confidence score: %f.%n",
* entityMatch.getText(), entityMatch.getConfidenceScore()));
* }));
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions -->
*
* @param documents A list of documents to recognize linked entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2 letter ISO 639-1 representation of language for the documents. If not set, uses "en" for
* English as default.
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
*
* @return A {@link RecognizeLinkedEntitiesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public RecognizeLinkedEntitiesResultCollection recognizeLinkedEntitiesBatch(
Iterable<String> documents, String language, TextAnalyticsRequestOptions options) {
inputDocumentsValidation(documents);
return client.recognizeLinkedEntitiesBatch(documents, language, options).block();
}
/**
* Returns a list of recognized entities with links to a well-known knowledge base for the list of
 * {@link TextDocumentInput documents} and request options.
*
* See <a href="https://aka.ms/talangs">this</a> for supported languages in Text Analytics API.
*
* <p><strong>Code Sample</strong></p>
 * <p>Recognizes the linked entities, with an HTTP response, in a list of {@link TextDocumentInput} with request options.
* </p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context -->
* <pre>
* List<TextDocumentInput> textDocumentInputs = Arrays.asList(
* new TextDocumentInput("1", "Old Faithful is a geyser at Yellowstone Park.").setLanguage("en"),
* new TextDocumentInput("2", "Mount Shasta has lenticular clouds.").setLanguage("en")
* );
*
* Response<RecognizeLinkedEntitiesResultCollection> response =
* textAnalyticsClient.recognizeLinkedEntitiesBatchWithResponse(textDocumentInputs,
* new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
*
* // Response's status code
* System.out.printf("Status code of request response: %d%n", response.getStatusCode());
* RecognizeLinkedEntitiesResultCollection resultCollection = response.getValue();
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf(
* "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* resultCollection.forEach(recognizeLinkedEntitiesResult ->
* recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> {
* System.out.println("Linked Entities:");
* System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n",
* linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(),
* linkedEntity.getDataSource());
* linkedEntity.getMatches().forEach(entityMatch -> System.out.printf(
* "Matched entity: %s, confidence score: %.2f.%n",
* entityMatch.getText(), entityMatch.getConfidenceScore()));
* }));
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context -->
*
* @param documents A list of {@link TextDocumentInput documents} to recognize linked entities for.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} that contains a {@link RecognizeLinkedEntitiesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<RecognizeLinkedEntitiesResultCollection> recognizeLinkedEntitiesBatchWithResponse(
Iterable<TextDocumentInput> documents, TextAnalyticsRequestOptions options, Context context) {
inputDocumentsValidation(documents);
return client.recognizeLinkedEntityAsyncClient.recognizeLinkedEntitiesBatchWithContext(documents,
options, context).block();
}
// Key Phrase
/**
* Returns a list of strings denoting the key phrases in the document.
*
 * This method uses the default language that can be set by using
 * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, the service uses 'en' as
 * the language.
*
* <p><strong>Code Sample</strong></p>
 * <p>Extracts key phrases in a document.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrases#String -->
* <pre>
* System.out.println("Extracted phrases:");
* for (String keyPhrase : textAnalyticsClient.extractKeyPhrases("My cat might need to see a veterinarian.")) {
* System.out.printf("%s.%n", keyPhrase);
* }
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrases#String -->
*
* @param document The document to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
*
* @return A {@link KeyPhrasesCollection} contains a list of extracted key phrases.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public KeyPhrasesCollection extractKeyPhrases(String document) {
return extractKeyPhrases(document, client.getDefaultLanguage());
}
/**
* Returns a list of strings denoting the key phrases in the document.
* See <a href="https://aka.ms/talangs">this</a> for the list of enabled languages.
*
* <p><strong>Code Sample</strong></p>
* <p>Extracts key phrases in a document with a provided language representation.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrases#String-String-Context -->
* <pre>
* System.out.println("Extracted phrases:");
* textAnalyticsClient.extractKeyPhrases("My cat might need to see a veterinarian.", "en")
 *     .forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase));
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrases#String-String-Context -->
*
* @param document The document to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2 letter ISO 639-1 representation of language for the document. If not set, uses "en" for
* English as default.
*
* @return A {@link KeyPhrasesCollection} contains a list of extracted key phrases.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public KeyPhrasesCollection extractKeyPhrases(String document, String language) {
Objects.requireNonNull(document, "'document' cannot be null.");
return client.extractKeyPhrases(document, language).block();
}
/**
* Returns a list of strings denoting the key phrases in the documents with provided language code and
* request options.
*
* See <a href="https://aka.ms/talangs">this</a> for the list of enabled languages.
*
* <p><strong>Code Sample</strong></p>
* <p>Extracts key phrases in a list of documents with a provided language code and request options.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-String-TextAnalyticsRequestOptions -->
* <pre>
* List<String> documents = Arrays.asList(
* "My cat might need to see a veterinarian.",
* "The pitot tube is used to measure airspeed."
* );
*
* // Extracting batch key phrases
* ExtractKeyPhrasesResultCollection resultCollection =
* textAnalyticsClient.extractKeyPhrasesBatch(documents, "en", null);
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf(
* "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* // Extracted key phrase for each of documents from a batch of documents
* resultCollection.forEach(extractKeyPhraseResult -> {
* System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId());
* // Valid document
* System.out.println("Extracted phrases:");
* extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase));
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-String-TextAnalyticsRequestOptions -->
*
* @param documents A list of documents to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2 letter ISO 639-1 representation of language for the documents. If not set, uses "en" for
* English as default.
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
*
* @return A {@link ExtractKeyPhrasesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ExtractKeyPhrasesResultCollection extractKeyPhrasesBatch(
Iterable<String> documents, String language, TextAnalyticsRequestOptions options) {
inputDocumentsValidation(documents);
return client.extractKeyPhrasesBatch(documents, language, options).block();
}
/**
 * Returns a list of strings denoting the key phrases in a batch of {@link TextDocumentInput documents} with
* request options.
*
* See <a href="https://aka.ms/talangs">this</a> for the list of enabled languages.
*
* <p><strong>Code Sample</strong></p>
 * <p>Extracts key phrases, with an HTTP response, in a list of {@link TextDocumentInput} with request options.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions-Context -->
* <pre>
* List<TextDocumentInput> textDocumentInputs = Arrays.asList(
* new TextDocumentInput("1", "My cat might need to see a veterinarian.").setLanguage("en"),
* new TextDocumentInput("2", "The pitot tube is used to measure airspeed.").setLanguage("en")
* );
*
* // Extracting batch key phrases
* Response<ExtractKeyPhrasesResultCollection> response =
* textAnalyticsClient.extractKeyPhrasesBatchWithResponse(textDocumentInputs,
* new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
*
*
* // Response's status code
* System.out.printf("Status code of request response: %d%n", response.getStatusCode());
* ExtractKeyPhrasesResultCollection resultCollection = response.getValue();
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf(
* "A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* // Extracted key phrase for each of documents from a batch of documents
* resultCollection.forEach(extractKeyPhraseResult -> {
* System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId());
* // Valid document
* System.out.println("Extracted phrases:");
* extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase ->
* System.out.printf("%s.%n", keyPhrase));
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions-Context -->
*
* @param documents A list of {@link TextDocumentInput documents} to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} that contains a {@link ExtractKeyPhrasesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ExtractKeyPhrasesResultCollection> extractKeyPhrasesBatchWithResponse(
Iterable<TextDocumentInput> documents, TextAnalyticsRequestOptions options, Context context) {
inputDocumentsValidation(documents);
return client.extractKeyPhraseAsyncClient.extractKeyPhrasesBatchWithContext(documents, options, context)
.block();
}
// Sentiment
/**
* Returns a sentiment prediction, as well as confidence scores for each sentiment label
* (Positive, Negative, and Neutral) for the document and each sentence within it.
*
 * This method uses the default language that can be set by using
 * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, the service uses 'en' as
 * the language.
*
* <p><strong>Code Sample</strong></p>
 * <p>Analyzes the sentiment of a document.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String -->
* <pre>
* final DocumentSentiment documentSentiment =
* textAnalyticsClient.analyzeSentiment("The hotel was dark and unclean.");
*
* System.out.printf(
* "Recognized sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n",
* documentSentiment.getSentiment(),
* documentSentiment.getConfidenceScores().getPositive(),
* documentSentiment.getConfidenceScores().getNeutral(),
* documentSentiment.getConfidenceScores().getNegative());
*
* for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
* System.out.printf(
* "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n",
* sentenceSentiment.getSentiment(),
* sentenceSentiment.getConfidenceScores().getPositive(),
* sentenceSentiment.getConfidenceScores().getNeutral(),
* sentenceSentiment.getConfidenceScores().getNegative());
* }
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String -->
*
* @param document The document to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
*
* @return A {@link DocumentSentiment analyzed document sentiment} of the document.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response is returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DocumentSentiment analyzeSentiment(String document) {
return analyzeSentiment(document, client.getDefaultLanguage());
}
/**
* Returns a sentiment prediction, as well as confidence scores for each sentiment label
* (Positive, Negative, and Neutral) for the document and each sentence within it.
*
* <p><strong>Code Sample</strong></p>
* <p>Analyze the sentiments in a document with a provided language representation.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String-String -->
* <pre>
* final DocumentSentiment documentSentiment = textAnalyticsClient.analyzeSentiment(
* "The hotel was dark and unclean.", "en");
*
* System.out.printf(
* "Recognized sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n",
* documentSentiment.getSentiment(),
* documentSentiment.getConfidenceScores().getPositive(),
* documentSentiment.getConfidenceScores().getNeutral(),
* documentSentiment.getConfidenceScores().getNegative());
*
* for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
* System.out.printf(
* "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n",
* sentenceSentiment.getSentiment(),
* sentenceSentiment.getConfidenceScores().getPositive(),
* sentenceSentiment.getConfidenceScores().getNeutral(),
* sentenceSentiment.getConfidenceScores().getNegative());
* }
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String-String -->
*
* @param document The document to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2-letter ISO 639-1 representation of the language for the document. If not set, "en"
* (English) is used as the default.
*
* @return A {@link DocumentSentiment analyzed document sentiment} of the document.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response is returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DocumentSentiment analyzeSentiment(String document, String language) {
return client.analyzeSentiment(document, language).block();
}
/**
* Returns a sentiment prediction, as well as confidence scores for each sentiment label (Positive, Negative, and
* Neutral) for the document and each sentence within it. If {@code includeOpinionMining} of
* {@link AnalyzeSentimentOptions} is set to true, the output includes the opinion mining results: the opinions of
* each sentence are mined and a more granular analysis is conducted around the aspects in the text
* (also known as aspect-based sentiment analysis).
*
* <p><strong>Code Sample</strong></p>
* <p>Analyze the sentiment and mine the opinions for each sentence in a document with a provided language
* representation and {@link AnalyzeSentimentOptions} options.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String-String-AnalyzeSentimentOptions -->
* <pre>
* final DocumentSentiment documentSentiment = textAnalyticsClient.analyzeSentiment(
* "The hotel was dark and unclean.", "en",
* new AnalyzeSentimentOptions().setIncludeOpinionMining(true));
* for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) {
* System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
* sentenceSentiment.getOpinions().forEach(opinion -> {
* TargetSentiment targetSentiment = opinion.getTarget();
* System.out.printf("\tTarget sentiment: %s, target text: %s%n", targetSentiment.getSentiment(),
* targetSentiment.getText());
* for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
* System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
* assessmentSentiment.getSentiment(), assessmentSentiment.getText(), assessmentSentiment.isNegated());
* }
* });
* }
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String-String-AnalyzeSentimentOptions -->
*
* @param document The document to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2-letter ISO 639-1 representation of the language for the document. If not set, "en"
* (English) is used as the default.
* @param options The additional configurable {@link AnalyzeSentimentOptions options} that may be passed when
* analyzing sentiments.
*
* @return A {@link DocumentSentiment analyzed document sentiment} of the document.
*
* @throws NullPointerException if {@code document} is null.
* @throws TextAnalyticsException if the response is returned with an {@link TextAnalyticsError error}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DocumentSentiment analyzeSentiment(String document, String language, AnalyzeSentimentOptions options) {
return client.analyzeSentiment(document, language, options).block();
}
/**
* Returns a sentiment prediction, as well as confidence scores for each sentiment label
* (Positive, Negative, and Neutral) for the document and each sentence within it.
*
* <p><strong>Code Sample</strong></p>
* <p>Analyze the sentiments in a list of documents with a provided language representation and request options.</p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String-TextAnalyticsRequestOptions -->
* <pre>
* List<String> documents = Arrays.asList(
* "The hotel was dark and unclean. The restaurant had amazing gnocchi.",
* "The restaurant had amazing gnocchi. The hotel was dark and unclean."
* );
*
* // Analyzing batch sentiments
* AnalyzeSentimentResultCollection resultCollection = textAnalyticsClient.analyzeSentimentBatch(
* documents, "en", new TextAnalyticsRequestOptions().setIncludeStatistics(true));
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* // Analyzed sentiment for each document in the batch of documents
* resultCollection.forEach(analyzeSentimentResult -> {
* System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
* // Valid document
* DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
* System.out.printf(
* "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f,"
* + " negative score: %.2f.%n",
* documentSentiment.getSentiment(),
* documentSentiment.getConfidenceScores().getPositive(),
* documentSentiment.getConfidenceScores().getNeutral(),
* documentSentiment.getConfidenceScores().getNegative());
* documentSentiment.getSentences().forEach(sentenceSentiment -> System.out.printf(
* "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f,"
* + " negative score: %.2f.%n",
* sentenceSentiment.getSentiment(),
* sentenceSentiment.getConfidenceScores().getPositive(),
* sentenceSentiment.getConfidenceScores().getNeutral(),
* sentenceSentiment.getConfidenceScores().getNegative()));
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String-TextAnalyticsRequestOptions -->
*
* @param documents A list of documents to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2-letter ISO 639-1 representation of the language for the documents. If not set, "en"
* (English) is used as the default.
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
*
* @return An {@link AnalyzeSentimentResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*
* @deprecated Please use {@link #analyzeSentimentBatch(Iterable, String, AnalyzeSentimentOptions)} instead.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public AnalyzeSentimentResultCollection analyzeSentimentBatch(
Iterable<String> documents, String language, TextAnalyticsRequestOptions options) {
return client.analyzeSentimentBatch(documents, language, options).block();
}
/**
* Returns a sentiment prediction, as well as confidence scores for each sentiment label (Positive, Negative, and
* Neutral) for the document and each sentence within it. If {@code includeOpinionMining} of
* {@link AnalyzeSentimentOptions} is set to true, the output includes the opinion mining results: the opinions of
* each sentence are mined and a more granular analysis is conducted around the aspects in the text
* (also known as aspect-based sentiment analysis).
*
* <p><strong>Code Sample</strong></p>
* <p>Analyze the sentiments and mine the opinions for each sentence in a list of documents with a provided language
* representation and {@link AnalyzeSentimentOptions} options.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String-AnalyzeSentimentOptions -->
* <pre>
* List<String> documents = Arrays.asList(
* "The hotel was dark and unclean. The restaurant had amazing gnocchi.",
* "The restaurant had amazing gnocchi. The hotel was dark and unclean."
* );
*
* // Analyzing batch sentiments
* AnalyzeSentimentResultCollection resultCollection = textAnalyticsClient.analyzeSentimentBatch(
* documents, "en", new AnalyzeSentimentOptions().setIncludeOpinionMining(true));
*
* // Analyzed sentiment for each document in the batch of documents
* resultCollection.forEach(analyzeSentimentResult -> {
* System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
* DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
* documentSentiment.getSentences().forEach(sentenceSentiment -> {
* System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
* sentenceSentiment.getOpinions().forEach(opinion -> {
* TargetSentiment targetSentiment = opinion.getTarget();
* System.out.printf("\tTarget sentiment: %s, target text: %s%n", targetSentiment.getSentiment(),
* targetSentiment.getText());
* for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
* System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
* assessmentSentiment.getSentiment(), assessmentSentiment.getText(), assessmentSentiment.isNegated());
* }
* });
* });
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String-AnalyzeSentimentOptions -->
*
* @param documents A list of documents to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2-letter ISO 639-1 representation of the language for the documents. If not set, "en"
* (English) is used as the default.
* @param options The additional configurable {@link AnalyzeSentimentOptions options} that may be passed when
* analyzing sentiments.
*
* @return An {@link AnalyzeSentimentResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AnalyzeSentimentResultCollection analyzeSentimentBatch(Iterable<String> documents,
String language, AnalyzeSentimentOptions options) {
return client.analyzeSentimentBatch(documents, language, options).block();
}
/**
* Returns a sentiment prediction, as well as confidence scores for each sentiment label
* (Positive, Negative, and Neutral) for the document and each sentence within it.
*
* <p><strong>Code Sample</strong></p>
* <p>Analyze sentiment in a list of {@link TextDocumentInput documents} with provided request options.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions-Context -->
* <pre>
* List<TextDocumentInput> textDocumentInputs = Arrays.asList(
* new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.")
* .setLanguage("en"),
* new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.")
* .setLanguage("en")
* );
*
* // Analyzing batch sentiments
* Response<AnalyzeSentimentResultCollection> response =
* textAnalyticsClient.analyzeSentimentBatchWithResponse(textDocumentInputs,
* new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
*
* // Response's status code
* System.out.printf("Status code of request response: %d%n", response.getStatusCode());
* AnalyzeSentimentResultCollection resultCollection = response.getValue();
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* // Analyzed sentiment for each document in the batch of documents
* resultCollection.forEach(analyzeSentimentResult -> {
* System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
* // Valid document
* DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
* System.out.printf(
* "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f, "
* + "negative score: %.2f.%n",
* documentSentiment.getSentiment(),
* documentSentiment.getConfidenceScores().getPositive(),
* documentSentiment.getConfidenceScores().getNeutral(),
* documentSentiment.getConfidenceScores().getNegative());
* documentSentiment.getSentences().forEach(sentenceSentiment -> {
* System.out.printf(
* "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f,"
* + " negative score: %.2f.%n",
* sentenceSentiment.getSentiment(),
* sentenceSentiment.getConfidenceScores().getPositive(),
* sentenceSentiment.getConfidenceScores().getNeutral(),
* sentenceSentiment.getConfidenceScores().getNegative());
* });
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions-Context -->
*
* @param documents A list of {@link TextDocumentInput documents} to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents
* and show statistics.
* @param context Additional context that is passed through the HTTP pipeline during the service call.
*
* @return A {@link Response} that contains an {@link AnalyzeSentimentResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*
* @deprecated Please use
* {@link #analyzeSentimentBatchWithResponse(Iterable, AnalyzeSentimentOptions, Context)} instead.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AnalyzeSentimentResultCollection> analyzeSentimentBatchWithResponse(
Iterable<TextDocumentInput> documents, TextAnalyticsRequestOptions options, Context context) {
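        // Bridge the legacy TextAnalyticsRequestOptions onto AnalyzeSentimentOptions before delegating to the
        // shared async implementation.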
return client.analyzeSentimentAsyncClient.analyzeSentimentBatchWithContext(documents,
new AnalyzeSentimentOptions()
.setIncludeStatistics(options == null ? false : options.isIncludeStatistics())
.setModelVersion(options == null ? null : options.getModelVersion()), context).block();
}
/**
* Returns a sentiment prediction, as well as confidence scores for each sentiment label (Positive, Negative, and
* Neutral) for the document and each sentence within it. If {@code includeOpinionMining} of
* {@link AnalyzeSentimentOptions} is set to true, the output includes the opinion mining results: the opinions of
* each sentence are mined and a more granular analysis is conducted around the aspects in the text
* (also known as aspect-based sentiment analysis).
*
* <p><strong>Code Sample</strong></p>
* <p>Analyze sentiment and mine the opinions for each sentence in a list of
* {@link TextDocumentInput documents} with provided {@link AnalyzeSentimentOptions} options.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-AnalyzeSentimentOptions-Context -->
* <pre>
* List<TextDocumentInput> textDocumentInputs = Arrays.asList(
* new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.")
* .setLanguage("en"),
* new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.")
* .setLanguage("en")
* );
*
* AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true)
* .setIncludeStatistics(true);
*
* // Analyzing batch sentiments
* Response<AnalyzeSentimentResultCollection> response =
* textAnalyticsClient.analyzeSentimentBatchWithResponse(textDocumentInputs, options, Context.NONE);
*
* // Response's status code
* System.out.printf("Status code of request response: %d%n", response.getStatusCode());
* AnalyzeSentimentResultCollection resultCollection = response.getValue();
*
* // Batch statistics
* TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics();
* System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n",
* batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount());
*
* // Analyzed sentiment for each document in the batch of documents
* resultCollection.forEach(analyzeSentimentResult -> {
* System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId());
* DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
* documentSentiment.getSentences().forEach(sentenceSentiment -> {
* System.out.printf("\tSentence sentiment: %s%n", sentenceSentiment.getSentiment());
* sentenceSentiment.getOpinions().forEach(opinion -> {
* TargetSentiment targetSentiment = opinion.getTarget();
* System.out.printf("\tTarget sentiment: %s, target text: %s%n", targetSentiment.getSentiment(),
* targetSentiment.getText());
* for (AssessmentSentiment assessmentSentiment : opinion.getAssessments()) {
* System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the assessment negated: %s.%n",
* assessmentSentiment.getSentiment(), assessmentSentiment.getText(),
* assessmentSentiment.isNegated());
* }
* });
* });
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-AnalyzeSentimentOptions-Context -->
*
* @param documents A list of {@link TextDocumentInput documents} to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param options The additional configurable {@link AnalyzeSentimentOptions options} that may be passed when
* analyzing sentiments.
* @param context Additional context that is passed through the HTTP pipeline during the service call.
*
* @return A {@link Response} that contains an {@link AnalyzeSentimentResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AnalyzeSentimentResultCollection> analyzeSentimentBatchWithResponse(
Iterable<TextDocumentInput> documents, AnalyzeSentimentOptions options, Context context) {
return client.analyzeSentimentAsyncClient.analyzeSentimentBatchWithContext(documents, options, context).block();
}
/**
* Analyze healthcare entities, entity data sources, and entity relations in a list of
* {@link String documents} with provided request options.
*
* Note: To use this functionality, you must request access to the public preview.
* Azure Active Directory (AAD) is not currently supported. For more information, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/how-tos/text-analytics-for-health?tabs=ner#request-access-to-the-public-preview">this</a>.
*
* See <a href="https://aka.ms/talangs">this</a> for the languages supported by the Text Analytics API.
*
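* <p><strong>Code Sample</strong></p>
* <p>A minimal sketch of analyzing healthcare entities for a list of {@code String} documents; the
* {@code textAnalyticsClient} instance and the document text are assumed for illustration only.</p>
*
* <pre>
* List&lt;String&gt; documents = Arrays.asList(
*     "The patient is a 54-year-old gentleman with a history of progressive angina over the past several months.");
*
* SyncPoller&lt;AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable&gt; syncPoller =
*     textAnalyticsClient.beginAnalyzeHealthcareEntities(documents, "en",
*         new AnalyzeHealthcareEntitiesOptions().setIncludeStatistics(true));
* syncPoller.waitForCompletion();
* syncPoller.getFinalResult().forEach(resultCollection -&gt;
*     resultCollection.forEach(healthcareEntitiesResult -&gt; {
*         System.out.printf("Document ID: %s%n", healthcareEntitiesResult.getId());
*         healthcareEntitiesResult.getEntities().forEach(healthcareEntity -&gt;
*             System.out.printf("Text: %s, category: %s, confidence score: %f.%n",
*                 healthcareEntity.getText(), healthcareEntity.getCategory(),
*                 healthcareEntity.getConfidenceScore()));
*     }));
* </pre>
*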
* @param documents A list of documents to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param language The 2-letter ISO 639-1 representation of the language for the documents. If not set, "en"
* (English) is used as the default.
* @param options The additional configurable {@link AnalyzeHealthcareEntitiesOptions options} that may be passed
* when analyzing healthcare entities.
* @return A {@link SyncPoller} that polls the analyze healthcare operation until it has completed, has failed,
* or has been cancelled. The completed operation returns a {@link PagedIterable} of
* {@link AnalyzeHealthcareEntitiesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
* @throws TextAnalyticsException If analyze operation fails.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
beginAnalyzeHealthcareEntities(Iterable<String> documents, String language,
AnalyzeHealthcareEntitiesOptions options) {
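        // Wrap each raw string in a TextDocumentInput, using its position in the list as the document ID and
        // applying the caller's language hint.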
return beginAnalyzeHealthcareEntities(
mapByIndex(documents, (index, value) -> {
final TextDocumentInput textDocumentInput = new TextDocumentInput(index, value);
textDocumentInput.setLanguage(language);
return textDocumentInput;
}), options, Context.NONE);
}
/**
* Analyze healthcare entities, entity data sources, and entity relations in a list of
* {@link TextDocumentInput documents} with provided request options.
*
* Note: To use this functionality, you must request access to the public preview.
* Azure Active Directory (AAD) is not currently supported. For more information, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/how-tos/text-analytics-for-health?tabs=ner#request-access-to-the-public-preview">this</a>.
*
* See <a href="https://aka.ms/talangs">this</a> for the languages supported by the Text Analytics API.
*
* <p><strong>Code Sample</strong></p>
* <p>Analyze healthcare entities, entity data sources, and entity relations in a list of
* {@link TextDocumentInput documents} with provided request options to
* show statistics.</p>
*
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeHealthcareEntities#Iterable-AnalyzeHealthcareEntitiesOptions-Context -->
* <pre>
* List<TextDocumentInput> documents = new ArrayList<>();
* for (int i = 0; i < 3; i++) {
* documents.add(new TextDocumentInput(Integer.toString(i),
* "The patient is a 54-year-old gentleman with a history of progressive angina over "
* + "the past several months."));
* }
*
* // Request options: show statistics
* AnalyzeHealthcareEntitiesOptions options = new AnalyzeHealthcareEntitiesOptions()
* .setIncludeStatistics(true);
*
* SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
* syncPoller = textAnalyticsClient.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
*
* syncPoller.waitForCompletion();
* AnalyzeHealthcareEntitiesPagedIterable result = syncPoller.getFinalResult();
*
* // Task operation statistics
* final AnalyzeHealthcareEntitiesOperationDetail operationResult = syncPoller.poll().getValue();
* System.out.printf("Operation created time: %s, expiration time: %s.%n",
* operationResult.getCreatedAt(), operationResult.getExpiresAt());
*
* result.forEach(analyzeHealthcareEntitiesResultCollection -> {
* // Model version
* System.out.printf("Results of Azure Text Analytics \"Analyze Healthcare\" Model, version: %s%n",
* analyzeHealthcareEntitiesResultCollection.getModelVersion());
*
* TextDocumentBatchStatistics healthcareTaskStatistics =
* analyzeHealthcareEntitiesResultCollection.getStatistics();
* // Batch statistics
* System.out.printf("Documents statistics: document count = %s, erroneous document count = %s,"
* + " transaction count = %s, valid document count = %s.%n",
* healthcareTaskStatistics.getDocumentCount(), healthcareTaskStatistics.getInvalidDocumentCount(),
* healthcareTaskStatistics.getTransactionCount(), healthcareTaskStatistics.getValidDocumentCount());
*
* analyzeHealthcareEntitiesResultCollection.forEach(healthcareEntitiesResult -> {
* System.out.println("document id = " + healthcareEntitiesResult.getId());
* System.out.println("Document entities: ");
* AtomicInteger ct = new AtomicInteger();
* healthcareEntitiesResult.getEntities().forEach(healthcareEntity -> {
* System.out.printf("\ti = %d, Text: %s, category: %s, confidence score: %f.%n",
* ct.getAndIncrement(), healthcareEntity.getText(), healthcareEntity.getCategory(),
* healthcareEntity.getConfidenceScore());
*
* IterableStream<EntityDataSource> healthcareEntityDataSources =
* healthcareEntity.getDataSources();
* if (healthcareEntityDataSources != null) {
* healthcareEntityDataSources.forEach(healthcareEntityLink -> System.out.printf(
* "\t\tEntity ID in data source: %s, data source: %s.%n",
* healthcareEntityLink.getEntityId(), healthcareEntityLink.getName()));
* }
* });
* // Healthcare entity relation groups
* healthcareEntitiesResult.getEntityRelations().forEach(entityRelation -> {
* System.out.printf("\tRelation type: %s.%n", entityRelation.getRelationType());
* entityRelation.getRoles().forEach(role -> {
* final HealthcareEntity entity = role.getEntity();
* System.out.printf("\t\tEntity text: %s, category: %s, role: %s.%n",
* entity.getText(), entity.getCategory(), role.getName());
* });
* });
* });
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeHealthcareEntities#Iterable-AnalyzeHealthcareEntitiesOptions-Context -->
*
* @param documents A list of {@link TextDocumentInput documents} to be analyzed.
* @param options The additional configurable {@link AnalyzeHealthcareEntitiesOptions options} that may be passed
* when analyzing healthcare entities.
* @param context Additional context that is passed through the HTTP pipeline during the service call.
*
* @return A {@link SyncPoller} that polls the analyze healthcare operation until it has completed, has failed,
* or has been cancelled. The completed operation returns a {@link PagedIterable} of
* {@link AnalyzeHealthcareEntitiesResultCollection}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
* @throws TextAnalyticsException If analyze operation fails.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
beginAnalyzeHealthcareEntities(Iterable<TextDocumentInput> documents, AnalyzeHealthcareEntitiesOptions options,
Context context) {
return client.analyzeHealthcareEntityAsyncClient.beginAnalyzeHealthcarePagedIterable(documents, options,
context).getSyncPoller();
}
/**
* Execute actions, such as entities recognition, PII entities recognition, and key phrases extraction, for a list
* of {@link String documents} with provided request options.
*
* See <a href="https://aka.ms/talangs">this</a> for the languages supported by the Text Analytics API.
*
* <p><strong>Code Sample</strong></p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions -->
* <pre>
* List<String> documents = Arrays.asList(
* "Elon Musk is the CEO of SpaceX and Tesla.",
* "My SSN is 859-98-0987"
* );
*
* SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
* textAnalyticsClient.beginAnalyzeActions(
* documents,
* new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
* .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
* .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
* "en",
* new AnalyzeActionsOptions().setIncludeStatistics(false));
* syncPoller.waitForCompletion();
* AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
* result.forEach(analyzeActionsResult -> {
* System.out.println("Entities recognition action results:");
* analyzeActionsResult.getRecognizeEntitiesResults().forEach(
* actionResult -> {
* if (!actionResult.isError()) {
* actionResult.getDocumentsResults().forEach(
* entitiesResult -> entitiesResult.getEntities().forEach(
* entity -> System.out.printf(
* "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
* + " confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getSubcategory(),
* entity.getConfidenceScore())));
* }
* });
* System.out.println("Key phrases extraction action results:");
* analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
* actionResult -> {
* if (!actionResult.isError()) {
* actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
* System.out.println("Extracted phrases:");
* extractKeyPhraseResult.getKeyPhrases()
* .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
* });
* }
* });
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-String-AnalyzeActionsOptions -->
*
* @param documents A list of documents to be analyzed.
* For text length limits, maximum batch size, and supported text encoding, see
* <a href="https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits">data limits</a>.
* @param actions The {@link TextAnalyticsActions actions} that contains all actions to be executed.
* An action is one task of execution, such as a single task of 'Key Phrases Extraction' on the given document
* inputs.
* @param language The 2-letter ISO 639-1 representation of the language for the documents. If not set, "en"
* (English) is used as the default.
* @param options The additional configurable {@link AnalyzeActionsOptions options} that may be passed when
* analyzing a collection of actions.
*
* @return A {@link SyncPoller} that polls the operation to analyze a collection of actions until it has completed,
* has failed, or has been cancelled. The completed operation returns an {@link AnalyzeActionsResultPagedIterable}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
* @throws TextAnalyticsException If analyze operation fails.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActions(
Iterable<String> documents, TextAnalyticsActions actions, String language, AnalyzeActionsOptions options) {
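        // Wrap each raw string in a TextDocumentInput, using its position in the list as the document ID and
        // applying the caller's language hint.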
return client.analyzeActionsAsyncClient.beginAnalyzeActionsIterable(
mapByIndex(documents, (index, value) -> {
final TextDocumentInput textDocumentInput = new TextDocumentInput(index, value);
textDocumentInput.setLanguage(language);
return textDocumentInput;
}), actions, options, Context.NONE).getSyncPoller();
}
/**
* Execute actions, such as entities recognition, PII entities recognition, and key phrases extraction, for a list
* of {@link TextDocumentInput documents} with provided request options.
*
* See <a href="https://aka.ms/talangs">this</a> for the languages supported by the Text Analytics API.
*
* <p><strong>Code Sample</strong></p>
* <!-- src_embed com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-AnalyzeActionsOptions-Context -->
* <pre>
* List<TextDocumentInput> documents = Arrays.asList(
* new TextDocumentInput("0", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en"),
* new TextDocumentInput("1", "My SSN is 859-98-0987").setLanguage("en")
* );
*
* SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
* textAnalyticsClient.beginAnalyzeActions(
* documents,
* new TextAnalyticsActions().setDisplayName("{tasks_display_name}")
* .setRecognizeEntitiesActions(new RecognizeEntitiesAction())
* .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction()),
* new AnalyzeActionsOptions().setIncludeStatistics(false),
* Context.NONE);
* syncPoller.waitForCompletion();
* AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
* result.forEach(analyzeActionsResult -> {
* System.out.println("Entities recognition action results:");
* analyzeActionsResult.getRecognizeEntitiesResults().forEach(
* actionResult -> {
* if (!actionResult.isError()) {
* actionResult.getDocumentsResults().forEach(
* entitiesResult -> entitiesResult.getEntities().forEach(
* entity -> System.out.printf(
* "Recognized entity: %s, entity category: %s, entity subcategory: %s,"
* + " confidence score: %f.%n",
* entity.getText(), entity.getCategory(), entity.getSubcategory(),
* entity.getConfidenceScore())));
* }
* });
* System.out.println("Key phrases extraction action results:");
* analyzeActionsResult.getExtractKeyPhrasesResults().forEach(
* actionResult -> {
* if (!actionResult.isError()) {
* actionResult.getDocumentsResults().forEach(extractKeyPhraseResult -> {
* System.out.println("Extracted phrases:");
* extractKeyPhraseResult.getKeyPhrases()
* .forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases));
* });
* }
* });
* });
* </pre>
* <!-- end com.azure.ai.textanalytics.TextAnalyticsClient.beginAnalyzeActions#Iterable-TextAnalyticsActions-AnalyzeActionsOptions-Context -->
*
* @param documents A list of {@link TextDocumentInput documents} to be analyzed.
* @param actions The {@link TextAnalyticsActions actions} that contains all actions to be executed.
* An action is one task of execution, such as a single task of 'Key Phrases Extraction' on the given document
* inputs.
* @param options The additional configurable {@link AnalyzeActionsOptions options} that may be passed when
* analyzing a collection of actions.
* @param context Additional context that is passed through the HTTP pipeline during the service call.
*
* @return A {@link SyncPoller} that polls the operation to analyze a collection of actions until it has completed,
* has failed, or has been cancelled. The completed operation returns an {@link AnalyzeActionsResultPagedIterable}.
*
* @throws NullPointerException if {@code documents} is null.
* @throws IllegalArgumentException if {@code documents} is empty.
* @throws TextAnalyticsException If analyze operation fails.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActions(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
return client.analyzeActionsAsyncClient.beginAnalyzeActionsIterable(documents, actions, options, context)
.getSyncPoller();
}
}