@azure/ai-text-analytics 5.2.0-beta.2 → 6.0.0-alpha.20220518.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/LICENSE +1 -1
  2. package/README.md +375 -133
  3. package/dist/index.js +2797 -4176
  4. package/dist/index.js.map +1 -1
  5. package/dist-esm/src/azureKeyCredentialPolicy.js +2 -2
  6. package/dist-esm/src/azureKeyCredentialPolicy.js.map +1 -1
  7. package/dist-esm/src/constants.js +16 -0
  8. package/dist-esm/src/constants.js.map +1 -0
  9. package/dist-esm/src/generated/generatedClient.js +80 -334
  10. package/dist-esm/src/generated/generatedClient.js.map +1 -1
  11. package/dist-esm/src/generated/index.js +1 -1
  12. package/dist-esm/src/generated/index.js.map +1 -1
  13. package/dist-esm/src/generated/models/index.js +106 -35
  14. package/dist-esm/src/generated/models/index.js.map +1 -1
  15. package/dist-esm/src/generated/models/mappers.js +1625 -2101
  16. package/dist-esm/src/generated/models/mappers.js.map +1 -1
  17. package/dist-esm/src/generated/models/parameters.js +18 -105
  18. package/dist-esm/src/generated/models/parameters.js.map +1 -1
  19. package/dist-esm/src/generated/operations/analyzeText.js +105 -0
  20. package/dist-esm/src/generated/operations/analyzeText.js.map +1 -0
  21. package/dist-esm/src/generated/operations/index.js +9 -0
  22. package/dist-esm/src/generated/operations/index.js.map +1 -0
  23. package/dist-esm/src/generated/operationsInterfaces/analyzeText.js +9 -0
  24. package/dist-esm/src/generated/operationsInterfaces/analyzeText.js.map +1 -0
  25. package/dist-esm/src/generated/operationsInterfaces/index.js +9 -0
  26. package/dist-esm/src/generated/operationsInterfaces/index.js.map +1 -0
  27. package/dist-esm/src/index.js +14 -5
  28. package/dist-esm/src/index.js.map +1 -1
  29. package/dist-esm/src/lro.js +195 -0
  30. package/dist-esm/src/lro.js.map +1 -0
  31. package/dist-esm/src/models.js +42 -0
  32. package/dist-esm/src/models.js.map +1 -0
  33. package/dist-esm/src/textAnalysisClient.js +197 -0
  34. package/dist-esm/src/textAnalysisClient.js.map +1 -0
  35. package/dist-esm/src/transforms.js +279 -0
  36. package/dist-esm/src/transforms.js.map +1 -0
  37. package/dist-esm/src/util.js +35 -141
  38. package/dist-esm/src/util.js.map +1 -1
  39. package/package.json +25 -33
  40. package/types/ai-text-analytics.d.ts +1402 -1421
  41. package/CHANGELOG.md +0 -162
  42. package/dist-esm/src/analyzeActionsResult.js +0 -205
  43. package/dist-esm/src/analyzeActionsResult.js.map +0 -1
  44. package/dist-esm/src/analyzeHealthcareEntitiesResult.js +0 -53
  45. package/dist-esm/src/analyzeHealthcareEntitiesResult.js.map +0 -1
  46. package/dist-esm/src/analyzeLro.js +0 -132
  47. package/dist-esm/src/analyzeLro.js.map +0 -1
  48. package/dist-esm/src/analyzeSentimentResult.js +0 -74
  49. package/dist-esm/src/analyzeSentimentResult.js.map +0 -1
  50. package/dist-esm/src/analyzeSentimentResultArray.js +0 -11
  51. package/dist-esm/src/analyzeSentimentResultArray.js.map +0 -1
  52. package/dist-esm/src/detectLanguageResult.js +0 -17
  53. package/dist-esm/src/detectLanguageResult.js.map +0 -1
  54. package/dist-esm/src/detectLanguageResultArray.js +0 -11
  55. package/dist-esm/src/detectLanguageResultArray.js.map +0 -1
  56. package/dist-esm/src/extractKeyPhrasesResult.js +0 -17
  57. package/dist-esm/src/extractKeyPhrasesResult.js.map +0 -1
  58. package/dist-esm/src/extractKeyPhrasesResultArray.js +0 -11
  59. package/dist-esm/src/extractKeyPhrasesResultArray.js.map +0 -1
  60. package/dist-esm/src/extractSummaryResult.js +0 -17
  61. package/dist-esm/src/extractSummaryResult.js.map +0 -1
  62. package/dist-esm/src/extractSummaryResultArray.js +0 -11
  63. package/dist-esm/src/extractSummaryResultArray.js.map +0 -1
  64. package/dist-esm/src/generated/generatedClientContext.js +0 -42
  65. package/dist-esm/src/generated/generatedClientContext.js.map +0 -1
  66. package/dist-esm/src/healthLro.js +0 -139
  67. package/dist-esm/src/healthLro.js.map +0 -1
  68. package/dist-esm/src/multiCategoryClassifyResult.js +0 -17
  69. package/dist-esm/src/multiCategoryClassifyResult.js.map +0 -1
  70. package/dist-esm/src/multiCategoryClassifyResultArray.js +0 -11
  71. package/dist-esm/src/multiCategoryClassifyResultArray.js.map +0 -1
  72. package/dist-esm/src/pollerModels.js +0 -4
  73. package/dist-esm/src/pollerModels.js.map +0 -1
  74. package/dist-esm/src/recognizeCategorizedEntitiesResult.js +0 -17
  75. package/dist-esm/src/recognizeCategorizedEntitiesResult.js.map +0 -1
  76. package/dist-esm/src/recognizeCategorizedEntitiesResultArray.js +0 -11
  77. package/dist-esm/src/recognizeCategorizedEntitiesResultArray.js.map +0 -1
  78. package/dist-esm/src/recognizeCustomEntitiesResult.js +0 -17
  79. package/dist-esm/src/recognizeCustomEntitiesResult.js.map +0 -1
  80. package/dist-esm/src/recognizeCustomEntitiesResultArray.js +0 -11
  81. package/dist-esm/src/recognizeCustomEntitiesResultArray.js.map +0 -1
  82. package/dist-esm/src/recognizeLinkedEntitiesResult.js +0 -17
  83. package/dist-esm/src/recognizeLinkedEntitiesResult.js.map +0 -1
  84. package/dist-esm/src/recognizeLinkedEntitiesResultArray.js +0 -11
  85. package/dist-esm/src/recognizeLinkedEntitiesResultArray.js.map +0 -1
  86. package/dist-esm/src/recognizePiiEntitiesResult.js +0 -18
  87. package/dist-esm/src/recognizePiiEntitiesResult.js.map +0 -1
  88. package/dist-esm/src/recognizePiiEntitiesResultArray.js +0 -11
  89. package/dist-esm/src/recognizePiiEntitiesResultArray.js.map +0 -1
  90. package/dist-esm/src/singleCategoryClassifyResult.js +0 -17
  91. package/dist-esm/src/singleCategoryClassifyResult.js.map +0 -1
  92. package/dist-esm/src/singleCategoryClassifyResultArray.js +0 -11
  93. package/dist-esm/src/singleCategoryClassifyResultArray.js.map +0 -1
  94. package/dist-esm/src/textAnalyticsAction.js +0 -4
  95. package/dist-esm/src/textAnalyticsAction.js.map +0 -1
  96. package/dist-esm/src/textAnalyticsClient.js +0 -511
  97. package/dist-esm/src/textAnalyticsClient.js.map +0 -1
  98. package/dist-esm/src/textAnalyticsOperationOptions.js +0 -4
  99. package/dist-esm/src/textAnalyticsOperationOptions.js.map +0 -1
  100. package/dist-esm/src/textAnalyticsResult.js +0 -94
  101. package/dist-esm/src/textAnalyticsResult.js.map +0 -1
  102. package/dist-esm/src/tracing.js +0 -12
  103. package/dist-esm/src/tracing.js.map +0 -1
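For orientation before reading the README diff below: the headline change in this release is the rename of `TextAnalyticsClient` to `TextAnalysisClient` and the consolidation of the per-feature methods (`analyzeSentiment`, `recognizeEntities`, `detectLanguage`, and so on) into a single `analyze` method keyed by an action name. A minimal before/after sketch, using only calls and placeholders that appear in the README changes below:

```javascript
const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");

// "<endpoint>" and "<API key>" are placeholders, exactly as in the README samples.
const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));

async function main() {
  // 5.x: const results = await client.analyzeSentiment(documents);
  // 6.x: one `analyze` method that takes the action name as its first argument.
  const results = await client.analyze("SentimentAnalysis", ["Hello world!"]);
  for (const result of results) {
    if (result.error === undefined) {
      console.log(result);
    }
  }
}

main();
```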
package/README.md CHANGED
@@ -1,18 +1,18 @@
1
- # Azure Text Analytics client library for JavaScript
1
+ # Azure Text Analysis client library for JavaScript
2
2
 
3
- [Azure TextAnalytics](https://azure.microsoft.com/services/cognitive-services/text-analytics/) is a cloud-based service that provides advanced natural language processing over raw text, and includes the following main features:
3
+ [Azure Cognitive Service for Language](https://azure.microsoft.com/services/cognitive-services/language-service/) is a cloud-based service that provides advanced natural language processing over raw text, and includes the following main features:
4
4
 
5
- **Note:** This SDK targets Azure Text Analytics service API version 3.2.0-preview.2.
5
+ **Note:** This SDK targets Azure Cognitive Service for Language API version 2022-04-01-preview.
6
6
 
7
7
  - Language Detection
8
8
  - Sentiment Analysis
9
9
  - Key Phrase Extraction
10
10
  - Named Entity Recognition
11
11
  - Recognition of Personally Identifiable Information
12
- - Linked Entity Recognition
13
- - Extractive Summarization
12
+ - Entity Linking
14
13
  - Healthcare Analysis
15
- - Custom Entities Recognition
14
+ - Extractive Summarization
15
+ - Custom Entity Recognition
16
16
  - Custom Document Classification
17
17
  - Support Multiple Actions Per Document
18
18
 
@@ -29,7 +29,7 @@ Key links:
29
29
  - [Source code](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/textanalytics/ai-text-analytics/)
30
30
  - [Package (NPM)](https://www.npmjs.com/package/@azure/ai-text-analytics)
31
31
  - [API reference documentation](https://docs.microsoft.com/javascript/api/@azure/ai-text-analytics)
32
- - [Product documentation](https://docs.microsoft.com/azure/cognitive-services/text-analytics/)
32
+ - [Product documentation](https://docs.microsoft.com/azure/cognitive-services/language-service/)
33
33
  - [Samples](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/textanalytics/ai-text-analytics/samples)
34
34
 
35
35
  ## Getting started
@@ -44,7 +44,7 @@ See our [support policy](https://github.com/Azure/azure-sdk-for-js/blob/main/SUP
44
44
  ### Prerequisites
45
45
 
46
46
  - An [Azure subscription][azure_sub].
47
- - An existing [Cognitive Services][cognitive_resource] or Text Analytics resource. If you need to create the resource, you can use the [Azure Portal][azure_portal] or [Azure CLI][azure_cli].
47
+ - An existing [Cognitive Services][cognitive_resource] or Language resource. If you need to create the resource, you can use the [Azure Portal][azure_portal] or [Azure CLI][azure_cli].
48
48
 
49
49
  If you use the Azure CLI, replace `<your-resource-group-name>` and `<your-resource-name>` with your own unique names:
50
50
 
@@ -54,25 +54,25 @@ az cognitiveservices account create --kind TextAnalytics --resource-group <your-
54
54
 
55
55
  ### Install the `@azure/ai-text-analytics` package
56
56
 
57
- Install the Azure Text Analytics client library for JavaScript with `npm`:
57
+ Install the Azure Text Analysis client library for JavaScript with `npm`:
58
58
 
59
59
  ```bash
60
60
  npm install @azure/ai-text-analytics
61
61
  ```
62
62
 
63
- ### Create and authenticate a `TextAnalyticsClient`
63
+ ### Create and authenticate a `TextAnalysisClient`
64
64
 
65
- To create a client object to access the Text Analytics API, you will need the `endpoint` of your Text Analytics resource and a `credential`. The Text Analytics client can use either Azure Active Directory credentials or an API key credential to authenticate.
65
+ To create a client object to access the Language API, you will need the `endpoint` of your Language resource and a `credential`. The Text Analysis client can use either Azure Active Directory credentials or an API key credential to authenticate.
66
66
 
67
- You can find the endpoint for your text analytics resource either in the [Azure Portal][azure_portal] or by using the [Azure CLI][azure_cli] snippet below:
67
+ You can find the endpoint for your Language resource either in the [Azure Portal][azure_portal] or by using the [Azure CLI][azure_cli] snippet below:
68
68
 
69
69
  ```bash
70
- az cognitiveservices account show --name <your-resource-name> --resource-group <your-resource-group-name> --query "endpoint"
70
+ az cognitiveservices account show --name <your-resource-name> --resource-group <your-resource-group-name> --query "properties.endpoint"
71
71
  ```
72
72
 
73
73
  #### Using an API Key
74
74
 
75
- Use the [Azure Portal][azure_portal] to browse to your Text Analytics resource and retrieve an API key, or use the [Azure CLI][azure_cli] snippet below:
75
+ Use the [Azure Portal][azure_portal] to browse to your Language resource and retrieve an API key, or use the [Azure CLI][azure_cli] snippet below:
76
76
 
77
77
  **Note:** Sometimes the API key is referred to as a "subscription key" or "subscription API key."
78
78
 
@@ -83,9 +83,9 @@ az cognitiveservices account keys list --resource-group <your-resource-group-nam
83
83
  Once you have an API key and endpoint, you can use the `AzureKeyCredential` class to authenticate the client as follows:
84
84
 
85
85
  ```javascript
86
- const { TextAnalyticsClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
86
+ const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
87
87
 
88
- const client = new TextAnalyticsClient("<endpoint>", new AzureKeyCredential("<API key>"));
88
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
89
89
  ```
90
90
 
91
91
  #### Using an Azure Active Directory Credential
@@ -97,26 +97,26 @@ or other credential providers provided with the Azure SDK, please install the `@
97
97
  npm install @azure/identity
98
98
  ```
99
99
 
100
- You will also need to [register a new AAD application][register_aad_app] and grant access to Text Analytics by assigning the `"Cognitive Services User"` role to your service principal (note: other roles such as `"Owner"` will not grant the necessary permissions, only `"Cognitive Services User"` will suffice to run the examples and the sample code).
100
+ You will also need to [register a new AAD application][register_aad_app] and grant access to Language by assigning the `"Cognitive Services User"` role to your service principal (note: other roles such as `"Owner"` will not grant the necessary permissions, only `"Cognitive Services User"` will suffice to run the examples and the sample code).
101
101
 
102
102
  Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables: `AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`.
103
103
 
104
104
  ```javascript
105
- const { TextAnalyticsClient } = require("@azure/ai-text-analytics");
105
+ const { TextAnalysisClient } = require("@azure/ai-text-analytics");
106
106
  const { DefaultAzureCredential } = require("@azure/identity");
107
107
 
108
- const client = new TextAnalyticsClient("<endpoint>", new DefaultAzureCredential());
108
+ const client = new TextAnalysisClient("<endpoint>", new DefaultAzureCredential());
109
109
  ```
110
110
 
111
111
  ## Key concepts
112
112
 
113
- ### TextAnalyticsClient
113
+ ### TextAnalysisClient
114
114
 
115
- `TextAnalyticsClient` is the primary interface for developers using the Text Analytics client library. Explore the methods on this client object to understand the different features of the Text Analytics service that you can access.
115
+ `TextAnalysisClient` is the primary interface for developers using the Text Analysis client library. Explore the methods on this client object to understand the different features of the Language service that you can access.
116
116
 
117
117
  ### Input
118
118
 
119
- A **document** represents a single unit of input to be analyzed by the predictive models in the Text Analytics service. Operations on `TextAnalyticsClient` take a collection of inputs to be analyzed as a batch. The operation methods have overloads that allow the inputs to be represented as strings, or as objects with attached metadata.
119
+ A **document** represents a single unit of input to be analyzed by the predictive models in the Language service. Operations on `TextAnalysisClient` take a collection of inputs to be analyzed as a batch. The operation methods have overloads that allow the inputs to be represented as strings, or as objects with attached metadata.
120
120
 
121
121
  For example, each document can be passed as a string in an array, e.g.
122
122
 
@@ -124,7 +124,7 @@ For example, each document can be passed as a string in an array, e.g.
124
124
  const documents = [
125
125
  "I hated the movie. It was so slow!",
126
126
  "The movie made it into my top ten favorites.",
127
- "What a great movie!"
127
+ "What a great movie!",
128
128
  ];
129
129
  ```
130
130
 
@@ -134,7 +134,7 @@ or, if you wish to pass in a per-item document `id` or `language`/`countryHint`,
134
134
  const textDocumentInputs = [
135
135
  { id: "1", language: "en", text: "I hated the movie. It was so slow!" },
136
136
  { id: "2", language: "en", text: "The movie made it into my top ten favorites." },
137
- { id: "3", language: "en", text: "What a great movie!" }
137
+ { id: "3", language: "en", text: "What a great movie!" },
138
138
  ];
139
139
  ```
140
140
 
@@ -142,66 +142,56 @@ See [service limitations][data_limits] for the input, including document length
142
142
 
143
143
  ### Return Value
144
144
 
145
- The return value corresponding to a single document is either a successful result or an error object. Each `TextAnalyticsClient` method returns a heterogeneous array of results and errors that correspond to the inputs by index. A text input and its result will have the same index in the input and result collections. The collection may also optionally include information about the input batch and how it was processed in the `statistics` field.
145
+ The return value corresponding to a single document is either a successful result or an error object. Each `TextAnalysisClient` method returns a heterogeneous array of results and errors that correspond to the inputs by index. A text input and its result will have the same index in the input and result collections.
146
146
 
147
- An **result**, such as `AnalyzeSentimentResult`, is the result of a Text Analytics operation, containing a prediction or predictions about a single text input. An operation's result type also may optionally include information about the input document and how it was processed.
147
+ A **result**, such as `SentimentAnalysisResult`, is the result of a Language operation, containing a prediction or predictions about a single text input. An operation's result type may also optionally include information about the input document and how it was processed.
148
148
 
149
- The **error** object, `TextAnalyticsErrorResult`, indicates that the service encountered an error while processing the document and contains information about the error.
149
+ The **error** object, `TextAnalysisErrorResult`, indicates that the service encountered an error while processing the document and contains information about the error.
150
150
 
151
151
  ### Document Error Handling
152
152
 
153
- In the collection returned by an operation, errors are distinguished from successful responses by the presence of the `error` property, which contains the inner `TextAnalyticsError` object if an error was encountered. For successful result objects, this property is _always_ `undefined`.
153
+ In the collection returned by an operation, errors are distinguished from successful responses by the presence of the `error` property, which contains the inner `TextAnalysisError` object if an error was encountered. For successful result objects, this property is _always_ `undefined`.
154
154
 
155
155
  For example, to filter out all errors, you could use the following `filter`:
156
156
 
157
157
  ```javascript
158
- const results = await client.analyzeSentiment(documents);
158
+ const results = await client.analyze("SentimentAnalysis", documents);
159
159
  const onlySuccessful = results.filter((result) => result.error === undefined);
160
160
  ```
161
161
 
162
162
  **Note**: TypeScript users can benefit from better type-checking of result and error objects if `compilerOptions.strictNullChecks` is set to `true` in the `tsconfig.json` configuration. For example:
163
163
 
164
164
  ```typescript
165
- const [result] = await client.analyzeSentiment(["Hello world!"]);
165
+ const [result] = await client.analyze("SentimentAnalysis", ["Hello world!"]);
166
166
 
167
167
  if (result.error !== undefined) {
168
168
  // In this if block, TypeScript will be sure that the type of `result` is
169
- // `TextAnalyticsError` if compilerOptions.strictNullChecks is enabled in
169
+ // `TextAnalysisError` if compilerOptions.strictNullChecks is enabled in
170
170
  // the tsconfig.json
171
171
 
172
172
  console.log(result.error);
173
173
  }
174
174
  ```
175
175
 
176
- This capability was introduced in TypeScript 3.2, so users of TypeScript 3.1 must cast result values to their corresponding success variant as follows:
177
-
178
- ```typescript
179
- const [result] = await client.detectLanguage(["Hello world!"]);
180
-
181
- if (result.error === undefined) {
182
- const { primaryLanguage } = result as DetectLanguageSuccessResult;
183
- }
184
- ```
185
-
186
176
  ## Examples
187
177
 
188
- ### Analyze Sentiment
178
+ ### Sentiment Analysis
189
179
 
190
180
  Analyze sentiment of text to determine if it is positive, negative, neutral, or mixed, including per-sentence sentiment analysis and confidence scores.
191
181
 
192
182
  ```javascript
193
- const { TextAnalyticsClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
183
+ const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
194
184
 
195
- const client = new TextAnalyticsClient("<endpoint>", new AzureKeyCredential("<API key>"));
185
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
196
186
 
197
187
  const documents = [
198
188
  "I did not like the restaurant. The food was too spicy.",
199
189
  "The restaurant was decorated beautifully. The atmosphere was unlike any other restaurant I've been to.",
200
- "The food was yummy. :)"
190
+ "The food was yummy. :)",
201
191
  ];
202
192
 
203
193
  async function main() {
204
- const results = await client.analyzeSentiment(documents);
194
+ const results = await client.analyze("SentimentAnalysis", documents);
205
195
 
206
196
  for (const result of results) {
207
197
  if (result.error === undefined) {
@@ -218,25 +208,25 @@ main();
218
208
 
219
209
  To get more granular information about the opinions related to aspects of a product/service, also known as Aspect-based Sentiment Analysis in Natural Language Processing (NLP), see a sample on sentiment analysis with opinion mining [here][analyze_sentiment_opinion_mining_sample].
220
210
 
221
- ### Recognize Entities
211
+ ### Entity Recognition
222
212
 
223
213
  Recognize and categorize entities in text as people, places, organizations, dates/times, quantities, currencies, etc.
224
214
 
225
215
  The `language` parameter is optional. If it is not specified, the default English model will be used.
226
216
 
227
217
  ```javascript
228
- const { TextAnalyticsClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
218
+ const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
229
219
 
230
- const client = new TextAnalyticsClient("<endpoint>", new AzureKeyCredential("<API key>"));
220
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
231
221
 
232
222
  const documents = [
233
223
  "Microsoft was founded by Bill Gates and Paul Allen.",
234
224
  "Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle.",
235
- "Jeff bought three dozen eggs because there was a 50% discount."
225
+ "Jeff bought three dozen eggs because there was a 50% discount.",
236
226
  ];
237
227
 
238
228
  async function main() {
239
- const results = await client.recognizeEntities(documents, "en");
229
+ const results = await client.analyze("EntityRecognition", documents, "en");
240
230
 
241
231
  for (const result of results) {
242
232
  if (result.error === undefined) {
@@ -253,19 +243,19 @@ async function main() {
253
243
  main();
254
244
  ```
255
245
 
256
- ### Recognize PII Entities
246
+ ### PII Entity Recognition
257
247
 
258
- There is a separate endpoint and operation for recognizing Personally Identifiable Information (PII) in text such as Social Security Numbers, bank account information, credit card numbers, etc. Its usage is very similar to the standard entity recognition above:
248
+ There is a separate action for recognizing Personally Identifiable Information (PII) in text such as Social Security Numbers, bank account information, credit card numbers, etc. Its usage is very similar to the standard entity recognition above:
259
249
 
260
250
  ```javascript
261
- const { TextAnalyticsClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
262
- const client = new TextAnalyticsClient("<endpoint>", new AzureKeyCredential("<API key>"));
251
+ const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
252
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
263
253
  const documents = [
264
254
  "The employee's SSN is 555-55-5555.",
265
- "The employee's phone number is (555) 555-5555."
255
+ "The employee's phone number is (555) 555-5555.",
266
256
  ];
267
257
  async function main() {
268
- const results = await client.recognizePiiEntities(documents, "en");
258
+ const results = await client.analyze("PiiEntityRecognition", documents, "en");
269
259
  for (const result of results) {
270
260
  if (result.error === undefined) {
271
261
  console.log(" -- Recognized PII entities for input", result.id, "--");
@@ -280,23 +270,23 @@ async function main() {
280
270
  main();
281
271
  ```
282
272
 
283
- ### Recognize Linked Entities
273
+ ### Entity Linking
284
274
 
285
- A "Linked" entity is one that exists in a knowledge base (such as Wikipedia). The `recognizeLinkedEntities` operation can disambiguate entities by determining which entry in a knowledge base they likely refer to (for example, in a piece of text, does the word "Mars" refer to the planet, or to the Roman god of war). Linked entities contain associated URLs to the knowledge base that provides the definition of the entity.
275
+ A "Linked" entity is one that exists in a knowledge base (such as Wikipedia). The `EntityLinking` action can disambiguate entities by determining which entry in a knowledge base they likely refer to (for example, in a piece of text, does the word "Mars" refer to the planet, or to the Roman god of war). Linked entities contain associated URLs to the knowledge base that provides the definition of the entity.
286
276
 
287
277
  ```javascript
288
- const { TextAnalyticsClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
278
+ const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
289
279
 
290
- const client = new TextAnalyticsClient("<endpoint>", new AzureKeyCredential("<API key>"));
280
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
291
281
 
292
282
  const documents = [
293
283
  "Microsoft was founded by Bill Gates and Paul Allen.",
294
284
  "Easter Island, a Chilean territory, is a remote volcanic island in Polynesia.",
295
- "I use Azure Functions to develop my product."
285
+ "I use Azure Functions to develop my product.",
296
286
  ];
297
287
 
298
288
  async function main() {
299
- const results = await client.recognizeLinkedEntities(documents, "en");
289
+ const results = await client.analyze("EntityLinking", documents, "en");
300
290
 
301
291
  for (const result of results) {
302
292
  if (result.error === undefined) {
@@ -322,23 +312,23 @@ async function main() {
322
312
  main();
323
313
  ```
324
314
 
325
- ### Extract Key Phrases
315
+ ### Key Phrase Extraction
326
316
 
327
317
  Key Phrase extraction identifies the main talking points in a document. For example, given input text "The food was delicious and there were wonderful staff", the service returns "food" and "wonderful staff".
328
318
 
329
319
  ```javascript
330
- const { TextAnalyticsClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
320
+ const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
331
321
 
332
- const client = new TextAnalyticsClient("<endpoint>", new AzureKeyCredential("<API key>"));
322
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
333
323
 
334
324
  const documents = [
335
325
  "Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle.",
336
326
  "I need to take my cat to the veterinarian.",
337
- "I will travel to South America in the summer."
327
+ "I will travel to South America in the summer.",
338
328
  ];
339
329
 
340
330
  async function main() {
341
- const results = await client.extractKeyPhrases(documents, "en");
331
+ const results = await client.analyze("KeyPhraseExtraction", documents, "en");
342
332
 
343
333
  for (const result of results) {
344
334
  if (result.error === undefined) {
@@ -353,25 +343,25 @@ async function main() {
353
343
  main();
354
344
  ```
355
345
 
356
- ### Detect Language
346
+ ### Language Detection
357
347
 
358
348
  Determine the language of a piece of text.
359
349
 
360
- The `countryHint` parameter is optional, but can assist the service in providing correct output if the country of origin is known. If provided, it should be set to an ISO-3166 Alpha-2 two-letter country code (such as "us" for the United States or "jp" for Japan) or to the value `"none"`. If the parameter is not provided, then the default `"us"` (United States) model will be used. If you do not know the country of origin of the document, then the parameter `"none"` should be used, and the Text Analytics service will apply a model that is tuned for an unknown country of origin.
350
+ The `countryHint` parameter is optional, but can assist the service in providing correct output if the country of origin is known. If provided, it should be set to an ISO-3166 Alpha-2 two-letter country code (such as "us" for the United States or "jp" for Japan) or to the value `"none"`. If the parameter is not provided, then the default `"us"` (United States) model will be used. If you do not know the country of origin of the document, then the parameter `"none"` should be used, and the Language service will apply a model that is tuned for an unknown country of origin.
361
351
 
362
352
  ```javascript
363
- const { TextAnalyticsClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
353
+ const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
364
354
 
365
- const client = new TextAnalyticsClient("<endpoint>", new AzureKeyCredential("<API key>"));
355
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
366
356
 
367
357
  const documents = [
368
358
  "This is written in English.",
369
359
  "Il documento scritto in italiano.",
370
- "Dies ist in deutscher Sprache verfasst."
360
+ "Dies ist in deutscher Sprache verfasst.",
371
361
  ];
372
362
 
373
363
  async function main() {
374
- const results = await client.detectLanguage(documents, "none");
364
+ const results = await client.analyze("LanguageDetection", documents, "none");
375
365
 
376
366
  for (const result of results) {
377
367
  if (result.error === undefined) {
@@ -396,105 +386,356 @@ async function main() {
396
386
  main();
397
387
  ```
398
388
 
399
- ### Analyze Healthcare Entities
389
+ ### Healthcare Analysis
400
390
 
401
391
  Healthcare analysis identifies healthcare entities. For example, given input text "Prescribed 100mg ibuprofen, taken twice daily", the service returns "100mg" categorized as Dosage, "ibuprofen" as MedicationName, and "twice daily" as Frequency.
402
392
 
403
393
  ```javascript
404
- const { TextAnalyticsClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
394
+ const {
395
+ AnalyzeBatchAction,
396
+ AzureKeyCredential,
397
+ TextAnalysisClient,
398
+ } = require("@azure/ai-text-analytics");
399
+
400
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
401
+
402
+ const documents = [
403
+ "Prescribed 100mg ibuprofen, taken twice daily.",
404
+ "Patient does not suffer from high blood pressure.",
405
+ ];
406
+
407
+ async function main() {
408
+ const actions: AnalyzeBatchAction[] = [
409
+ {
410
+ kind: "Healthcare",
411
+ },
412
+ ];
413
+ const poller = await client.beginAnalyzeBatch(actions, documents, "en");
414
+ const results = await poller.pollUntilDone();
415
+ for await (const actionResult of results) {
416
+ if (actionResult.kind !== "Healthcare") {
417
+ throw new Error(`Expected healthcare results but got: ${actionResult.kind}`);
418
+ }
419
+ if (actionResult.error) {
420
+ const { code, message } = actionResult.error;
421
+ throw new Error(`Unexpected error (${code}): ${message}`);
422
+ }
423
+ for (const result of actionResult.results) {
424
+ console.log(`- Document ${result.id}`);
425
+ if (result.error) {
426
+ const { code, message } = result.error;
427
+ throw new Error(`Unexpected error (${code}): ${message}`);
428
+ }
429
+ console.log("\tRecognized Entities:");
430
+ for (const entity of result.entities) {
431
+ console.log(`\t- Entity "${entity.text}" of type ${entity.category}`);
432
+ if (entity.dataSources.length > 0) {
433
+ console.log("\t and it can be referenced in the following data sources:");
434
+ for (const ds of entity.dataSources) {
435
+ console.log(`\t\t- ${ds.name} with Entity ID: ${ds.entityId}`);
436
+ }
437
+ }
438
+ }
439
+ }
440
+ }
441
+ }
442
+
443
+ main();
444
+ ```
445
+
446
+ ### Extractive Summarization
447
+
448
+ Extractive summarization identifies sentences that summarize the article they belong to.
449
+
450
+ ```javascript
451
+ const {
452
+ AnalyzeBatchAction,
453
+ AzureKeyCredential,
454
+ TextAnalysisClient,
455
+ } = require("@azure/ai-text-analytics");
405
456
 
406
- const client = new TextAnalyticsClient("<endpoint>", new AzureKeyCredential("<API key>"));
457
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
407
458
 
408
459
  const documents = [
409
460
  "Prescribed 100mg ibuprofen, taken twice daily.",
410
- "Patient does not suffer from high blood pressure."
461
+ "Patient does not suffer from high blood pressure.",
411
462
  ];
412
463
 
413
464
  async function main() {
414
- const poller = await client.beginAnalyzeHealthcareEntities(documents);
465
+ const actions: AnalyzeBatchAction[] = [
466
+ {
467
+ kind: "ExtractiveSummarization",
468
+ maxSentenceCount: 2,
469
+ },
470
+ ];
471
+ const poller = await client.beginAnalyzeBatch(actions, documents, "en");
415
472
  const results = await poller.pollUntilDone();
416
473
 
417
- for await (const result of results) {
418
- console.log(`- Document ${result.id}`);
419
- if (!result.error) {
474
+ for await (const actionResult of results) {
475
+ if (actionResult.kind !== "ExtractiveSummarization") {
476
+ throw new Error(`Expected extractive summarization results but got: ${actionResult.kind}`);
477
+ }
478
+ if (actionResult.error) {
479
+ const { code, message } = actionResult.error;
480
+ throw new Error(`Unexpected error (${code}): ${message}`);
481
+ }
482
+ for (const result of actionResult.results) {
483
+ console.log(`- Document ${result.id}`);
484
+ if (result.error) {
485
+ const { code, message } = result.error;
486
+ throw new Error(`Unexpected error (${code}): ${message}`);
487
+ }
488
+ console.log("Summary:");
489
+ console.log(result.sentences.map((sentence) => sentence.text).join("\n"));
490
+ }
491
+ }
492
+ }
493
+
494
+ main();
495
+ ```
496
+
497
+ ### Custom Entity Recognition
498
+
499
+ Recognize and categorize entities in text using custom entity detection models built with [Azure Language Studio][lang_studio].
500
+
501
+ ```javascript
502
+ const {
503
+ AnalyzeBatchAction,
504
+ AzureKeyCredential,
505
+ TextAnalysisClient,
506
+ } = require("@azure/ai-text-analytics");
507
+
508
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
509
+
510
+ const documents = [
511
+ "We love this trail and make the trip every year. The views are breathtaking and well worth the hike! Yesterday was foggy though, so we missed the spectacular views. We tried again today and it was amazing. Everyone in my family liked the trail although it was too challenging for the less athletic among us.",
512
+ "Last week we stayed at Hotel Foo to celebrate our anniversary. The staff knew about our anniversary so they helped me organize a little surprise for my partner. The room was clean and with the decoration I requested. It was perfect!",
513
+ ];
514
+
515
+ async function main() {
516
+ const actions: AnalyzeBatchAction[] = [
517
+ {
518
+ kind: "CustomEntityRecognition",
519
+ deploymentName,
520
+ projectName,
521
+ },
522
+ ];
523
+ const poller = await client.beginAnalyzeBatch(actions, documents, "en");
+ const results = await poller.pollUntilDone();
+ for await (const actionResult of results) {
525
+ if (actionResult.kind !== "CustomEntityRecognition") {
526
+ throw new Error(`Expected CustomEntityRecognition results but got: ${actionResult.kind}`);
527
+ }
528
+ if (actionResult.error) {
529
+ const { code, message } = actionResult.error;
530
+ throw new Error(`Unexpected error (${code}): ${message}`);
531
+ }
532
+ for (const result of actionResult.results) {
533
+ console.log(`- Document ${result.id}`);
534
+ if (result.error) {
535
+ const { code, message } = result.error;
536
+ throw new Error(`Unexpected error (${code}): ${message}`);
537
+ }
420
538
  console.log("\tRecognized Entities:");
421
539
  for (const entity of result.entities) {
422
- console.log(`\t- Entity ${entity.text} of type ${entity.category}`);
540
+ console.log(`\t- Entity "${entity.text}" of type ${entity.category}`);
541
+ }
542
+ }
543
+ }
544
+ }
545
+
546
+ main();
547
+ ```
548
+
549
+ ### Custom Single-label Classification
550
+
551
+ Classify documents using custom single-label models built with [Azure Language Studio][lang_studio].
552
+
553
+ ```javascript
554
+ const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
555
+
556
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
557
+
558
+ const documents = [
559
+ "The plot begins with a large group of characters where everyone thinks that the two main ones should be together but foolish things keep them apart. Misunderstandings, miscommunication, and confusion cause a series of humorous situations.",
560
+ ];
561
+
562
+ async function main() {
563
+ const actions: AnalyzeBatchAction[] = [
564
+ {
565
+ kind: "CustomSingleLabelClassification",
566
+ deploymentName,
567
+ projectName,
568
+ },
569
+ ];
570
+ const poller = await client.beginAnalyzeBatch(actions, documents, "en");
571
+ const results = await poller.pollUntilDone();
572
+
573
+ for await (const actionResult of results) {
574
+ if (actionResult.kind !== "CustomSingleLabelClassification") {
575
+ throw new Error(
576
+ `Expected CustomSingleLabelClassification results but got: ${actionResult.kind}`
577
+ );
578
+ }
579
+ if (actionResult.error) {
580
+ const { code, message } = actionResult.error;
581
+ throw new Error(`Unexpected error (${code}): ${message}`);
582
+ }
583
+ for (const result of actionResult.results) {
584
+ console.log(`- Document ${result.id}`);
585
+ if (result.error) {
586
+ const { code, message } = result.error;
587
+ throw new Error(`Unexpected error (${code}): ${message}`);
588
+ }
589
+ console.log(`\tClassification: ${result.classification.category}`);
590
+ }
591
+ }
592
+ }
593
+
594
+ main();
595
+ ```
596
+
597
+ ### Custom Multi-label Classification
598
+
599
+ Classify documents using custom multi-label models built with [Azure Language Studio][lang_studio].
600
+
601
+ ```javascript
602
+ const {
603
+ AnalyzeBatchAction,
604
+ AzureKeyCredential,
605
+ TextAnalysisClient,
606
+ } = require("@azure/ai-text-analytics");
607
+
608
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
609
+
610
+ const documents = [
611
+ "The plot begins with a large group of characters where everyone thinks that the two main ones should be together but foolish things keep them apart. Misunderstandings, miscommunication, and confusion cause a series of humorous situations.",
612
+ ];
613
+
614
+ async function main() {
615
+ const actions: AnalyzeBatchAction[] = [
616
+ {
617
+ kind: "CustomMultiLabelClassification",
618
+ deploymentName,
619
+ projectName,
620
+ },
621
+ ];
622
+ const poller = await client.beginAnalyzeBatch(actions, documents, "en");
623
+ const results = await poller.pollUntilDone();
624
+
625
+ for await (const actionResult of results) {
626
+ if (actionResult.kind !== "CustomMultiLabelClassification") {
627
+ throw new Error(
628
+ `Expected CustomMultiLabelClassification results but got: ${actionResult.kind}`
629
+ );
630
+ }
631
+ if (actionResult.error) {
632
+ const { code, message } = actionResult.error;
633
+ throw new Error(`Unexpected error (${code}): ${message}`);
634
+ }
635
+ for (const result of actionResult.results) {
636
+ console.log(`- Document ${result.id}`);
637
+ if (result.error) {
638
+ const { code, message } = result.error;
639
+ throw new Error(`Unexpected error (${code}): ${message}`);
640
+ }
641
+ console.log(`\tClassification:`);
642
+ for (const classification of result.classifications) {
643
+ console.log(`\t\t-category: ${classification.category}`);
423
644
  }
424
- } else console.error("\tError:", result.error);
645
+ }
425
646
  }
426
647
  }
427
648
 
428
649
  main();
429
650
  ```
430
651
 
431
- ### Analyze Actions
652
+ ### Action Batching
432
653
 
433
- Analyze actions enables the application of multiple analyses (named actions) at once.
654
+ Applies multiple actions to each input document in one service request.
434
655
 
435
656
  ```javascript
436
- const { TextAnalyticsClient, AzureKeyCredential } = require("@azure/ai-text-analytics");
657
+ const {
658
+ AnalyzeBatchAction,
659
+ AzureKeyCredential,
660
+ TextAnalysisClient,
661
+ } = require("@azure/ai-text-analytics");
437
662
 
438
- const client = new TextAnalyticsClient("<endpoint>", new AzureKeyCredential("<API key>"));
663
+ const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
439
664
 
440
665
  const documents = [
441
666
  "Microsoft was founded by Bill Gates and Paul Allen.",
442
667
  "The employee's SSN is 555-55-5555.",
443
668
  "Easter Island, a Chilean territory, is a remote volcanic island in Polynesia.",
444
- "I use Azure Functions to develop my product."
669
+ "I use Azure Functions to develop my product.",
445
670
  ];
446
671
 
447
672
  async function main() {
448
- const actions = {
449
- recognizeEntitiesActions: [{ modelVersion: "latest" }],
450
- recognizePiiEntitiesActions: [{ modelVersion: "latest" }],
451
- extractKeyPhrasesActions: [{ modelVersion: "latest" }]
452
- };
453
- const poller = await client.beginAnalyzeActions(documents, actions);
454
- const resultPages = await poller.pollUntilDone();
455
- for await (const page of resultPages) {
456
- const keyPhrasesAction = page.extractKeyPhrasesResults[0];
457
- if (!keyPhrasesAction.error) {
458
- for (const doc of keyPhrasesAction.results) {
459
- console.log(`- Document ${doc.id}`);
460
- if (!doc.error) {
461
- console.log("\tKey phrases:");
462
- for (const phrase of doc.keyPhrases) {
463
- console.log(`\t- ${phrase}`);
673
+ const actions: AnalyzeBatchAction[] = [
674
+ {
675
+ kind: "EntityRecognition",
676
+ modelVersion: "latest",
677
+ },
678
+ {
679
+ kind: "PiiEntityRecognition",
680
+ modelVersion: "latest",
681
+ },
682
+ {
683
+ kind: "KeyPhraseExtraction",
684
+ modelVersion: "latest",
685
+ },
686
+ ];
687
+ const poller = await client.beginAnalyzeBatch(actions, documents, "en");
688
+ const actionResults = await poller.pollUntilDone();
689
+ for await (const actionResult of actionResults) {
690
+ if (actionResult.error) {
691
+ const { code, message } = actionResult.error;
692
+ throw new Error(`Unexpected error (${code}): ${message}`);
693
+ }
694
+ switch (actionResult.kind) {
695
+ case "KeyPhraseExtraction": {
696
+ for (const doc of actionResult.results) {
697
+ console.log(`- Document ${doc.id}`);
698
+ if (!doc.error) {
699
+ console.log("\tKey phrases:");
700
+ for (const phrase of doc.keyPhrases) {
701
+ console.log(`\t- ${phrase}`);
702
+ }
703
+ } else {
704
+ console.error("\tError:", doc.error);
464
705
  }
465
- } else {
466
- console.error("\tError:", doc.error);
467
706
  }
707
+ break;
468
708
  }
469
- }
470
-
471
- const entitiesAction = page.recognizeEntitiesResults[0];
472
- if (!entitiesAction.error) {
473
- for (const doc of entitiesAction.results) {
474
- console.log(`- Document ${doc.id}`);
475
- if (!doc.error) {
476
- console.log("\tEntities:");
477
- for (const entity of doc.entities) {
478
- console.log(`\t- Entity ${entity.text} of type ${entity.category}`);
709
+ case "EntityRecognition": {
710
+ for (const doc of actionResult.results) {
711
+ console.log(`- Document ${doc.id}`);
712
+ if (!doc.error) {
713
+ console.log("\tEntities:");
714
+ for (const entity of doc.entities) {
715
+ console.log(`\t- Entity ${entity.text} of type ${entity.category}`);
716
+ }
717
+ } else {
718
+ console.error("\tError:", doc.error);
479
719
  }
480
- } else {
481
- console.error("\tError:", doc.error);
482
720
  }
721
+ break;
483
722
  }
484
- }
485
-
486
- const piiEntitiesAction = page.recognizePiiEntitiesResults[0];
487
- if (!piiEntitiesAction.error) {
488
- for (const doc of piiEntitiesAction.results) {
489
- console.log(`- Document ${doc.id}`);
490
- if (!doc.error) {
491
- console.log("\tPii Entities:");
492
- for (const entity of doc.entities) {
493
- console.log(`\t- Entity ${entity.text} of type ${entity.category}`);
723
+ case "PiiEntityRecognition": {
724
+ for (const doc of actionResult.results) {
725
+ console.log(`- Document ${doc.id}`);
726
+ if (!doc.error) {
727
+ console.log("\tPii Entities:");
728
+ for (const entity of doc.entities) {
729
+ console.log(`\t- Entity ${entity.text} of type ${entity.category}`);
730
+ }
731
+ } else {
732
+ console.error("\tError:", doc.error);
494
733
  }
495
- } else {
496
- console.error("\tError:", doc.error);
497
734
  }
735
+ break;
736
+ }
737
+ default: {
738
+ throw new Error(`Unexpected action results: ${actionResult.kind}`);
498
739
  }
499
740
  }
500
741
  }
@@ -539,5 +780,6 @@ If you'd like to contribute to this library, please read the [contributing guide
539
780
  [cognitive_auth]: https://docs.microsoft.com/azure/cognitive-services/authentication
540
781
  [register_aad_app]: https://docs.microsoft.com/azure/cognitive-services/authentication#assign-a-role-to-a-service-principal
541
782
  [defaultazurecredential]: https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/identity/identity#defaultazurecredential
542
- [data_limits]: https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
543
- [analyze_sentiment_opinion_mining_sample]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/textanalytics/ai-text-analytics/samples/v5/typescript/src/analyzeSentimentWithOpinionMining.ts
783
+ [data_limits]: https://docs.microsoft.com/azure/cognitive-services/language-service/concepts/data-limits
784
+ [analyze_sentiment_opinion_mining_sample]: https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/textanalytics/ai-text-analytics/samples-dev/opinionMining.ts
785
+ [lang_studio]: https://docs.microsoft.com/azure/cognitive-services/language-service/language-studio
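
For the long-running operations, the migration is similar: the dedicated pollers of 5.x (`beginAnalyzeHealthcareEntities`, `beginAnalyzeActions`) are replaced by a single `beginAnalyzeBatch` call that takes an array of actions. A minimal sketch assembled from the Healthcare Analysis example in the README diff above; the document text and placeholders are the README's own:

```javascript
const { TextAnalysisClient, AzureKeyCredential } = require("@azure/ai-text-analytics");

const client = new TextAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));

const documents = ["Prescribed 100mg ibuprofen, taken twice daily."];

async function main() {
  // 5.x: const poller = await client.beginAnalyzeHealthcareEntities(documents);
  // 6.x: each long-running feature is an action passed to `beginAnalyzeBatch`.
  const poller = await client.beginAnalyzeBatch([{ kind: "Healthcare" }], documents, "en");
  const results = await poller.pollUntilDone();
  for await (const actionResult of results) {
    if (actionResult.kind === "Healthcare" && !actionResult.error) {
      for (const result of actionResult.results) {
        if (!result.error) {
          console.log(`- Document ${result.id}: ${result.entities.length} entities`);
        }
      }
    }
  }
}

main();
```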