azure-ai-textanalytics 5.3.0b2__py3-none-any.whl → 6.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of azure-ai-textanalytics has been flagged as potentially problematic; consult the package registry's advisory page for more details.

Files changed (128)
  1. azure/ai/textanalytics/__init__.py +26 -193
  2. azure/ai/textanalytics/_client.py +111 -0
  3. azure/ai/textanalytics/_configuration.py +73 -0
  4. azure/ai/textanalytics/{_generated/v2022_05_01/operations → _operations}/__init__.py +13 -8
  5. azure/ai/textanalytics/_operations/_operations.py +716 -0
  6. azure/ai/textanalytics/{_generated/v2022_05_01/models → _operations}/_patch.py +8 -6
  7. azure/ai/textanalytics/_patch.py +350 -0
  8. azure/ai/textanalytics/{_generated/aio → _utils}/__init__.py +1 -5
  9. azure/ai/textanalytics/_utils/model_base.py +1237 -0
  10. azure/ai/textanalytics/{_generated/_serialization.py → _utils/serialization.py} +640 -616
  11. azure/ai/textanalytics/{_generated/v2022_05_01/aio/_vendor.py → _utils/utils.py} +10 -12
  12. azure/ai/textanalytics/_version.py +8 -7
  13. azure/ai/textanalytics/aio/__init__.py +25 -14
  14. azure/ai/textanalytics/aio/_client.py +115 -0
  15. azure/ai/textanalytics/aio/_configuration.py +75 -0
  16. azure/ai/textanalytics/{_generated/v2022_10_01_preview/aio/operations → aio/_operations}/__init__.py +13 -8
  17. azure/ai/textanalytics/aio/_operations/_operations.py +623 -0
  18. azure/ai/textanalytics/{_generated/v2022_05_01 → aio/_operations}/_patch.py +8 -6
  19. azure/ai/textanalytics/aio/_patch.py +344 -0
  20. azure/ai/textanalytics/models/__init__.py +402 -0
  21. azure/ai/textanalytics/models/_enums.py +1979 -0
  22. azure/ai/textanalytics/models/_models.py +6641 -0
  23. azure/ai/textanalytics/{_generated/v2022_05_01/aio → models}/_patch.py +8 -6
  24. azure/ai/textanalytics/py.typed +1 -0
  25. {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/METADATA +668 -403
  26. azure_ai_textanalytics-6.0.0b1.dist-info/RECORD +29 -0
  27. {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/WHEEL +1 -1
  28. azure/ai/textanalytics/_base_client.py +0 -113
  29. azure/ai/textanalytics/_check.py +0 -22
  30. azure/ai/textanalytics/_dict_mixin.py +0 -57
  31. azure/ai/textanalytics/_generated/__init__.py +0 -16
  32. azure/ai/textanalytics/_generated/_configuration.py +0 -70
  33. azure/ai/textanalytics/_generated/_operations_mixin.py +0 -795
  34. azure/ai/textanalytics/_generated/_text_analytics_client.py +0 -126
  35. azure/ai/textanalytics/_generated/_version.py +0 -8
  36. azure/ai/textanalytics/_generated/aio/_configuration.py +0 -66
  37. azure/ai/textanalytics/_generated/aio/_operations_mixin.py +0 -776
  38. azure/ai/textanalytics/_generated/aio/_text_analytics_client.py +0 -124
  39. azure/ai/textanalytics/_generated/models.py +0 -8
  40. azure/ai/textanalytics/_generated/v2022_05_01/__init__.py +0 -20
  41. azure/ai/textanalytics/_generated/v2022_05_01/_configuration.py +0 -72
  42. azure/ai/textanalytics/_generated/v2022_05_01/_text_analytics_client.py +0 -100
  43. azure/ai/textanalytics/_generated/v2022_05_01/_vendor.py +0 -45
  44. azure/ai/textanalytics/_generated/v2022_05_01/aio/__init__.py +0 -20
  45. azure/ai/textanalytics/_generated/v2022_05_01/aio/_configuration.py +0 -71
  46. azure/ai/textanalytics/_generated/v2022_05_01/aio/_text_analytics_client.py +0 -97
  47. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/__init__.py +0 -18
  48. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_patch.py +0 -121
  49. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_text_analytics_client_operations.py +0 -603
  50. azure/ai/textanalytics/_generated/v2022_05_01/models/__init__.py +0 -281
  51. azure/ai/textanalytics/_generated/v2022_05_01/models/_models_py3.py +0 -5722
  52. azure/ai/textanalytics/_generated/v2022_05_01/models/_text_analytics_client_enums.py +0 -439
  53. azure/ai/textanalytics/_generated/v2022_05_01/operations/_patch.py +0 -120
  54. azure/ai/textanalytics/_generated/v2022_05_01/operations/_text_analytics_client_operations.py +0 -744
  55. azure/ai/textanalytics/_generated/v2022_10_01_preview/__init__.py +0 -20
  56. azure/ai/textanalytics/_generated/v2022_10_01_preview/_configuration.py +0 -72
  57. azure/ai/textanalytics/_generated/v2022_10_01_preview/_patch.py +0 -19
  58. azure/ai/textanalytics/_generated/v2022_10_01_preview/_text_analytics_client.py +0 -100
  59. azure/ai/textanalytics/_generated/v2022_10_01_preview/_vendor.py +0 -45
  60. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/__init__.py +0 -20
  61. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_configuration.py +0 -71
  62. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_patch.py +0 -19
  63. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_text_analytics_client.py +0 -97
  64. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_vendor.py +0 -27
  65. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_patch.py +0 -121
  66. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_text_analytics_client_operations.py +0 -603
  67. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/__init__.py +0 -405
  68. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_models_py3.py +0 -8420
  69. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_patch.py +0 -486
  70. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_text_analytics_client_enums.py +0 -729
  71. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/__init__.py +0 -18
  72. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_patch.py +0 -120
  73. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_text_analytics_client_operations.py +0 -744
  74. azure/ai/textanalytics/_generated/v3_0/__init__.py +0 -20
  75. azure/ai/textanalytics/_generated/v3_0/_configuration.py +0 -66
  76. azure/ai/textanalytics/_generated/v3_0/_patch.py +0 -31
  77. azure/ai/textanalytics/_generated/v3_0/_text_analytics_client.py +0 -96
  78. azure/ai/textanalytics/_generated/v3_0/_vendor.py +0 -33
  79. azure/ai/textanalytics/_generated/v3_0/aio/__init__.py +0 -20
  80. azure/ai/textanalytics/_generated/v3_0/aio/_configuration.py +0 -65
  81. azure/ai/textanalytics/_generated/v3_0/aio/_patch.py +0 -31
  82. azure/ai/textanalytics/_generated/v3_0/aio/_text_analytics_client.py +0 -93
  83. azure/ai/textanalytics/_generated/v3_0/aio/_vendor.py +0 -27
  84. azure/ai/textanalytics/_generated/v3_0/aio/operations/__init__.py +0 -18
  85. azure/ai/textanalytics/_generated/v3_0/aio/operations/_patch.py +0 -19
  86. azure/ai/textanalytics/_generated/v3_0/aio/operations/_text_analytics_client_operations.py +0 -428
  87. azure/ai/textanalytics/_generated/v3_0/models/__init__.py +0 -81
  88. azure/ai/textanalytics/_generated/v3_0/models/_models_py3.py +0 -1467
  89. azure/ai/textanalytics/_generated/v3_0/models/_patch.py +0 -19
  90. azure/ai/textanalytics/_generated/v3_0/models/_text_analytics_client_enums.py +0 -58
  91. azure/ai/textanalytics/_generated/v3_0/operations/__init__.py +0 -18
  92. azure/ai/textanalytics/_generated/v3_0/operations/_patch.py +0 -19
  93. azure/ai/textanalytics/_generated/v3_0/operations/_text_analytics_client_operations.py +0 -604
  94. azure/ai/textanalytics/_generated/v3_1/__init__.py +0 -20
  95. azure/ai/textanalytics/_generated/v3_1/_configuration.py +0 -66
  96. azure/ai/textanalytics/_generated/v3_1/_patch.py +0 -31
  97. azure/ai/textanalytics/_generated/v3_1/_text_analytics_client.py +0 -98
  98. azure/ai/textanalytics/_generated/v3_1/_vendor.py +0 -45
  99. azure/ai/textanalytics/_generated/v3_1/aio/__init__.py +0 -20
  100. azure/ai/textanalytics/_generated/v3_1/aio/_configuration.py +0 -65
  101. azure/ai/textanalytics/_generated/v3_1/aio/_patch.py +0 -31
  102. azure/ai/textanalytics/_generated/v3_1/aio/_text_analytics_client.py +0 -95
  103. azure/ai/textanalytics/_generated/v3_1/aio/_vendor.py +0 -27
  104. azure/ai/textanalytics/_generated/v3_1/aio/operations/__init__.py +0 -18
  105. azure/ai/textanalytics/_generated/v3_1/aio/operations/_patch.py +0 -19
  106. azure/ai/textanalytics/_generated/v3_1/aio/operations/_text_analytics_client_operations.py +0 -1291
  107. azure/ai/textanalytics/_generated/v3_1/models/__init__.py +0 -205
  108. azure/ai/textanalytics/_generated/v3_1/models/_models_py3.py +0 -3976
  109. azure/ai/textanalytics/_generated/v3_1/models/_patch.py +0 -19
  110. azure/ai/textanalytics/_generated/v3_1/models/_text_analytics_client_enums.py +0 -367
  111. azure/ai/textanalytics/_generated/v3_1/operations/__init__.py +0 -18
  112. azure/ai/textanalytics/_generated/v3_1/operations/_patch.py +0 -19
  113. azure/ai/textanalytics/_generated/v3_1/operations/_text_analytics_client_operations.py +0 -1709
  114. azure/ai/textanalytics/_lro.py +0 -553
  115. azure/ai/textanalytics/_models.py +0 -3158
  116. azure/ai/textanalytics/_policies.py +0 -66
  117. azure/ai/textanalytics/_request_handlers.py +0 -104
  118. azure/ai/textanalytics/_response_handlers.py +0 -583
  119. azure/ai/textanalytics/_text_analytics_client.py +0 -2081
  120. azure/ai/textanalytics/_user_agent.py +0 -8
  121. azure/ai/textanalytics/_validate.py +0 -113
  122. azure/ai/textanalytics/aio/_base_client_async.py +0 -98
  123. azure/ai/textanalytics/aio/_lro_async.py +0 -503
  124. azure/ai/textanalytics/aio/_response_handlers_async.py +0 -94
  125. azure/ai/textanalytics/aio/_text_analytics_client_async.py +0 -2077
  126. azure_ai_textanalytics-5.3.0b2.dist-info/RECORD +0 -115
  127. {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info/licenses}/LICENSE +0 -0
  128. {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/top_level.txt +0 -0
@@ -1,2077 +0,0 @@
1
- # ------------------------------------
2
- # Copyright (c) Microsoft Corporation.
3
- # Licensed under the MIT License.
4
- # ------------------------------------
5
- # pylint: disable=too-many-lines
6
-
7
- from typing import Union, Any, List, Dict, cast, Optional
8
- from azure.core.async_paging import AsyncItemPaged
9
- from azure.core.tracing.decorator_async import distributed_trace_async
10
- from azure.core.exceptions import HttpResponseError
11
- from azure.core.credentials import AzureKeyCredential
12
- from azure.core.credentials_async import AsyncTokenCredential
13
- from .._base_client import TextAnalyticsApiVersion
14
- from ._base_client_async import AsyncTextAnalyticsClientBase
15
- from .._request_handlers import (
16
- _validate_input,
17
- _determine_action_type,
18
- )
19
- from .._validate import validate_multiapi_args, check_for_unsupported_actions_types
20
- from .._response_handlers import (
21
- process_http_response_error,
22
- entities_result,
23
- linked_entities_result,
24
- key_phrases_result,
25
- sentiment_result,
26
- language_result,
27
- pii_entities_result,
28
- _get_result_from_continuation_token,
29
- dynamic_classification_result,
30
- )
31
- from ._response_handlers_async import healthcare_paged_result, analyze_paged_result
32
- from .._generated.models import HealthcareDocumentType, ClassificationType
33
- from .._models import (
34
- DetectLanguageInput,
35
- TextDocumentInput,
36
- DetectLanguageResult,
37
- RecognizeEntitiesResult,
38
- RecognizeLinkedEntitiesResult,
39
- ExtractKeyPhrasesResult,
40
- AnalyzeSentimentResult,
41
- DocumentError,
42
- RecognizePiiEntitiesResult,
43
- RecognizeEntitiesAction,
44
- RecognizePiiEntitiesAction,
45
- ExtractKeyPhrasesAction,
46
- _AnalyzeActionsType,
47
- RecognizeLinkedEntitiesAction,
48
- AnalyzeSentimentAction,
49
- AnalyzeHealthcareEntitiesResult,
50
- RecognizeCustomEntitiesAction,
51
- RecognizeCustomEntitiesResult,
52
- SingleLabelClassifyAction,
53
- MultiLabelClassifyAction,
54
- ClassifyDocumentResult,
55
- AnalyzeHealthcareEntitiesAction,
56
- ExtractSummaryAction,
57
- ExtractSummaryResult,
58
- AbstractiveSummaryAction,
59
- AbstractiveSummaryResult,
60
- DynamicClassificationResult,
61
- PiiEntityDomain,
62
- PiiEntityCategory,
63
- )
64
- from .._check import is_language_api, string_index_type_compatibility
65
- from .._lro import TextAnalyticsOperationResourcePolling
66
- from ._lro_async import (
67
- AsyncAnalyzeHealthcareEntitiesLROPollingMethod,
68
- AsyncAnalyzeActionsLROPollingMethod,
69
- AsyncAnalyzeHealthcareEntitiesLROPoller,
70
- AsyncAnalyzeActionsLROPoller,
71
- AsyncTextAnalysisLROPoller,
72
- )
73
-
74
-
75
- AsyncAnalyzeActionsResponse = AsyncTextAnalysisLROPoller[
76
- AsyncItemPaged[
77
- List[
78
- Union[
79
- RecognizeEntitiesResult,
80
- RecognizeLinkedEntitiesResult,
81
- RecognizePiiEntitiesResult,
82
- ExtractKeyPhrasesResult,
83
- AnalyzeSentimentResult,
84
- RecognizeCustomEntitiesResult,
85
- ClassifyDocumentResult,
86
- AnalyzeHealthcareEntitiesResult,
87
- ExtractSummaryResult,
88
- AbstractiveSummaryResult,
89
- DocumentError,
90
- ]
91
- ]
92
- ]
93
- ]
94
-
95
-
96
- class TextAnalyticsClient(AsyncTextAnalyticsClientBase):
97
- """The Language service API is a suite of natural language processing (NLP) skills built with the best-in-class
98
- Microsoft machine learning algorithms. The API can be used to analyze unstructured text for
99
- tasks such as sentiment analysis, key phrase extraction, entities recognition,
100
- and language detection, and more.
101
-
102
- Further documentation can be found in
103
- https://docs.microsoft.com/azure/cognitive-services/language-service/overview
104
-
105
- :param str endpoint: Supported Cognitive Services or Language resource
106
- endpoints (protocol and hostname, for example: 'https://<resource-name>.cognitiveservices.azure.com').
107
- :param credential: Credentials needed for the client to connect to Azure.
108
- This can be the an instance of AzureKeyCredential if using a Cognitive Services/Language API key
109
- or a token credential from :mod:`azure.identity`.
110
- :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential
111
- :keyword str default_country_hint: Sets the default country_hint to use for all operations.
112
- Defaults to "US". If you don't want to use a country hint, pass the string "none".
113
- :keyword str default_language: Sets the default language to use for all operations.
114
- Defaults to "en".
115
- :keyword api_version: The API version of the service to use for requests. It defaults to the
116
- latest service version. Setting to an older version may result in reduced feature compatibility.
117
- :paramtype api_version: str or ~azure.ai.textanalytics.TextAnalyticsApiVersion
118
-
119
- .. admonition:: Example:
120
-
121
- .. literalinclude:: ../samples/async_samples/sample_authentication_async.py
122
- :start-after: [START create_ta_client_with_key_async]
123
- :end-before: [END create_ta_client_with_key_async]
124
- :language: python
125
- :dedent: 4
126
- :caption: Creating the TextAnalyticsClient with endpoint and API key.
127
-
128
- .. literalinclude:: ../samples/async_samples/sample_authentication_async.py
129
- :start-after: [START create_ta_client_with_aad_async]
130
- :end-before: [END create_ta_client_with_aad_async]
131
- :language: python
132
- :dedent: 4
133
- :caption: Creating the TextAnalyticsClient with endpoint and token credential from Azure Active Directory.
134
- """
135
-
136
- def __init__(
137
- self,
138
- endpoint: str,
139
- credential: Union[AzureKeyCredential, AsyncTokenCredential],
140
- *,
141
- default_language: Optional[str] = None,
142
- default_country_hint: Optional[str] = None,
143
- api_version: Optional[Union[str, TextAnalyticsApiVersion]] = None,
144
- **kwargs: Any,
145
- ) -> None:
146
- super().__init__(
147
- endpoint=endpoint, credential=credential, api_version=api_version, **kwargs
148
- )
149
- self._default_language = default_language if default_language is not None else "en"
150
- self._default_country_hint = default_country_hint if default_country_hint is not None else "US"
151
- self._string_code_unit = (
152
- None if api_version == "v3.0" else "UnicodeCodePoint"
153
- )
154
-
155
- @distributed_trace_async
156
- @validate_multiapi_args(
157
- version_method_added="v3.0",
158
- args_mapping={"v3.1": ["disable_service_logs"]}
159
- )
160
- async def detect_language(
161
- self,
162
- documents: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]],
163
- *,
164
- country_hint: Optional[str] = None,
165
- disable_service_logs: Optional[bool] = None,
166
- model_version: Optional[str] = None,
167
- show_stats: Optional[bool] = None,
168
- **kwargs: Any,
169
- ) -> List[Union[DetectLanguageResult, DocumentError]]:
170
- """Detect language for a batch of documents.
171
-
172
- Returns the detected language and a numeric score between zero and
173
- one. Scores close to one indicate 100% certainty that the identified
174
- language is true. See https://aka.ms/talangs for the list of enabled languages.
175
-
176
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
177
-
178
- :param documents: The set of documents to process as part of this batch.
179
- If you wish to specify the ID and country_hint on a per-item basis you must
180
- use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of
181
- dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like
182
- `{"id": "1", "country_hint": "us", "text": "hello world"}`.
183
- :type documents:
184
- list[str] or list[~azure.ai.textanalytics.DetectLanguageInput] or list[dict[str, str]]
185
- :keyword str country_hint: Country of origin hint for the entire batch. Accepts two
186
- letter country codes specified by ISO 3166-1 alpha-2. Per-document
187
- country hints will take precedence over whole batch hints. Defaults to
188
- "US". If you don't want to use a country hint, pass the string "none".
189
- :keyword str model_version: This value indicates which model will
190
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
191
- is not specified, the API will default to the latest, non-preview version.
192
- See here for more info: https://aka.ms/text-analytics-model-versioning
193
- :keyword bool show_stats: If set to true, response will contain document
194
- level statistics in the `statistics` field of the document-level response.
195
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
196
- logged on the service side for troubleshooting. By default, the Language service logs your
197
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
198
- the service's natural language processing functions. Setting this parameter to true,
199
- disables input logging and may limit our ability to remediate issues that occur. Please see
200
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
201
- additional details, and Microsoft Responsible AI principles at
202
- https://www.microsoft.com/ai/responsible-ai.
203
- :return: The combined list of :class:`~azure.ai.textanalytics.DetectLanguageResult`
204
- and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
205
- were passed in.
206
- :rtype: list[~azure.ai.textanalytics.DetectLanguageResult or ~azure.ai.textanalytics.DocumentError]
207
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
208
-
209
- .. versionadded:: v3.1
210
- The *disable_service_logs* keyword argument.
211
-
212
- .. admonition:: Example:
213
-
214
- .. literalinclude:: ../samples/async_samples/sample_detect_language_async.py
215
- :start-after: [START detect_language_async]
216
- :end-before: [END detect_language_async]
217
- :language: python
218
- :dedent: 4
219
- :caption: Detecting language in a batch of documents.
220
- """
221
-
222
- country_hint_arg = (
223
- country_hint
224
- if country_hint is not None
225
- else self._default_country_hint
226
- )
227
- docs = _validate_input(documents, "country_hint", country_hint_arg)
228
-
229
- try:
230
- if is_language_api(self._api_version):
231
- models = self._client.models(api_version=self._api_version)
232
- return cast(
233
- List[Union[DetectLanguageResult, DocumentError]],
234
- await self._client.analyze_text(
235
- body=models.AnalyzeTextLanguageDetectionInput(
236
- analysis_input={"documents": docs},
237
- parameters=models.LanguageDetectionTaskParameters(
238
- logging_opt_out=disable_service_logs,
239
- model_version=model_version
240
- )
241
- ),
242
- show_stats=show_stats,
243
- cls=kwargs.pop("cls", language_result),
244
- **kwargs
245
- )
246
- )
247
-
248
- # api_versions 3.0, 3.1
249
- return cast(
250
- List[Union[DetectLanguageResult, DocumentError]],
251
- await self._client.languages(
252
- documents=docs,
253
- model_version=model_version,
254
- show_stats=show_stats,
255
- logging_opt_out=disable_service_logs,
256
- cls=kwargs.pop("cls", language_result),
257
- **kwargs
258
- )
259
- )
260
- except HttpResponseError as error:
261
- return process_http_response_error(error)
262
-
263
- @distributed_trace_async
264
- @validate_multiapi_args(
265
- version_method_added="v3.0",
266
- args_mapping={"v3.1": ["string_index_type", "disable_service_logs"]}
267
- )
268
- async def recognize_entities(
269
- self,
270
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
271
- *,
272
- disable_service_logs: Optional[bool] = None,
273
- language: Optional[str] = None,
274
- model_version: Optional[str] = None,
275
- show_stats: Optional[bool] = None,
276
- string_index_type: Optional[str] = None,
277
- **kwargs: Any,
278
- ) -> List[Union[RecognizeEntitiesResult, DocumentError]]:
279
- """Recognize entities for a batch of documents.
280
-
281
- Identifies and categorizes entities in your text as people, places,
282
- organizations, date/time, quantities, percentages, currencies, and more.
283
- For the list of supported entity types, check: https://aka.ms/taner
284
-
285
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
286
-
287
- :param documents: The set of documents to process as part of this batch.
288
- If you wish to specify the ID and language on a per-item basis you must
289
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
290
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
291
- `{"id": "1", "language": "en", "text": "hello world"}`.
292
- :type documents:
293
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
294
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
295
- entire batch. For example, use "en" for English; "es" for Spanish etc.
296
- If not set, uses "en" for English as default. Per-document language will
297
- take precedence over whole batch language. See https://aka.ms/talangs for
298
- supported languages in Language API.
299
- :keyword str model_version: This value indicates which model will
300
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
301
- is not specified, the API will default to the latest, non-preview version.
302
- See here for more info: https://aka.ms/text-analytics-model-versioning
303
- :keyword bool show_stats: If set to true, response will contain document
304
- level statistics in the `statistics` field of the document-level response.
305
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
306
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
307
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
308
- see https://aka.ms/text-analytics-offsets
309
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
310
- logged on the service side for troubleshooting. By default, the Language service logs your
311
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
312
- the service's natural language processing functions. Setting this parameter to true,
313
- disables input logging and may limit our ability to remediate issues that occur. Please see
314
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
315
- additional details, and Microsoft Responsible AI principles at
316
- https://www.microsoft.com/ai/responsible-ai.
317
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` and
318
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
319
- passed in.
320
- :rtype: list[~azure.ai.textanalytics.RecognizeEntitiesResult or ~azure.ai.textanalytics.DocumentError]
321
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
322
-
323
- .. versionadded:: v3.1
324
- The *disable_service_logs* and *string_index_type* keyword arguments.
325
-
326
- .. admonition:: Example:
327
-
328
- .. literalinclude:: ../samples/async_samples/sample_recognize_entities_async.py
329
- :start-after: [START recognize_entities_async]
330
- :end-before: [END recognize_entities_async]
331
- :language: python
332
- :dedent: 4
333
- :caption: Recognize entities in a batch of documents.
334
- """
335
-
336
- language_arg = language if language is not None else self._default_language
337
- docs = _validate_input(documents, "language", language_arg)
338
- string_index_type_arg = string_index_type if string_index_type is not None else self._string_code_unit
339
-
340
- try:
341
- if is_language_api(self._api_version):
342
- models = self._client.models(api_version=self._api_version)
343
- return cast(
344
- List[Union[RecognizeEntitiesResult, DocumentError]],
345
- await self._client.analyze_text(
346
- body=models.AnalyzeTextEntityRecognitionInput(
347
- analysis_input={"documents": docs},
348
- parameters=models.EntitiesTaskParameters(
349
- logging_opt_out=disable_service_logs,
350
- model_version=model_version,
351
- string_index_type=string_index_type_compatibility(string_index_type_arg)
352
- )
353
- ),
354
- show_stats=show_stats,
355
- cls=kwargs.pop("cls", entities_result),
356
- **kwargs
357
- )
358
- )
359
-
360
- # api_versions 3.0, 3.1
361
- return cast(
362
- List[Union[RecognizeEntitiesResult, DocumentError]],
363
- await self._client.entities_recognition_general(
364
- documents=docs,
365
- model_version=model_version,
366
- show_stats=show_stats,
367
- string_index_type=string_index_type_arg,
368
- logging_opt_out=disable_service_logs,
369
- cls=kwargs.pop("cls", entities_result),
370
- **kwargs,
371
- )
372
- )
373
- except HttpResponseError as error:
374
- return process_http_response_error(error)
375
-
376
- @distributed_trace_async
377
- @validate_multiapi_args(
378
- version_method_added="v3.1"
379
- )
380
- async def recognize_pii_entities(
381
- self,
382
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
383
- *,
384
- categories_filter: Optional[List[Union[str, PiiEntityCategory]]] = None,
385
- disable_service_logs: Optional[bool] = None,
386
- domain_filter: Optional[Union[str, PiiEntityDomain]] = None,
387
- language: Optional[str] = None,
388
- model_version: Optional[str] = None,
389
- show_stats: Optional[bool] = None,
390
- string_index_type: Optional[str] = None,
391
- **kwargs: Any,
392
- ) -> List[Union[RecognizePiiEntitiesResult, DocumentError]]:
393
- """Recognize entities containing personal information for a batch of documents.
394
-
395
- Returns a list of personal information entities ("SSN",
396
- "Bank Account", etc) in the document. For the list of supported entity types,
397
- check https://aka.ms/azsdk/language/pii
398
-
399
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
400
-
401
- :param documents: The set of documents to process as part of this batch.
402
- If you wish to specify the ID and language on a per-item basis you must
403
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
404
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
405
- `{"id": "1", "language": "en", "text": "hello world"}`.
406
- :type documents:
407
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
408
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
409
- entire batch. For example, use "en" for English; "es" for Spanish etc.
410
- If not set, uses "en" for English as default. Per-document language will
411
- take precedence over whole batch language. See https://aka.ms/talangs for
412
- supported languages in Language API.
413
- :keyword str model_version: This value indicates which model will
414
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
415
- is not specified, the API will default to the latest, non-preview version.
416
- See here for more info: https://aka.ms/text-analytics-model-versioning
417
- :keyword bool show_stats: If set to true, response will contain document
418
- level statistics in the `statistics` field of the document-level response.
419
- :keyword domain_filter: Filters the response entities to ones only included in the specified domain.
420
- I.e., if set to 'phi', will only return entities in the Protected Healthcare Information domain.
421
- See https://aka.ms/azsdk/language/pii for more information.
422
- :paramtype domain_filter: str or ~azure.ai.textanalytics.PiiEntityDomain
423
- :keyword categories_filter: Instead of filtering over all PII entity categories, you can pass in a list of
424
- the specific PII entity categories you want to filter out. For example, if you only want to filter out
425
- U.S. social security numbers in a document, you can pass in
426
- `[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg.
427
- :paramtype categories_filter: list[str or ~azure.ai.textanalytics.PiiEntityCategory]
428
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
429
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
430
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
431
- see https://aka.ms/text-analytics-offsets
432
- :keyword bool disable_service_logs: Defaults to true, meaning that the Language service will not log your
433
- input text on the service side for troubleshooting. If set to False, the Language service logs your
434
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
435
- the service's natural language processing functions. Please see
436
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
437
- additional details, and Microsoft Responsible AI principles at
438
- https://www.microsoft.com/ai/responsible-ai.
439
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizePiiEntitiesResult`
440
- and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
441
- were passed in.
442
- :rtype: list[~azure.ai.textanalytics.RecognizePiiEntitiesResult or ~azure.ai.textanalytics.DocumentError]
443
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
444
-
445
- .. versionadded:: v3.1
446
- The *recognize_pii_entities* client method.
447
-
448
- .. admonition:: Example:
449
-
450
- .. literalinclude:: ../samples/sample_recognize_pii_entities.py
451
- :start-after: [START recognize_pii_entities]
452
- :end-before: [END recognize_pii_entities]
453
- :language: python
454
- :dedent: 4
455
- :caption: Recognize personally identifiable information entities in a batch of documents.
456
- """
457
- language_arg = language if language is not None else self._default_language
458
- docs = _validate_input(documents, "language", language_arg)
459
- string_index_type_arg = string_index_type if string_index_type is not None else self._string_code_unit
460
-
461
- try:
462
- if is_language_api(self._api_version):
463
- models = self._client.models(api_version=self._api_version)
464
- return cast(
465
- List[Union[RecognizePiiEntitiesResult, DocumentError]],
466
- await self._client.analyze_text(
467
- body=models.AnalyzeTextPiiEntitiesRecognitionInput(
468
- analysis_input={"documents": docs},
469
- parameters=models.PiiTaskParameters(
470
- logging_opt_out=disable_service_logs,
471
- model_version=model_version,
472
- domain=domain_filter,
473
- pii_categories=categories_filter,
474
- string_index_type=string_index_type_compatibility(string_index_type_arg)
475
- )
476
- ),
477
- show_stats=show_stats,
478
- cls=kwargs.pop("cls", pii_entities_result),
479
- **kwargs
480
- )
481
- )
482
-
483
- # api_versions 3.0, 3.1
484
- return cast(
485
- List[Union[RecognizePiiEntitiesResult, DocumentError]],
486
- await self._client.entities_recognition_pii(
487
- documents=docs,
488
- model_version=model_version,
489
- show_stats=show_stats,
490
- domain=domain_filter,
491
- pii_categories=categories_filter,
492
- logging_opt_out=disable_service_logs,
493
- string_index_type=string_index_type_arg,
494
- cls=kwargs.pop("cls", pii_entities_result),
495
- **kwargs
496
- )
497
- )
498
- except HttpResponseError as error:
499
- return process_http_response_error(error)
500
-
501
- @distributed_trace_async
502
- @validate_multiapi_args(
503
- version_method_added="v3.0",
504
- args_mapping={"v3.1": ["string_index_type", "disable_service_logs"]}
505
- )
506
- async def recognize_linked_entities(
507
- self,
508
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
509
- *,
510
- disable_service_logs: Optional[bool] = None,
511
- language: Optional[str] = None,
512
- model_version: Optional[str] = None,
513
- show_stats: Optional[bool] = None,
514
- string_index_type: Optional[str] = None,
515
- **kwargs: Any,
516
- ) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]:
517
- """Recognize linked entities from a well-known knowledge base for a batch of documents.
518
-
519
- Identifies and disambiguates the identity of each entity found in text (for example,
520
- determining whether an occurrence of the word Mars refers to the planet, or to the
521
- Roman god of war). Recognized entities are associated with URLs to a well-known
522
- knowledge base, like Wikipedia.
523
-
524
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
525
-
526
- :param documents: The set of documents to process as part of this batch.
527
- If you wish to specify the ID and language on a per-item basis you must
528
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
529
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
530
- `{"id": "1", "language": "en", "text": "hello world"}`.
531
- :type documents:
532
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
533
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
534
- entire batch. For example, use "en" for English; "es" for Spanish etc.
535
- If not set, uses "en" for English as default. Per-document language will
536
- take precedence over whole batch language. See https://aka.ms/talangs for
537
- supported languages in Language API.
538
- :keyword str model_version: This value indicates which model will
539
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
540
- is not specified, the API will default to the latest, non-preview version.
541
- See here for more info: https://aka.ms/text-analytics-model-versioning
542
- :keyword bool show_stats: If set to true, response will contain document
543
- level statistics in the `statistics` field of the document-level response.
544
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
545
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
546
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
547
- see https://aka.ms/text-analytics-offsets
548
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
549
- logged on the service side for troubleshooting. By default, the Language service logs your
550
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
551
- the service's natural language processing functions. Setting this parameter to true,
552
- disables input logging and may limit our ability to remediate issues that occur. Please see
553
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
554
- additional details, and Microsoft Responsible AI principles at
555
- https://www.microsoft.com/ai/responsible-ai.
556
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeLinkedEntitiesResult`
557
- and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
558
- were passed in.
559
- :rtype: list[~azure.ai.textanalytics.RecognizeLinkedEntitiesResult or ~azure.ai.textanalytics.DocumentError]
560
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
561
-
562
- .. versionadded:: v3.1
563
- The *disable_service_logs* and *string_index_type* keyword arguments.
564
-
565
- .. admonition:: Example:
566
-
567
- .. literalinclude:: ../samples/async_samples/sample_recognize_linked_entities_async.py
568
- :start-after: [START recognize_linked_entities_async]
569
- :end-before: [END recognize_linked_entities_async]
570
- :language: python
571
- :dedent: 4
572
- :caption: Recognize linked entities in a batch of documents.
573
- """
574
-
575
- language_arg = language if language is not None else self._default_language
576
- docs = _validate_input(documents, "language", language_arg)
577
- string_index_type_arg = string_index_type if string_index_type is not None else self._string_code_unit
578
-
579
- try:
580
- if is_language_api(self._api_version):
581
- models = self._client.models(api_version=self._api_version)
582
- return cast(
583
- List[Union[RecognizeLinkedEntitiesResult, DocumentError]],
584
- await self._client.analyze_text(
585
- body=models.AnalyzeTextEntityLinkingInput(
586
- analysis_input={"documents": docs},
587
- parameters=models.EntityLinkingTaskParameters(
588
- logging_opt_out=disable_service_logs,
589
- model_version=model_version,
590
- string_index_type=string_index_type_compatibility(string_index_type_arg)
591
- )
592
- ),
593
- show_stats=show_stats,
594
- cls=kwargs.pop("cls", linked_entities_result),
595
- **kwargs
596
- )
597
- )
598
-
599
- # api_versions 3.0, 3.1
600
- return cast(
601
- List[Union[RecognizeLinkedEntitiesResult, DocumentError]],
602
- await self._client.entities_linking(
603
- documents=docs,
604
- logging_opt_out=disable_service_logs,
605
- model_version=model_version,
606
- string_index_type=string_index_type_arg,
607
- show_stats=show_stats,
608
- cls=kwargs.pop("cls", linked_entities_result),
609
- **kwargs
610
- )
611
- )
612
- except HttpResponseError as error:
613
- return process_http_response_error(error)
614
-
615
- @distributed_trace_async
616
- @validate_multiapi_args(
617
- version_method_added="v3.0",
618
- args_mapping={"v3.1": ["disable_service_logs"]}
619
- )
620
- async def extract_key_phrases(
621
- self,
622
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
623
- *,
624
- disable_service_logs: Optional[bool] = None,
625
- language: Optional[str] = None,
626
- model_version: Optional[str] = None,
627
- show_stats: Optional[bool] = None,
628
- **kwargs: Any
629
- ) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]:
630
- """Extract key phrases from a batch of documents.
631
-
632
- Returns a list of strings denoting the key phrases in the input
633
- text. For example, for the input text "The food was delicious and there
634
- were wonderful staff", the API returns the main talking points: "food"
635
- and "wonderful staff"
636
-
637
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
638
-
639
- :param documents: The set of documents to process as part of this batch.
640
- If you wish to specify the ID and language on a per-item basis you must
641
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
642
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
643
- `{"id": "1", "language": "en", "text": "hello world"}`.
644
- :type documents:
645
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
646
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
647
- entire batch. For example, use "en" for English; "es" for Spanish etc.
648
- If not set, uses "en" for English as default. Per-document language will
649
- take precedence over whole batch language. See https://aka.ms/talangs for
650
- supported languages in Language API.
651
- :keyword str model_version: This value indicates which model will
652
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
653
- is not specified, the API will default to the latest, non-preview version.
654
- See here for more info: https://aka.ms/text-analytics-model-versioning
655
- :keyword bool show_stats: If set to true, response will contain document
656
- level statistics in the `statistics` field of the document-level response.
657
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
658
- logged on the service side for troubleshooting. By default, the Language service logs your
659
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
660
- the service's natural language processing functions. Setting this parameter to true,
661
- disables input logging and may limit our ability to remediate issues that occur. Please see
662
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
663
- additional details, and Microsoft Responsible AI principles at
664
- https://www.microsoft.com/ai/responsible-ai.
665
- :return: The combined list of :class:`~azure.ai.textanalytics.ExtractKeyPhrasesResult` and
666
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
667
- passed in.
668
- :rtype: list[~azure.ai.textanalytics.ExtractKeyPhrasesResult or ~azure.ai.textanalytics.DocumentError]
669
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
670
-
671
- .. versionadded:: v3.1
672
- The *disable_service_logs* keyword argument.
673
-
674
- .. admonition:: Example:
675
-
676
- .. literalinclude:: ../samples/async_samples/sample_extract_key_phrases_async.py
677
- :start-after: [START extract_key_phrases_async]
678
- :end-before: [END extract_key_phrases_async]
679
- :language: python
680
- :dedent: 4
681
- :caption: Extract the key phrases in a batch of documents.
682
- """
683
-
684
- language_arg = language if language is not None else self._default_language
685
- docs = _validate_input(documents, "language", language_arg)
686
-
687
- try:
688
- if is_language_api(self._api_version):
689
- models = self._client.models(api_version=self._api_version)
690
- return cast(
691
- List[Union[ExtractKeyPhrasesResult, DocumentError]],
692
- await self._client.analyze_text(
693
- body=models.AnalyzeTextKeyPhraseExtractionInput(
694
- analysis_input={"documents": docs},
695
- parameters=models.KeyPhraseTaskParameters(
696
- logging_opt_out=disable_service_logs,
697
- model_version=model_version,
698
- )
699
- ),
700
- show_stats=show_stats,
701
- cls=kwargs.pop("cls", key_phrases_result),
702
- **kwargs
703
- )
704
- )
705
-
706
- # api_versions 3.0, 3.1
707
- return cast(
708
- List[Union[ExtractKeyPhrasesResult, DocumentError]],
709
- await self._client.key_phrases(
710
- documents=docs,
711
- model_version=model_version,
712
- show_stats=show_stats,
713
- logging_opt_out=disable_service_logs,
714
- cls=kwargs.pop("cls", key_phrases_result),
715
- **kwargs
716
- )
717
- )
718
- except HttpResponseError as error:
719
- return process_http_response_error(error)
720
-
721
- @distributed_trace_async
722
- @validate_multiapi_args(
723
- version_method_added="v3.0",
724
- args_mapping={"v3.1": ["show_opinion_mining", "disable_service_logs", "string_index_type"]}
725
- )
726
- async def analyze_sentiment(
727
- self,
728
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
729
- *,
730
- disable_service_logs: Optional[bool] = None,
731
- language: Optional[str] = None,
732
- model_version: Optional[str] = None,
733
- show_opinion_mining: Optional[bool] = None,
734
- show_stats: Optional[bool] = None,
735
- string_index_type: Optional[str] = None,
736
- **kwargs: Any,
737
- ) -> List[Union[AnalyzeSentimentResult, DocumentError]]:
738
- """Analyze sentiment for a batch of documents. Turn on opinion mining with `show_opinion_mining`.
739
-
740
- Returns a sentiment prediction, as well as sentiment scores for
741
- each sentiment class (Positive, Negative, and Neutral) for the document
742
- and each sentence within it.
743
-
744
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
745
-
746
- :param documents: The set of documents to process as part of this batch.
747
- If you wish to specify the ID and language on a per-item basis you must
748
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
749
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
750
- `{"id": "1", "language": "en", "text": "hello world"}`.
751
- :type documents:
752
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
753
- :keyword bool show_opinion_mining: Whether to mine the opinions of a sentence and conduct more
754
- granular analysis around the aspects of a product or service (also known as
755
- aspect-based sentiment analysis). If set to true, the returned
756
- :class:`~azure.ai.textanalytics.SentenceSentiment` objects
757
- will have property `mined_opinions` containing the result of this analysis. Only available for
758
- API version v3.1 and up.
759
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
760
- entire batch. For example, use "en" for English; "es" for Spanish etc.
761
- If not set, uses "en" for English as default. Per-document language will
762
- take precedence over whole batch language. See https://aka.ms/talangs for
763
- supported languages in Language API.
764
- :keyword str model_version: This value indicates which model will
765
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
766
- is not specified, the API will default to the latest, non-preview version.
767
- See here for more info: https://aka.ms/text-analytics-model-versioning
768
- :keyword bool show_stats: If set to true, response will contain document
769
- level statistics in the `statistics` field of the document-level response.
770
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
771
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
772
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
773
- see https://aka.ms/text-analytics-offsets
774
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
775
- logged on the service side for troubleshooting. By default, the Language service logs your
776
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
777
- the service's natural language processing functions. Setting this parameter to true,
778
- disables input logging and may limit our ability to remediate issues that occur. Please see
779
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
780
- additional details, and Microsoft Responsible AI principles at
781
- https://www.microsoft.com/ai/responsible-ai.
782
- :return: The combined list of :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` and
783
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
784
- passed in.
785
- :rtype: list[~azure.ai.textanalytics.AnalyzeSentimentResult or ~azure.ai.textanalytics.DocumentError]
786
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
787
-
788
- .. versionadded:: v3.1
789
- The *show_opinion_mining*, *disable_service_logs*, and *string_index_type* keyword arguments.
790
-
791
- .. admonition:: Example:
792
-
793
- .. literalinclude:: ../samples/async_samples/sample_analyze_sentiment_async.py
794
- :start-after: [START analyze_sentiment_async]
795
- :end-before: [END analyze_sentiment_async]
796
- :language: python
797
- :dedent: 4
798
- :caption: Analyze sentiment in a batch of documents.
799
- """
800
-
801
- language_arg = language if language is not None else self._default_language
802
- docs = _validate_input(documents, "language", language_arg)
803
- string_index_type_arg = string_index_type if string_index_type is not None else self._string_code_unit
804
-
805
- try:
806
- if is_language_api(self._api_version):
807
- models = self._client.models(api_version=self._api_version)
808
- return cast(
809
- List[Union[AnalyzeSentimentResult, DocumentError]],
810
- await self._client.analyze_text(
811
- body=models.AnalyzeTextSentimentAnalysisInput(
812
- analysis_input={"documents": docs},
813
- parameters=models.SentimentAnalysisTaskParameters(
814
- logging_opt_out=disable_service_logs,
815
- model_version=model_version,
816
- string_index_type=string_index_type_compatibility(string_index_type_arg),
817
- opinion_mining=show_opinion_mining,
818
- )
819
- ),
820
- show_stats=show_stats,
821
- cls=kwargs.pop("cls", sentiment_result),
822
- **kwargs
823
- )
824
- )
825
-
826
- # api_versions 3.0, 3.1
827
- return cast(
828
- List[Union[AnalyzeSentimentResult, DocumentError]],
829
- await self._client.sentiment(
830
- documents=docs,
831
- logging_opt_out=disable_service_logs,
832
- model_version=model_version,
833
- string_index_type=string_index_type_arg,
834
- opinion_mining=show_opinion_mining,
835
- show_stats=show_stats,
836
- cls=kwargs.pop("cls", sentiment_result),
837
- **kwargs
838
- )
839
- )
840
- except HttpResponseError as error:
841
- return process_http_response_error(error)
842
-
843
- # pylint: disable=unused-argument
844
- def _healthcare_result_callback(
845
- self, raw_response, deserialized, doc_id_order, task_id_order=None, show_stats=False, bespoke=False
846
- ):
847
- if deserialized is None:
848
- models = self._client.models(api_version=self._api_version)
849
- response_cls = \
850
- models.AnalyzeTextJobState if is_language_api(self._api_version) else models.HealthcareJobState
851
- deserialized = response_cls.deserialize(raw_response)
852
- return healthcare_paged_result(
853
- doc_id_order,
854
- self._client.analyze_text_job_status if is_language_api(self._api_version) else self._client.health_status,
855
- raw_response,
856
- deserialized,
857
- show_stats=show_stats,
858
- )
859
-
860
- @distributed_trace_async
861
- @validate_multiapi_args(
862
- version_method_added="v3.1",
863
- args_mapping={
864
- "2022-10-01-preview": ["fhir_version", "document_type"],
865
- "2022-05-01": ["display_name"]
866
- }
867
- )
868
- async def begin_analyze_healthcare_entities(
869
- self,
870
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
871
- *,
872
- continuation_token: Optional[str] = None,
873
- disable_service_logs: Optional[bool] = None,
874
- display_name: Optional[str] = None,
875
- document_type: Optional[Union[str, HealthcareDocumentType]] = None,
876
- fhir_version: Optional[str] = None,
877
- language: Optional[str] = None,
878
- model_version: Optional[str] = None,
879
- polling_interval: Optional[int] = None,
880
- show_stats: Optional[bool] = None,
881
- string_index_type: Optional[str] = None,
882
- **kwargs: Any,
883
- ) -> AsyncAnalyzeHealthcareEntitiesLROPoller[
884
- AsyncItemPaged[Union[AnalyzeHealthcareEntitiesResult, DocumentError]]
885
- ]:
886
- """Analyze healthcare entities and identify relationships between these entities in a batch of documents.
887
-
888
- Entities are associated with references that can be found in existing knowledge bases,
889
- such as UMLS, CHV, MSH, etc.
890
-
891
- We also extract the relations found between entities, for example in "The subject took 100 mg of ibuprofen",
892
- we would extract the relationship between the "100 mg" dosage and the "ibuprofen" medication.
893
-
894
- :param documents: The set of documents to process as part of this batch.
895
- If you wish to specify the ID and language on a per-item basis you must
896
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
897
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
898
- `{"id": "1", "language": "en", "text": "hello world"}`.
899
- :type documents:
900
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
901
- :keyword str model_version: This value indicates which model will
902
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
903
- is not specified, the API will default to the latest, non-preview version.
904
- See here for more info: https://aka.ms/text-analytics-model-versioning
905
- :keyword bool show_stats: If set to true, response will contain document level statistics.
906
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
907
- entire batch. For example, use "en" for English; "es" for Spanish etc.
908
- For automatic language detection, use "auto" (Only supported by API version
909
- 2022-10-01-preview and newer). If not set, uses "en" for English as default.
910
- Per-document language will take precedence over whole batch language.
911
- See https://aka.ms/talangs for supported languages in Language API.
912
- :keyword str display_name: An optional display name to set for the requested analysis.
913
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
914
- Can be one of 'UnicodeCodePoint' (default), 'Utf16CodeUnit', or 'TextElement_v8'.
915
- For additional information see https://aka.ms/text-analytics-offsets
916
- :keyword int polling_interval: Waiting time between two polls for LRO operations
917
- if no Retry-After header is present. Defaults to 5 seconds.
918
- :keyword str continuation_token:
919
- Call `continuation_token()` on the poller object to save the long-running operation (LRO)
920
- state into an opaque token. Pass the value as the `continuation_token` keyword argument
921
- to restart the LRO from a saved state.
922
- :keyword bool disable_service_logs: Defaults to true, meaning that the Language service will not log your
923
- input text on the service side for troubleshooting. If set to False, the Language service logs your
924
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
925
- the Text Analytics natural language processing functions. Please see
926
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
927
- additional details, and Microsoft Responsible AI principles at
928
- https://www.microsoft.com/ai/responsible-ai.
929
- :keyword str fhir_version: The FHIR Spec version that the result will use to format the fhir_bundle
930
- on the result object. For additional information see https://www.hl7.org/fhir/overview.html.
931
- The only acceptable values to pass in are None and "4.0.1". The default value is None.
932
- :keyword document_type: Document type that can be provided as input for Fhir Documents. Expect to
933
- have fhir_version provided when used. Behavior of using None enum is the same as not using the
934
- document_type parameter. Known values are: "None", "ClinicalTrial", "DischargeSummary",
935
- "ProgressNote", "HistoryAndPhysical", "Consult", "Imaging", "Pathology", and "ProcedureNote".
936
- :paramtype document_type: str or ~azure.ai.textanalytics.HealthcareDocumentType
937
- :return: An instance of an AsyncAnalyzeHealthcareEntitiesLROPoller. Call `result()` on the poller
938
- object to return a heterogeneous pageable of
939
- :class:`~azure.ai.textanalytics.AnalyzeHealthcareEntitiesResult` and
940
- :class:`~azure.ai.textanalytics.DocumentError`.
941
- :rtype:
942
- ~azure.ai.textanalytics.aio.AsyncAnalyzeHealthcareEntitiesLROPoller[~azure.core.async_paging.AsyncItemPaged[
943
- ~azure.ai.textanalytics.AnalyzeHealthcareEntitiesResult or ~azure.ai.textanalytics.DocumentError]]
944
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
945
-
946
- .. versionadded:: v3.1
947
- The *begin_analyze_healthcare_entities* client method.
948
- .. versionadded:: 2022-05-01
949
- The *display_name* keyword argument.
950
- .. versionadded:: 2022-10-01-preview
951
- The *fhir_version* and *document_type* keyword arguments.
952
-
953
- .. admonition:: Example:
954
-
955
- .. literalinclude:: ../samples/async_samples/sample_analyze_healthcare_entities_async.py
956
- :start-after: [START analyze_healthcare_entities_async]
957
- :end-before: [END analyze_healthcare_entities_async]
958
- :language: python
959
- :dedent: 4
960
- :caption: Analyze healthcare entities in a batch of documents.
961
- """
962
-
963
- language_arg = language if language is not None else self._default_language
964
- polling_interval_arg = polling_interval if polling_interval is not None else 5
965
- string_index_type_arg = string_index_type if string_index_type is not None else self._string_code_unit
966
-
967
- if continuation_token:
968
- return cast(
969
- AsyncAnalyzeHealthcareEntitiesLROPoller[
970
- AsyncItemPaged[Union[AnalyzeHealthcareEntitiesResult, DocumentError]]
971
- ],
972
- _get_result_from_continuation_token(
973
- self._client._client, # pylint: disable=protected-access
974
- continuation_token,
975
- AsyncAnalyzeHealthcareEntitiesLROPoller,
976
- AsyncAnalyzeHealthcareEntitiesLROPollingMethod(
977
- text_analytics_client=self._client,
978
- timeout=polling_interval_arg,
979
- **kwargs
980
- ),
981
- self._healthcare_result_callback
982
- )
983
- )
984
-
985
- docs = _validate_input(documents, "language", language_arg)
986
- doc_id_order = [doc.get("id") for doc in docs]
987
- my_cls = kwargs.pop(
988
- "cls",
989
- lambda pipeline_response, deserialized, _: self._healthcare_result_callback(
990
- pipeline_response, deserialized, doc_id_order, show_stats=show_stats
991
- ),
992
- )
993
- models = self._client.models(api_version=self._api_version)
994
-
995
- try:
996
- if is_language_api(self._api_version):
997
- input_docs = models.MultiLanguageAnalysisInput(
998
- documents=docs
999
- )
1000
- return cast(
1001
- AsyncAnalyzeHealthcareEntitiesLROPoller[
1002
- AsyncItemPaged[Union[AnalyzeHealthcareEntitiesResult, DocumentError]]
1003
- ],
1004
- await self._client.begin_analyze_text_submit_job( # type: ignore
1005
- body=models.AnalyzeTextJobsInput(
1006
- analysis_input=input_docs,
1007
- display_name=display_name,
1008
- tasks=[
1009
- models.HealthcareLROTask(
1010
- task_name="0",
1011
- parameters=models.HealthcareTaskParameters(
1012
- model_version=model_version,
1013
- logging_opt_out=disable_service_logs,
1014
- string_index_type=string_index_type_compatibility(string_index_type_arg),
1015
- fhir_version=fhir_version,
1016
- document_type=document_type,
1017
- )
1018
- )
1019
- ]
1020
- ),
1021
- cls=my_cls,
1022
- polling=AsyncAnalyzeHealthcareEntitiesLROPollingMethod(
1023
- text_analytics_client=self._client,
1024
- timeout=polling_interval_arg,
1025
- show_stats=show_stats,
1026
- doc_id_order=doc_id_order,
1027
- lro_algorithms=[
1028
- TextAnalyticsOperationResourcePolling(
1029
- show_stats=show_stats,
1030
- )
1031
- ],
1032
- **kwargs
1033
- ),
1034
- continuation_token=continuation_token,
1035
- poller_cls=AsyncAnalyzeHealthcareEntitiesLROPoller,
1036
- **kwargs
1037
- )
1038
- )
1039
-
1040
- # v3.1
1041
- return cast(
1042
- AsyncAnalyzeHealthcareEntitiesLROPoller[
1043
- AsyncItemPaged[Union[AnalyzeHealthcareEntitiesResult, DocumentError]]
1044
- ],
1045
- await self._client.begin_health(
1046
- docs,
1047
- model_version=model_version,
1048
- string_index_type=string_index_type_arg,
1049
- logging_opt_out=disable_service_logs,
1050
- cls=my_cls,
1051
- polling=AsyncAnalyzeHealthcareEntitiesLROPollingMethod(
1052
- text_analytics_client=self._client,
1053
- doc_id_order=doc_id_order,
1054
- show_stats=show_stats,
1055
- timeout=polling_interval_arg,
1056
- lro_algorithms=[
1057
- TextAnalyticsOperationResourcePolling(
1058
- show_stats=show_stats,
1059
- )
1060
- ],
1061
- **kwargs,
1062
- ),
1063
- continuation_token=continuation_token,
1064
- **kwargs,
1065
- )
1066
- )
1067
- except HttpResponseError as error:
1068
- return process_http_response_error(error)
1069
-
1070
- def _analyze_result_callback(
1071
- self, raw_response, deserialized, doc_id_order, task_id_order=None, show_stats=False, bespoke=False
1072
- ):
1073
-
1074
- if deserialized is None:
1075
- models = self._client.models(api_version=self._api_version)
1076
- response_cls = models.AnalyzeTextJobState if is_language_api(self._api_version) else models.AnalyzeJobState
1077
- deserialized = response_cls.deserialize(raw_response)
1078
- return analyze_paged_result(
1079
- doc_id_order,
1080
- task_id_order,
1081
- self._client.analyze_text_job_status if is_language_api(self._api_version) else self._client.analyze_status,
1082
- raw_response,
1083
- deserialized,
1084
- show_stats=show_stats,
1085
- bespoke=bespoke
1086
- )
1087
-
1088
- @distributed_trace_async
1089
- @validate_multiapi_args(
1090
- version_method_added="v3.1",
1091
- custom_wrapper=check_for_unsupported_actions_types,
1092
- )
1093
- async def begin_analyze_actions(
1094
- self,
1095
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
1096
- actions: List[
1097
- Union[
1098
- RecognizeEntitiesAction,
1099
- RecognizeLinkedEntitiesAction,
1100
- RecognizePiiEntitiesAction,
1101
- ExtractKeyPhrasesAction,
1102
- AnalyzeSentimentAction,
1103
- RecognizeCustomEntitiesAction,
1104
- SingleLabelClassifyAction,
1105
- MultiLabelClassifyAction,
1106
- AnalyzeHealthcareEntitiesAction,
1107
- ExtractSummaryAction,
1108
- AbstractiveSummaryAction,
1109
- ]
1110
- ],
1111
- *,
1112
- continuation_token: Optional[str] = None,
1113
- display_name: Optional[str] = None,
1114
- language: Optional[str] = None,
1115
- polling_interval: Optional[int] = None,
1116
- show_stats: Optional[bool] = None,
1117
- **kwargs: Any,
1118
- ) -> AsyncTextAnalysisLROPoller[
1119
- AsyncItemPaged[
1120
- List[
1121
- Union[
1122
- RecognizeEntitiesResult,
1123
- RecognizeLinkedEntitiesResult,
1124
- RecognizePiiEntitiesResult,
1125
- ExtractKeyPhrasesResult,
1126
- AnalyzeSentimentResult,
1127
- RecognizeCustomEntitiesResult,
1128
- ClassifyDocumentResult,
1129
- AnalyzeHealthcareEntitiesResult,
1130
- ExtractSummaryResult,
1131
- AbstractiveSummaryResult,
1132
- DocumentError,
1133
- ]
1134
- ]
1135
- ]
1136
- ]:
1137
- """Start a long-running operation to perform a variety of text analysis actions over a batch of documents.
1138
-
1139
- We recommend you use this function if you're looking to analyze larger documents, and / or
1140
- combine multiple text analysis actions into one call. Otherwise, we recommend you use
1141
- the action specific endpoints, for example :func:`analyze_sentiment`.
1142
-
1143
- .. note:: The abstractive summarization feature is part of a gated preview. Request access here:
1144
- https://aka.ms/applyforgatedsummarizationfeatures
1145
-
1146
- .. note:: See the service documentation for regional support of custom action features:
1147
- https://aka.ms/azsdk/textanalytics/customfunctionalities
1148
-
1149
- :param documents: The set of documents to process as part of this batch.
1150
- If you wish to specify the ID and language on a per-item basis you must
1151
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
1152
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
1153
- `{"id": "1", "language": "en", "text": "hello world"}`.
1154
- :type documents:
1155
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
1156
- :param actions: A heterogeneous list of actions to perform on the input documents.
1157
- Each action object encapsulates the parameters used for the particular action type.
1158
- The action results will be in the same order of the input actions.
1159
- :type actions:
1160
- list[RecognizeEntitiesAction or RecognizePiiEntitiesAction or ExtractKeyPhrasesAction or
1161
- RecognizeLinkedEntitiesAction or AnalyzeSentimentAction or
1162
- RecognizeCustomEntitiesAction or SingleLabelClassifyAction or
1163
- MultiLabelClassifyAction or AnalyzeHealthcareEntitiesAction or
1164
- AbstractiveSummaryAction or ExtractSummaryAction]
1165
- :keyword str display_name: An optional display name to set for the requested analysis.
1166
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
1167
- entire batch. For example, use "en" for English; "es" for Spanish etc.
1168
- For automatic language detection, use "auto" (Only supported by API version
1169
- 2022-10-01-preview and newer). If not set, uses "en" for English as default.
1170
- Per-document language will take precedence over whole batch language.
1171
- See https://aka.ms/talangs for supported languages in Language API.
1172
- :keyword bool show_stats: If set to true, response will contain document level statistics.
1173
- :keyword int polling_interval: Waiting time between two polls for LRO operations
1174
- if no Retry-After header is present. Defaults to 5 seconds.
1175
- :keyword str continuation_token:
1176
- Call `continuation_token()` on the poller object to save the long-running operation (LRO)
1177
- state into an opaque token. Pass the value as the `continuation_token` keyword argument
1178
- to restart the LRO from a saved state.
1179
- :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on the poller
1180
- object to return a pageable heterogeneous list of lists. This list of lists is first ordered
1181
- by the documents you input, then ordered by the actions you input. For example,
1182
- if you have documents input ["Hello", "world"], and actions
1183
- :class:`~azure.ai.textanalytics.RecognizeEntitiesAction` and
1184
- :class:`~azure.ai.textanalytics.AnalyzeSentimentAction`, when iterating over the list of lists,
1185
- you will first iterate over the action results for the "Hello" document, getting the
1186
- :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` of "Hello",
1187
- then the :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` of "Hello".
1188
- Then, you will get the :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` and
1189
- :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` of "world".
1190
- :rtype:
1191
- ~azure.ai.textanalytics.aio.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
1192
- list[RecognizeEntitiesResult or RecognizeLinkedEntitiesResult or RecognizePiiEntitiesResult or
1193
- ExtractKeyPhrasesResult or AnalyzeSentimentResult or RecognizeCustomEntitiesResult
1194
- or ClassifyDocumentResult or AnalyzeHealthcareEntitiesResult or ExtractSummaryResult
1195
- or AbstractiveSummaryResult or DocumentError]]]
1196
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
1197
-
1198
- .. versionadded:: v3.1
1199
- The *begin_analyze_actions* client method.
1200
- .. versionadded:: 2022-05-01
1201
- The *RecognizeCustomEntitiesAction*, *SingleLabelClassifyAction*,
1202
- *MultiLabelClassifyAction*, and *AnalyzeHealthcareEntitiesAction* input options and the
1203
- corresponding *RecognizeCustomEntitiesResult*, *ClassifyDocumentResult*,
1204
- and *AnalyzeHealthcareEntitiesResult* result objects
1205
- .. versionadded:: 2022-10-01-preview
1206
- The *ExtractSummaryAction* and *AbstractSummaryAction* input options and the corresponding
1207
- *ExtractSummaryResult* and *AbstractSummaryResult* result objects.
1208
-
1209
- .. admonition:: Example:
1210
-
1211
- .. literalinclude:: ../samples/async_samples/sample_analyze_actions_async.py
1212
- :start-after: [START analyze_async]
1213
- :end-before: [END analyze_async]
1214
- :language: python
1215
- :dedent: 4
1216
- :caption: Start a long-running operation to perform a variety of text analysis actions over
1217
- a batch of documents.
1218
- """
1219
-
1220
- language_arg = language if language is not None else self._default_language
1221
- polling_interval_arg = polling_interval if polling_interval is not None else 5
1222
- bespoke = kwargs.pop("bespoke", False)
1223
-
1224
- if continuation_token:
1225
- return cast(
1226
- AsyncAnalyzeActionsResponse,
1227
- _get_result_from_continuation_token(
1228
- self._client._client, # pylint: disable=protected-access
1229
- continuation_token,
1230
- AsyncAnalyzeActionsLROPoller,
1231
- AsyncAnalyzeActionsLROPollingMethod(
1232
- text_analytics_client=self._client,
1233
- timeout=polling_interval_arg,
1234
- **kwargs
1235
- ),
1236
- self._analyze_result_callback,
1237
- bespoke
1238
- )
1239
- )
1240
-
1241
- models = self._client.models(api_version=self._api_version)
1242
-
1243
- input_model_cls = \
1244
- models.MultiLanguageAnalysisInput if is_language_api(self._api_version) else models.MultiLanguageBatchInput
1245
- docs = input_model_cls(
1246
- documents=_validate_input(documents, "language", language_arg)
1247
- )
1248
- doc_id_order = [doc.get("id") for doc in docs.documents]
1249
- try:
1250
- generated_tasks = [
1251
- action._to_generated(self._api_version, str(idx)) # pylint: disable=protected-access
1252
- for idx, action in enumerate(actions)
1253
- ]
1254
- except AttributeError as e:
1255
- raise TypeError("Unsupported action type in list.") from e
1256
- task_order = [(_determine_action_type(a), a.task_name) for a in generated_tasks]
1257
-
1258
- response_cls = kwargs.pop(
1259
- "cls",
1260
- lambda pipeline_response, deserialized, _:
1261
- self._analyze_result_callback(
1262
- pipeline_response,
1263
- deserialized,
1264
- doc_id_order,
1265
- task_id_order=task_order,
1266
- show_stats=show_stats,
1267
- bespoke=bespoke
1268
- ),
1269
- )
1270
-
1271
- try:
1272
- if is_language_api(self._api_version):
1273
- return cast(
1274
- AsyncAnalyzeActionsResponse,
1275
- await self._client.begin_analyze_text_submit_job(
1276
- body=models.AnalyzeTextJobsInput(
1277
- analysis_input=docs,
1278
- display_name=display_name,
1279
- tasks=generated_tasks
1280
- ),
1281
- cls=response_cls,
1282
- polling=AsyncAnalyzeActionsLROPollingMethod(
1283
- text_analytics_client=self._client,
1284
- timeout=polling_interval_arg,
1285
- show_stats=show_stats,
1286
- doc_id_order=doc_id_order,
1287
- task_id_order=task_order,
1288
- lro_algorithms=[
1289
- TextAnalyticsOperationResourcePolling(
1290
- show_stats=show_stats,
1291
- )
1292
- ],
1293
- **kwargs
1294
- ),
1295
- continuation_token=continuation_token,
1296
- **kwargs
1297
- )
1298
- )
1299
-
1300
- # v3.1
1301
- analyze_tasks = models.JobManifestTasks(
1302
- entity_recognition_tasks=[
1303
- a for a in generated_tasks
1304
- if _determine_action_type(a) == _AnalyzeActionsType.RECOGNIZE_ENTITIES
1305
- ],
1306
- entity_recognition_pii_tasks=[
1307
- a for a in generated_tasks
1308
- if _determine_action_type(a) == _AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
1309
- ],
1310
- key_phrase_extraction_tasks=[
1311
- a for a in generated_tasks
1312
- if _determine_action_type(a) == _AnalyzeActionsType.EXTRACT_KEY_PHRASES
1313
- ],
1314
- entity_linking_tasks=[
1315
- a for a in generated_tasks
1316
- if _determine_action_type(a) == _AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
1317
- ],
1318
- sentiment_analysis_tasks=[
1319
- a for a in generated_tasks
1320
- if _determine_action_type(a) == _AnalyzeActionsType.ANALYZE_SENTIMENT
1321
- ],
1322
- )
1323
- analyze_body = models.AnalyzeBatchInput(
1324
- display_name=display_name, tasks=analyze_tasks, analysis_input=docs
1325
- )
1326
- return cast(
1327
- AsyncAnalyzeActionsResponse,
1328
- await self._client.begin_analyze(
1329
- body=analyze_body,
1330
- cls=response_cls,
1331
- polling=AsyncAnalyzeActionsLROPollingMethod(
1332
- text_analytics_client=self._client,
1333
- timeout=polling_interval_arg,
1334
- show_stats=show_stats,
1335
- doc_id_order=doc_id_order,
1336
- task_id_order=task_order,
1337
- lro_algorithms=[
1338
- TextAnalyticsOperationResourcePolling(
1339
- show_stats=show_stats,
1340
- )
1341
- ],
1342
- **kwargs,
1343
- ),
1344
- continuation_token=continuation_token,
1345
- **kwargs,
1346
- )
1347
- )
1348
- except HttpResponseError as error:
1349
- return process_http_response_error(error)
1350
-
1351
- @distributed_trace_async
1352
- @validate_multiapi_args(
1353
- version_method_added="2022-05-01",
1354
- )
1355
- async def begin_recognize_custom_entities(
1356
- self,
1357
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
1358
- project_name: str,
1359
- deployment_name: str,
1360
- *,
1361
- continuation_token: Optional[str] = None,
1362
- disable_service_logs: Optional[bool] = None,
1363
- display_name: Optional[str] = None,
1364
- language: Optional[str] = None,
1365
- polling_interval: Optional[int] = None,
1366
- show_stats: Optional[bool] = None,
1367
- string_index_type: Optional[str] = None,
1368
- **kwargs: Any,
1369
- ) -> AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[RecognizeCustomEntitiesResult, DocumentError]]]:
1370
- """Start a long-running custom named entity recognition operation.
1371
-
1372
- For information on regional support of custom features and how to train a model to
1373
- recognize custom entities, see https://aka.ms/azsdk/textanalytics/customentityrecognition
1374
-
1375
- :param documents: The set of documents to process as part of this batch.
1376
- If you wish to specify the ID and language on a per-item basis you must
1377
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
1378
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
1379
- `{"id": "1", "language": "en", "text": "hello world"}`.
1380
- :type documents:
1381
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
1382
- :param str project_name: Required. This field indicates the project name for the model.
1383
- :param str deployment_name: This field indicates the deployment name for the model.
1384
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
1385
- entire batch. For example, use "en" for English; "es" for Spanish etc.
1386
- For automatic language detection, use "auto" (Only supported by API version
1387
- 2022-10-01-preview and newer). If not set, uses "en" for English as default.
1388
- Per-document language will take precedence over whole batch language.
1389
- See https://aka.ms/talangs for supported languages in Language API.
1390
- :keyword bool show_stats: If set to true, response will contain document level statistics.
1391
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
1392
- logged on the service side for troubleshooting. By default, the Language service logs your
1393
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1394
- the service's natural language processing functions. Setting this parameter to true,
1395
- disables input logging and may limit our ability to remediate issues that occur. Please see
1396
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
1397
- additional details, and Microsoft Responsible AI principles at
1398
- https://www.microsoft.com/ai/responsible-ai.
1399
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
1400
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
1401
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
1402
- see https://aka.ms/text-analytics-offsets
1403
- :keyword int polling_interval: Waiting time between two polls for LRO operations
1404
- if no Retry-After header is present. Defaults to 5 seconds.
1405
- :keyword str continuation_token:
1406
- Call `continuation_token()` on the poller object to save the long-running operation (LRO)
1407
- state into an opaque token. Pass the value as the `continuation_token` keyword argument
1408
- to restart the LRO from a saved state.
1409
- :keyword str display_name: An optional display name to set for the requested analysis.
1410
- :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on the this
1411
- object to return a heterogeneous pageable of
1412
- :class:`~azure.ai.textanalytics.RecognizeCustomEntitiesResult` and
1413
- :class:`~azure.ai.textanalytics.DocumentError`.
1414
- :rtype:
1415
- ~azure.ai.textanalytics.aio.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
1416
- ~azure.ai.textanalytics.RecognizeCustomEntitiesResult or ~azure.ai.textanalytics.DocumentError]]
1417
- :raises ~azure.core.exceptions.HttpResponseError:
1418
-
1419
- .. versionadded:: 2022-05-01
1420
- The *begin_recognize_custom_entities* client method.
1421
-
1422
- .. admonition:: Example:
1423
-
1424
- .. literalinclude:: ../samples/async_samples/sample_recognize_custom_entities_async.py
1425
- :start-after: [START recognize_custom_entities_async]
1426
- :end-before: [END recognize_custom_entities_async]
1427
- :language: python
1428
- :dedent: 4
1429
- :caption: Recognize custom entities in a batch of documents.
1430
- """
1431
-
1432
- polling_interval_arg = polling_interval if polling_interval is not None else 5
1433
- string_index_type_arg = string_index_type if string_index_type is not None else self._string_code_unit
1434
-
1435
- if continuation_token:
1436
- return cast(
1437
- AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[RecognizeCustomEntitiesResult, DocumentError]]],
1438
- _get_result_from_continuation_token(
1439
- self._client._client, # pylint: disable=protected-access
1440
- continuation_token,
1441
- AsyncAnalyzeActionsLROPoller,
1442
- AsyncAnalyzeActionsLROPollingMethod(
1443
- text_analytics_client=self._client,
1444
- timeout=polling_interval_arg,
1445
- **kwargs
1446
- ),
1447
- self._analyze_result_callback,
1448
- bespoke=True
1449
- )
1450
- )
1451
-
1452
- try:
1453
- return cast(
1454
- AsyncTextAnalysisLROPoller[
1455
- AsyncItemPaged[Union[RecognizeCustomEntitiesResult, DocumentError]]
1456
- ],
1457
- await self.begin_analyze_actions(
1458
- documents,
1459
- actions=[
1460
- RecognizeCustomEntitiesAction(
1461
- project_name=project_name,
1462
- deployment_name=deployment_name,
1463
- string_index_type=string_index_type_arg,
1464
- disable_service_logs=disable_service_logs
1465
- )
1466
- ],
1467
- display_name=display_name,
1468
- show_stats=show_stats,
1469
- language=language,
1470
- polling_interval=polling_interval_arg,
1471
- bespoke=True,
1472
- **kwargs
1473
- )
1474
- )
1475
-
1476
- except HttpResponseError as error:
1477
- return process_http_response_error(error)
1478
-
1479
- @distributed_trace_async
1480
- @validate_multiapi_args(
1481
- version_method_added="2022-05-01",
1482
- )
1483
- async def begin_single_label_classify(
1484
- self,
1485
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
1486
- project_name: str,
1487
- deployment_name: str,
1488
- *,
1489
- continuation_token: Optional[str] = None,
1490
- disable_service_logs: Optional[bool] = None,
1491
- display_name: Optional[str] = None,
1492
- language: Optional[str] = None,
1493
- polling_interval: Optional[int] = None,
1494
- show_stats: Optional[bool] = None,
1495
- **kwargs: Any,
1496
- ) -> AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]]:
1497
- """Start a long-running custom single label classification operation.
1498
-
1499
- For information on regional support of custom features and how to train a model to
1500
- classify your documents, see https://aka.ms/azsdk/textanalytics/customfunctionalities
1501
-
1502
- :param documents: The set of documents to process as part of this batch.
1503
- If you wish to specify the ID and language on a per-item basis you must
1504
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
1505
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
1506
- `{"id": "1", "language": "en", "text": "hello world"}`.
1507
- :type documents:
1508
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
1509
- :param str project_name: Required. This field indicates the project name for the model.
1510
- :param str deployment_name: This field indicates the deployment name for the model.
1511
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
1512
- entire batch. For example, use "en" for English; "es" for Spanish etc.
1513
- For automatic language detection, use "auto" (Only supported by API version
1514
- 2022-10-01-preview and newer). If not set, uses "en" for English as default.
1515
- Per-document language will take precedence over whole batch language.
1516
- See https://aka.ms/talangs for supported languages in Language API.
1517
- :keyword bool show_stats: If set to true, response will contain document level statistics.
1518
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
1519
- logged on the service side for troubleshooting. By default, the Language service logs your
1520
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1521
- the service's natural language processing functions. Setting this parameter to true,
1522
- disables input logging and may limit our ability to remediate issues that occur. Please see
1523
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
1524
- additional details, and Microsoft Responsible AI principles at
1525
- https://www.microsoft.com/ai/responsible-ai.
1526
- :keyword int polling_interval: Waiting time between two polls for LRO operations
1527
- if no Retry-After header is present. Defaults to 5 seconds.
1528
- :keyword str continuation_token:
1529
- Call `continuation_token()` on the poller object to save the long-running operation (LRO)
1530
- state into an opaque token. Pass the value as the `continuation_token` keyword argument
1531
- to restart the LRO from a saved state.
1532
- :keyword str display_name: An optional display name to set for the requested analysis.
1533
- :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on the this
1534
- object to return a heterogeneous pageable of
1535
- :class:`~azure.ai.textanalytics.ClassifyDocumentResult` and
1536
- :class:`~azure.ai.textanalytics.DocumentError`.
1537
- :rtype:
1538
- ~azure.ai.textanalytics.aio.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
1539
- ~azure.ai.textanalytics.ClassifyDocumentResult or ~azure.ai.textanalytics.DocumentError]]
1540
- :raises ~azure.core.exceptions.HttpResponseError:
1541
-
1542
- .. versionadded:: 2022-05-01
1543
- The *begin_single_label_classify* client method.
1544
-
1545
- .. admonition:: Example:
1546
-
1547
- .. literalinclude:: ../samples/async_samples/sample_single_label_classify_async.py
1548
- :start-after: [START single_label_classify_async]
1549
- :end-before: [END single_label_classify_async]
1550
- :language: python
1551
- :dedent: 4
1552
- :caption: Perform single label classification on a batch of documents.
1553
- """
1554
-
1555
- polling_interval_arg = polling_interval if polling_interval is not None else 5
1556
-
1557
- if continuation_token:
1558
- return cast(
1559
- AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]],
1560
- _get_result_from_continuation_token(
1561
- self._client._client, # pylint: disable=protected-access
1562
- continuation_token,
1563
- AsyncAnalyzeActionsLROPoller,
1564
- AsyncAnalyzeActionsLROPollingMethod(
1565
- text_analytics_client=self._client,
1566
- timeout=polling_interval_arg,
1567
- **kwargs
1568
- ),
1569
- self._analyze_result_callback,
1570
- bespoke=True
1571
- )
1572
- )
1573
-
1574
- try:
1575
- return cast(
1576
- AsyncTextAnalysisLROPoller[
1577
- AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]
1578
- ],
1579
- await self.begin_analyze_actions(
1580
- documents,
1581
- actions=[
1582
- SingleLabelClassifyAction(
1583
- project_name=project_name,
1584
- deployment_name=deployment_name,
1585
- disable_service_logs=disable_service_logs
1586
- )
1587
- ],
1588
- polling_interval=polling_interval_arg,
1589
- display_name=display_name,
1590
- show_stats=show_stats,
1591
- language=language,
1592
- bespoke=True,
1593
- **kwargs
1594
- )
1595
- )
1596
-
1597
- except HttpResponseError as error:
1598
- return process_http_response_error(error)
1599
-
1600
- @distributed_trace_async
1601
- @validate_multiapi_args(
1602
- version_method_added="2022-05-01",
1603
- )
1604
- async def begin_multi_label_classify(
1605
- self,
1606
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
1607
- project_name: str,
1608
- deployment_name: str,
1609
- *,
1610
- continuation_token: Optional[str] = None,
1611
- disable_service_logs: Optional[bool] = None,
1612
- display_name: Optional[str] = None,
1613
- language: Optional[str] = None,
1614
- polling_interval: Optional[int] = None,
1615
- show_stats: Optional[bool] = None,
1616
- **kwargs: Any,
1617
- ) -> AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]]:
1618
- """Start a long-running custom multi label classification operation.
1619
-
1620
- For information on regional support of custom features and how to train a model to
1621
- classify your documents, see https://aka.ms/azsdk/textanalytics/customfunctionalities
1622
-
1623
- :param documents: The set of documents to process as part of this batch.
1624
- If you wish to specify the ID and language on a per-item basis you must
1625
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
1626
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
1627
- `{"id": "1", "language": "en", "text": "hello world"}`.
1628
- :type documents:
1629
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
1630
- :param str project_name: Required. This field indicates the project name for the model.
1631
- :param str deployment_name: This field indicates the deployment name for the model.
1632
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
1633
- entire batch. For example, use "en" for English; "es" for Spanish etc.
1634
- For automatic language detection, use "auto" (Only supported by API version
1635
- 2022-10-01-preview and newer). If not set, uses "en" for English as default.
1636
- Per-document language will take precedence over whole batch language.
1637
- See https://aka.ms/talangs for supported languages in Language API.
1638
- :keyword bool show_stats: If set to true, response will contain document level statistics.
1639
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
1640
- logged on the service side for troubleshooting. By default, the Language service logs your
1641
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1642
- the service's natural language processing functions. Setting this parameter to true,
1643
- disables input logging and may limit our ability to remediate issues that occur. Please see
1644
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
1645
- additional details, and Microsoft Responsible AI principles at
1646
- https://www.microsoft.com/ai/responsible-ai.
1647
- :keyword int polling_interval: Waiting time between two polls for LRO operations
1648
- if no Retry-After header is present. Defaults to 5 seconds.
1649
- :keyword str continuation_token:
1650
- Call `continuation_token()` on the poller object to save the long-running operation (LRO)
1651
- state into an opaque token. Pass the value as the `continuation_token` keyword argument
1652
- to restart the LRO from a saved state.
1653
- :keyword str display_name: An optional display name to set for the requested analysis.
1654
- :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on the this
1655
- object to return a heterogeneous pageable of
1656
- :class:`~azure.ai.textanalytics.ClassifyDocumentResult` and
1657
- :class:`~azure.ai.textanalytics.DocumentError`.
1658
- :rtype:
1659
- ~azure.ai.textanalytics.aio.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
1660
- ~azure.ai.textanalytics.ClassifyDocumentResult or ~azure.ai.textanalytics.DocumentError]]
1661
- :raises ~azure.core.exceptions.HttpResponseError:
1662
-
1663
- .. versionadded:: 2022-05-01
1664
- The *begin_multi_label_classify* client method.
1665
-
1666
- .. admonition:: Example:
1667
-
1668
- .. literalinclude:: ../samples/async_samples/sample_multi_label_classify_async.py
1669
- :start-after: [START multi_label_classify_async]
1670
- :end-before: [END multi_label_classify_async]
1671
- :language: python
1672
- :dedent: 4
1673
- :caption: Perform multi label classification on a batch of documents.
1674
- """
1675
-
1676
- polling_interval_arg = polling_interval if polling_interval is not None else 5
1677
-
1678
- if continuation_token:
1679
- return cast(
1680
- AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]],
1681
- _get_result_from_continuation_token(
1682
- self._client._client, # pylint: disable=protected-access
1683
- continuation_token,
1684
- AsyncAnalyzeActionsLROPoller,
1685
- AsyncAnalyzeActionsLROPollingMethod(
1686
- text_analytics_client=self._client,
1687
- timeout=polling_interval_arg,
1688
- **kwargs
1689
- ),
1690
- self._analyze_result_callback,
1691
- bespoke=True
1692
- )
1693
- )
1694
-
1695
- try:
1696
- return cast(
1697
- AsyncTextAnalysisLROPoller[
1698
- AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]
1699
- ],
1700
- await self.begin_analyze_actions(
1701
- documents,
1702
- actions=[
1703
- MultiLabelClassifyAction(
1704
- project_name=project_name,
1705
- deployment_name=deployment_name,
1706
- disable_service_logs=disable_service_logs
1707
- )
1708
- ],
1709
- polling_interval=polling_interval_arg,
1710
- display_name=display_name,
1711
- show_stats=show_stats,
1712
- language=language,
1713
- bespoke=True,
1714
- **kwargs
1715
- )
1716
- )
1717
-
1718
- except HttpResponseError as error:
1719
- return process_http_response_error(error)
1720
-
1721
- @distributed_trace_async
1722
- @validate_multiapi_args(
1723
- version_method_added="2022-10-01-preview",
1724
- )
1725
- async def dynamic_classification(
1726
- self,
1727
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
1728
- categories: List[str],
1729
- *,
1730
- classification_type: Optional[Union[str, ClassificationType]] = None,
1731
- disable_service_logs: Optional[bool] = None,
1732
- language: Optional[str] = None,
1733
- model_version: Optional[str] = None,
1734
- show_stats: Optional[bool] = None,
1735
- **kwargs: Any,
1736
- ) -> List[Union[DynamicClassificationResult, DocumentError]]:
1737
- """Perform dynamic classification on a batch of documents.
1738
-
1739
- On the fly classification of the input documents into one or multiple categories.
1740
- Assigns either one or multiple categories per document. This type of classification
1741
- doesn't require model training.
1742
-
1743
- .. note:: The dynamic classification feature is part of a gated preview. Request access here:
1744
- https://aka.ms/applyforgatedlanguagefeature
1745
-
1746
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
1747
-
1748
- :param documents: The set of documents to process as part of this batch.
1749
- If you wish to specify the ID and language on a per-item basis you must
1750
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list
1751
- of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`,
1752
- like `{"id": "1", "language": "en", "text": "hello world"}`.
1753
- :type documents:
1754
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
1755
- :param list[str] categories: A list of categories to which input is classified to.
1756
- :keyword classification_type: Specifies either one or multiple categories per document. Defaults
1757
- to multi classification which may return more than one class for each document. Known values
1758
- are: "Single" and "Multi".
1759
- :paramtype classification_type: str or ~azure.ai.textanalytics.ClassificationType
1760
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
1761
- entire batch. For example, use "en" for English; "es" for Spanish etc.
1762
- If not set, uses "en" for English as default. Per-document language will
1763
- take precedence over whole batch language. See https://aka.ms/talangs for
1764
- supported languages in Language API.
1765
- :keyword str model_version: This value indicates which model will
1766
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
1767
- is not specified, the API will default to the latest, non-preview version.
1768
- See here for more info: https://aka.ms/text-analytics-model-versioning
1769
- :keyword bool show_stats: If set to true, response will contain document
1770
- level statistics in the `statistics` field of the document-level response.
1771
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
1772
- logged on the service side for troubleshooting. By default, the Language service logs your
1773
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1774
- the service's natural language processing functions. Setting this parameter to true,
1775
- disables input logging and may limit our ability to remediate issues that occur. Please see
1776
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
1777
- additional details, and Microsoft Responsible AI principles at
1778
- https://www.microsoft.com/ai/responsible-ai.
1779
- :return: The combined list of :class:`~azure.ai.textanalytics.DynamicClassificationResult` and
1780
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
1781
- were passed in.
1782
- :rtype: list[~azure.ai.textanalytics.DynamicClassificationResult or ~azure.ai.textanalytics.DocumentError]
1783
- :raises ~azure.core.exceptions.HttpResponseError:
1784
-
1785
- .. versionadded:: 2022-10-01-preview
1786
- The *dynamic_classification* client method.
1787
-
1788
- .. admonition:: Example:
1789
-
1790
- .. literalinclude:: ../samples/async_samples/sample_dynamic_classification_async.py
1791
- :start-after: [START dynamic_classification_async]
1792
- :end-before: [END dynamic_classification_async]
1793
- :language: python
1794
- :dedent: 4
1795
- :caption: Perform dynamic classification on a batch of documents.
1796
- """
1797
-
1798
- language_arg = language if language is not None else self._default_language
1799
- docs = _validate_input(documents, "language", language_arg)
1800
-
1801
- try:
1802
- models = self._client.models(api_version=self._api_version)
1803
- return cast(
1804
- List[Union[DynamicClassificationResult, DocumentError]],
1805
- await self._client.analyze_text(
1806
- body=models.AnalyzeTextDynamicClassificationInput(
1807
- analysis_input={"documents": docs},
1808
- parameters=models.DynamicClassificationTaskParameters(
1809
- categories=categories,
1810
- logging_opt_out=disable_service_logs,
1811
- model_version=model_version,
1812
- classification_type=classification_type,
1813
- )
1814
- ),
1815
- show_stats=show_stats,
1816
- cls=kwargs.pop("cls", dynamic_classification_result),
1817
- **kwargs
1818
- )
1819
- )
1820
- except HttpResponseError as error:
1821
- return process_http_response_error(error)
1822
-
1823
- @distributed_trace_async
1824
- @validate_multiapi_args(
1825
- version_method_added="2022-10-01-preview"
1826
- )
1827
- async def begin_extract_summary(
1828
- self,
1829
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
1830
- *,
1831
- continuation_token: Optional[str] = None,
1832
- disable_service_logs: Optional[bool] = None,
1833
- display_name: Optional[str] = None,
1834
- language: Optional[str] = None,
1835
- polling_interval: Optional[int] = None,
1836
- show_stats: Optional[bool] = None,
1837
- model_version: Optional[str] = None,
1838
- string_index_type: Optional[str] = None,
1839
- max_sentence_count: Optional[int] = None,
1840
- order_by: Optional[str] = None,
1841
- **kwargs: Any,
1842
- ) -> AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ExtractSummaryResult, DocumentError]]]:
1843
- """Start a long-running extractive summarization operation.
1844
-
1845
- For a conceptual discussion of extractive summarization, see the service documentation:
1846
- https://learn.microsoft.com/azure/cognitive-services/language-service/summarization/overview
1847
-
1848
- :param documents: The set of documents to process as part of this batch.
1849
- If you wish to specify the ID and language on a per-item basis you must
1850
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
1851
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
1852
- `{"id": "1", "language": "en", "text": "hello world"}`.
1853
- :type documents:
1854
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
1855
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
1856
- entire batch. For example, use "en" for English; "es" for Spanish etc.
1857
- For automatic language detection, use "auto" (Only supported by API version
1858
- 2022-10-01-preview and newer). If not set, uses "en" for English as default.
1859
- Per-document language will take precedence over whole batch language.
1860
- See https://aka.ms/talangs for supported languages in Language API.
1861
- :keyword bool show_stats: If set to true, response will contain document level statistics.
1862
- :keyword Optional[int] max_sentence_count: Maximum number of sentences to return. Defaults to 3.
1863
- :keyword Optional[str] order_by: Possible values include: "Offset", "Rank". Default value: "Offset".
1864
- :keyword Optional[str] model_version: The model version to use for the analysis.
1865
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
1866
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
1867
- logged on the service side for troubleshooting. By default, the Language service logs your
1868
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1869
- the service's natural language processing functions. Setting this parameter to true,
1870
- disables input logging and may limit our ability to remediate issues that occur. Please see
1871
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
1872
- additional details, and Microsoft Responsible AI principles at
1873
- https://www.microsoft.com/ai/responsible-ai.
1874
- :keyword int polling_interval: Waiting time between two polls for LRO operations
1875
- if no Retry-After header is present. Defaults to 5 seconds.
1876
- :keyword str continuation_token:
1877
- Call `continuation_token()` on the poller object to save the long-running operation (LRO)
1878
- state into an opaque token. Pass the value as the `continuation_token` keyword argument
1879
- to restart the LRO from a saved state.
1880
- :keyword str display_name: An optional display name to set for the requested analysis.
1881
- :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on the this
1882
- object to return a heterogeneous pageable of
1883
- :class:`~azure.ai.textanalytics.ExtractSummaryResult` and
1884
- :class:`~azure.ai.textanalytics.DocumentError`.
1885
- :rtype:
1886
- ~azure.ai.textanalytics.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
1887
- ~azure.ai.textanalytics.ExtractSummaryResult or ~azure.ai.textanalytics.DocumentError]]
1888
- :raises ~azure.core.exceptions.HttpResponseError:
1889
-
1890
- .. versionadded:: 2022-10-01-preview
1891
- The *begin_extract_summary* client method.
1892
-
1893
- .. admonition:: Example:
1894
-
1895
- .. literalinclude:: ../samples/async_samples/sample_extract_summary_async.py
1896
- :start-after: [START extract_summary_async]
1897
- :end-before: [END extract_summary_async]
1898
- :language: python
1899
- :dedent: 4
1900
- :caption: Perform extractive summarization on a batch of documents.
1901
- """
1902
-
1903
- polling_interval_arg = polling_interval if polling_interval is not None else 5
1904
- string_index_type_arg = string_index_type if string_index_type is not None else self._string_code_unit
1905
-
1906
- if continuation_token:
1907
- return cast(
1908
- AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ExtractSummaryResult, DocumentError]]],
1909
- _get_result_from_continuation_token(
1910
- self._client._client, # pylint: disable=protected-access
1911
- continuation_token,
1912
- AsyncAnalyzeActionsLROPoller,
1913
- AsyncAnalyzeActionsLROPollingMethod(
1914
- text_analytics_client=self._client,
1915
- timeout=polling_interval_arg,
1916
- **kwargs
1917
- ),
1918
- self._analyze_result_callback,
1919
- bespoke=True
1920
- )
1921
- )
1922
-
1923
- try:
1924
- return cast(
1925
- AsyncTextAnalysisLROPoller[
1926
- AsyncItemPaged[Union[ExtractSummaryResult, DocumentError]]
1927
- ],
1928
- await self.begin_analyze_actions(
1929
- documents,
1930
- actions=[
1931
- ExtractSummaryAction(
1932
- model_version=model_version,
1933
- string_index_type=string_index_type_arg,
1934
- max_sentence_count=max_sentence_count,
1935
- order_by=order_by,
1936
- disable_service_logs=disable_service_logs,
1937
- )
1938
- ],
1939
- polling_interval=polling_interval_arg,
1940
- display_name=display_name,
1941
- show_stats=show_stats,
1942
- language=language,
1943
- bespoke=True,
1944
- **kwargs
1945
- )
1946
- )
1947
-
1948
- except HttpResponseError as error:
1949
- return process_http_response_error(error)
1950
-
1951
- @distributed_trace_async
1952
- @validate_multiapi_args(
1953
- version_method_added="2022-10-01-preview"
1954
- )
1955
- async def begin_abstractive_summary(
1956
- self,
1957
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
1958
- *,
1959
- continuation_token: Optional[str] = None,
1960
- disable_service_logs: Optional[bool] = None,
1961
- display_name: Optional[str] = None,
1962
- language: Optional[str] = None,
1963
- polling_interval: Optional[int] = None,
1964
- show_stats: Optional[bool] = None,
1965
- model_version: Optional[str] = None,
1966
- string_index_type: Optional[str] = None,
1967
- sentence_count: Optional[int] = None,
1968
- **kwargs: Any,
1969
- ) -> AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[AbstractiveSummaryResult, DocumentError]]]:
1970
- """Start a long-running abstractive summarization operation.
1971
-
1972
- For a conceptual discussion of abstractive summarization, see the service documentation:
1973
- https://learn.microsoft.com/azure/cognitive-services/language-service/summarization/overview
1974
-
1975
- .. note:: The abstractive summarization feature is part of a gated preview. Request access here:
1976
- https://aka.ms/applyforgatedsummarizationfeatures
1977
-
1978
- :param documents: The set of documents to process as part of this batch.
1979
- If you wish to specify the ID and language on a per-item basis you must
1980
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
1981
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
1982
- `{"id": "1", "language": "en", "text": "hello world"}`.
1983
- :type documents:
1984
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
1985
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
1986
- entire batch. For example, use "en" for English; "es" for Spanish etc.
1987
- For automatic language detection, use "auto" (Only supported by API version
1988
- 2022-10-01-preview and newer). If not set, uses "en" for English as default.
1989
- Per-document language will take precedence over whole batch language.
1990
- See https://aka.ms/talangs for supported languages in Language API.
1991
- :keyword bool show_stats: If set to true, response will contain document level statistics.
1992
- :keyword Optional[int] sentence_count: It controls the approximate number of sentences in the output summaries.
1993
- :keyword Optional[str] model_version: The model version to use for the analysis.
1994
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
1995
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
1996
- logged on the service side for troubleshooting. By default, the Language service logs your
1997
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1998
- the service's natural language processing functions. Setting this parameter to true,
1999
- disables input logging and may limit our ability to remediate issues that occur. Please see
2000
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2001
- additional details, and Microsoft Responsible AI principles at
2002
- https://www.microsoft.com/ai/responsible-ai.
2003
- :keyword int polling_interval: Waiting time between two polls for LRO operations
2004
- if no Retry-After header is present. Defaults to 5 seconds.
2005
- :keyword str continuation_token:
2006
- Call `continuation_token()` on the poller object to save the long-running operation (LRO)
2007
- state into an opaque token. Pass the value as the `continuation_token` keyword argument
2008
- to restart the LRO from a saved state.
2009
- :keyword str display_name: An optional display name to set for the requested analysis.
2010
- :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on the this
2011
- object to return a heterogeneous pageable of
2012
- :class:`~azure.ai.textanalytics.AbstractiveSummaryResult` and
2013
- :class:`~azure.ai.textanalytics.DocumentError`.
2014
- :rtype:
2015
- ~azure.ai.textanalytics.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
2016
- ~azure.ai.textanalytics.AbstractiveSummaryResult or ~azure.ai.textanalytics.DocumentError]]
2017
- :raises ~azure.core.exceptions.HttpResponseError:
2018
-
2019
- .. versionadded:: 2022-10-01-preview
2020
- The *begin_abstractive_summary* client method.
2021
-
2022
- .. admonition:: Example:
2023
-
2024
- .. literalinclude:: ../samples/async_samples/sample_abstractive_summary_async.py
2025
- :start-after: [START abstractive_summary_async]
2026
- :end-before: [END abstractive_summary_async]
2027
- :language: python
2028
- :dedent: 4
2029
- :caption: Perform abstractive summarization on a batch of documents.
2030
- """
2031
-
2032
- polling_interval_arg = polling_interval if polling_interval is not None else 5
2033
- string_index_type_arg = string_index_type if string_index_type is not None else self._string_code_unit
2034
-
2035
- if continuation_token:
2036
- return cast(
2037
- AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[AbstractiveSummaryResult, DocumentError]]],
2038
- _get_result_from_continuation_token(
2039
- self._client._client, # pylint: disable=protected-access
2040
- continuation_token,
2041
- AsyncAnalyzeActionsLROPoller,
2042
- AsyncAnalyzeActionsLROPollingMethod(
2043
- text_analytics_client=self._client,
2044
- timeout=polling_interval_arg,
2045
- **kwargs
2046
- ),
2047
- self._analyze_result_callback,
2048
- bespoke=True
2049
- )
2050
- )
2051
-
2052
- try:
2053
- return cast(
2054
- AsyncTextAnalysisLROPoller[
2055
- AsyncItemPaged[Union[AbstractiveSummaryResult, DocumentError]]
2056
- ],
2057
- await self.begin_analyze_actions(
2058
- documents,
2059
- actions=[
2060
- AbstractiveSummaryAction(
2061
- model_version=model_version,
2062
- string_index_type=string_index_type_arg,
2063
- sentence_count=sentence_count,
2064
- disable_service_logs=disable_service_logs,
2065
- )
2066
- ],
2067
- polling_interval=polling_interval_arg,
2068
- display_name=display_name,
2069
- show_stats=show_stats,
2070
- language=language,
2071
- bespoke=True,
2072
- **kwargs
2073
- )
2074
- )
2075
-
2076
- except HttpResponseError as error:
2077
- return process_http_response_error(error)