azure-ai-textanalytics 5.3.0b1__py3-none-any.whl → 6.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of azure-ai-textanalytics might be problematic; consult the package registry's advisory page for more details.

Files changed (128)
  1. azure/ai/textanalytics/__init__.py +26 -197
  2. azure/ai/textanalytics/_client.py +111 -0
  3. azure/ai/textanalytics/_configuration.py +73 -0
  4. azure/ai/textanalytics/{_generated/v2022_05_01/operations → _operations}/__init__.py +13 -8
  5. azure/ai/textanalytics/_operations/_operations.py +716 -0
  6. azure/ai/textanalytics/{_generated/v2022_05_01/models → _operations}/_patch.py +8 -6
  7. azure/ai/textanalytics/_patch.py +350 -0
  8. azure/ai/textanalytics/{_generated/aio → _utils}/__init__.py +1 -5
  9. azure/ai/textanalytics/_utils/model_base.py +1237 -0
  10. azure/ai/textanalytics/{_generated/_serialization.py → _utils/serialization.py} +640 -616
  11. azure/ai/textanalytics/{_generated/v2022_05_01/aio/_vendor.py → _utils/utils.py} +10 -12
  12. azure/ai/textanalytics/_version.py +8 -7
  13. azure/ai/textanalytics/aio/__init__.py +25 -14
  14. azure/ai/textanalytics/aio/_client.py +115 -0
  15. azure/ai/textanalytics/aio/_configuration.py +75 -0
  16. azure/ai/textanalytics/{_generated/v2022_10_01_preview/aio/operations → aio/_operations}/__init__.py +13 -8
  17. azure/ai/textanalytics/aio/_operations/_operations.py +623 -0
  18. azure/ai/textanalytics/{_generated/v2022_05_01 → aio/_operations}/_patch.py +8 -6
  19. azure/ai/textanalytics/aio/_patch.py +344 -0
  20. azure/ai/textanalytics/models/__init__.py +402 -0
  21. azure/ai/textanalytics/models/_enums.py +1979 -0
  22. azure/ai/textanalytics/models/_models.py +6641 -0
  23. azure/ai/textanalytics/{_generated/v2022_05_01/aio → models}/_patch.py +8 -6
  24. azure/ai/textanalytics/py.typed +1 -0
  25. {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/METADATA +755 -319
  26. azure_ai_textanalytics-6.0.0b1.dist-info/RECORD +29 -0
  27. {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/WHEEL +1 -1
  28. azure/ai/textanalytics/_base_client.py +0 -111
  29. azure/ai/textanalytics/_check.py +0 -22
  30. azure/ai/textanalytics/_dict_mixin.py +0 -54
  31. azure/ai/textanalytics/_generated/__init__.py +0 -16
  32. azure/ai/textanalytics/_generated/_configuration.py +0 -70
  33. azure/ai/textanalytics/_generated/_operations_mixin.py +0 -795
  34. azure/ai/textanalytics/_generated/_text_analytics_client.py +0 -126
  35. azure/ai/textanalytics/_generated/_version.py +0 -8
  36. azure/ai/textanalytics/_generated/aio/_configuration.py +0 -66
  37. azure/ai/textanalytics/_generated/aio/_operations_mixin.py +0 -776
  38. azure/ai/textanalytics/_generated/aio/_text_analytics_client.py +0 -124
  39. azure/ai/textanalytics/_generated/models.py +0 -8
  40. azure/ai/textanalytics/_generated/v2022_05_01/__init__.py +0 -20
  41. azure/ai/textanalytics/_generated/v2022_05_01/_configuration.py +0 -72
  42. azure/ai/textanalytics/_generated/v2022_05_01/_text_analytics_client.py +0 -100
  43. azure/ai/textanalytics/_generated/v2022_05_01/_vendor.py +0 -45
  44. azure/ai/textanalytics/_generated/v2022_05_01/aio/__init__.py +0 -20
  45. azure/ai/textanalytics/_generated/v2022_05_01/aio/_configuration.py +0 -71
  46. azure/ai/textanalytics/_generated/v2022_05_01/aio/_text_analytics_client.py +0 -97
  47. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/__init__.py +0 -18
  48. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_patch.py +0 -121
  49. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_text_analytics_client_operations.py +0 -603
  50. azure/ai/textanalytics/_generated/v2022_05_01/models/__init__.py +0 -281
  51. azure/ai/textanalytics/_generated/v2022_05_01/models/_models_py3.py +0 -5722
  52. azure/ai/textanalytics/_generated/v2022_05_01/models/_text_analytics_client_enums.py +0 -439
  53. azure/ai/textanalytics/_generated/v2022_05_01/operations/_patch.py +0 -120
  54. azure/ai/textanalytics/_generated/v2022_05_01/operations/_text_analytics_client_operations.py +0 -744
  55. azure/ai/textanalytics/_generated/v2022_10_01_preview/__init__.py +0 -20
  56. azure/ai/textanalytics/_generated/v2022_10_01_preview/_configuration.py +0 -72
  57. azure/ai/textanalytics/_generated/v2022_10_01_preview/_patch.py +0 -19
  58. azure/ai/textanalytics/_generated/v2022_10_01_preview/_text_analytics_client.py +0 -100
  59. azure/ai/textanalytics/_generated/v2022_10_01_preview/_vendor.py +0 -45
  60. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/__init__.py +0 -20
  61. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_configuration.py +0 -71
  62. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_patch.py +0 -19
  63. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_text_analytics_client.py +0 -97
  64. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_vendor.py +0 -27
  65. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_patch.py +0 -121
  66. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_text_analytics_client_operations.py +0 -603
  67. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/__init__.py +0 -407
  68. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_models_py3.py +0 -8462
  69. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_patch.py +0 -72
  70. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_text_analytics_client_enums.py +0 -730
  71. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/__init__.py +0 -18
  72. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_patch.py +0 -120
  73. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_text_analytics_client_operations.py +0 -744
  74. azure/ai/textanalytics/_generated/v3_0/__init__.py +0 -20
  75. azure/ai/textanalytics/_generated/v3_0/_configuration.py +0 -66
  76. azure/ai/textanalytics/_generated/v3_0/_patch.py +0 -31
  77. azure/ai/textanalytics/_generated/v3_0/_text_analytics_client.py +0 -96
  78. azure/ai/textanalytics/_generated/v3_0/_vendor.py +0 -33
  79. azure/ai/textanalytics/_generated/v3_0/aio/__init__.py +0 -20
  80. azure/ai/textanalytics/_generated/v3_0/aio/_configuration.py +0 -65
  81. azure/ai/textanalytics/_generated/v3_0/aio/_patch.py +0 -31
  82. azure/ai/textanalytics/_generated/v3_0/aio/_text_analytics_client.py +0 -93
  83. azure/ai/textanalytics/_generated/v3_0/aio/_vendor.py +0 -27
  84. azure/ai/textanalytics/_generated/v3_0/aio/operations/__init__.py +0 -18
  85. azure/ai/textanalytics/_generated/v3_0/aio/operations/_patch.py +0 -19
  86. azure/ai/textanalytics/_generated/v3_0/aio/operations/_text_analytics_client_operations.py +0 -428
  87. azure/ai/textanalytics/_generated/v3_0/models/__init__.py +0 -81
  88. azure/ai/textanalytics/_generated/v3_0/models/_models_py3.py +0 -1467
  89. azure/ai/textanalytics/_generated/v3_0/models/_patch.py +0 -19
  90. azure/ai/textanalytics/_generated/v3_0/models/_text_analytics_client_enums.py +0 -58
  91. azure/ai/textanalytics/_generated/v3_0/operations/__init__.py +0 -18
  92. azure/ai/textanalytics/_generated/v3_0/operations/_patch.py +0 -19
  93. azure/ai/textanalytics/_generated/v3_0/operations/_text_analytics_client_operations.py +0 -604
  94. azure/ai/textanalytics/_generated/v3_1/__init__.py +0 -20
  95. azure/ai/textanalytics/_generated/v3_1/_configuration.py +0 -66
  96. azure/ai/textanalytics/_generated/v3_1/_patch.py +0 -31
  97. azure/ai/textanalytics/_generated/v3_1/_text_analytics_client.py +0 -98
  98. azure/ai/textanalytics/_generated/v3_1/_vendor.py +0 -45
  99. azure/ai/textanalytics/_generated/v3_1/aio/__init__.py +0 -20
  100. azure/ai/textanalytics/_generated/v3_1/aio/_configuration.py +0 -65
  101. azure/ai/textanalytics/_generated/v3_1/aio/_patch.py +0 -31
  102. azure/ai/textanalytics/_generated/v3_1/aio/_text_analytics_client.py +0 -95
  103. azure/ai/textanalytics/_generated/v3_1/aio/_vendor.py +0 -27
  104. azure/ai/textanalytics/_generated/v3_1/aio/operations/__init__.py +0 -18
  105. azure/ai/textanalytics/_generated/v3_1/aio/operations/_patch.py +0 -19
  106. azure/ai/textanalytics/_generated/v3_1/aio/operations/_text_analytics_client_operations.py +0 -1291
  107. azure/ai/textanalytics/_generated/v3_1/models/__init__.py +0 -205
  108. azure/ai/textanalytics/_generated/v3_1/models/_models_py3.py +0 -3976
  109. azure/ai/textanalytics/_generated/v3_1/models/_patch.py +0 -19
  110. azure/ai/textanalytics/_generated/v3_1/models/_text_analytics_client_enums.py +0 -367
  111. azure/ai/textanalytics/_generated/v3_1/operations/__init__.py +0 -18
  112. azure/ai/textanalytics/_generated/v3_1/operations/_patch.py +0 -19
  113. azure/ai/textanalytics/_generated/v3_1/operations/_text_analytics_client_operations.py +0 -1709
  114. azure/ai/textanalytics/_lro.py +0 -552
  115. azure/ai/textanalytics/_models.py +0 -3142
  116. azure/ai/textanalytics/_policies.py +0 -66
  117. azure/ai/textanalytics/_request_handlers.py +0 -104
  118. azure/ai/textanalytics/_response_handlers.py +0 -580
  119. azure/ai/textanalytics/_text_analytics_client.py +0 -1802
  120. azure/ai/textanalytics/_user_agent.py +0 -8
  121. azure/ai/textanalytics/_validate.py +0 -113
  122. azure/ai/textanalytics/aio/_base_client_async.py +0 -95
  123. azure/ai/textanalytics/aio/_lro_async.py +0 -501
  124. azure/ai/textanalytics/aio/_response_handlers_async.py +0 -94
  125. azure/ai/textanalytics/aio/_text_analytics_client_async.py +0 -1800
  126. azure_ai_textanalytics-5.3.0b1.dist-info/RECORD +0 -115
  127. {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info/licenses}/LICENSE +0 -0
  128. {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/top_level.txt +0 -0
@@ -1,1800 +0,0 @@
1
- # ------------------------------------
2
- # Copyright (c) Microsoft Corporation.
3
- # Licensed under the MIT License.
4
- # ------------------------------------
5
- # pylint: disable=too-many-lines
6
-
7
- from typing import Union, Any, List, Dict, cast
8
- from azure.core.async_paging import AsyncItemPaged
9
- from azure.core.tracing.decorator_async import distributed_trace_async
10
- from azure.core.exceptions import HttpResponseError
11
- from azure.core.credentials import AzureKeyCredential
12
- from azure.core.credentials_async import AsyncTokenCredential
13
- from ._base_client_async import AsyncTextAnalyticsClientBase
14
- from .._request_handlers import (
15
- _validate_input,
16
- _determine_action_type,
17
- )
18
- from .._validate import validate_multiapi_args, check_for_unsupported_actions_types
19
- from .._response_handlers import (
20
- process_http_response_error,
21
- entities_result,
22
- linked_entities_result,
23
- key_phrases_result,
24
- sentiment_result,
25
- language_result,
26
- pii_entities_result,
27
- _get_result_from_continuation_token,
28
- dynamic_classification_result,
29
- )
30
- from ._response_handlers_async import healthcare_paged_result, analyze_paged_result
31
- from .._models import (
32
- DetectLanguageInput,
33
- TextDocumentInput,
34
- DetectLanguageResult,
35
- RecognizeEntitiesResult,
36
- RecognizeLinkedEntitiesResult,
37
- ExtractKeyPhrasesResult,
38
- AnalyzeSentimentResult,
39
- DocumentError,
40
- RecognizePiiEntitiesResult,
41
- RecognizeEntitiesAction,
42
- RecognizePiiEntitiesAction,
43
- ExtractKeyPhrasesAction,
44
- _AnalyzeActionsType,
45
- RecognizeLinkedEntitiesAction,
46
- AnalyzeSentimentAction,
47
- AnalyzeHealthcareEntitiesResult,
48
- RecognizeCustomEntitiesAction,
49
- RecognizeCustomEntitiesResult,
50
- SingleLabelClassifyAction,
51
- MultiLabelClassifyAction,
52
- ClassifyDocumentResult,
53
- AnalyzeHealthcareEntitiesAction,
54
- ExtractSummaryAction,
55
- ExtractSummaryResult,
56
- AbstractSummaryAction,
57
- AbstractSummaryResult,
58
- DynamicClassificationResult,
59
- )
60
- from .._check import is_language_api, string_index_type_compatibility
61
- from .._lro import TextAnalyticsOperationResourcePolling
62
- from ._lro_async import (
63
- AsyncAnalyzeHealthcareEntitiesLROPollingMethod,
64
- AsyncAnalyzeActionsLROPollingMethod,
65
- AsyncAnalyzeHealthcareEntitiesLROPoller,
66
- AsyncAnalyzeActionsLROPoller,
67
- AsyncTextAnalysisLROPoller,
68
- )
69
-
70
-
71
- AsyncAnalyzeActionsResponse = AsyncTextAnalysisLROPoller[
72
- AsyncItemPaged[
73
- List[
74
- Union[
75
- RecognizeEntitiesResult,
76
- RecognizeLinkedEntitiesResult,
77
- RecognizePiiEntitiesResult,
78
- ExtractKeyPhrasesResult,
79
- AnalyzeSentimentResult,
80
- RecognizeCustomEntitiesResult,
81
- ClassifyDocumentResult,
82
- AnalyzeHealthcareEntitiesResult,
83
- ExtractSummaryResult,
84
- AbstractSummaryResult,
85
- DocumentError,
86
- ]
87
- ]
88
- ]
89
- ]
90
-
91
-
92
- class TextAnalyticsClient(AsyncTextAnalyticsClientBase):
93
- """The Language service API is a suite of natural language processing (NLP) skills built with the best-in-class
94
- Microsoft machine learning algorithms. The API can be used to analyze unstructured text for
95
- tasks such as sentiment analysis, key phrase extraction, entities recognition,
96
- and language detection, and more.
97
-
98
- Further documentation can be found in
99
- https://docs.microsoft.com/azure/cognitive-services/language-service/overview
100
-
101
- :param str endpoint: Supported Cognitive Services or Language resource
102
- endpoints (protocol and hostname, for example: 'https://<resource-name>.cognitiveservices.azure.com').
103
- :param credential: Credentials needed for the client to connect to Azure.
104
- This can be the an instance of AzureKeyCredential if using a Cognitive Services/Language API key
105
- or a token credential from :mod:`azure.identity`.
106
- :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential
107
- :keyword str default_country_hint: Sets the default country_hint to use for all operations.
108
- Defaults to "US". If you don't want to use a country hint, pass the string "none".
109
- :keyword str default_language: Sets the default language to use for all operations.
110
- Defaults to "en".
111
- :keyword api_version: The API version of the service to use for requests. It defaults to the
112
- latest service version. Setting to an older version may result in reduced feature compatibility.
113
- :paramtype api_version: str or ~azure.ai.textanalytics.TextAnalyticsApiVersion
114
-
115
- .. admonition:: Example:
116
-
117
- .. literalinclude:: ../samples/async_samples/sample_authentication_async.py
118
- :start-after: [START create_ta_client_with_key_async]
119
- :end-before: [END create_ta_client_with_key_async]
120
- :language: python
121
- :dedent: 4
122
- :caption: Creating the TextAnalyticsClient with endpoint and API key.
123
-
124
- .. literalinclude:: ../samples/async_samples/sample_authentication_async.py
125
- :start-after: [START create_ta_client_with_aad_async]
126
- :end-before: [END create_ta_client_with_aad_async]
127
- :language: python
128
- :dedent: 4
129
- :caption: Creating the TextAnalyticsClient with endpoint and token credential from Azure Active Directory.
130
- """
131
-
132
- def __init__(
133
- self,
134
- endpoint: str,
135
- credential: Union[AzureKeyCredential, AsyncTokenCredential],
136
- **kwargs: Any
137
- ) -> None:
138
- super().__init__(
139
- endpoint=endpoint, credential=credential, **kwargs
140
- )
141
-
142
- self._default_language = kwargs.pop("default_language", "en")
143
- self._default_country_hint = kwargs.pop("default_country_hint", "US")
144
- self._string_code_unit = (
145
- None if kwargs.get("api_version") == "v3.0" else "UnicodeCodePoint"
146
- )
147
-
148
- @distributed_trace_async
149
- @validate_multiapi_args(
150
- version_method_added="v3.0",
151
- args_mapping={"v3.1": ["disable_service_logs"]}
152
- )
153
- async def detect_language(
154
- self,
155
- documents: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]],
156
- **kwargs: Any,
157
- ) -> List[Union[DetectLanguageResult, DocumentError]]:
158
- """Detect language for a batch of documents.
159
-
160
- Returns the detected language and a numeric score between zero and
161
- one. Scores close to one indicate 100% certainty that the identified
162
- language is true. See https://aka.ms/talangs for the list of enabled languages.
163
-
164
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
165
-
166
- :param documents: The set of documents to process as part of this batch.
167
- If you wish to specify the ID and country_hint on a per-item basis you must
168
- use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of
169
- dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like
170
- `{"id": "1", "country_hint": "us", "text": "hello world"}`.
171
- :type documents:
172
- list[str] or list[~azure.ai.textanalytics.DetectLanguageInput] or list[dict[str, str]]
173
- :keyword str country_hint: Country of origin hint for the entire batch. Accepts two
174
- letter country codes specified by ISO 3166-1 alpha-2. Per-document
175
- country hints will take precedence over whole batch hints. Defaults to
176
- "US". If you don't want to use a country hint, pass the string "none".
177
- :keyword str model_version: This value indicates which model will
178
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
179
- is not specified, the API will default to the latest, non-preview version.
180
- See here for more info: https://aka.ms/text-analytics-model-versioning
181
- :keyword bool show_stats: If set to true, response will contain document
182
- level statistics in the `statistics` field of the document-level response.
183
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
184
- logged on the service side for troubleshooting. By default, the Language service logs your
185
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
186
- the service's natural language processing functions. Setting this parameter to true,
187
- disables input logging and may limit our ability to remediate issues that occur. Please see
188
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
189
- additional details, and Microsoft Responsible AI principles at
190
- https://www.microsoft.com/ai/responsible-ai.
191
- :return: The combined list of :class:`~azure.ai.textanalytics.DetectLanguageResult`
192
- and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
193
- were passed in.
194
- :rtype: list[~azure.ai.textanalytics.DetectLanguageResult or ~azure.ai.textanalytics.DocumentError]
195
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
196
-
197
- .. versionadded:: v3.1
198
- The *disable_service_logs* keyword argument.
199
-
200
- .. admonition:: Example:
201
-
202
- .. literalinclude:: ../samples/async_samples/sample_detect_language_async.py
203
- :start-after: [START detect_language_async]
204
- :end-before: [END detect_language_async]
205
- :language: python
206
- :dedent: 4
207
- :caption: Detecting language in a batch of documents.
208
- """
209
- country_hint_arg = kwargs.pop("country_hint", None)
210
- country_hint = (
211
- country_hint_arg
212
- if country_hint_arg is not None
213
- else self._default_country_hint
214
- )
215
- docs = _validate_input(documents, "country_hint", country_hint)
216
- model_version = kwargs.pop("model_version", None)
217
- show_stats = kwargs.pop("show_stats", None)
218
- disable_service_logs = kwargs.pop("disable_service_logs", None)
219
-
220
- try:
221
- if is_language_api(self._api_version):
222
- models = self._client.models(api_version=self._api_version)
223
- return cast(
224
- List[Union[DetectLanguageResult, DocumentError]],
225
- await self._client.analyze_text(
226
- body=models.AnalyzeTextLanguageDetectionInput(
227
- analysis_input={"documents": docs},
228
- parameters=models.LanguageDetectionTaskParameters(
229
- logging_opt_out=disable_service_logs,
230
- model_version=model_version
231
- )
232
- ),
233
- show_stats=show_stats,
234
- cls=kwargs.pop("cls", language_result),
235
- **kwargs
236
- )
237
- )
238
-
239
- # api_versions 3.0, 3.1
240
- return cast(
241
- List[Union[DetectLanguageResult, DocumentError]],
242
- await self._client.languages(
243
- documents=docs,
244
- model_version=model_version,
245
- show_stats=show_stats,
246
- logging_opt_out=disable_service_logs,
247
- cls=kwargs.pop("cls", language_result),
248
- **kwargs
249
- )
250
- )
251
- except HttpResponseError as error:
252
- return process_http_response_error(error)
253
-
254
- @distributed_trace_async
255
- @validate_multiapi_args(
256
- version_method_added="v3.0",
257
- args_mapping={"v3.1": ["string_index_type", "disable_service_logs"]}
258
- )
259
- async def recognize_entities(
260
- self,
261
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
262
- **kwargs: Any,
263
- ) -> List[Union[RecognizeEntitiesResult, DocumentError]]:
264
- """Recognize entities for a batch of documents.
265
-
266
- Identifies and categorizes entities in your text as people, places,
267
- organizations, date/time, quantities, percentages, currencies, and more.
268
- For the list of supported entity types, check: https://aka.ms/taner
269
-
270
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
271
-
272
- :param documents: The set of documents to process as part of this batch.
273
- If you wish to specify the ID and language on a per-item basis you must
274
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
275
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
276
- `{"id": "1", "language": "en", "text": "hello world"}`.
277
- :type documents:
278
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
279
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
280
- entire batch. For example, use "en" for English; "es" for Spanish etc.
281
- If not set, uses "en" for English as default. Per-document language will
282
- take precedence over whole batch language. See https://aka.ms/talangs for
283
- supported languages in Language API.
284
- :keyword str model_version: This value indicates which model will
285
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
286
- is not specified, the API will default to the latest, non-preview version.
287
- See here for more info: https://aka.ms/text-analytics-model-versioning
288
- :keyword bool show_stats: If set to true, response will contain document
289
- level statistics in the `statistics` field of the document-level response.
290
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
291
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
292
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
293
- see https://aka.ms/text-analytics-offsets
294
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
295
- logged on the service side for troubleshooting. By default, the Language service logs your
296
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
297
- the service's natural language processing functions. Setting this parameter to true,
298
- disables input logging and may limit our ability to remediate issues that occur. Please see
299
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
300
- additional details, and Microsoft Responsible AI principles at
301
- https://www.microsoft.com/ai/responsible-ai.
302
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` and
303
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
304
- passed in.
305
- :rtype: list[~azure.ai.textanalytics.RecognizeEntitiesResult or ~azure.ai.textanalytics.DocumentError]
306
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
307
-
308
- .. versionadded:: v3.1
309
- The *disable_service_logs* and *string_index_type* keyword arguments.
310
-
311
- .. admonition:: Example:
312
-
313
- .. literalinclude:: ../samples/async_samples/sample_recognize_entities_async.py
314
- :start-after: [START recognize_entities_async]
315
- :end-before: [END recognize_entities_async]
316
- :language: python
317
- :dedent: 4
318
- :caption: Recognize entities in a batch of documents.
319
- """
320
- language_arg = kwargs.pop("language", None)
321
- language = language_arg if language_arg is not None else self._default_language
322
- docs = _validate_input(documents, "language", language)
323
- model_version = kwargs.pop("model_version", None)
324
- show_stats = kwargs.pop("show_stats", None)
325
- disable_service_logs = kwargs.pop("disable_service_logs", None)
326
- string_index_type = kwargs.pop("string_index_type", self._string_code_unit)
327
-
328
- try:
329
- if is_language_api(self._api_version):
330
- models = self._client.models(api_version=self._api_version)
331
- return cast(
332
- List[Union[RecognizeEntitiesResult, DocumentError]],
333
- await self._client.analyze_text(
334
- body=models.AnalyzeTextEntityRecognitionInput(
335
- analysis_input={"documents": docs},
336
- parameters=models.EntitiesTaskParameters(
337
- logging_opt_out=disable_service_logs,
338
- model_version=model_version,
339
- string_index_type=string_index_type_compatibility(string_index_type)
340
- )
341
- ),
342
- show_stats=show_stats,
343
- cls=kwargs.pop("cls", entities_result),
344
- **kwargs
345
- )
346
- )
347
-
348
- # api_versions 3.0, 3.1
349
- return cast(
350
- List[Union[RecognizeEntitiesResult, DocumentError]],
351
- await self._client.entities_recognition_general(
352
- documents=docs,
353
- model_version=model_version,
354
- show_stats=show_stats,
355
- string_index_type=string_index_type,
356
- logging_opt_out=disable_service_logs,
357
- cls=kwargs.pop("cls", entities_result),
358
- **kwargs,
359
- )
360
- )
361
- except HttpResponseError as error:
362
- return process_http_response_error(error)
363
-
364
- @distributed_trace_async
365
- @validate_multiapi_args(
366
- version_method_added="v3.1"
367
- )
368
- async def recognize_pii_entities(
369
- self,
370
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
371
- **kwargs: Any,
372
- ) -> List[Union[RecognizePiiEntitiesResult, DocumentError]]:
373
- """Recognize entities containing personal information for a batch of documents.
374
-
375
- Returns a list of personal information entities ("SSN",
376
- "Bank Account", etc) in the document. For the list of supported entity types,
377
- check https://aka.ms/tanerpii
378
-
379
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
380
-
381
- :param documents: The set of documents to process as part of this batch.
382
- If you wish to specify the ID and language on a per-item basis you must
383
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
384
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
385
- `{"id": "1", "language": "en", "text": "hello world"}`.
386
- :type documents:
387
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
388
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
389
- entire batch. For example, use "en" for English; "es" for Spanish etc.
390
- If not set, uses "en" for English as default. Per-document language will
391
- take precedence over whole batch language. See https://aka.ms/talangs for
392
- supported languages in Language API.
393
- :keyword str model_version: This value indicates which model will
394
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
395
- is not specified, the API will default to the latest, non-preview version.
396
- See here for more info: https://aka.ms/text-analytics-model-versioning
397
- :keyword bool show_stats: If set to true, response will contain document
398
- level statistics in the `statistics` field of the document-level response.
399
- :keyword domain_filter: Filters the response entities to ones only included in the specified domain.
400
- I.e., if set to 'phi', will only return entities in the Protected Healthcare Information domain.
401
- See https://aka.ms/tanerpii for more information.
402
- :paramtype domain_filter: str or ~azure.ai.textanalytics.PiiEntityDomain
403
- :keyword categories_filter: Instead of filtering over all PII entity categories, you can pass in a list of
404
- the specific PII entity categories you want to filter out. For example, if you only want to filter out
405
- U.S. social security numbers in a document, you can pass in
406
- `[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg.
407
- :paramtype categories_filter: list[str or ~azure.ai.textanalytics.PiiEntityCategory]
408
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
409
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
410
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
411
- see https://aka.ms/text-analytics-offsets
412
- :keyword bool disable_service_logs: Defaults to true, meaning that the Language service will not log your
413
- input text on the service side for troubleshooting. If set to False, the Language service logs your
414
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
415
- the service's natural language processing functions. Please see
416
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
417
- additional details, and Microsoft Responsible AI principles at
418
- https://www.microsoft.com/ai/responsible-ai.
419
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizePiiEntitiesResult`
420
- and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
421
- were passed in.
422
- :rtype: list[~azure.ai.textanalytics.RecognizePiiEntitiesResult or ~azure.ai.textanalytics.DocumentError]
423
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
424
-
425
- .. versionadded:: v3.1
426
- The *recognize_pii_entities* client method.
427
-
428
- .. admonition:: Example:
429
-
430
- .. literalinclude:: ../samples/sample_recognize_pii_entities.py
431
- :start-after: [START recognize_pii_entities]
432
- :end-before: [END recognize_pii_entities]
433
- :language: python
434
- :dedent: 4
435
- :caption: Recognize personally identifiable information entities in a batch of documents.
436
- """
437
- language_arg = kwargs.pop("language", None)
438
- language = language_arg if language_arg is not None else self._default_language
439
- docs = _validate_input(documents, "language", language)
440
- model_version = kwargs.pop("model_version", None)
441
- show_stats = kwargs.pop("show_stats", None)
442
- domain_filter = kwargs.pop("domain_filter", None)
443
- categories_filter = kwargs.pop("categories_filter", None)
444
- string_index_type = kwargs.pop("string_index_type", self._string_code_unit)
445
- disable_service_logs = kwargs.pop("disable_service_logs", None)
446
-
447
- try:
448
- if is_language_api(self._api_version):
449
- models = self._client.models(api_version=self._api_version)
450
- return cast(
451
- List[Union[RecognizePiiEntitiesResult, DocumentError]],
452
- await self._client.analyze_text(
453
- body=models.AnalyzeTextPiiEntitiesRecognitionInput(
454
- analysis_input={"documents": docs},
455
- parameters=models.PiiTaskParameters(
456
- logging_opt_out=disable_service_logs,
457
- model_version=model_version,
458
- domain=domain_filter,
459
- pii_categories=categories_filter,
460
- string_index_type=string_index_type_compatibility(string_index_type)
461
- )
462
- ),
463
- show_stats=show_stats,
464
- cls=kwargs.pop("cls", pii_entities_result),
465
- **kwargs
466
- )
467
- )
468
-
469
- # api_versions 3.0, 3.1
470
- return cast(
471
- List[Union[RecognizePiiEntitiesResult, DocumentError]],
472
- await self._client.entities_recognition_pii(
473
- documents=docs,
474
- model_version=model_version,
475
- show_stats=show_stats,
476
- domain=domain_filter,
477
- pii_categories=categories_filter,
478
- logging_opt_out=disable_service_logs,
479
- string_index_type=string_index_type,
480
- cls=kwargs.pop("cls", pii_entities_result),
481
- **kwargs
482
- )
483
- )
484
- except HttpResponseError as error:
485
- return process_http_response_error(error)
486
-
487
- @distributed_trace_async
488
- @validate_multiapi_args(
489
- version_method_added="v3.0",
490
- args_mapping={"v3.1": ["string_index_type", "disable_service_logs"]}
491
- )
492
- async def recognize_linked_entities(
493
- self,
494
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
495
- **kwargs: Any,
496
- ) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]:
497
- """Recognize linked entities from a well-known knowledge base for a batch of documents.
498
-
499
- Identifies and disambiguates the identity of each entity found in text (for example,
500
- determining whether an occurrence of the word Mars refers to the planet, or to the
501
- Roman god of war). Recognized entities are associated with URLs to a well-known
502
- knowledge base, like Wikipedia.
503
-
504
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
505
-
506
- :param documents: The set of documents to process as part of this batch.
507
- If you wish to specify the ID and language on a per-item basis you must
508
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
509
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
510
- `{"id": "1", "language": "en", "text": "hello world"}`.
511
- :type documents:
512
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
513
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
514
- entire batch. For example, use "en" for English; "es" for Spanish etc.
515
- If not set, uses "en" for English as default. Per-document language will
516
- take precedence over whole batch language. See https://aka.ms/talangs for
517
- supported languages in Language API.
518
- :keyword str model_version: This value indicates which model will
519
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
520
- is not specified, the API will default to the latest, non-preview version.
521
- See here for more info: https://aka.ms/text-analytics-model-versioning
522
- :keyword bool show_stats: If set to true, response will contain document
523
- level statistics in the `statistics` field of the document-level response.
524
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
525
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
526
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
527
- see https://aka.ms/text-analytics-offsets
528
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
529
- logged on the service side for troubleshooting. By default, the Language service logs your
530
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
531
- the service's natural language processing functions. Setting this parameter to true,
532
- disables input logging and may limit our ability to remediate issues that occur. Please see
533
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
534
- additional details, and Microsoft Responsible AI principles at
535
- https://www.microsoft.com/ai/responsible-ai.
536
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeLinkedEntitiesResult`
537
- and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
538
- were passed in.
539
- :rtype: list[~azure.ai.textanalytics.RecognizeLinkedEntitiesResult or ~azure.ai.textanalytics.DocumentError]
540
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
541
-
542
- .. versionadded:: v3.1
543
- The *disable_service_logs* and *string_index_type* keyword arguments.
544
-
545
- .. admonition:: Example:
546
-
547
- .. literalinclude:: ../samples/async_samples/sample_recognize_linked_entities_async.py
548
- :start-after: [START recognize_linked_entities_async]
549
- :end-before: [END recognize_linked_entities_async]
550
- :language: python
551
- :dedent: 4
552
- :caption: Recognize linked entities in a batch of documents.
553
- """
554
- language_arg = kwargs.pop("language", None)
555
- language = language_arg if language_arg is not None else self._default_language
556
- docs = _validate_input(documents, "language", language)
557
- model_version = kwargs.pop("model_version", None)
558
- show_stats = kwargs.pop("show_stats", None)
559
- disable_service_logs = kwargs.pop("disable_service_logs", None)
560
- string_index_type = kwargs.pop("string_index_type", self._string_code_unit)
561
-
562
- try:
563
- if is_language_api(self._api_version):
564
- models = self._client.models(api_version=self._api_version)
565
- return cast(
566
- List[Union[RecognizeLinkedEntitiesResult, DocumentError]],
567
- await self._client.analyze_text(
568
- body=models.AnalyzeTextEntityLinkingInput(
569
- analysis_input={"documents": docs},
570
- parameters=models.EntityLinkingTaskParameters(
571
- logging_opt_out=disable_service_logs,
572
- model_version=model_version,
573
- string_index_type=string_index_type_compatibility(string_index_type)
574
- )
575
- ),
576
- show_stats=show_stats,
577
- cls=kwargs.pop("cls", linked_entities_result),
578
- **kwargs
579
- )
580
- )
581
-
582
- # api_versions 3.0, 3.1
583
- return cast(
584
- List[Union[RecognizeLinkedEntitiesResult, DocumentError]],
585
- await self._client.entities_linking(
586
- documents=docs,
587
- logging_opt_out=disable_service_logs,
588
- model_version=model_version,
589
- string_index_type=string_index_type,
590
- show_stats=show_stats,
591
- cls=kwargs.pop("cls", linked_entities_result),
592
- **kwargs
593
- )
594
- )
595
- except HttpResponseError as error:
596
- return process_http_response_error(error)
597
-
598
- @distributed_trace_async
599
- @validate_multiapi_args(
600
- version_method_added="v3.0",
601
- args_mapping={"v3.1": ["disable_service_logs"]}
602
- )
603
- async def extract_key_phrases(
604
- self,
605
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
606
- **kwargs: Any,
607
- ) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]:
608
- """Extract key phrases from a batch of documents.
609
-
610
- Returns a list of strings denoting the key phrases in the input
611
- text. For example, for the input text "The food was delicious and there
612
- were wonderful staff", the API returns the main talking points: "food"
613
- and "wonderful staff"
614
-
615
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
616
-
617
- :param documents: The set of documents to process as part of this batch.
618
- If you wish to specify the ID and language on a per-item basis you must
619
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
620
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
621
- `{"id": "1", "language": "en", "text": "hello world"}`.
622
- :type documents:
623
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
624
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
625
- entire batch. For example, use "en" for English; "es" for Spanish etc.
626
- If not set, uses "en" for English as default. Per-document language will
627
- take precedence over whole batch language. See https://aka.ms/talangs for
628
- supported languages in Language API.
629
- :keyword str model_version: This value indicates which model will
630
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
631
- is not specified, the API will default to the latest, non-preview version.
632
- See here for more info: https://aka.ms/text-analytics-model-versioning
633
- :keyword bool show_stats: If set to true, response will contain document
634
- level statistics in the `statistics` field of the document-level response.
635
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
636
- logged on the service side for troubleshooting. By default, the Language service logs your
637
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
638
- the service's natural language processing functions. Setting this parameter to true,
639
- disables input logging and may limit our ability to remediate issues that occur. Please see
640
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
641
- additional details, and Microsoft Responsible AI principles at
642
- https://www.microsoft.com/ai/responsible-ai.
643
- :return: The combined list of :class:`~azure.ai.textanalytics.ExtractKeyPhrasesResult` and
644
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
645
- passed in.
646
- :rtype: list[~azure.ai.textanalytics.ExtractKeyPhrasesResult or ~azure.ai.textanalytics.DocumentError]
647
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
648
-
649
- .. versionadded:: v3.1
650
- The *disable_service_logs* keyword argument.
651
-
652
- .. admonition:: Example:
653
-
654
- .. literalinclude:: ../samples/async_samples/sample_extract_key_phrases_async.py
655
- :start-after: [START extract_key_phrases_async]
656
- :end-before: [END extract_key_phrases_async]
657
- :language: python
658
- :dedent: 4
659
- :caption: Extract the key phrases in a batch of documents.
660
- """
661
- language_arg = kwargs.pop("language", None)
662
- language = language_arg if language_arg is not None else self._default_language
663
- docs = _validate_input(documents, "language", language)
664
- model_version = kwargs.pop("model_version", None)
665
- show_stats = kwargs.pop("show_stats", None)
666
- disable_service_logs = kwargs.pop("disable_service_logs", None)
667
-
668
- try:
669
- if is_language_api(self._api_version):
670
- models = self._client.models(api_version=self._api_version)
671
- return cast(
672
- List[Union[ExtractKeyPhrasesResult, DocumentError]],
673
- await self._client.analyze_text(
674
- body=models.AnalyzeTextKeyPhraseExtractionInput(
675
- analysis_input={"documents": docs},
676
- parameters=models.KeyPhraseTaskParameters(
677
- logging_opt_out=disable_service_logs,
678
- model_version=model_version,
679
- )
680
- ),
681
- show_stats=show_stats,
682
- cls=kwargs.pop("cls", key_phrases_result),
683
- **kwargs
684
- )
685
- )
686
-
687
- # api_versions 3.0, 3.1
688
- return cast(
689
- List[Union[ExtractKeyPhrasesResult, DocumentError]],
690
- await self._client.key_phrases(
691
- documents=docs,
692
- model_version=model_version,
693
- show_stats=show_stats,
694
- logging_opt_out=disable_service_logs,
695
- cls=kwargs.pop("cls", key_phrases_result),
696
- **kwargs
697
- )
698
- )
699
- except HttpResponseError as error:
700
- return process_http_response_error(error)
701
-
702
- @distributed_trace_async
703
- @validate_multiapi_args(
704
- version_method_added="v3.0",
705
- args_mapping={"v3.1": ["show_opinion_mining", "disable_service_logs", "string_index_type"]}
706
- )
707
- async def analyze_sentiment(
708
- self,
709
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
710
- **kwargs: Any,
711
- ) -> List[Union[AnalyzeSentimentResult, DocumentError]]:
712
- """Analyze sentiment for a batch of documents. Turn on opinion mining with `show_opinion_mining`.
713
-
714
- Returns a sentiment prediction, as well as sentiment scores for
715
- each sentiment class (Positive, Negative, and Neutral) for the document
716
- and each sentence within it.
717
-
718
- See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.
719
-
720
- :param documents: The set of documents to process as part of this batch.
721
- If you wish to specify the ID and language on a per-item basis you must
722
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
723
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
724
- `{"id": "1", "language": "en", "text": "hello world"}`.
725
- :type documents:
726
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
727
- :keyword bool show_opinion_mining: Whether to mine the opinions of a sentence and conduct more
728
- granular analysis around the aspects of a product or service (also known as
729
- aspect-based sentiment analysis). If set to true, the returned
730
- :class:`~azure.ai.textanalytics.SentenceSentiment` objects
731
- will have property `mined_opinions` containing the result of this analysis. Only available for
732
- API version v3.1 and up.
733
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
734
- entire batch. For example, use "en" for English; "es" for Spanish etc.
735
- If not set, uses "en" for English as default. Per-document language will
736
- take precedence over whole batch language. See https://aka.ms/talangs for
737
- supported languages in Language API.
738
- :keyword str model_version: This value indicates which model will
739
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
740
- is not specified, the API will default to the latest, non-preview version.
741
- See here for more info: https://aka.ms/text-analytics-model-versioning
742
- :keyword bool show_stats: If set to true, response will contain document
743
- level statistics in the `statistics` field of the document-level response.
744
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
745
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
746
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
747
- see https://aka.ms/text-analytics-offsets
748
- :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
749
- logged on the service side for troubleshooting. By default, the Language service logs your
750
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
751
- the service's natural language processing functions. Setting this parameter to true,
752
- disables input logging and may limit our ability to remediate issues that occur. Please see
753
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
754
- additional details, and Microsoft Responsible AI principles at
755
- https://www.microsoft.com/ai/responsible-ai.
756
- :return: The combined list of :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` and
757
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
758
- passed in.
759
- :rtype: list[~azure.ai.textanalytics.AnalyzeSentimentResult or ~azure.ai.textanalytics.DocumentError]
760
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
761
-
762
- .. versionadded:: v3.1
763
- The *show_opinion_mining*, *disable_service_logs*, and *string_index_type* keyword arguments.
764
-
765
- .. admonition:: Example:
766
-
767
- .. literalinclude:: ../samples/async_samples/sample_analyze_sentiment_async.py
768
- :start-after: [START analyze_sentiment_async]
769
- :end-before: [END analyze_sentiment_async]
770
- :language: python
771
- :dedent: 4
772
- :caption: Analyze sentiment in a batch of documents.
773
- """
774
- language_arg = kwargs.pop("language", None)
775
- language = language_arg if language_arg is not None else self._default_language
776
- docs = _validate_input(documents, "language", language)
777
- model_version = kwargs.pop("model_version", None)
778
- show_stats = kwargs.pop("show_stats", None)
779
- show_opinion_mining = kwargs.pop("show_opinion_mining", None)
780
- disable_service_logs = kwargs.pop("disable_service_logs", None)
781
- string_index_type = kwargs.pop("string_index_type", self._string_code_unit)
782
-
783
- try:
784
- if is_language_api(self._api_version):
785
- models = self._client.models(api_version=self._api_version)
786
- return cast(
787
- List[Union[AnalyzeSentimentResult, DocumentError]],
788
- await self._client.analyze_text(
789
- body=models.AnalyzeTextSentimentAnalysisInput(
790
- analysis_input={"documents": docs},
791
- parameters=models.SentimentAnalysisTaskParameters(
792
- logging_opt_out=disable_service_logs,
793
- model_version=model_version,
794
- string_index_type=string_index_type_compatibility(string_index_type),
795
- opinion_mining=show_opinion_mining,
796
- )
797
- ),
798
- show_stats=show_stats,
799
- cls=kwargs.pop("cls", sentiment_result),
800
- **kwargs
801
- )
802
- )
803
-
804
- # api_versions 3.0, 3.1
805
- return cast(
806
- List[Union[AnalyzeSentimentResult, DocumentError]],
807
- await self._client.sentiment(
808
- documents=docs,
809
- logging_opt_out=disable_service_logs,
810
- model_version=model_version,
811
- string_index_type=string_index_type,
812
- opinion_mining=show_opinion_mining,
813
- show_stats=show_stats,
814
- cls=kwargs.pop("cls", sentiment_result),
815
- **kwargs
816
- )
817
- )
818
- except HttpResponseError as error:
819
- return process_http_response_error(error)
820
-
821
- # pylint: disable=unused-argument
822
- def _healthcare_result_callback(
823
- self, raw_response, deserialized, doc_id_order, task_id_order=None, show_stats=False, bespoke=False
824
- ):
825
- if deserialized is None:
826
- models = self._client.models(api_version=self._api_version)
827
- response_cls = \
828
- models.AnalyzeTextJobState if is_language_api(self._api_version) else models.HealthcareJobState
829
- deserialized = response_cls.deserialize(raw_response)
830
- return healthcare_paged_result(
831
- doc_id_order,
832
- self._client.analyze_text_job_status if is_language_api(self._api_version) else self._client.health_status,
833
- raw_response,
834
- deserialized,
835
- show_stats=show_stats,
836
- )
837
-
838
- @distributed_trace_async
839
- @validate_multiapi_args(
840
- version_method_added="v3.1",
841
- args_mapping={
842
- "2022-10-01-preview": ["fhir_version", "document_type", "autodetect_default_language"],
843
- "2022-05-01": ["display_name"]
844
- }
845
- )
846
- async def begin_analyze_healthcare_entities(
847
- self,
848
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
849
- **kwargs: Any,
850
- ) -> AsyncAnalyzeHealthcareEntitiesLROPoller[
851
- AsyncItemPaged[Union[AnalyzeHealthcareEntitiesResult, DocumentError]]
852
- ]:
853
- """Analyze healthcare entities and identify relationships between these entities in a batch of documents.
854
-
855
- Entities are associated with references that can be found in existing knowledge bases,
856
- such as UMLS, CHV, MSH, etc.
857
-
858
- We also extract the relations found between entities, for example in "The subject took 100 mg of ibuprofen",
859
- we would extract the relationship between the "100 mg" dosage and the "ibuprofen" medication.
860
-
861
- :param documents: The set of documents to process as part of this batch.
862
- If you wish to specify the ID and language on a per-item basis you must
863
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
864
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
865
- `{"id": "1", "language": "en", "text": "hello world"}`.
866
- :type documents:
867
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
868
- :keyword str model_version: This value indicates which model will
869
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
870
- is not specified, the API will default to the latest, non-preview version.
871
- See here for more info: https://aka.ms/text-analytics-model-versioning
872
- :keyword bool show_stats: If set to true, response will contain document level statistics.
873
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
874
- entire batch. For example, use "en" for English; "es" for Spanish etc.
875
- For automatic language detection, use "auto" (Only supported by API version
876
- 2022-10-01-preview and newer). If not set, uses "en" for English as default.
877
- Per-document language will take precedence over whole batch language.
878
- See https://aka.ms/talangs for supported languages in Language API.
879
- :keyword str autodetect_default_language: Default/fallback language to use for documents requesting
880
- automatic language detection.
881
- :keyword str display_name: An optional display name to set for the requested analysis.
882
- :keyword str string_index_type: Specifies the method used to interpret string offsets.
883
- Can be one of 'UnicodeCodePoint' (default), 'Utf16CodeUnit', or 'TextElement_v8'.
884
- For additional information see https://aka.ms/text-analytics-offsets
885
- :keyword int polling_interval: Waiting time between two polls for LRO operations
886
- if no Retry-After header is present. Defaults to 5 seconds.
887
- :keyword str continuation_token:
888
- Call `continuation_token()` on the poller object to save the long-running operation (LRO)
889
- state into an opaque token. Pass the value as the `continuation_token` keyword argument
890
- to restart the LRO from a saved state.
891
- :keyword bool disable_service_logs: Defaults to true, meaning that the Language service will not log your
892
- input text on the service side for troubleshooting. If set to False, the Language service logs your
893
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
894
- the Text Analytics natural language processing functions. Please see
895
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
896
- additional details, and Microsoft Responsible AI principles at
897
- https://www.microsoft.com/ai/responsible-ai.
898
- :keyword str fhir_version: The FHIR Spec version that the result will use to format the fhir_bundle
899
- on the result object. For additional information see https://www.hl7.org/fhir/overview.html.
900
- The only acceptable values to pass in are None and "4.0.1". The default value is None.
901
- :keyword document_type: Document type that can be provided as input for Fhir Documents. Expect to
902
- have fhir_version provided when used. Behavior of using None enum is the same as not using the
903
- document_type parameter. Known values are: "None", "ClinicalTrial", "DischargeSummary",
904
- "ProgressNote", "HistoryAndPhysical", "Consult", "Imaging", "Pathology", and "ProcedureNote".
905
- :paramtype document_type: str or ~azure.ai.textanalytics.HealthcareDocumentType
906
- :return: An instance of an AsyncAnalyzeHealthcareEntitiesLROPoller. Call `result()` on the poller
907
- object to return a heterogeneous pageable of
908
- :class:`~azure.ai.textanalytics.AnalyzeHealthcareEntitiesResult` and
909
- :class:`~azure.ai.textanalytics.DocumentError`.
910
- :rtype:
911
- ~azure.ai.textanalytics.aio.AsyncAnalyzeHealthcareEntitiesLROPoller[~azure.core.async_paging.AsyncItemPaged[
912
- ~azure.ai.textanalytics.AnalyzeHealthcareEntitiesResult or ~azure.ai.textanalytics.DocumentError]]
913
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
914
-
915
- .. versionadded:: v3.1
916
- The *begin_analyze_healthcare_entities* client method.
917
- .. versionadded:: 2022-05-01
918
- The *display_name* keyword argument.
919
- .. versionadded:: 2022-10-01-preview
920
- The *fhir_version*, *document_type*, and *autodetect_default_language* keyword arguments.
921
-
922
- .. admonition:: Example:
923
-
924
- .. literalinclude:: ../samples/async_samples/sample_analyze_healthcare_entities_async.py
925
- :start-after: [START analyze_healthcare_entities_async]
926
- :end-before: [END analyze_healthcare_entities_async]
927
- :language: python
928
- :dedent: 4
929
- :caption: Analyze healthcare entities in a batch of documents.
930
- """
931
- language_arg = kwargs.pop("language", None)
932
- language = language_arg if language_arg is not None else self._default_language
933
- model_version = kwargs.pop("model_version", None)
934
- show_stats = kwargs.pop("show_stats", None)
935
- polling_interval = kwargs.pop("polling_interval", 5)
936
- continuation_token = kwargs.pop("continuation_token", None)
937
- string_index_type = kwargs.pop("string_index_type", self._string_code_unit)
938
- disable_service_logs = kwargs.pop("disable_service_logs", None)
939
- display_name = kwargs.pop("display_name", None)
940
- fhir_version = kwargs.pop("fhir_version", None)
941
- document_type = kwargs.pop("document_type", None)
942
- autodetect_default_language = kwargs.pop("autodetect_default_language", None)
943
-
944
- if continuation_token:
945
- return cast(
946
- AsyncAnalyzeHealthcareEntitiesLROPoller[
947
- AsyncItemPaged[Union[AnalyzeHealthcareEntitiesResult, DocumentError]]
948
- ],
949
- _get_result_from_continuation_token(
950
- self._client._client, # pylint: disable=protected-access
951
- continuation_token,
952
- AsyncAnalyzeHealthcareEntitiesLROPoller,
953
- AsyncAnalyzeHealthcareEntitiesLROPollingMethod(
954
- text_analytics_client=self._client,
955
- timeout=polling_interval,
956
- **kwargs
957
- ),
958
- self._healthcare_result_callback
959
- )
960
- )
961
-
962
- docs = _validate_input(documents, "language", language)
963
- doc_id_order = [doc.get("id") for doc in docs]
964
- my_cls = kwargs.pop(
965
- "cls",
966
- lambda pipeline_response, deserialized, _: self._healthcare_result_callback(
967
- pipeline_response, deserialized, doc_id_order, show_stats=show_stats
968
- ),
969
- )
970
- models = self._client.models(api_version=self._api_version)
971
-
972
- try:
973
- if is_language_api(self._api_version):
974
- docs = models.MultiLanguageAnalysisInput(
975
- documents=_validate_input(documents, "language", language)
976
- )
977
- return cast(
978
- AsyncAnalyzeHealthcareEntitiesLROPoller[
979
- AsyncItemPaged[Union[AnalyzeHealthcareEntitiesResult, DocumentError]]
980
- ],
981
- await self._client.begin_analyze_text_submit_job( # type: ignore
982
- body=models.AnalyzeTextJobsInput(
983
- analysis_input=docs,
984
- display_name=display_name,
985
- default_language=autodetect_default_language,
986
- tasks=[
987
- models.HealthcareLROTask(
988
- task_name="0",
989
- parameters=models.HealthcareTaskParameters(
990
- model_version=model_version,
991
- logging_opt_out=disable_service_logs,
992
- string_index_type=string_index_type_compatibility(string_index_type),
993
- fhir_version=fhir_version,
994
- document_type=document_type,
995
- )
996
- )
997
- ]
998
- ),
999
- cls=my_cls,
1000
- polling=AsyncAnalyzeHealthcareEntitiesLROPollingMethod(
1001
- text_analytics_client=self._client,
1002
- timeout=polling_interval,
1003
- show_stats=show_stats,
1004
- doc_id_order=doc_id_order,
1005
- lro_algorithms=[
1006
- TextAnalyticsOperationResourcePolling(
1007
- show_stats=show_stats,
1008
- )
1009
- ],
1010
- **kwargs
1011
- ),
1012
- continuation_token=continuation_token,
1013
- poller_cls=AsyncAnalyzeHealthcareEntitiesLROPoller,
1014
- **kwargs
1015
- )
1016
- )
1017
-
1018
- # v3.1
1019
- return cast(
1020
- AsyncAnalyzeHealthcareEntitiesLROPoller[
1021
- AsyncItemPaged[Union[AnalyzeHealthcareEntitiesResult, DocumentError]]
1022
- ],
1023
- await self._client.begin_health(
1024
- docs,
1025
- model_version=model_version,
1026
- string_index_type=string_index_type,
1027
- logging_opt_out=disable_service_logs,
1028
- cls=my_cls,
1029
- polling=AsyncAnalyzeHealthcareEntitiesLROPollingMethod(
1030
- text_analytics_client=self._client,
1031
- doc_id_order=doc_id_order,
1032
- show_stats=show_stats,
1033
- timeout=polling_interval,
1034
- lro_algorithms=[
1035
- TextAnalyticsOperationResourcePolling(
1036
- show_stats=show_stats,
1037
- )
1038
- ],
1039
- **kwargs,
1040
- ),
1041
- continuation_token=continuation_token,
1042
- **kwargs,
1043
- )
1044
- )
1045
- except HttpResponseError as error:
1046
- return process_http_response_error(error)
1047
-
1048
- def _analyze_result_callback(
1049
- self, raw_response, deserialized, doc_id_order, task_id_order=None, show_stats=False, bespoke=False
1050
- ):
1051
-
1052
- if deserialized is None:
1053
- models = self._client.models(api_version=self._api_version)
1054
- response_cls = models.AnalyzeTextJobState if is_language_api(self._api_version) else models.AnalyzeJobState
1055
- deserialized = response_cls.deserialize(raw_response)
1056
- return analyze_paged_result(
1057
- doc_id_order,
1058
- task_id_order,
1059
- self._client.analyze_text_job_status if is_language_api(self._api_version) else self._client.analyze_status,
1060
- raw_response,
1061
- deserialized,
1062
- show_stats=show_stats,
1063
- bespoke=bespoke
1064
- )
1065
-
1066
- @distributed_trace_async
1067
    @validate_multiapi_args(
        version_method_added="v3.1",
        custom_wrapper=check_for_unsupported_actions_types,
        args_mapping={
            "2022-10-01-preview": ["autodetect_default_language"],
        }
    )
    async def begin_analyze_actions(
        self,
        documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
        actions: List[
            Union[
                RecognizeEntitiesAction,
                RecognizeLinkedEntitiesAction,
                RecognizePiiEntitiesAction,
                ExtractKeyPhrasesAction,
                AnalyzeSentimentAction,
                RecognizeCustomEntitiesAction,
                SingleLabelClassifyAction,
                MultiLabelClassifyAction,
                AnalyzeHealthcareEntitiesAction,
                ExtractSummaryAction,
                AbstractSummaryAction,
            ]
        ],
        **kwargs: Any,
    ) -> AsyncTextAnalysisLROPoller[
        AsyncItemPaged[
            List[
                Union[
                    RecognizeEntitiesResult,
                    RecognizeLinkedEntitiesResult,
                    RecognizePiiEntitiesResult,
                    ExtractKeyPhrasesResult,
                    AnalyzeSentimentResult,
                    RecognizeCustomEntitiesResult,
                    ClassifyDocumentResult,
                    AnalyzeHealthcareEntitiesResult,
                    ExtractSummaryResult,
                    AbstractSummaryResult,
                    DocumentError,
                ]
            ]
        ]
    ]:
        """Start a long-running operation to perform a variety of text analysis actions over a batch of documents.

        We recommend you use this function if you're looking to analyze larger documents, and / or
        combine multiple text analysis actions into one call. Otherwise, we recommend you use
        the action specific endpoints, for example :func:`analyze_sentiment`.

        .. note:: The abstractive summarization feature is part of a gated preview. Request access here:
            https://aka.ms/applyforgatedsummarizationfeatures

        .. note:: See the service documentation for regional support of custom action features:
            https://aka.ms/azsdk/textanalytics/customfunctionalities

        :param documents: The set of documents to process as part of this batch.
            If you wish to specify the ID and language on a per-item basis you must
            use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
            dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
            `{"id": "1", "language": "en", "text": "hello world"}`.
        :type documents:
            list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
        :param actions: A heterogeneous list of actions to perform on the input documents.
            Each action object encapsulates the parameters used for the particular action type.
            The action results will be in the same order of the input actions.
        :type actions:
            list[RecognizeEntitiesAction or RecognizePiiEntitiesAction or ExtractKeyPhrasesAction or
            RecognizeLinkedEntitiesAction or AnalyzeSentimentAction or
            RecognizeCustomEntitiesAction or SingleLabelClassifyAction or
            MultiLabelClassifyAction or AnalyzeHealthcareEntitiesAction or
            AbstractSummaryAction or ExtractSummaryAction]
        :keyword str display_name: An optional display name to set for the requested analysis.
        :keyword str language: The 2 letter ISO 639-1 representation of language for the
            entire batch. For example, use "en" for English; "es" for Spanish etc.
            For automatic language detection, use "auto" (Only supported by API version
            2022-10-01-preview and newer). If not set, uses "en" for English as default.
            Per-document language will take precedence over whole batch language.
            See https://aka.ms/talangs for supported languages in Language API.
        :keyword str autodetect_default_language: Default/fallback language to use for documents requesting
            automatic language detection.
        :keyword bool show_stats: If set to true, response will contain document level statistics.
        :keyword int polling_interval: Waiting time between two polls for LRO operations
            if no Retry-After header is present. Defaults to 5 seconds.
        :keyword str continuation_token:
            Call `continuation_token()` on the poller object to save the long-running operation (LRO)
            state into an opaque token. Pass the value as the `continuation_token` keyword argument
            to restart the LRO from a saved state.
        :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on the poller
            object to return a pageable heterogeneous list of lists. This list of lists is first ordered
            by the documents you input, then ordered by the actions you input. For example,
            if you have documents input ["Hello", "world"], and actions
            :class:`~azure.ai.textanalytics.RecognizeEntitiesAction` and
            :class:`~azure.ai.textanalytics.AnalyzeSentimentAction`, when iterating over the list of lists,
            you will first iterate over the action results for the "Hello" document, getting the
            :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` of "Hello",
            then the :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` of "Hello".
            Then, you will get the :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` and
            :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` of "world".
        :rtype:
            ~azure.ai.textanalytics.aio.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
            list[RecognizeEntitiesResult or RecognizeLinkedEntitiesResult or RecognizePiiEntitiesResult or
            ExtractKeyPhrasesResult or AnalyzeSentimentResult or RecognizeCustomEntitiesResult
            or ClassifyDocumentResult or AnalyzeHealthcareEntitiesResult or ExtractSummaryResult
            or AbstractSummaryResult or DocumentError]]]
        :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:

        .. versionadded:: v3.1
            The *begin_analyze_actions* client method.
        .. versionadded:: 2022-05-01
            The *RecognizeCustomEntitiesAction*, *SingleLabelClassifyAction*,
            *MultiLabelClassifyAction*, and *AnalyzeHealthcareEntitiesAction* input options and the
            corresponding *RecognizeCustomEntitiesResult*, *ClassifyDocumentResult*,
            and *AnalyzeHealthcareEntitiesResult* result objects
        .. versionadded:: 2022-10-01-preview
            The *ExtractSummaryAction* and *AbstractSummaryAction* input options and the corresponding
            *ExtractSummaryResult* and *AbstractSummaryResult* result objects.
            The *autodetect_default_language* keyword argument.

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_analyze_actions_async.py
                :start-after: [START analyze_async]
                :end-before: [END analyze_async]
                :language: python
                :dedent: 4
                :caption: Start a long-running operation to perform a variety of text analysis actions over
                    a batch of documents.
        """

        # Pop all client-level keyword options before kwargs is forwarded to the
        # generated operation layer.
        display_name = kwargs.pop("display_name", None)
        language_arg = kwargs.pop("language", None)
        language = language_arg if language_arg is not None else self._default_language

        show_stats = kwargs.pop("show_stats", None)
        polling_interval = kwargs.pop("polling_interval", 5)
        continuation_token = kwargs.pop("continuation_token", None)
        # "bespoke" is an internal flag set by the single-action convenience
        # methods (e.g. begin_recognize_custom_entities) that changes how the
        # result callback flattens the action results.
        bespoke = kwargs.pop("bespoke", False)
        autodetect_default_language = kwargs.pop("autodetect_default_language", None)

        if continuation_token:
            # Rehydrate a poller from a previously saved LRO state instead of
            # submitting a new job.
            return cast(
                AsyncAnalyzeActionsResponse,
                _get_result_from_continuation_token(
                    self._client._client,  # pylint: disable=protected-access
                    continuation_token,
                    AsyncAnalyzeActionsLROPoller,
                    AsyncAnalyzeActionsLROPollingMethod(
                        text_analytics_client=self._client,
                        timeout=polling_interval,
                        **kwargs
                    ),
                    self._analyze_result_callback,
                    bespoke
                )
            )

        models = self._client.models(api_version=self._api_version)

        # Newer (Language API) service versions use a different input envelope
        # than the legacy v3.x Text Analytics API.
        input_model_cls = \
            models.MultiLanguageAnalysisInput if is_language_api(self._api_version) else models.MultiLanguageBatchInput
        docs = input_model_cls(
            documents=_validate_input(documents, "language", language)
        )
        # Preserve input document order so results can be re-ordered to match.
        doc_id_order = [doc.get("id") for doc in docs.documents]
        try:
            generated_tasks = [
                action._to_generated(self._api_version, str(idx))  # pylint: disable=protected-access
                for idx, action in enumerate(actions)
            ]
        except AttributeError as e:
            # Objects without _to_generated are not valid action types.
            raise TypeError("Unsupported action type in list.") from e
        task_order = [(_determine_action_type(a), a.task_name) for a in generated_tasks]

        response_cls = kwargs.pop(
            "cls",
            lambda pipeline_response, deserialized, _:
                self._analyze_result_callback(
                    pipeline_response,
                    deserialized,
                    doc_id_order,
                    task_id_order=task_order,
                    show_stats=show_stats,
                    bespoke=bespoke
                ),
        )

        try:
            if is_language_api(self._api_version):
                return cast(
                    AsyncAnalyzeActionsResponse,
                    await self._client.begin_analyze_text_submit_job(
                        body=models.AnalyzeTextJobsInput(
                            analysis_input=docs,
                            display_name=display_name,
                            default_language=autodetect_default_language,
                            tasks=generated_tasks
                        ),
                        cls=response_cls,
                        polling=AsyncAnalyzeActionsLROPollingMethod(
                            text_analytics_client=self._client,
                            timeout=polling_interval,
                            show_stats=show_stats,
                            doc_id_order=doc_id_order,
                            task_id_order=task_order,
                            lro_algorithms=[
                                TextAnalyticsOperationResourcePolling(
                                    show_stats=show_stats,
                                )
                            ],
                            **kwargs
                        ),
                        continuation_token=continuation_token,
                        **kwargs
                    )
                )

            # v3.1: the legacy API groups tasks by kind instead of accepting a
            # single heterogeneous task list.
            analyze_tasks = models.JobManifestTasks(
                entity_recognition_tasks=[
                    a for a in generated_tasks
                    if _determine_action_type(a) == _AnalyzeActionsType.RECOGNIZE_ENTITIES
                ],
                entity_recognition_pii_tasks=[
                    a for a in generated_tasks
                    if _determine_action_type(a) == _AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
                ],
                key_phrase_extraction_tasks=[
                    a for a in generated_tasks
                    if _determine_action_type(a) == _AnalyzeActionsType.EXTRACT_KEY_PHRASES
                ],
                entity_linking_tasks=[
                    a for a in generated_tasks
                    if _determine_action_type(a) == _AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
                ],
                sentiment_analysis_tasks=[
                    a for a in generated_tasks
                    if _determine_action_type(a) == _AnalyzeActionsType.ANALYZE_SENTIMENT
                ],
            )
            analyze_body = models.AnalyzeBatchInput(
                display_name=display_name, tasks=analyze_tasks, analysis_input=docs
            )
            return cast(
                AsyncAnalyzeActionsResponse,
                await self._client.begin_analyze(
                    body=analyze_body,
                    cls=response_cls,
                    polling=AsyncAnalyzeActionsLROPollingMethod(
                        text_analytics_client=self._client,
                        timeout=polling_interval,
                        show_stats=show_stats,
                        doc_id_order=doc_id_order,
                        task_id_order=task_order,
                        lro_algorithms=[
                            TextAnalyticsOperationResourcePolling(
                                show_stats=show_stats,
                            )
                        ],
                        **kwargs,
                    ),
                    continuation_token=continuation_token,
                    **kwargs,
                )
            )
        except HttpResponseError as error:
            # Re-raise as the library's enriched error type.
            return process_http_response_error(error)
1335
-
1336
    @distributed_trace_async
    @validate_multiapi_args(
        version_method_added="2022-05-01",
        args_mapping={
            "2022-10-01-preview": ["autodetect_default_language"],
        }
    )
    async def begin_recognize_custom_entities(
        self,
        documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
        project_name: str,
        deployment_name: str,
        **kwargs: Any,
    ) -> AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[RecognizeCustomEntitiesResult, DocumentError]]]:
        """Start a long-running custom named entity recognition operation.

        For information on regional support of custom features and how to train a model to
        recognize custom entities, see https://aka.ms/azsdk/textanalytics/customentityrecognition

        :param documents: The set of documents to process as part of this batch.
            If you wish to specify the ID and language on a per-item basis you must
            use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
            dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
            `{"id": "1", "language": "en", "text": "hello world"}`.
        :type documents:
            list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
        :param str project_name: Required. This field indicates the project name for the model.
        :param str deployment_name: This field indicates the deployment name for the model.
        :keyword str language: The 2 letter ISO 639-1 representation of language for the
            entire batch. For example, use "en" for English; "es" for Spanish etc.
            For automatic language detection, use "auto" (Only supported by API version
            2022-10-01-preview and newer). If not set, uses "en" for English as default.
            Per-document language will take precedence over whole batch language.
            See https://aka.ms/talangs for supported languages in Language API.
        :keyword str autodetect_default_language: Default/fallback language to use for documents requesting
            automatic language detection.
        :keyword bool show_stats: If set to true, response will contain document level statistics.
        :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
            logged on the service side for troubleshooting. By default, the Language service logs your
            input text for 48 hours, solely to allow for troubleshooting issues in providing you with
            the service's natural language processing functions. Setting this parameter to true,
            disables input logging and may limit our ability to remediate issues that occur. Please see
            Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
            additional details, and Microsoft Responsible AI principles at
            https://www.microsoft.com/ai/responsible-ai.
        :keyword str string_index_type: Specifies the method used to interpret string offsets.
            `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
            you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
            see https://aka.ms/text-analytics-offsets
        :keyword int polling_interval: Waiting time between two polls for LRO operations
            if no Retry-After header is present. Defaults to 5 seconds.
        :keyword str continuation_token:
            Call `continuation_token()` on the poller object to save the long-running operation (LRO)
            state into an opaque token. Pass the value as the `continuation_token` keyword argument
            to restart the LRO from a saved state.
        :keyword str display_name: An optional display name to set for the requested analysis.
        :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on this
            object to return a heterogeneous pageable of
            :class:`~azure.ai.textanalytics.RecognizeCustomEntitiesResult` and
            :class:`~azure.ai.textanalytics.DocumentError`.
        :rtype:
            ~azure.ai.textanalytics.aio.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
            ~azure.ai.textanalytics.RecognizeCustomEntitiesResult or ~azure.ai.textanalytics.DocumentError]]
        :raises ~azure.core.exceptions.HttpResponseError:

        .. versionadded:: 2022-05-01
            The *begin_recognize_custom_entities* client method.
        .. versionadded:: 2022-10-01-preview
            The *autodetect_default_language* keyword argument.

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_recognize_custom_entities_async.py
                :start-after: [START recognize_custom_entities_async]
                :end-before: [END recognize_custom_entities_async]
                :language: python
                :dedent: 4
                :caption: Recognize custom entities in a batch of documents.
        """

        continuation_token = kwargs.pop("continuation_token", None)
        string_index_type = kwargs.pop("string_index_type", self._string_code_unit)
        disable_service_logs = kwargs.pop("disable_service_logs", None)
        polling_interval = kwargs.pop("polling_interval", 5)

        if continuation_token:
            # Rehydrate the poller from a saved LRO state; bespoke=True flattens
            # the single-action results into a plain pageable.
            return cast(
                AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[RecognizeCustomEntitiesResult, DocumentError]]],
                _get_result_from_continuation_token(
                    self._client._client,  # pylint: disable=protected-access
                    continuation_token,
                    AsyncAnalyzeActionsLROPoller,
                    AsyncAnalyzeActionsLROPollingMethod(
                        text_analytics_client=self._client,
                        timeout=polling_interval,
                        **kwargs
                    ),
                    self._analyze_result_callback,
                    bespoke=True
                )
            )

        try:
            # Delegate to the generic actions LRO with a single custom-entities
            # action; remaining kwargs (language, show_stats, ...) pass through.
            return cast(
                AsyncTextAnalysisLROPoller[
                    AsyncItemPaged[Union[RecognizeCustomEntitiesResult, DocumentError]]
                ],
                await self.begin_analyze_actions(
                    documents,
                    actions=[
                        RecognizeCustomEntitiesAction(
                            project_name=project_name,
                            deployment_name=deployment_name,
                            string_index_type=string_index_type,
                            disable_service_logs=disable_service_logs
                        )
                    ],
                    polling_interval=polling_interval,
                    bespoke=True,
                    **kwargs
                )
            )

        except HttpResponseError as error:
            # Re-raise as the library's enriched error type.
            return process_http_response_error(error)
1461
-
1462
    @distributed_trace_async
    @validate_multiapi_args(
        version_method_added="2022-05-01",
        args_mapping={
            "2022-10-01-preview": ["autodetect_default_language"],
        }
    )
    async def begin_single_label_classify(
        self,
        documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
        project_name: str,
        deployment_name: str,
        **kwargs: Any,
    ) -> AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]]:
        """Start a long-running custom single label classification operation.

        For information on regional support of custom features and how to train a model to
        classify your documents, see https://aka.ms/azsdk/textanalytics/customfunctionalities

        :param documents: The set of documents to process as part of this batch.
            If you wish to specify the ID and language on a per-item basis you must
            use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
            dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
            `{"id": "1", "language": "en", "text": "hello world"}`.
        :type documents:
            list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
        :param str project_name: Required. This field indicates the project name for the model.
        :param str deployment_name: This field indicates the deployment name for the model.
        :keyword str language: The 2 letter ISO 639-1 representation of language for the
            entire batch. For example, use "en" for English; "es" for Spanish etc.
            For automatic language detection, use "auto" (Only supported by API version
            2022-10-01-preview and newer). If not set, uses "en" for English as default.
            Per-document language will take precedence over whole batch language.
            See https://aka.ms/talangs for supported languages in Language API.
        :keyword str autodetect_default_language: Default/fallback language to use for documents requesting
            automatic language detection.
        :keyword bool show_stats: If set to true, response will contain document level statistics.
        :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
            logged on the service side for troubleshooting. By default, the Language service logs your
            input text for 48 hours, solely to allow for troubleshooting issues in providing you with
            the service's natural language processing functions. Setting this parameter to true,
            disables input logging and may limit our ability to remediate issues that occur. Please see
            Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
            additional details, and Microsoft Responsible AI principles at
            https://www.microsoft.com/ai/responsible-ai.
        :keyword int polling_interval: Waiting time between two polls for LRO operations
            if no Retry-After header is present. Defaults to 5 seconds.
        :keyword str continuation_token:
            Call `continuation_token()` on the poller object to save the long-running operation (LRO)
            state into an opaque token. Pass the value as the `continuation_token` keyword argument
            to restart the LRO from a saved state.
        :keyword str display_name: An optional display name to set for the requested analysis.
        :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on this
            object to return a heterogeneous pageable of
            :class:`~azure.ai.textanalytics.ClassifyDocumentResult` and
            :class:`~azure.ai.textanalytics.DocumentError`.
        :rtype:
            ~azure.ai.textanalytics.aio.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
            ~azure.ai.textanalytics.ClassifyDocumentResult or ~azure.ai.textanalytics.DocumentError]]
        :raises ~azure.core.exceptions.HttpResponseError:

        .. versionadded:: 2022-05-01
            The *begin_single_label_classify* client method.
        .. versionadded:: 2022-10-01-preview
            The *autodetect_default_language* keyword argument.

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_single_label_classify_async.py
                :start-after: [START single_label_classify_async]
                :end-before: [END single_label_classify_async]
                :language: python
                :dedent: 4
                :caption: Perform single label classification on a batch of documents.
        """

        continuation_token = kwargs.pop("continuation_token", None)
        disable_service_logs = kwargs.pop("disable_service_logs", None)
        polling_interval = kwargs.pop("polling_interval", 5)

        if continuation_token:
            # Rehydrate the poller from a saved LRO state; bespoke=True flattens
            # the single-action results into a plain pageable.
            return cast(
                AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]],
                _get_result_from_continuation_token(
                    self._client._client,  # pylint: disable=protected-access
                    continuation_token,
                    AsyncAnalyzeActionsLROPoller,
                    AsyncAnalyzeActionsLROPollingMethod(
                        text_analytics_client=self._client,
                        timeout=polling_interval,
                        **kwargs
                    ),
                    self._analyze_result_callback,
                    bespoke=True
                )
            )

        try:
            # Delegate to the generic actions LRO with one single-label
            # classification action; remaining kwargs pass through.
            return cast(
                AsyncTextAnalysisLROPoller[
                    AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]
                ],
                await self.begin_analyze_actions(
                    documents,
                    actions=[
                        SingleLabelClassifyAction(
                            project_name=project_name,
                            deployment_name=deployment_name,
                            disable_service_logs=disable_service_logs
                        )
                    ],
                    polling_interval=polling_interval,
                    bespoke=True,
                    **kwargs
                )
            )

        except HttpResponseError as error:
            # Re-raise as the library's enriched error type.
            return process_http_response_error(error)
1581
-
1582
    @distributed_trace_async
    @validate_multiapi_args(
        version_method_added="2022-05-01",
        args_mapping={
            "2022-10-01-preview": ["autodetect_default_language"],
        }
    )
    async def begin_multi_label_classify(
        self,
        documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
        project_name: str,
        deployment_name: str,
        **kwargs: Any,
    ) -> AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]]:
        """Start a long-running custom multi label classification operation.

        For information on regional support of custom features and how to train a model to
        classify your documents, see https://aka.ms/azsdk/textanalytics/customfunctionalities

        :param documents: The set of documents to process as part of this batch.
            If you wish to specify the ID and language on a per-item basis you must
            use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
            dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
            `{"id": "1", "language": "en", "text": "hello world"}`.
        :type documents:
            list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
        :param str project_name: Required. This field indicates the project name for the model.
        :param str deployment_name: This field indicates the deployment name for the model.
        :keyword str language: The 2 letter ISO 639-1 representation of language for the
            entire batch. For example, use "en" for English; "es" for Spanish etc.
            For automatic language detection, use "auto" (Only supported by API version
            2022-10-01-preview and newer). If not set, uses "en" for English as default.
            Per-document language will take precedence over whole batch language.
            See https://aka.ms/talangs for supported languages in Language API.
        :keyword str autodetect_default_language: Default/fallback language to use for documents requesting
            automatic language detection.
        :keyword bool show_stats: If set to true, response will contain document level statistics.
        :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
            logged on the service side for troubleshooting. By default, the Language service logs your
            input text for 48 hours, solely to allow for troubleshooting issues in providing you with
            the service's natural language processing functions. Setting this parameter to true,
            disables input logging and may limit our ability to remediate issues that occur. Please see
            Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
            additional details, and Microsoft Responsible AI principles at
            https://www.microsoft.com/ai/responsible-ai.
        :keyword int polling_interval: Waiting time between two polls for LRO operations
            if no Retry-After header is present. Defaults to 5 seconds.
        :keyword str continuation_token:
            Call `continuation_token()` on the poller object to save the long-running operation (LRO)
            state into an opaque token. Pass the value as the `continuation_token` keyword argument
            to restart the LRO from a saved state.
        :keyword str display_name: An optional display name to set for the requested analysis.
        :return: An instance of an AsyncTextAnalysisLROPoller. Call `result()` on this
            object to return a heterogeneous pageable of
            :class:`~azure.ai.textanalytics.ClassifyDocumentResult` and
            :class:`~azure.ai.textanalytics.DocumentError`.
        :rtype:
            ~azure.ai.textanalytics.aio.AsyncTextAnalysisLROPoller[~azure.core.async_paging.AsyncItemPaged[
            ~azure.ai.textanalytics.ClassifyDocumentResult or ~azure.ai.textanalytics.DocumentError]]
        :raises ~azure.core.exceptions.HttpResponseError:

        .. versionadded:: 2022-05-01
            The *begin_multi_label_classify* client method.
        .. versionadded:: 2022-10-01-preview
            The *autodetect_default_language* keyword argument.

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_multi_label_classify_async.py
                :start-after: [START multi_label_classify_async]
                :end-before: [END multi_label_classify_async]
                :language: python
                :dedent: 4
                :caption: Perform multi label classification on a batch of documents.
        """

        continuation_token = kwargs.pop("continuation_token", None)
        disable_service_logs = kwargs.pop("disable_service_logs", None)
        polling_interval = kwargs.pop("polling_interval", 5)

        if continuation_token:
            # Rehydrate the poller from a saved LRO state; bespoke=True flattens
            # the single-action results into a plain pageable.
            return cast(
                AsyncTextAnalysisLROPoller[AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]],
                _get_result_from_continuation_token(
                    self._client._client,  # pylint: disable=protected-access
                    continuation_token,
                    AsyncAnalyzeActionsLROPoller,
                    AsyncAnalyzeActionsLROPollingMethod(
                        text_analytics_client=self._client,
                        timeout=polling_interval,
                        **kwargs
                    ),
                    self._analyze_result_callback,
                    bespoke=True
                )
            )

        try:
            # Delegate to the generic actions LRO with one multi-label
            # classification action; remaining kwargs pass through.
            return cast(
                AsyncTextAnalysisLROPoller[
                    AsyncItemPaged[Union[ClassifyDocumentResult, DocumentError]]
                ],
                await self.begin_analyze_actions(
                    documents,
                    actions=[
                        MultiLabelClassifyAction(
                            project_name=project_name,
                            deployment_name=deployment_name,
                            disable_service_logs=disable_service_logs
                        )
                    ],
                    polling_interval=polling_interval,
                    bespoke=True,
                    **kwargs
                )
            )

        except HttpResponseError as error:
            # Re-raise as the library's enriched error type.
            return process_http_response_error(error)
1701
-
1702
    @distributed_trace_async
    @validate_multiapi_args(
        version_method_added="2022-10-01-preview",
    )
    async def dynamic_classification(
        self,
        documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
        categories: List[str],
        **kwargs: Any,
    ) -> List[Union[DynamicClassificationResult, DocumentError]]:
        """Perform dynamic classification on a batch of documents.

        On the fly classification of the input documents into one or multiple categories.
        Assigns either one or multiple categories per document. This type of classification
        doesn't require model training.

        .. note:: The dynamic classification feature is part of a gated preview. Request access here:
            https://aka.ms/applyforgatedlanguagefeature

        See https://aka.ms/azsdk/textanalytics/data-limits for service data limits.

        :param documents: The set of documents to process as part of this batch.
            If you wish to specify the ID and language on a per-item basis you must
            use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list
            of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`,
            like `{"id": "1", "language": "en", "text": "hello world"}`.
        :type documents:
            list[str] or list[~azure.ai.textanalytics.TextDocumentInput] or list[dict[str, str]]
        :param list[str] categories: A list of categories to which input is classified to.
        :keyword classification_type: Specifies either one or multiple categories per document. Defaults
            to multi classification which may return more than one class for each document. Known values
            are: "Single" and "Multi".
        :paramtype classification_type: str or ~azure.ai.textanalytics.ClassificationType
        :keyword str language: The 2 letter ISO 639-1 representation of language for the
            entire batch. For example, use "en" for English; "es" for Spanish etc.
            If not set, uses "en" for English as default. Per-document language will
            take precedence over whole batch language. See https://aka.ms/talangs for
            supported languages in Language API.
        :keyword str model_version: This value indicates which model will
            be used for scoring, e.g. "latest", "2019-10-01". If a model-version
            is not specified, the API will default to the latest, non-preview version.
            See here for more info: https://aka.ms/text-analytics-model-versioning
        :keyword bool show_stats: If set to true, response will contain document
            level statistics in the `statistics` field of the document-level response.
        :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
            logged on the service side for troubleshooting. By default, the Language service logs your
            input text for 48 hours, solely to allow for troubleshooting issues in providing you with
            the service's natural language processing functions. Setting this parameter to true,
            disables input logging and may limit our ability to remediate issues that occur. Please see
            Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
            additional details, and Microsoft Responsible AI principles at
            https://www.microsoft.com/ai/responsible-ai.
        :return: The combined list of :class:`~azure.ai.textanalytics.DynamicClassificationResult` and
            :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
            were passed in.
        :rtype: list[~azure.ai.textanalytics.DynamicClassificationResult or ~azure.ai.textanalytics.DocumentError]
        :raises ~azure.core.exceptions.HttpResponseError:

        .. versionadded:: 2022-10-01-preview
            The *dynamic_classification* client method.

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_dynamic_classification_async.py
                :start-after: [START dynamic_classification_async]
                :end-before: [END dynamic_classification_async]
                :language: python
                :dedent: 4
                :caption: Perform dynamic classification on a batch of documents.
        """
        # Resolve the batch language, falling back to the client-wide default.
        language_arg = kwargs.pop("language", None)
        language = language_arg if language_arg is not None else self._default_language
        docs = _validate_input(documents, "language", language)
        model_version = kwargs.pop("model_version", None)
        show_stats = kwargs.pop("show_stats", None)
        disable_service_logs = kwargs.pop("disable_service_logs", None)
        classification_type = kwargs.pop("classification_type", None)

        try:
            # Build the generated-model request for the version-appropriate API
            # surface and hand deserialization to the result callback.
            models = self._client.models(api_version=self._api_version)
            return cast(
                List[Union[DynamicClassificationResult, DocumentError]],
                await self._client.analyze_text(
                    body=models.AnalyzeTextDynamicClassificationInput(
                        analysis_input={"documents": docs},
                        parameters=models.DynamicClassificationTaskParameters(
                            categories=categories,
                            logging_opt_out=disable_service_logs,
                            model_version=model_version,
                            classification_type=classification_type,
                        )
                    ),
                    show_stats=show_stats,
                    cls=kwargs.pop("cls", dynamic_classification_result),
                    **kwargs
                )
            )
        except HttpResponseError as error:
            # Re-raise as the library's enriched error type.
            return process_http_response_error(error)