azure-ai-textanalytics 5.3.0b1__py3-none-any.whl → 6.0.0b1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.


Files changed (128)
  1. azure/ai/textanalytics/__init__.py +26 -197
  2. azure/ai/textanalytics/_client.py +111 -0
  3. azure/ai/textanalytics/_configuration.py +73 -0
  4. azure/ai/textanalytics/{_generated/v2022_05_01/operations → _operations}/__init__.py +13 -8
  5. azure/ai/textanalytics/_operations/_operations.py +716 -0
  6. azure/ai/textanalytics/{_generated/v2022_05_01/models → _operations}/_patch.py +8 -6
  7. azure/ai/textanalytics/_patch.py +350 -0
  8. azure/ai/textanalytics/{_generated/aio → _utils}/__init__.py +1 -5
  9. azure/ai/textanalytics/_utils/model_base.py +1237 -0
  10. azure/ai/textanalytics/{_generated/_serialization.py → _utils/serialization.py} +640 -616
  11. azure/ai/textanalytics/{_generated/v2022_05_01/aio/_vendor.py → _utils/utils.py} +10 -12
  12. azure/ai/textanalytics/_version.py +8 -7
  13. azure/ai/textanalytics/aio/__init__.py +25 -14
  14. azure/ai/textanalytics/aio/_client.py +115 -0
  15. azure/ai/textanalytics/aio/_configuration.py +75 -0
  16. azure/ai/textanalytics/{_generated/v2022_10_01_preview/aio/operations → aio/_operations}/__init__.py +13 -8
  17. azure/ai/textanalytics/aio/_operations/_operations.py +623 -0
  18. azure/ai/textanalytics/{_generated/v2022_05_01 → aio/_operations}/_patch.py +8 -6
  19. azure/ai/textanalytics/aio/_patch.py +344 -0
  20. azure/ai/textanalytics/models/__init__.py +402 -0
  21. azure/ai/textanalytics/models/_enums.py +1979 -0
  22. azure/ai/textanalytics/models/_models.py +6641 -0
  23. azure/ai/textanalytics/{_generated/v2022_05_01/aio → models}/_patch.py +8 -6
  24. azure/ai/textanalytics/py.typed +1 -0
  25. {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/METADATA +755 -319
  26. azure_ai_textanalytics-6.0.0b1.dist-info/RECORD +29 -0
  27. {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/WHEEL +1 -1
  28. azure/ai/textanalytics/_base_client.py +0 -111
  29. azure/ai/textanalytics/_check.py +0 -22
  30. azure/ai/textanalytics/_dict_mixin.py +0 -54
  31. azure/ai/textanalytics/_generated/__init__.py +0 -16
  32. azure/ai/textanalytics/_generated/_configuration.py +0 -70
  33. azure/ai/textanalytics/_generated/_operations_mixin.py +0 -795
  34. azure/ai/textanalytics/_generated/_text_analytics_client.py +0 -126
  35. azure/ai/textanalytics/_generated/_version.py +0 -8
  36. azure/ai/textanalytics/_generated/aio/_configuration.py +0 -66
  37. azure/ai/textanalytics/_generated/aio/_operations_mixin.py +0 -776
  38. azure/ai/textanalytics/_generated/aio/_text_analytics_client.py +0 -124
  39. azure/ai/textanalytics/_generated/models.py +0 -8
  40. azure/ai/textanalytics/_generated/v2022_05_01/__init__.py +0 -20
  41. azure/ai/textanalytics/_generated/v2022_05_01/_configuration.py +0 -72
  42. azure/ai/textanalytics/_generated/v2022_05_01/_text_analytics_client.py +0 -100
  43. azure/ai/textanalytics/_generated/v2022_05_01/_vendor.py +0 -45
  44. azure/ai/textanalytics/_generated/v2022_05_01/aio/__init__.py +0 -20
  45. azure/ai/textanalytics/_generated/v2022_05_01/aio/_configuration.py +0 -71
  46. azure/ai/textanalytics/_generated/v2022_05_01/aio/_text_analytics_client.py +0 -97
  47. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/__init__.py +0 -18
  48. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_patch.py +0 -121
  49. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_text_analytics_client_operations.py +0 -603
  50. azure/ai/textanalytics/_generated/v2022_05_01/models/__init__.py +0 -281
  51. azure/ai/textanalytics/_generated/v2022_05_01/models/_models_py3.py +0 -5722
  52. azure/ai/textanalytics/_generated/v2022_05_01/models/_text_analytics_client_enums.py +0 -439
  53. azure/ai/textanalytics/_generated/v2022_05_01/operations/_patch.py +0 -120
  54. azure/ai/textanalytics/_generated/v2022_05_01/operations/_text_analytics_client_operations.py +0 -744
  55. azure/ai/textanalytics/_generated/v2022_10_01_preview/__init__.py +0 -20
  56. azure/ai/textanalytics/_generated/v2022_10_01_preview/_configuration.py +0 -72
  57. azure/ai/textanalytics/_generated/v2022_10_01_preview/_patch.py +0 -19
  58. azure/ai/textanalytics/_generated/v2022_10_01_preview/_text_analytics_client.py +0 -100
  59. azure/ai/textanalytics/_generated/v2022_10_01_preview/_vendor.py +0 -45
  60. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/__init__.py +0 -20
  61. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_configuration.py +0 -71
  62. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_patch.py +0 -19
  63. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_text_analytics_client.py +0 -97
  64. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_vendor.py +0 -27
  65. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_patch.py +0 -121
  66. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_text_analytics_client_operations.py +0 -603
  67. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/__init__.py +0 -407
  68. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_models_py3.py +0 -8462
  69. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_patch.py +0 -72
  70. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_text_analytics_client_enums.py +0 -730
  71. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/__init__.py +0 -18
  72. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_patch.py +0 -120
  73. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_text_analytics_client_operations.py +0 -744
  74. azure/ai/textanalytics/_generated/v3_0/__init__.py +0 -20
  75. azure/ai/textanalytics/_generated/v3_0/_configuration.py +0 -66
  76. azure/ai/textanalytics/_generated/v3_0/_patch.py +0 -31
  77. azure/ai/textanalytics/_generated/v3_0/_text_analytics_client.py +0 -96
  78. azure/ai/textanalytics/_generated/v3_0/_vendor.py +0 -33
  79. azure/ai/textanalytics/_generated/v3_0/aio/__init__.py +0 -20
  80. azure/ai/textanalytics/_generated/v3_0/aio/_configuration.py +0 -65
  81. azure/ai/textanalytics/_generated/v3_0/aio/_patch.py +0 -31
  82. azure/ai/textanalytics/_generated/v3_0/aio/_text_analytics_client.py +0 -93
  83. azure/ai/textanalytics/_generated/v3_0/aio/_vendor.py +0 -27
  84. azure/ai/textanalytics/_generated/v3_0/aio/operations/__init__.py +0 -18
  85. azure/ai/textanalytics/_generated/v3_0/aio/operations/_patch.py +0 -19
  86. azure/ai/textanalytics/_generated/v3_0/aio/operations/_text_analytics_client_operations.py +0 -428
  87. azure/ai/textanalytics/_generated/v3_0/models/__init__.py +0 -81
  88. azure/ai/textanalytics/_generated/v3_0/models/_models_py3.py +0 -1467
  89. azure/ai/textanalytics/_generated/v3_0/models/_patch.py +0 -19
  90. azure/ai/textanalytics/_generated/v3_0/models/_text_analytics_client_enums.py +0 -58
  91. azure/ai/textanalytics/_generated/v3_0/operations/__init__.py +0 -18
  92. azure/ai/textanalytics/_generated/v3_0/operations/_patch.py +0 -19
  93. azure/ai/textanalytics/_generated/v3_0/operations/_text_analytics_client_operations.py +0 -604
  94. azure/ai/textanalytics/_generated/v3_1/__init__.py +0 -20
  95. azure/ai/textanalytics/_generated/v3_1/_configuration.py +0 -66
  96. azure/ai/textanalytics/_generated/v3_1/_patch.py +0 -31
  97. azure/ai/textanalytics/_generated/v3_1/_text_analytics_client.py +0 -98
  98. azure/ai/textanalytics/_generated/v3_1/_vendor.py +0 -45
  99. azure/ai/textanalytics/_generated/v3_1/aio/__init__.py +0 -20
  100. azure/ai/textanalytics/_generated/v3_1/aio/_configuration.py +0 -65
  101. azure/ai/textanalytics/_generated/v3_1/aio/_patch.py +0 -31
  102. azure/ai/textanalytics/_generated/v3_1/aio/_text_analytics_client.py +0 -95
  103. azure/ai/textanalytics/_generated/v3_1/aio/_vendor.py +0 -27
  104. azure/ai/textanalytics/_generated/v3_1/aio/operations/__init__.py +0 -18
  105. azure/ai/textanalytics/_generated/v3_1/aio/operations/_patch.py +0 -19
  106. azure/ai/textanalytics/_generated/v3_1/aio/operations/_text_analytics_client_operations.py +0 -1291
  107. azure/ai/textanalytics/_generated/v3_1/models/__init__.py +0 -205
  108. azure/ai/textanalytics/_generated/v3_1/models/_models_py3.py +0 -3976
  109. azure/ai/textanalytics/_generated/v3_1/models/_patch.py +0 -19
  110. azure/ai/textanalytics/_generated/v3_1/models/_text_analytics_client_enums.py +0 -367
  111. azure/ai/textanalytics/_generated/v3_1/operations/__init__.py +0 -18
  112. azure/ai/textanalytics/_generated/v3_1/operations/_patch.py +0 -19
  113. azure/ai/textanalytics/_generated/v3_1/operations/_text_analytics_client_operations.py +0 -1709
  114. azure/ai/textanalytics/_lro.py +0 -552
  115. azure/ai/textanalytics/_models.py +0 -3142
  116. azure/ai/textanalytics/_policies.py +0 -66
  117. azure/ai/textanalytics/_request_handlers.py +0 -104
  118. azure/ai/textanalytics/_response_handlers.py +0 -580
  119. azure/ai/textanalytics/_text_analytics_client.py +0 -1802
  120. azure/ai/textanalytics/_user_agent.py +0 -8
  121. azure/ai/textanalytics/_validate.py +0 -113
  122. azure/ai/textanalytics/aio/_base_client_async.py +0 -95
  123. azure/ai/textanalytics/aio/_lro_async.py +0 -501
  124. azure/ai/textanalytics/aio/_response_handlers_async.py +0 -94
  125. azure/ai/textanalytics/aio/_text_analytics_client_async.py +0 -1800
  126. azure_ai_textanalytics-5.3.0b1.dist-info/RECORD +0 -115
  127. {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info/licenses}/LICENSE +0 -0
  128. {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/top_level.txt +0 -0
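For orientation: the removals above strip out the entire hand-written 5.x convenience layer (_text_analytics_client.py, _models.py, _lro.py, _response_handlers.py and the multi-api _generated/ tree), while the additions introduce a regenerated, single-version client (_client.py, _operations/, models/). Below is a minimal sketch of the 5.3.0b1-era usage pattern that the removed modules implemented; the endpoint and key are placeholders, and the 6.0.0b1 surface is not shown in this excerpt and may differ.

# Sketch of the 5.x client surface removed in this diff (placeholder endpoint/key).
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

client = TextAnalyticsClient(
    endpoint="https://<your-resource>.cognitiveservices.azure.com/",
    credential=AzureKeyCredential("<api-key>"),
)

# Single-request operations such as sentiment analysis return one result per input document.
docs = ["The rooms were beautiful but the staff was unhelpful."]
for result in client.analyze_sentiment(docs):
    if not result.is_error:
        print(result.sentiment, result.confidence_scores.positive)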
@@ -1,1709 +0,0 @@
- # pylint: disable=too-many-lines
- # coding=utf-8
- # --------------------------------------------------------------------------
- # Copyright (c) Microsoft Corporation. All rights reserved.
- # Licensed under the MIT License. See License.txt in the project root for license information.
- # Code generated by Microsoft (R) AutoRest Code Generator.
- # Changes may cause incorrect behavior and will be lost if the code is regenerated.
- # --------------------------------------------------------------------------
- from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload
-
- from ...._lro import AnalyzeActionsLROPoller, AnalyzeActionsLROPollingMethod, AnalyzeHealthcareEntitiesLROPoller, AnalyzeHealthcareEntitiesLROPollingMethod
- from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
- from azure.core.pipeline import PipelineResponse
- from azure.core.pipeline.transport import HttpResponse
- from azure.core.polling import LROPoller, NoPolling, PollingMethod
- from azure.core.polling.base_polling import LROBasePolling
- from azure.core.rest import HttpRequest
- from azure.core.tracing.decorator import distributed_trace
- from azure.core.utils import case_insensitive_dict
-
- from .. import models as _models
- from ..._serialization import Serializer
- from .._vendor import MixinABC, _convert_request, _format_url_section
- T = TypeVar('T')
- ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
-
- _SERIALIZER = Serializer()
- _SERIALIZER.client_side_validation = False
-
- def build_analyze_request(
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/analyze")
-
- # Construct headers
- if content_type is not None:
- _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="POST",
- url=_url,
- headers=_headers,
- **kwargs
- )
-
-
- def build_analyze_status_request(
- job_id: str,
- *,
- show_stats: Optional[bool] = None,
- top: int = 20,
- skip: int = 0,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/analyze/jobs/{jobId}")
- path_format_arguments = {
- "jobId": _SERIALIZER.url("job_id", job_id, 'str'),
- }
-
- _url = _format_url_section(_url, **path_format_arguments)
-
- # Construct parameters
- if show_stats is not None:
- _params['showStats'] = _SERIALIZER.query("show_stats", show_stats, 'bool')
- if top is not None:
- _params['$top'] = _SERIALIZER.query("top", top, 'int', maximum=50, minimum=1)
- if skip is not None:
- _params['$skip'] = _SERIALIZER.query("skip", skip, 'int', minimum=0)
-
- # Construct headers
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="GET",
- url=_url,
- params=_params,
- headers=_headers,
- **kwargs
- )
-
-
- def build_health_status_request(
- job_id: str,
- *,
- top: int = 20,
- skip: int = 0,
- show_stats: Optional[bool] = None,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/entities/health/jobs/{jobId}")
- path_format_arguments = {
- "jobId": _SERIALIZER.url("job_id", job_id, 'str'),
- }
-
- _url = _format_url_section(_url, **path_format_arguments)
-
- # Construct parameters
- if top is not None:
- _params['$top'] = _SERIALIZER.query("top", top, 'int', maximum=50, minimum=1)
- if skip is not None:
- _params['$skip'] = _SERIALIZER.query("skip", skip, 'int', minimum=0)
- if show_stats is not None:
- _params['showStats'] = _SERIALIZER.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="GET",
- url=_url,
- params=_params,
- headers=_headers,
- **kwargs
- )
-
-
- def build_cancel_health_job_request(
- job_id: str,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/entities/health/jobs/{jobId}")
- path_format_arguments = {
- "jobId": _SERIALIZER.url("job_id", job_id, 'str'),
- }
-
- _url = _format_url_section(_url, **path_format_arguments)
-
- # Construct headers
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="DELETE",
- url=_url,
- headers=_headers,
- **kwargs
- )
-
-
- def build_health_request(
- *,
- model_version: Optional[str] = None,
- string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
- logging_opt_out: Optional[bool] = None,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/entities/health/jobs")
-
- # Construct parameters
- if model_version is not None:
- _params['model-version'] = _SERIALIZER.query("model_version", model_version, 'str')
- if string_index_type is not None:
- _params['stringIndexType'] = _SERIALIZER.query("string_index_type", string_index_type, 'str')
- if logging_opt_out is not None:
- _params['loggingOptOut'] = _SERIALIZER.query("logging_opt_out", logging_opt_out, 'bool')
-
- # Construct headers
- if content_type is not None:
- _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="POST",
- url=_url,
- params=_params,
- headers=_headers,
- **kwargs
- )
-
-
- def build_entities_recognition_general_request(
- *,
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- logging_opt_out: Optional[bool] = None,
- string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/entities/recognition/general")
-
- # Construct parameters
- if model_version is not None:
- _params['model-version'] = _SERIALIZER.query("model_version", model_version, 'str')
- if show_stats is not None:
- _params['showStats'] = _SERIALIZER.query("show_stats", show_stats, 'bool')
- if logging_opt_out is not None:
- _params['loggingOptOut'] = _SERIALIZER.query("logging_opt_out", logging_opt_out, 'bool')
- if string_index_type is not None:
- _params['stringIndexType'] = _SERIALIZER.query("string_index_type", string_index_type, 'str')
-
- # Construct headers
- if content_type is not None:
- _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="POST",
- url=_url,
- params=_params,
- headers=_headers,
- **kwargs
- )
-
-
- def build_entities_recognition_pii_request(
- *,
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- logging_opt_out: Optional[bool] = None,
- domain: Optional[str] = None,
- string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
- pii_categories: Optional[List[Union[str, "_models.PiiCategory"]]] = None,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/entities/recognition/pii")
-
- # Construct parameters
- if model_version is not None:
- _params['model-version'] = _SERIALIZER.query("model_version", model_version, 'str')
- if show_stats is not None:
- _params['showStats'] = _SERIALIZER.query("show_stats", show_stats, 'bool')
- if logging_opt_out is not None:
- _params['loggingOptOut'] = _SERIALIZER.query("logging_opt_out", logging_opt_out, 'bool')
- if domain is not None:
- _params['domain'] = _SERIALIZER.query("domain", domain, 'str')
- if string_index_type is not None:
- _params['stringIndexType'] = _SERIALIZER.query("string_index_type", string_index_type, 'str')
- if pii_categories is not None:
- _params['piiCategories'] = _SERIALIZER.query("pii_categories", pii_categories, '[str]', div=',')
-
- # Construct headers
- if content_type is not None:
- _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="POST",
- url=_url,
- params=_params,
- headers=_headers,
- **kwargs
- )
-
-
- def build_entities_linking_request(
- *,
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- logging_opt_out: Optional[bool] = None,
- string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/entities/linking")
-
- # Construct parameters
- if model_version is not None:
- _params['model-version'] = _SERIALIZER.query("model_version", model_version, 'str')
- if show_stats is not None:
- _params['showStats'] = _SERIALIZER.query("show_stats", show_stats, 'bool')
- if logging_opt_out is not None:
- _params['loggingOptOut'] = _SERIALIZER.query("logging_opt_out", logging_opt_out, 'bool')
- if string_index_type is not None:
- _params['stringIndexType'] = _SERIALIZER.query("string_index_type", string_index_type, 'str')
-
- # Construct headers
- if content_type is not None:
- _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="POST",
- url=_url,
- params=_params,
- headers=_headers,
- **kwargs
- )
-
-
- def build_key_phrases_request(
- *,
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- logging_opt_out: Optional[bool] = None,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/keyPhrases")
-
- # Construct parameters
- if model_version is not None:
- _params['model-version'] = _SERIALIZER.query("model_version", model_version, 'str')
- if show_stats is not None:
- _params['showStats'] = _SERIALIZER.query("show_stats", show_stats, 'bool')
- if logging_opt_out is not None:
- _params['loggingOptOut'] = _SERIALIZER.query("logging_opt_out", logging_opt_out, 'bool')
-
- # Construct headers
- if content_type is not None:
- _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="POST",
- url=_url,
- params=_params,
- headers=_headers,
- **kwargs
- )
-
-
- def build_languages_request(
- *,
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- logging_opt_out: Optional[bool] = None,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/languages")
-
- # Construct parameters
- if model_version is not None:
- _params['model-version'] = _SERIALIZER.query("model_version", model_version, 'str')
- if show_stats is not None:
- _params['showStats'] = _SERIALIZER.query("show_stats", show_stats, 'bool')
- if logging_opt_out is not None:
- _params['loggingOptOut'] = _SERIALIZER.query("logging_opt_out", logging_opt_out, 'bool')
-
- # Construct headers
- if content_type is not None:
- _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="POST",
- url=_url,
- params=_params,
- headers=_headers,
- **kwargs
- )
-
-
- def build_sentiment_request(
- *,
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- logging_opt_out: Optional[bool] = None,
- opinion_mining: Optional[bool] = None,
- string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
- **kwargs: Any
- ) -> HttpRequest:
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- accept = _headers.pop('Accept', "application/json, text/json")
-
- # Construct URL
- _url = kwargs.pop("template_url", "/sentiment")
-
- # Construct parameters
- if model_version is not None:
- _params['model-version'] = _SERIALIZER.query("model_version", model_version, 'str')
- if show_stats is not None:
- _params['showStats'] = _SERIALIZER.query("show_stats", show_stats, 'bool')
- if logging_opt_out is not None:
- _params['loggingOptOut'] = _SERIALIZER.query("logging_opt_out", logging_opt_out, 'bool')
- if opinion_mining is not None:
- _params['opinionMining'] = _SERIALIZER.query("opinion_mining", opinion_mining, 'bool')
- if string_index_type is not None:
- _params['stringIndexType'] = _SERIALIZER.query("string_index_type", string_index_type, 'str')
-
- # Construct headers
- if content_type is not None:
- _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
- _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
-
- return HttpRequest(
- method="POST",
- url=_url,
- params=_params,
- headers=_headers,
- **kwargs
- )
-
- class TextAnalyticsClientOperationsMixin(MixinABC):
-
- def _analyze_initial(
- self,
- body: Optional[Union[_models.AnalyzeBatchInput, IO]] = None,
- **kwargs: Any
- ) -> Optional[_models.AnalyzeJobState]:
- error_map = {
- 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
- }
- error_map.update(kwargs.pop('error_map', {}) or {})
-
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- cls = kwargs.pop('cls', None) # type: ClsType[Optional[_models.AnalyzeJobState]]
-
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IO, bytes)):
- _content = body
- else:
- if body is not None:
- _json = self._serialize.body(body, 'AnalyzeBatchInput')
- else:
- _json = None
-
- request = build_analyze_request(
- content_type=content_type,
- json=_json,
- content=_content,
- template_url=self._analyze_initial.metadata['url'],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- path_format_arguments = {
- "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
- pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
- request,
- stream=False,
- **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 202]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = None
- response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize('AnalyzeJobState', pipeline_response)
-
- if response.status_code == 202:
- response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
-
-
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
-
- return deserialized
-
- _analyze_initial.metadata = {'url': "/analyze"} # type: ignore
-
-
- @overload
- def begin_analyze(
- self,
- body: Optional[_models.AnalyzeBatchInput] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> AnalyzeActionsLROPoller[_models.AnalyzeJobState]:
- """Submit analysis job.
-
- Submit a collection of text documents for analysis. Specify one or more unique tasks to be
- executed.
-
- :param body: Collection of documents to analyze and tasks to execute. Default value is None.
- :type body: ~azure.ai.textanalytics.v3_1.models.AnalyzeBatchInput
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AnalyzeActionsLROPollingMethod. Pass
- in False for this operation to not poll, or pass in your own initialized polling object for a
- personal polling strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of AnalyzeActionsLROPoller that returns either AnalyzeJobState or the
- result of cls(response)
- :rtype: ~...._lro.AnalyzeActionsLROPoller[~azure.ai.textanalytics.v3_1.models.AnalyzeJobState]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def begin_analyze(
- self,
- body: Optional[IO] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> AnalyzeActionsLROPoller[_models.AnalyzeJobState]:
- """Submit analysis job.
-
- Submit a collection of text documents for analysis. Specify one or more unique tasks to be
- executed.
-
- :param body: Collection of documents to analyze and tasks to execute. Default value is None.
- :type body: IO
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Known values are: 'application/json', 'text/json'. Default value is "application/json".
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AnalyzeActionsLROPollingMethod. Pass
- in False for this operation to not poll, or pass in your own initialized polling object for a
- personal polling strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of AnalyzeActionsLROPoller that returns either AnalyzeJobState or the
- result of cls(response)
- :rtype: ~...._lro.AnalyzeActionsLROPoller[~azure.ai.textanalytics.v3_1.models.AnalyzeJobState]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
-
- @distributed_trace
- def begin_analyze(
- self,
- body: Optional[Union[_models.AnalyzeBatchInput, IO]] = None,
- **kwargs: Any
- ) -> AnalyzeActionsLROPoller[_models.AnalyzeJobState]:
- """Submit analysis job.
-
- Submit a collection of text documents for analysis. Specify one or more unique tasks to be
- executed.
-
- :param body: Collection of documents to analyze and tasks to execute. Is either a model type or
- a IO type. Default value is None.
- :type body: ~azure.ai.textanalytics.v3_1.models.AnalyzeBatchInput or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json',
- 'text/json'. Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AnalyzeActionsLROPollingMethod. Pass
- in False for this operation to not poll, or pass in your own initialized polling object for a
- personal polling strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of AnalyzeActionsLROPoller that returns either AnalyzeJobState or the
- result of cls(response)
- :rtype: ~...._lro.AnalyzeActionsLROPoller[~azure.ai.textanalytics.v3_1.models.AnalyzeJobState]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
- cls = kwargs.pop('cls', None) # type: ClsType[_models.AnalyzeJobState]
- polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
- lro_delay = kwargs.pop(
- 'polling_interval',
- self._config.polling_interval
- )
- cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
- if cont_token is None:
- raw_result = self._analyze_initial( # type: ignore
- body=body,
- content_type=content_type,
- cls=lambda x,y,z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop('error_map', None)
-
- def get_long_running_output(pipeline_response):
- deserialized = self._deserialize('AnalyzeJobState', pipeline_response)
- if cls:
- return cls(pipeline_response, deserialized, {})
- return deserialized
-
-
- path_format_arguments = {
- "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
-
- if polling is True:
- polling_method = cast(PollingMethod, AnalyzeActionsLROPollingMethod(
- lro_delay,
-
- path_format_arguments=path_format_arguments,
- **kwargs
- )) # type: PollingMethod
- elif polling is False: polling_method = cast(PollingMethod, NoPolling())
- else: polling_method = polling
- if cont_token:
- return AnalyzeActionsLROPoller.from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output
- )
- return AnalyzeActionsLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-
- begin_analyze.metadata = {'url': "/analyze"} # type: ignore
-
- @distributed_trace
- def analyze_status(
- self,
- job_id: str,
- show_stats: Optional[bool] = None,
- top: int = 20,
- skip: int = 0,
- **kwargs: Any
- ) -> _models.AnalyzeJobState:
- """Get analysis status and results.
-
- Get the status of an analysis job. A job may consist of one or more tasks. Once all tasks are
- completed, the job will transition to the completed state and results will be available for
- each task.
-
- :param job_id: Job ID for Analyze. Required.
- :type job_id: str
- :param show_stats: (Optional) if set to true, response will contain request and document level
- statistics. Default value is None.
- :type show_stats: bool
- :param top: (Optional) Set the maximum number of results per task. When both $top and $skip are
- specified, $skip is applied first. Default value is 20.
- :type top: int
- :param skip: (Optional) Set the number of elements to offset in the response. When both $top
- and $skip are specified, $skip is applied first. Default value is 0.
- :type skip: int
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: AnalyzeJobState or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.v3_1.models.AnalyzeJobState
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- error_map = {
- 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
- }
- error_map.update(kwargs.pop('error_map', {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls = kwargs.pop('cls', None) # type: ClsType[_models.AnalyzeJobState]
-
-
- request = build_analyze_status_request(
- job_id=job_id,
- show_stats=show_stats,
- top=top,
- skip=skip,
- template_url=self.analyze_status.metadata['url'],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- path_format_arguments = {
- "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
- pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
- request,
- stream=False,
- **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('AnalyzeJobState', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
-
- analyze_status.metadata = {'url': "/analyze/jobs/{jobId}"} # type: ignore
-
-
- @distributed_trace
- def health_status(
- self,
- job_id: str,
- top: int = 20,
- skip: int = 0,
- show_stats: Optional[bool] = None,
- **kwargs: Any
- ) -> _models.HealthcareJobState:
- """Get healthcare analysis job status and results.
-
- Get details of the healthcare prediction job specified by the jobId.
-
- :param job_id: Job ID. Required.
- :type job_id: str
- :param top: (Optional) Set the maximum number of results per task. When both $top and $skip are
- specified, $skip is applied first. Default value is 20.
- :type top: int
- :param skip: (Optional) Set the number of elements to offset in the response. When both $top
- and $skip are specified, $skip is applied first. Default value is 0.
- :type skip: int
- :param show_stats: (Optional) if set to true, response will contain request and document level
- statistics. Default value is None.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: HealthcareJobState or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.v3_1.models.HealthcareJobState
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- error_map = {
- 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
- }
- error_map.update(kwargs.pop('error_map', {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls = kwargs.pop('cls', None) # type: ClsType[_models.HealthcareJobState]
-
-
- request = build_health_status_request(
- job_id=job_id,
- top=top,
- skip=skip,
- show_stats=show_stats,
- template_url=self.health_status.metadata['url'],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- path_format_arguments = {
- "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
- pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
- request,
- stream=False,
- **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('HealthcareJobState', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
-
- health_status.metadata = {'url': "/entities/health/jobs/{jobId}"} # type: ignore
-
-
- def _cancel_health_job_initial( # pylint: disable=inconsistent-return-statements
- self,
- job_id: str,
- **kwargs: Any
- ) -> None:
- error_map = {
- 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
- }
- error_map.update(kwargs.pop('error_map', {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls = kwargs.pop('cls', None) # type: ClsType[None]
-
-
- request = build_cancel_health_job_request(
- job_id=job_id,
- template_url=self._cancel_health_job_initial.metadata['url'],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- path_format_arguments = {
- "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
- pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
- request,
- stream=False,
- **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [202]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error)
-
- response_headers = {}
- response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
-
-
- if cls:
- return cls(pipeline_response, None, response_headers)
-
- _cancel_health_job_initial.metadata = {'url': "/entities/health/jobs/{jobId}"} # type: ignore
-
-
- @distributed_trace
- def begin_cancel_health_job(
- self,
- job_id: str,
- **kwargs: Any
- ) -> LROPoller[None]:
- """Cancel healthcare prediction job.
-
- Cancel healthcare prediction job.
-
- :param job_id: Job ID. Required.
- :type job_id: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of LROPoller that returns either None or the result of cls(response)
- :rtype: ~azure.core.polling.LROPoller[None]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls = kwargs.pop('cls', None) # type: ClsType[None]
- polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
- lro_delay = kwargs.pop(
- 'polling_interval',
- self._config.polling_interval
- )
- cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
- if cont_token is None:
- raw_result = self._cancel_health_job_initial( # type: ignore
- job_id=job_id,
- cls=lambda x,y,z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop('error_map', None)
-
- def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
- if cls:
- return cls(pipeline_response, None, {})
-
-
- path_format_arguments = {
- "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
-
- if polling is True:
- polling_method = cast(PollingMethod, LROBasePolling(
- lro_delay,
-
- path_format_arguments=path_format_arguments,
- **kwargs
- )) # type: PollingMethod
- elif polling is False: polling_method = cast(PollingMethod, NoPolling())
- else: polling_method = polling
- if cont_token:
- return LROPoller.from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output
- )
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
-
- begin_cancel_health_job.metadata = {'url': "/entities/health/jobs/{jobId}"} # type: ignore
-
- def _health_initial(
953
- self,
954
- documents: List[_models.MultiLanguageInput],
955
- model_version: Optional[str] = None,
956
- string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
957
- logging_opt_out: Optional[bool] = None,
958
- **kwargs: Any
959
- ) -> Optional[_models.HealthcareJobState]:
960
- error_map = {
961
- 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
962
- }
963
- error_map.update(kwargs.pop('error_map', {}) or {})
964
-
965
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
966
- _params = kwargs.pop("params", {}) or {}
967
-
968
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
969
- cls = kwargs.pop('cls', None) # type: ClsType[Optional[_models.HealthcareJobState]]
970
-
971
- _input = _models.MultiLanguageBatchInput(documents=documents)
972
- _json = self._serialize.body(_input, 'MultiLanguageBatchInput')
973
-
974
- request = build_health_request(
975
- model_version=model_version,
976
- string_index_type=string_index_type,
977
- logging_opt_out=logging_opt_out,
978
- content_type=content_type,
979
- json=_json,
980
- template_url=self._health_initial.metadata['url'],
981
- headers=_headers,
982
- params=_params,
983
- )
984
- request = _convert_request(request)
985
- path_format_arguments = {
986
- "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
987
- }
988
- request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
989
-
990
- pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
991
- request,
992
- stream=False,
993
- **kwargs
994
- )
995
-
996
- response = pipeline_response.http_response
997
-
998
- if response.status_code not in [200, 202]:
999
- map_error(status_code=response.status_code, response=response, error_map=error_map)
1000
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
1001
- raise HttpResponseError(response=response, model=error)
1002
-
1003
- deserialized = None
1004
- response_headers = {}
1005
- if response.status_code == 200:
1006
- deserialized = self._deserialize('HealthcareJobState', pipeline_response)
1007
-
1008
- if response.status_code == 202:
1009
- response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
1010
-
1011
-
1012
- if cls:
1013
- return cls(pipeline_response, deserialized, response_headers)
1014
-
1015
- return deserialized
1016
-
1017
- _health_initial.metadata = {'url': "/entities/health/jobs"} # type: ignore
1018
-
1019
-
1020
- @distributed_trace
1021
- def begin_health(
1022
- self,
1023
- documents: List[_models.MultiLanguageInput],
1024
- model_version: Optional[str] = None,
1025
- string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
1026
- logging_opt_out: Optional[bool] = None,
1027
- **kwargs: Any
1028
- ) -> AnalyzeHealthcareEntitiesLROPoller[_models.HealthcareJobState]:
1029
- """Submit healthcare analysis job.
1030
-
1031
- Start a healthcare analysis job to recognize healthcare related entities (drugs, conditions,
1032
- symptoms, etc) and their relations.
1033
-
1034
- :param documents: The set of documents to process as part of this batch. Required.
1035
- :type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
1036
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
1037
- a model-version is not specified, the API should default to the latest, non-preview version.
1038
- Default value is None.
1039
- :type model_version: str
1040
- :param string_index_type: (Optional) Specifies the method used to interpret string offsets.
1041
- Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
1042
- see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
1043
- "UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
1044
- :type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
1045
- :param logging_opt_out: (Optional) If set to true, you opt-out of having your text input logged
1046
- for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
1047
- allow for troubleshooting issues in providing you with the Text Analytics natural language
1048
- processing functions. Setting this parameter to true, disables input logging and may limit our
1049
- ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
1050
- notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
1051
- principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
1052
- :type logging_opt_out: bool
1053
- :keyword callable cls: A custom type or function that will be passed the direct response
1054
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
1055
- :keyword polling: By default, your polling method will be
1056
- AnalyzeHealthcareEntitiesLROPollingMethod. Pass in False for this operation to not poll, or
1057
- pass in your own initialized polling object for a personal polling strategy.
1058
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
1059
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
1060
- Retry-After header is present.
1061
- :return: An instance of AnalyzeHealthcareEntitiesLROPoller that returns either
1062
- HealthcareJobState or the result of cls(response)
1063
- :rtype:
1064
- ~...._lro.AnalyzeHealthcareEntitiesLROPoller[~azure.ai.textanalytics.v3_1.models.HealthcareJobState]
1065
- :raises ~azure.core.exceptions.HttpResponseError:
1066
- """
1067
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
1068
- _params = kwargs.pop("params", {}) or {}
1069
-
1070
- content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
1071
- cls = kwargs.pop('cls', None) # type: ClsType[_models.HealthcareJobState]
1072
- polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
1073
- lro_delay = kwargs.pop(
1074
- 'polling_interval',
1075
- self._config.polling_interval
1076
- )
1077
- cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
1078
- if cont_token is None:
1079
- raw_result = self._health_initial( # type: ignore
1080
- documents=documents,
1081
- model_version=model_version,
1082
- string_index_type=string_index_type,
1083
- logging_opt_out=logging_opt_out,
1084
- content_type=content_type,
1085
- cls=lambda x,y,z: x,
1086
- headers=_headers,
1087
- params=_params,
1088
- **kwargs
1089
- )
1090
- kwargs.pop('error_map', None)
1091
-
1092
- def get_long_running_output(pipeline_response):
1093
- deserialized = self._deserialize('HealthcareJobState', pipeline_response)
1094
- if cls:
1095
- return cls(pipeline_response, deserialized, {})
1096
- return deserialized
1097
-
1098
-
1099
- path_format_arguments = {
1100
- "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
1101
- }
1102
-
1103
- if polling is True:
1104
- polling_method = cast(PollingMethod, AnalyzeHealthcareEntitiesLROPollingMethod(
1105
- lro_delay,
1106
-
1107
- path_format_arguments=path_format_arguments,
1108
- **kwargs
1109
- )) # type: PollingMethod
1110
- elif polling is False: polling_method = cast(PollingMethod, NoPolling())
1111
- else: polling_method = polling
1112
- if cont_token:
1113
- return AnalyzeHealthcareEntitiesLROPoller.from_continuation_token(
1114
- polling_method=polling_method,
1115
- continuation_token=cont_token,
1116
- client=self._client,
1117
- deserialization_callback=get_long_running_output
1118
- )
1119
- return AnalyzeHealthcareEntitiesLROPoller(self._client, raw_result, get_long_running_output, polling_method)
1120
-
1121
- begin_health.metadata = {'url': "/entities/health/jobs"} # type: ignore
1122
-
1123
-    @distributed_trace
-    def entities_recognition_general(
-        self,
-        documents: List[_models.MultiLanguageInput],
-        model_version: Optional[str] = None,
-        show_stats: Optional[bool] = None,
-        logging_opt_out: Optional[bool] = None,
-        string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
-        **kwargs: Any
-    ) -> _models.EntitiesResult:
-        """Named Entity Recognition.
-
-        The API returns a list of general named entities in a given document. For the list of supported
-        entity types, check :code:`<a href="https://aka.ms/taner">Supported Entity Types in Text
-        Analytics API</a>`. See the :code:`<a href="https://aka.ms/talangs">Supported languages in Text
-        Analytics API</a>` for the list of enabled languages.
-
-        :param documents: The set of documents to process as part of this batch. Required.
-        :type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
-        :param model_version: (Optional) This value indicates which model will be used for scoring. If
-         a model-version is not specified, the API should default to the latest, non-preview version.
-         Default value is None.
-        :type model_version: str
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :param logging_opt_out: (Optional) If set to true, you opt out of having your text input logged
-         for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
-         allow for troubleshooting issues in providing you with the Text Analytics natural language
-         processing functions. Setting this parameter to true disables input logging and may limit our
-         ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
-         notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
-         principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
-        :type logging_opt_out: bool
-        :param string_index_type: (Optional) Specifies the method used to interpret string offsets.
-         Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
-         see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
-         "UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
-        :type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: EntitiesResult or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v3_1.models.EntitiesResult
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-        _params = kwargs.pop("params", {}) or {}
-
-        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
-        cls = kwargs.pop('cls', None) # type: ClsType[_models.EntitiesResult]
-
-        _input = _models.MultiLanguageBatchInput(documents=documents)
-        _json = self._serialize.body(_input, 'MultiLanguageBatchInput')
-
-        request = build_entities_recognition_general_request(
-            model_version=model_version,
-            show_stats=show_stats,
-            logging_opt_out=logging_opt_out,
-            string_index_type=string_index_type,
-            content_type=content_type,
-            json=_json,
-            template_url=self.entities_recognition_general.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
-        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        deserialized = self._deserialize('EntitiesResult', pipeline_response)
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})
-
-        return deserialized
-
-    entities_recognition_general.metadata = {'url': "/entities/recognition/general"} # type: ignore
-
-
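As context for the removed `entities_recognition_general` operation: in the 5.x library this endpoint was normally reached through the public `recognize_entities` convenience method. The sketch below is hypothetical; the public method name and client setup come from general knowledge of the 5.x SDK rather than from this diff, and the endpoint and key are placeholders.

```python
# Hypothetical 5.x usage sketch for /entities/recognition/general (not part of this diff).
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

client = TextAnalyticsClient(
    "https://<resource-name>.cognitiveservices.azure.com/",  # placeholder endpoint
    AzureKeyCredential("<api-key>"),                         # placeholder key
)

results = client.recognize_entities(
    ["Microsoft was founded by Bill Gates and Paul Allen."],
    model_version="latest",                 # maps to the model_version query parameter above
    string_index_type="UnicodeCodePoint",   # maps to string_index_type above
)
for doc in results:
    if not doc.is_error:
        for entity in doc.entities:
            print(entity.text, entity.category, entity.confidence_score)
```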
-    @distributed_trace
-    def entities_recognition_pii(
-        self,
-        documents: List[_models.MultiLanguageInput],
-        model_version: Optional[str] = None,
-        show_stats: Optional[bool] = None,
-        logging_opt_out: Optional[bool] = None,
-        domain: Optional[str] = None,
-        string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
-        pii_categories: Optional[List[Union[str, "_models.PiiCategory"]]] = None,
-        **kwargs: Any
-    ) -> _models.PiiResult:
-        """Entities containing personal information.
-
-        The API returns a list of entities with personal information (\"SSN\", \"Bank Account\", etc.) in
-        the document. For the list of supported entity types, check :code:`<a
-        href="https://aka.ms/tanerpii">Supported Entity Types in Text Analytics API</a>`. See the
-        :code:`<a href="https://aka.ms/talangs">Supported languages in Text Analytics API</a>` for the
-        list of enabled languages.
-
-        :param documents: The set of documents to process as part of this batch. Required.
-        :type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
-        :param model_version: (Optional) This value indicates which model will be used for scoring. If
-         a model-version is not specified, the API should default to the latest, non-preview version.
-         Default value is None.
-        :type model_version: str
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :param logging_opt_out: (Optional) If set to true, you opt out of having your text input logged
-         for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
-         allow for troubleshooting issues in providing you with the Text Analytics natural language
-         processing functions. Setting this parameter to true disables input logging and may limit our
-         ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
-         notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
-         principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
-        :type logging_opt_out: bool
-        :param domain: (Optional) if specified, will set the PII domain to include only a subset of the
-         entity categories. Possible values include: 'PHI', 'none'. Default value is None.
-        :type domain: str
-        :param string_index_type: (Optional) Specifies the method used to interpret string offsets.
-         Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
-         see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
-         "UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
-        :type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
-        :param pii_categories: (Optional) describes the PII categories to return. Default value is
-         None.
-        :type pii_categories: list[str or ~azure.ai.textanalytics.v3_1.models.PiiCategory]
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: PiiResult or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v3_1.models.PiiResult
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-        _params = kwargs.pop("params", {}) or {}
-
-        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
-        cls = kwargs.pop('cls', None) # type: ClsType[_models.PiiResult]
-
-        _input = _models.MultiLanguageBatchInput(documents=documents)
-        _json = self._serialize.body(_input, 'MultiLanguageBatchInput')
-
-        request = build_entities_recognition_pii_request(
-            model_version=model_version,
-            show_stats=show_stats,
-            logging_opt_out=logging_opt_out,
-            domain=domain,
-            string_index_type=string_index_type,
-            pii_categories=pii_categories,
-            content_type=content_type,
-            json=_json,
-            template_url=self.entities_recognition_pii.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
-        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        deserialized = self._deserialize('PiiResult', pipeline_response)
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})
-
-        return deserialized
-
-    entities_recognition_pii.metadata = {'url': "/entities/recognition/pii"} # type: ignore
-
-
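For the removed `entities_recognition_pii` operation, the equivalent 5.x public surface was the `recognize_pii_entities` convenience method. This is a hypothetical sketch based on general knowledge of the 5.x SDK, not on this diff; endpoint and key are placeholders.

```python
# Hypothetical 5.x usage sketch for /entities/recognition/pii (not part of this diff).
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

client = TextAnalyticsClient(
    "https://<resource-name>.cognitiveservices.azure.com/",  # placeholder endpoint
    AzureKeyCredential("<api-key>"),                         # placeholder key
)

# The domain and pii_categories parameters above surfaced in 5.x as the
# domain_filter and categories_filter keyword arguments.
results = client.recognize_pii_entities(
    ["My SSN is 859-98-0987 and my phone number is 555-555-5555."]
)
for doc in results:
    if not doc.is_error:
        print(doc.redacted_text)
        for entity in doc.entities:
            print(entity.text, entity.category)
```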
-    @distributed_trace
-    def entities_linking(
-        self,
-        documents: List[_models.MultiLanguageInput],
-        model_version: Optional[str] = None,
-        show_stats: Optional[bool] = None,
-        logging_opt_out: Optional[bool] = None,
-        string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
-        **kwargs: Any
-    ) -> _models.EntityLinkingResult:
-        """Linked entities from a well-known knowledge base.
-
-        The API returns a list of recognized entities with links to a well-known knowledge base. See
-        the :code:`<a href="https://aka.ms/talangs">Supported languages in Text Analytics API</a>` for
-        the list of enabled languages.
-
-        :param documents: The set of documents to process as part of this batch. Required.
-        :type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
-        :param model_version: (Optional) This value indicates which model will be used for scoring. If
-         a model-version is not specified, the API should default to the latest, non-preview version.
-         Default value is None.
-        :type model_version: str
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :param logging_opt_out: (Optional) If set to true, you opt out of having your text input logged
-         for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
-         allow for troubleshooting issues in providing you with the Text Analytics natural language
-         processing functions. Setting this parameter to true disables input logging and may limit our
-         ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
-         notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
-         principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
-        :type logging_opt_out: bool
-        :param string_index_type: (Optional) Specifies the method used to interpret string offsets.
-         Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
-         see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
-         "UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
-        :type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: EntityLinkingResult or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v3_1.models.EntityLinkingResult
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-        _params = kwargs.pop("params", {}) or {}
-
-        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
-        cls = kwargs.pop('cls', None) # type: ClsType[_models.EntityLinkingResult]
-
-        _input = _models.MultiLanguageBatchInput(documents=documents)
-        _json = self._serialize.body(_input, 'MultiLanguageBatchInput')
-
-        request = build_entities_linking_request(
-            model_version=model_version,
-            show_stats=show_stats,
-            logging_opt_out=logging_opt_out,
-            string_index_type=string_index_type,
-            content_type=content_type,
-            json=_json,
-            template_url=self.entities_linking.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
-        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        deserialized = self._deserialize('EntityLinkingResult', pipeline_response)
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})
-
-        return deserialized
-
-    entities_linking.metadata = {'url': "/entities/linking"} # type: ignore
-
-
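For the removed `entities_linking` operation, the corresponding 5.x public method was `recognize_linked_entities`. A hypothetical sketch follows, based on general knowledge of the 5.x SDK rather than on this diff; endpoint and key are placeholders.

```python
# Hypothetical 5.x usage sketch for /entities/linking (not part of this diff).
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

client = TextAnalyticsClient(
    "https://<resource-name>.cognitiveservices.azure.com/",  # placeholder endpoint
    AzureKeyCredential("<api-key>"),                         # placeholder key
)

results = client.recognize_linked_entities(
    ["Old Faithful is a geyser at Yellowstone Park."]
)
for doc in results:
    if not doc.is_error:
        for entity in doc.entities:
            print(entity.name, entity.data_source, entity.url)
```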
-    @distributed_trace
-    def key_phrases(
-        self,
-        documents: List[_models.MultiLanguageInput],
-        model_version: Optional[str] = None,
-        show_stats: Optional[bool] = None,
-        logging_opt_out: Optional[bool] = None,
-        **kwargs: Any
-    ) -> _models.KeyPhraseResult:
-        """Key Phrases.
-
-        The API returns a list of strings denoting the key phrases in the input text. See the :code:`<a
-        href="https://aka.ms/talangs">Supported languages in Text Analytics API</a>` for the list of
-        enabled languages.
-
-        :param documents: The set of documents to process as part of this batch. Required.
-        :type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
-        :param model_version: (Optional) This value indicates which model will be used for scoring. If
-         a model-version is not specified, the API should default to the latest, non-preview version.
-         Default value is None.
-        :type model_version: str
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :param logging_opt_out: (Optional) If set to true, you opt out of having your text input logged
-         for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
-         allow for troubleshooting issues in providing you with the Text Analytics natural language
-         processing functions. Setting this parameter to true disables input logging and may limit our
-         ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
-         notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
-         principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
-        :type logging_opt_out: bool
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: KeyPhraseResult or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v3_1.models.KeyPhraseResult
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-        _params = kwargs.pop("params", {}) or {}
-
-        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
-        cls = kwargs.pop('cls', None) # type: ClsType[_models.KeyPhraseResult]
-
-        _input = _models.MultiLanguageBatchInput(documents=documents)
-        _json = self._serialize.body(_input, 'MultiLanguageBatchInput')
-
-        request = build_key_phrases_request(
-            model_version=model_version,
-            show_stats=show_stats,
-            logging_opt_out=logging_opt_out,
-            content_type=content_type,
-            json=_json,
-            template_url=self.key_phrases.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
-        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        deserialized = self._deserialize('KeyPhraseResult', pipeline_response)
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})
-
-        return deserialized
-
-    key_phrases.metadata = {'url': "/keyPhrases"} # type: ignore
-
-
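For the removed `key_phrases` operation, the 5.x public equivalent was `extract_key_phrases`. A hypothetical sketch, based on general knowledge of the 5.x SDK and not on this diff, with placeholder endpoint and key:

```python
# Hypothetical 5.x usage sketch for /keyPhrases (not part of this diff).
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

client = TextAnalyticsClient(
    "https://<resource-name>.cognitiveservices.azure.com/",  # placeholder endpoint
    AzureKeyCredential("<api-key>"),                         # placeholder key
)

results = client.extract_key_phrases(
    ["The food was delicious and the staff were wonderful."]
)
for doc in results:
    if not doc.is_error:
        print(doc.key_phrases)
```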
-    @distributed_trace
-    def languages(
-        self,
-        documents: List[_models.LanguageInput],
-        model_version: Optional[str] = None,
-        show_stats: Optional[bool] = None,
-        logging_opt_out: Optional[bool] = None,
-        **kwargs: Any
-    ) -> _models.LanguageResult:
-        """Detect Language.
-
-        The API returns the detected language and a numeric score between 0 and 1. Scores close to 1
-        indicate 100% certainty that the identified language is true. See the :code:`<a
-        href="https://aka.ms/talangs">Supported languages in Text Analytics API</a>` for the list of
-        enabled languages.
-
-        :param documents: Required.
-        :type documents: list[~azure.ai.textanalytics.v3_1.models.LanguageInput]
-        :param model_version: (Optional) This value indicates which model will be used for scoring. If
-         a model-version is not specified, the API should default to the latest, non-preview version.
-         Default value is None.
-        :type model_version: str
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :param logging_opt_out: (Optional) If set to true, you opt out of having your text input logged
-         for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
-         allow for troubleshooting issues in providing you with the Text Analytics natural language
-         processing functions. Setting this parameter to true disables input logging and may limit our
-         ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
-         notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
-         principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
-        :type logging_opt_out: bool
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: LanguageResult or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v3_1.models.LanguageResult
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-        _params = kwargs.pop("params", {}) or {}
-
-        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
-        cls = kwargs.pop('cls', None) # type: ClsType[_models.LanguageResult]
-
-        _input = _models.LanguageBatchInput(documents=documents)
-        _json = self._serialize.body(_input, 'LanguageBatchInput')
-
-        request = build_languages_request(
-            model_version=model_version,
-            show_stats=show_stats,
-            logging_opt_out=logging_opt_out,
-            content_type=content_type,
-            json=_json,
-            template_url=self.languages.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
-        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        deserialized = self._deserialize('LanguageResult', pipeline_response)
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})
-
-        return deserialized
-
-    languages.metadata = {'url': "/languages"} # type: ignore
-
-
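For the removed `languages` operation, the 5.x public equivalent was `detect_language`. A hypothetical sketch, based on general knowledge of the 5.x SDK and not on this diff, with placeholder endpoint and key:

```python
# Hypothetical 5.x usage sketch for /languages (not part of this diff).
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

client = TextAnalyticsClient(
    "https://<resource-name>.cognitiveservices.azure.com/",  # placeholder endpoint
    AzureKeyCredential("<api-key>"),                         # placeholder key
)

results = client.detect_language(
    ["Ce document est rédigé en français."],
    country_hint="FR",   # detect_language takes a country hint instead of a language code
)
for doc in results:
    if not doc.is_error:
        print(doc.primary_language.name, doc.primary_language.confidence_score)
```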
-    @distributed_trace
-    def sentiment(
-        self,
-        documents: List[_models.MultiLanguageInput],
-        model_version: Optional[str] = None,
-        show_stats: Optional[bool] = None,
-        logging_opt_out: Optional[bool] = None,
-        opinion_mining: Optional[bool] = None,
-        string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
-        **kwargs: Any
-    ) -> _models.SentimentResponse:
-        """Sentiment.
-
-        The API returns a detailed sentiment analysis for the input text. The analysis is done at
-        multiple levels of granularity, starting from the document level, down to sentence and key terms
-        (targets and assessments).
-
-        :param documents: The set of documents to process as part of this batch. Required.
-        :type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
-        :param model_version: (Optional) This value indicates which model will be used for scoring. If
-         a model-version is not specified, the API should default to the latest, non-preview version.
-         Default value is None.
-        :type model_version: str
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :param logging_opt_out: (Optional) If set to true, you opt out of having your text input logged
-         for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
-         allow for troubleshooting issues in providing you with the Text Analytics natural language
-         processing functions. Setting this parameter to true disables input logging and may limit our
-         ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
-         notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
-         principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
-        :type logging_opt_out: bool
-        :param opinion_mining: (Optional) if set to true, response will contain not only sentiment
-         prediction but also opinion mining (aspect-based sentiment analysis) results. Default value is
-         None.
-        :type opinion_mining: bool
-        :param string_index_type: (Optional) Specifies the method used to interpret string offsets.
-         Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
-         see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
-         "UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
-        :type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: SentimentResponse or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v3_1.models.SentimentResponse
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-        _params = kwargs.pop("params", {}) or {}
-
-        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
-        cls = kwargs.pop('cls', None) # type: ClsType[_models.SentimentResponse]
-
-        _input = _models.MultiLanguageBatchInput(documents=documents)
-        _json = self._serialize.body(_input, 'MultiLanguageBatchInput')
-
-        request = build_sentiment_request(
-            model_version=model_version,
-            show_stats=show_stats,
-            logging_opt_out=logging_opt_out,
-            opinion_mining=opinion_mining,
-            string_index_type=string_index_type,
-            content_type=content_type,
-            json=_json,
-            template_url=self.sentiment.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
-
-        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        deserialized = self._deserialize('SentimentResponse', pipeline_response)
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})
-
-        return deserialized
-
-    sentiment.metadata = {'url': "/sentiment"} # type: ignore
-
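For the removed `sentiment` operation, the 5.x public equivalent was `analyze_sentiment`. A hypothetical sketch, based on general knowledge of the 5.x SDK and not on this diff, with placeholder endpoint and key:

```python
# Hypothetical 5.x usage sketch for /sentiment (not part of this diff).
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

client = TextAnalyticsClient(
    "https://<resource-name>.cognitiveservices.azure.com/",  # placeholder endpoint
    AzureKeyCredential("<api-key>"),                         # placeholder key
)

results = client.analyze_sentiment(
    ["The rooms were beautiful but the staff was unfriendly."],
    show_opinion_mining=True,   # corresponds to the opinion_mining query parameter above
)
for doc in results:
    if not doc.is_error:
        print(doc.sentiment, doc.confidence_scores)
        for sentence in doc.sentences:
            print(sentence.text, sentence.sentiment)
```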