azure-ai-textanalytics 5.3.0b2__py3-none-any.whl → 6.0.0b1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Note: this version of azure-ai-textanalytics has been flagged as potentially problematic.
- azure/ai/textanalytics/__init__.py +26 -193
- azure/ai/textanalytics/_client.py +111 -0
- azure/ai/textanalytics/_configuration.py +73 -0
- azure/ai/textanalytics/{_generated/v2022_05_01/operations → _operations}/__init__.py +13 -8
- azure/ai/textanalytics/_operations/_operations.py +716 -0
- azure/ai/textanalytics/{_generated/v2022_05_01/models → _operations}/_patch.py +8 -6
- azure/ai/textanalytics/_patch.py +350 -0
- azure/ai/textanalytics/{_generated/aio → _utils}/__init__.py +1 -5
- azure/ai/textanalytics/_utils/model_base.py +1237 -0
- azure/ai/textanalytics/{_generated/_serialization.py → _utils/serialization.py} +640 -616
- azure/ai/textanalytics/{_generated/v2022_05_01/aio/_vendor.py → _utils/utils.py} +10 -12
- azure/ai/textanalytics/_version.py +8 -7
- azure/ai/textanalytics/aio/__init__.py +25 -14
- azure/ai/textanalytics/aio/_client.py +115 -0
- azure/ai/textanalytics/aio/_configuration.py +75 -0
- azure/ai/textanalytics/{_generated/v2022_10_01_preview/aio/operations → aio/_operations}/__init__.py +13 -8
- azure/ai/textanalytics/aio/_operations/_operations.py +623 -0
- azure/ai/textanalytics/{_generated/v2022_05_01 → aio/_operations}/_patch.py +8 -6
- azure/ai/textanalytics/aio/_patch.py +344 -0
- azure/ai/textanalytics/models/__init__.py +402 -0
- azure/ai/textanalytics/models/_enums.py +1979 -0
- azure/ai/textanalytics/models/_models.py +6641 -0
- azure/ai/textanalytics/{_generated/v2022_05_01/aio → models}/_patch.py +8 -6
- azure/ai/textanalytics/py.typed +1 -0
- {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/METADATA +668 -403
- azure_ai_textanalytics-6.0.0b1.dist-info/RECORD +29 -0
- {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/WHEEL +1 -1
- azure/ai/textanalytics/_base_client.py +0 -113
- azure/ai/textanalytics/_check.py +0 -22
- azure/ai/textanalytics/_dict_mixin.py +0 -57
- azure/ai/textanalytics/_generated/__init__.py +0 -16
- azure/ai/textanalytics/_generated/_configuration.py +0 -70
- azure/ai/textanalytics/_generated/_operations_mixin.py +0 -795
- azure/ai/textanalytics/_generated/_text_analytics_client.py +0 -126
- azure/ai/textanalytics/_generated/_version.py +0 -8
- azure/ai/textanalytics/_generated/aio/_configuration.py +0 -66
- azure/ai/textanalytics/_generated/aio/_operations_mixin.py +0 -776
- azure/ai/textanalytics/_generated/aio/_text_analytics_client.py +0 -124
- azure/ai/textanalytics/_generated/models.py +0 -8
- azure/ai/textanalytics/_generated/v2022_05_01/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_05_01/_configuration.py +0 -72
- azure/ai/textanalytics/_generated/v2022_05_01/_text_analytics_client.py +0 -100
- azure/ai/textanalytics/_generated/v2022_05_01/_vendor.py +0 -45
- azure/ai/textanalytics/_generated/v2022_05_01/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_05_01/aio/_configuration.py +0 -71
- azure/ai/textanalytics/_generated/v2022_05_01/aio/_text_analytics_client.py +0 -97
- azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_patch.py +0 -121
- azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_text_analytics_client_operations.py +0 -603
- azure/ai/textanalytics/_generated/v2022_05_01/models/__init__.py +0 -281
- azure/ai/textanalytics/_generated/v2022_05_01/models/_models_py3.py +0 -5722
- azure/ai/textanalytics/_generated/v2022_05_01/models/_text_analytics_client_enums.py +0 -439
- azure/ai/textanalytics/_generated/v2022_05_01/operations/_patch.py +0 -120
- azure/ai/textanalytics/_generated/v2022_05_01/operations/_text_analytics_client_operations.py +0 -744
- azure/ai/textanalytics/_generated/v2022_10_01_preview/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_configuration.py +0 -72
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_text_analytics_client.py +0 -100
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_vendor.py +0 -45
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_configuration.py +0 -71
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_text_analytics_client.py +0 -97
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_vendor.py +0 -27
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_patch.py +0 -121
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_text_analytics_client_operations.py +0 -603
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/__init__.py +0 -405
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_models_py3.py +0 -8420
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_patch.py +0 -486
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_text_analytics_client_enums.py +0 -729
- azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_patch.py +0 -120
- azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_text_analytics_client_operations.py +0 -744
- azure/ai/textanalytics/_generated/v3_0/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_0/_configuration.py +0 -66
- azure/ai/textanalytics/_generated/v3_0/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_0/_text_analytics_client.py +0 -96
- azure/ai/textanalytics/_generated/v3_0/_vendor.py +0 -33
- azure/ai/textanalytics/_generated/v3_0/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_0/aio/_configuration.py +0 -65
- azure/ai/textanalytics/_generated/v3_0/aio/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_0/aio/_text_analytics_client.py +0 -93
- azure/ai/textanalytics/_generated/v3_0/aio/_vendor.py +0 -27
- azure/ai/textanalytics/_generated/v3_0/aio/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_0/aio/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_0/aio/operations/_text_analytics_client_operations.py +0 -428
- azure/ai/textanalytics/_generated/v3_0/models/__init__.py +0 -81
- azure/ai/textanalytics/_generated/v3_0/models/_models_py3.py +0 -1467
- azure/ai/textanalytics/_generated/v3_0/models/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_0/models/_text_analytics_client_enums.py +0 -58
- azure/ai/textanalytics/_generated/v3_0/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_0/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_0/operations/_text_analytics_client_operations.py +0 -604
- azure/ai/textanalytics/_generated/v3_1/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_1/_configuration.py +0 -66
- azure/ai/textanalytics/_generated/v3_1/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_1/_text_analytics_client.py +0 -98
- azure/ai/textanalytics/_generated/v3_1/_vendor.py +0 -45
- azure/ai/textanalytics/_generated/v3_1/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_1/aio/_configuration.py +0 -65
- azure/ai/textanalytics/_generated/v3_1/aio/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_1/aio/_text_analytics_client.py +0 -95
- azure/ai/textanalytics/_generated/v3_1/aio/_vendor.py +0 -27
- azure/ai/textanalytics/_generated/v3_1/aio/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_1/aio/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_1/aio/operations/_text_analytics_client_operations.py +0 -1291
- azure/ai/textanalytics/_generated/v3_1/models/__init__.py +0 -205
- azure/ai/textanalytics/_generated/v3_1/models/_models_py3.py +0 -3976
- azure/ai/textanalytics/_generated/v3_1/models/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_1/models/_text_analytics_client_enums.py +0 -367
- azure/ai/textanalytics/_generated/v3_1/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_1/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_1/operations/_text_analytics_client_operations.py +0 -1709
- azure/ai/textanalytics/_lro.py +0 -553
- azure/ai/textanalytics/_models.py +0 -3158
- azure/ai/textanalytics/_policies.py +0 -66
- azure/ai/textanalytics/_request_handlers.py +0 -104
- azure/ai/textanalytics/_response_handlers.py +0 -583
- azure/ai/textanalytics/_text_analytics_client.py +0 -2081
- azure/ai/textanalytics/_user_agent.py +0 -8
- azure/ai/textanalytics/_validate.py +0 -113
- azure/ai/textanalytics/aio/_base_client_async.py +0 -98
- azure/ai/textanalytics/aio/_lro_async.py +0 -503
- azure/ai/textanalytics/aio/_response_handlers_async.py +0 -94
- azure/ai/textanalytics/aio/_text_analytics_client_async.py +0 -2077
- azure_ai_textanalytics-5.3.0b2.dist-info/RECORD +0 -115
- {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info/licenses}/LICENSE +0 -0
- {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/top_level.txt +0 -0
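
The deleted modules below are the AutoRest-generated v3.1 async operations that sat underneath the 5.x public client (the hand-written _text_analytics_client.py, _lro.py and aio/ files removed above). For orientation while reading the deleted code, here is a minimal sketch of how that 5.3.0b2 async surface was typically called; the endpoint and key are placeholders for a real Language / Text Analytics resource, not values from this diff.

import asyncio

from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.aio import TextAnalyticsClient


async def main() -> None:
    # Placeholders: substitute a real resource endpoint and key.
    client = TextAnalyticsClient(
        endpoint="https://<your-resource>.cognitiveservices.azure.com/",
        credential=AzureKeyCredential("<your-key>"),
    )
    docs = ["Microsoft was founded by Bill Gates and Paul Allen."]

    async with client:
        # Named entity recognition via the public helper; the generated
        # operations layers removed in this release handled the HTTP call.
        for doc in await client.recognize_entities(docs):
            if not doc.is_error:
                for entity in doc.entities:
                    print(entity.text, entity.category)

        # Healthcare analysis as a long-running operation with a poller,
        # mirroring the begin_health / AsyncAnalyzeHealthcareEntitiesLROPoller
        # machinery in the deleted generated code.
        poller = await client.begin_analyze_healthcare_entities(docs)
        result = await poller.result()
        async for doc in result:
            if not doc.is_error:
                print([entity.text for entity in doc.entities])


asyncio.run(main())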
|
@@ -1,1291 +0,0 @@
|
|
|
1
|
-
# pylint: disable=too-many-lines
|
|
2
|
-
# coding=utf-8
|
|
3
|
-
# --------------------------------------------------------------------------
|
|
4
|
-
# Copyright (c) Microsoft Corporation. All rights reserved.
|
|
5
|
-
# Licensed under the MIT License. See License.txt in the project root for license information.
|
|
6
|
-
# Code generated by Microsoft (R) AutoRest Code Generator.
|
|
7
|
-
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
|
8
|
-
# --------------------------------------------------------------------------
|
|
9
|
-
from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload
|
|
10
|
-
|
|
11
|
-
from .....aio._lro_async import AsyncAnalyzeActionsLROPoller, AsyncAnalyzeActionsLROPollingMethod, AsyncAnalyzeHealthcareEntitiesLROPoller, AsyncAnalyzeHealthcareEntitiesLROPollingMethod
|
|
12
|
-
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
|
|
13
|
-
from azure.core.pipeline import PipelineResponse
|
|
14
|
-
from azure.core.pipeline.transport import AsyncHttpResponse
|
|
15
|
-
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
|
|
16
|
-
from azure.core.polling.async_base_polling import AsyncLROBasePolling
|
|
17
|
-
from azure.core.rest import HttpRequest
|
|
18
|
-
from azure.core.tracing.decorator_async import distributed_trace_async
|
|
19
|
-
from azure.core.utils import case_insensitive_dict
|
|
20
|
-
|
|
21
|
-
from ... import models as _models
|
|
22
|
-
from ..._vendor import _convert_request
|
|
23
|
-
from ...operations._text_analytics_client_operations import build_analyze_request, build_analyze_status_request, build_cancel_health_job_request, build_entities_linking_request, build_entities_recognition_general_request, build_entities_recognition_pii_request, build_health_request, build_health_status_request, build_key_phrases_request, build_languages_request, build_sentiment_request
|
|
24
|
-
from .._vendor import MixinABC
|
|
25
|
-
T = TypeVar('T')
|
|
26
|
-
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
|
|
27
|
-
|
|
28
|
-
class TextAnalyticsClientOperationsMixin(MixinABC):
|
|
29
|
-
|
|
30
|
-
async def _analyze_initial(
|
|
31
|
-
self,
|
|
32
|
-
body: Optional[Union[_models.AnalyzeBatchInput, IO]] = None,
|
|
33
|
-
**kwargs: Any
|
|
34
|
-
) -> Optional[_models.AnalyzeJobState]:
|
|
35
|
-
error_map = {
|
|
36
|
-
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
|
|
37
|
-
}
|
|
38
|
-
error_map.update(kwargs.pop('error_map', {}) or {})
|
|
39
|
-
|
|
40
|
-
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
|
|
41
|
-
_params = kwargs.pop("params", {}) or {}
|
|
42
|
-
|
|
43
|
-
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
|
|
44
|
-
cls = kwargs.pop('cls', None) # type: ClsType[Optional[_models.AnalyzeJobState]]
|
|
45
|
-
|
|
46
|
-
content_type = content_type or "application/json"
|
|
47
|
-
_json = None
|
|
48
|
-
_content = None
|
|
49
|
-
if isinstance(body, (IO, bytes)):
|
|
50
|
-
_content = body
|
|
51
|
-
else:
|
|
52
|
-
if body is not None:
|
|
53
|
-
_json = self._serialize.body(body, 'AnalyzeBatchInput')
|
|
54
|
-
else:
|
|
55
|
-
_json = None
|
|
56
|
-
|
|
57
|
-
request = build_analyze_request(
|
|
58
|
-
content_type=content_type,
|
|
59
|
-
json=_json,
|
|
60
|
-
content=_content,
|
|
61
|
-
template_url=self._analyze_initial.metadata['url'],
|
|
62
|
-
headers=_headers,
|
|
63
|
-
params=_params,
|
|
64
|
-
)
|
|
65
|
-
request = _convert_request(request)
|
|
66
|
-
path_format_arguments = {
|
|
67
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
68
|
-
}
|
|
69
|
-
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
|
|
70
|
-
|
|
71
|
-
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
|
|
72
|
-
request,
|
|
73
|
-
stream=False,
|
|
74
|
-
**kwargs
|
|
75
|
-
)
|
|
76
|
-
|
|
77
|
-
response = pipeline_response.http_response
|
|
78
|
-
|
|
79
|
-
if response.status_code not in [200, 202]:
|
|
80
|
-
map_error(status_code=response.status_code, response=response, error_map=error_map)
|
|
81
|
-
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
|
|
82
|
-
raise HttpResponseError(response=response, model=error)
|
|
83
|
-
|
|
84
|
-
deserialized = None
|
|
85
|
-
response_headers = {}
|
|
86
|
-
if response.status_code == 200:
|
|
87
|
-
deserialized = self._deserialize('AnalyzeJobState', pipeline_response)
|
|
88
|
-
|
|
89
|
-
if response.status_code == 202:
|
|
90
|
-
response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
if cls:
|
|
94
|
-
return cls(pipeline_response, deserialized, response_headers)
|
|
95
|
-
|
|
96
|
-
return deserialized
|
|
97
|
-
|
|
98
|
-
_analyze_initial.metadata = {'url': "/analyze"} # type: ignore
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
@overload
|
|
102
|
-
async def begin_analyze(
|
|
103
|
-
self,
|
|
104
|
-
body: Optional[_models.AnalyzeBatchInput] = None,
|
|
105
|
-
*,
|
|
106
|
-
content_type: str = "application/json",
|
|
107
|
-
**kwargs: Any
|
|
108
|
-
) -> AsyncAnalyzeActionsLROPoller[_models.AnalyzeJobState]:
|
|
109
|
-
"""Submit analysis job.
|
|
110
|
-
|
|
111
|
-
Submit a collection of text documents for analysis. Specify one or more unique tasks to be
|
|
112
|
-
executed.
|
|
113
|
-
|
|
114
|
-
:param body: Collection of documents to analyze and tasks to execute. Default value is None.
|
|
115
|
-
:type body: ~azure.ai.textanalytics.v3_1.models.AnalyzeBatchInput
|
|
116
|
-
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
|
|
117
|
-
Default value is "application/json".
|
|
118
|
-
:paramtype content_type: str
|
|
119
|
-
:keyword callable cls: A custom type or function that will be passed the direct response
|
|
120
|
-
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
|
|
121
|
-
:keyword polling: By default, your polling method will be AsyncAnalyzeActionsLROPollingMethod.
|
|
122
|
-
Pass in False for this operation to not poll, or pass in your own initialized polling object
|
|
123
|
-
for a personal polling strategy.
|
|
124
|
-
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
|
|
125
|
-
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
|
|
126
|
-
Retry-After header is present.
|
|
127
|
-
:return: An instance of AsyncAnalyzeActionsLROPoller that returns either AnalyzeJobState or the
|
|
128
|
-
result of cls(response)
|
|
129
|
-
:rtype:
|
|
130
|
-
~.....aio._lro_async.AsyncAnalyzeActionsLROPoller[~azure.ai.textanalytics.v3_1.models.AnalyzeJobState]
|
|
131
|
-
:raises ~azure.core.exceptions.HttpResponseError:
|
|
132
|
-
"""
|
|
133
|
-
|
|
134
|
-
@overload
|
|
135
|
-
async def begin_analyze(
|
|
136
|
-
self,
|
|
137
|
-
body: Optional[IO] = None,
|
|
138
|
-
*,
|
|
139
|
-
content_type: str = "application/json",
|
|
140
|
-
**kwargs: Any
|
|
141
|
-
) -> AsyncAnalyzeActionsLROPoller[_models.AnalyzeJobState]:
|
|
142
|
-
"""Submit analysis job.
|
|
143
|
-
|
|
144
|
-
Submit a collection of text documents for analysis. Specify one or more unique tasks to be
|
|
145
|
-
executed.
|
|
146
|
-
|
|
147
|
-
:param body: Collection of documents to analyze and tasks to execute. Default value is None.
|
|
148
|
-
:type body: IO
|
|
149
|
-
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
|
|
150
|
-
Known values are: 'application/json', 'text/json'. Default value is "application/json".
|
|
151
|
-
:paramtype content_type: str
|
|
152
|
-
:keyword callable cls: A custom type or function that will be passed the direct response
|
|
153
|
-
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
|
|
154
|
-
:keyword polling: By default, your polling method will be AsyncAnalyzeActionsLROPollingMethod.
|
|
155
|
-
Pass in False for this operation to not poll, or pass in your own initialized polling object
|
|
156
|
-
for a personal polling strategy.
|
|
157
|
-
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
|
|
158
|
-
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
|
|
159
|
-
Retry-After header is present.
|
|
160
|
-
:return: An instance of AsyncAnalyzeActionsLROPoller that returns either AnalyzeJobState or the
|
|
161
|
-
result of cls(response)
|
|
162
|
-
:rtype:
|
|
163
|
-
~.....aio._lro_async.AsyncAnalyzeActionsLROPoller[~azure.ai.textanalytics.v3_1.models.AnalyzeJobState]
|
|
164
|
-
:raises ~azure.core.exceptions.HttpResponseError:
|
|
165
|
-
"""
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
@distributed_trace_async
|
|
169
|
-
async def begin_analyze(
|
|
170
|
-
self,
|
|
171
|
-
body: Optional[Union[_models.AnalyzeBatchInput, IO]] = None,
|
|
172
|
-
**kwargs: Any
|
|
173
|
-
) -> AsyncAnalyzeActionsLROPoller[_models.AnalyzeJobState]:
|
|
174
|
-
"""Submit analysis job.
|
|
175
|
-
|
|
176
|
-
Submit a collection of text documents for analysis. Specify one or more unique tasks to be
|
|
177
|
-
executed.
|
|
178
|
-
|
|
179
|
-
:param body: Collection of documents to analyze and tasks to execute. Is either a model type or
|
|
180
|
-
a IO type. Default value is None.
|
|
181
|
-
:type body: ~azure.ai.textanalytics.v3_1.models.AnalyzeBatchInput or IO
|
|
182
|
-
:keyword content_type: Body Parameter content-type. Known values are: 'application/json',
|
|
183
|
-
'text/json'. Default value is None.
|
|
184
|
-
:paramtype content_type: str
|
|
185
|
-
:keyword callable cls: A custom type or function that will be passed the direct response
|
|
186
|
-
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
|
|
187
|
-
:keyword polling: By default, your polling method will be AsyncAnalyzeActionsLROPollingMethod.
|
|
188
|
-
Pass in False for this operation to not poll, or pass in your own initialized polling object
|
|
189
|
-
for a personal polling strategy.
|
|
190
|
-
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
|
|
191
|
-
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
|
|
192
|
-
Retry-After header is present.
|
|
193
|
-
:return: An instance of AsyncAnalyzeActionsLROPoller that returns either AnalyzeJobState or the
|
|
194
|
-
result of cls(response)
|
|
195
|
-
:rtype:
|
|
196
|
-
~.....aio._lro_async.AsyncAnalyzeActionsLROPoller[~azure.ai.textanalytics.v3_1.models.AnalyzeJobState]
|
|
197
|
-
:raises ~azure.core.exceptions.HttpResponseError:
|
|
198
|
-
"""
|
|
199
|
-
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
|
|
200
|
-
_params = kwargs.pop("params", {}) or {}
|
|
201
|
-
|
|
202
|
-
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
|
|
203
|
-
cls = kwargs.pop('cls', None) # type: ClsType[_models.AnalyzeJobState]
|
|
204
|
-
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
|
|
205
|
-
lro_delay = kwargs.pop(
|
|
206
|
-
'polling_interval',
|
|
207
|
-
self._config.polling_interval
|
|
208
|
-
)
|
|
209
|
-
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
|
|
210
|
-
if cont_token is None:
|
|
211
|
-
raw_result = await self._analyze_initial( # type: ignore
|
|
212
|
-
body=body,
|
|
213
|
-
content_type=content_type,
|
|
214
|
-
cls=lambda x,y,z: x,
|
|
215
|
-
headers=_headers,
|
|
216
|
-
params=_params,
|
|
217
|
-
**kwargs
|
|
218
|
-
)
|
|
219
|
-
kwargs.pop('error_map', None)
|
|
220
|
-
|
|
221
|
-
def get_long_running_output(pipeline_response):
|
|
222
|
-
deserialized = self._deserialize('AnalyzeJobState', pipeline_response)
|
|
223
|
-
if cls:
|
|
224
|
-
return cls(pipeline_response, deserialized, {})
|
|
225
|
-
return deserialized
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
path_format_arguments = {
|
|
229
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
230
|
-
}
|
|
231
|
-
|
|
232
|
-
if polling is True:
|
|
233
|
-
polling_method = cast(AsyncPollingMethod, AsyncAnalyzeActionsLROPollingMethod(
|
|
234
|
-
lro_delay,
|
|
235
|
-
|
|
236
|
-
path_format_arguments=path_format_arguments,
|
|
237
|
-
**kwargs
|
|
238
|
-
)) # type: AsyncPollingMethod
|
|
239
|
-
elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
|
|
240
|
-
else: polling_method = polling
|
|
241
|
-
if cont_token:
|
|
242
|
-
return AsyncAnalyzeActionsLROPoller.from_continuation_token(
|
|
243
|
-
polling_method=polling_method,
|
|
244
|
-
continuation_token=cont_token,
|
|
245
|
-
client=self._client,
|
|
246
|
-
deserialization_callback=get_long_running_output
|
|
247
|
-
)
|
|
248
|
-
return AsyncAnalyzeActionsLROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
|
249
|
-
|
|
250
|
-
begin_analyze.metadata = {'url': "/analyze"} # type: ignore
|
|
251
|
-
|
|
252
|
-
@distributed_trace_async
|
|
253
|
-
async def analyze_status(
|
|
254
|
-
self,
|
|
255
|
-
job_id: str,
|
|
256
|
-
show_stats: Optional[bool] = None,
|
|
257
|
-
top: int = 20,
|
|
258
|
-
skip: int = 0,
|
|
259
|
-
**kwargs: Any
|
|
260
|
-
) -> _models.AnalyzeJobState:
|
|
261
|
-
"""Get analysis status and results.
|
|
262
|
-
|
|
263
|
-
Get the status of an analysis job. A job may consist of one or more tasks. Once all tasks are
|
|
264
|
-
completed, the job will transition to the completed state and results will be available for
|
|
265
|
-
each task.
|
|
266
|
-
|
|
267
|
-
:param job_id: Job ID for Analyze. Required.
|
|
268
|
-
:type job_id: str
|
|
269
|
-
:param show_stats: (Optional) if set to true, response will contain request and document level
|
|
270
|
-
statistics. Default value is None.
|
|
271
|
-
:type show_stats: bool
|
|
272
|
-
:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are
|
|
273
|
-
specified, $skip is applied first. Default value is 20.
|
|
274
|
-
:type top: int
|
|
275
|
-
:param skip: (Optional) Set the number of elements to offset in the response. When both $top
|
|
276
|
-
and $skip are specified, $skip is applied first. Default value is 0.
|
|
277
|
-
:type skip: int
|
|
278
|
-
:keyword callable cls: A custom type or function that will be passed the direct response
|
|
279
|
-
:return: AnalyzeJobState or the result of cls(response)
|
|
280
|
-
:rtype: ~azure.ai.textanalytics.v3_1.models.AnalyzeJobState
|
|
281
|
-
:raises ~azure.core.exceptions.HttpResponseError:
|
|
282
|
-
"""
|
|
283
|
-
error_map = {
|
|
284
|
-
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
|
|
285
|
-
}
|
|
286
|
-
error_map.update(kwargs.pop('error_map', {}) or {})
|
|
287
|
-
|
|
288
|
-
_headers = kwargs.pop("headers", {}) or {}
|
|
289
|
-
_params = kwargs.pop("params", {}) or {}
|
|
290
|
-
|
|
291
|
-
cls = kwargs.pop('cls', None) # type: ClsType[_models.AnalyzeJobState]
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
request = build_analyze_status_request(
|
|
295
|
-
job_id=job_id,
|
|
296
|
-
show_stats=show_stats,
|
|
297
|
-
top=top,
|
|
298
|
-
skip=skip,
|
|
299
|
-
template_url=self.analyze_status.metadata['url'],
|
|
300
|
-
headers=_headers,
|
|
301
|
-
params=_params,
|
|
302
|
-
)
|
|
303
|
-
request = _convert_request(request)
|
|
304
|
-
path_format_arguments = {
|
|
305
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
306
|
-
}
|
|
307
|
-
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
|
|
308
|
-
|
|
309
|
-
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
|
|
310
|
-
request,
|
|
311
|
-
stream=False,
|
|
312
|
-
**kwargs
|
|
313
|
-
)
|
|
314
|
-
|
|
315
|
-
response = pipeline_response.http_response
|
|
316
|
-
|
|
317
|
-
if response.status_code not in [200]:
|
|
318
|
-
map_error(status_code=response.status_code, response=response, error_map=error_map)
|
|
319
|
-
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
|
|
320
|
-
raise HttpResponseError(response=response, model=error)
|
|
321
|
-
|
|
322
|
-
deserialized = self._deserialize('AnalyzeJobState', pipeline_response)
|
|
323
|
-
|
|
324
|
-
if cls:
|
|
325
|
-
return cls(pipeline_response, deserialized, {})
|
|
326
|
-
|
|
327
|
-
return deserialized
|
|
328
|
-
|
|
329
|
-
analyze_status.metadata = {'url': "/analyze/jobs/{jobId}"} # type: ignore
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
@distributed_trace_async
|
|
333
|
-
async def health_status(
|
|
334
|
-
self,
|
|
335
|
-
job_id: str,
|
|
336
|
-
top: int = 20,
|
|
337
|
-
skip: int = 0,
|
|
338
|
-
show_stats: Optional[bool] = None,
|
|
339
|
-
**kwargs: Any
|
|
340
|
-
) -> _models.HealthcareJobState:
|
|
341
|
-
"""Get healthcare analysis job status and results.
|
|
342
|
-
|
|
343
|
-
Get details of the healthcare prediction job specified by the jobId.
|
|
344
|
-
|
|
345
|
-
:param job_id: Job ID. Required.
|
|
346
|
-
:type job_id: str
|
|
347
|
-
:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are
|
|
348
|
-
specified, $skip is applied first. Default value is 20.
|
|
349
|
-
:type top: int
|
|
350
|
-
:param skip: (Optional) Set the number of elements to offset in the response. When both $top
|
|
351
|
-
and $skip are specified, $skip is applied first. Default value is 0.
|
|
352
|
-
:type skip: int
|
|
353
|
-
:param show_stats: (Optional) if set to true, response will contain request and document level
|
|
354
|
-
statistics. Default value is None.
|
|
355
|
-
:type show_stats: bool
|
|
356
|
-
:keyword callable cls: A custom type or function that will be passed the direct response
|
|
357
|
-
:return: HealthcareJobState or the result of cls(response)
|
|
358
|
-
:rtype: ~azure.ai.textanalytics.v3_1.models.HealthcareJobState
|
|
359
|
-
:raises ~azure.core.exceptions.HttpResponseError:
|
|
360
|
-
"""
|
|
361
|
-
error_map = {
|
|
362
|
-
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
|
|
363
|
-
}
|
|
364
|
-
error_map.update(kwargs.pop('error_map', {}) or {})
|
|
365
|
-
|
|
366
|
-
_headers = kwargs.pop("headers", {}) or {}
|
|
367
|
-
_params = kwargs.pop("params", {}) or {}
|
|
368
|
-
|
|
369
|
-
cls = kwargs.pop('cls', None) # type: ClsType[_models.HealthcareJobState]
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
request = build_health_status_request(
|
|
373
|
-
job_id=job_id,
|
|
374
|
-
top=top,
|
|
375
|
-
skip=skip,
|
|
376
|
-
show_stats=show_stats,
|
|
377
|
-
template_url=self.health_status.metadata['url'],
|
|
378
|
-
headers=_headers,
|
|
379
|
-
params=_params,
|
|
380
|
-
)
|
|
381
|
-
request = _convert_request(request)
|
|
382
|
-
path_format_arguments = {
|
|
383
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
384
|
-
}
|
|
385
|
-
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
|
|
386
|
-
|
|
387
|
-
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
|
|
388
|
-
request,
|
|
389
|
-
stream=False,
|
|
390
|
-
**kwargs
|
|
391
|
-
)
|
|
392
|
-
|
|
393
|
-
response = pipeline_response.http_response
|
|
394
|
-
|
|
395
|
-
if response.status_code not in [200]:
|
|
396
|
-
map_error(status_code=response.status_code, response=response, error_map=error_map)
|
|
397
|
-
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
|
|
398
|
-
raise HttpResponseError(response=response, model=error)
|
|
399
|
-
|
|
400
|
-
deserialized = self._deserialize('HealthcareJobState', pipeline_response)
|
|
401
|
-
|
|
402
|
-
if cls:
|
|
403
|
-
return cls(pipeline_response, deserialized, {})
|
|
404
|
-
|
|
405
|
-
return deserialized
|
|
406
|
-
|
|
407
|
-
health_status.metadata = {'url': "/entities/health/jobs/{jobId}"} # type: ignore
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
async def _cancel_health_job_initial( # pylint: disable=inconsistent-return-statements
|
|
411
|
-
self,
|
|
412
|
-
job_id: str,
|
|
413
|
-
**kwargs: Any
|
|
414
|
-
) -> None:
|
|
415
|
-
error_map = {
|
|
416
|
-
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
|
|
417
|
-
}
|
|
418
|
-
error_map.update(kwargs.pop('error_map', {}) or {})
|
|
419
|
-
|
|
420
|
-
_headers = kwargs.pop("headers", {}) or {}
|
|
421
|
-
_params = kwargs.pop("params", {}) or {}
|
|
422
|
-
|
|
423
|
-
cls = kwargs.pop('cls', None) # type: ClsType[None]
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
request = build_cancel_health_job_request(
|
|
427
|
-
job_id=job_id,
|
|
428
|
-
template_url=self._cancel_health_job_initial.metadata['url'],
|
|
429
|
-
headers=_headers,
|
|
430
|
-
params=_params,
|
|
431
|
-
)
|
|
432
|
-
request = _convert_request(request)
|
|
433
|
-
path_format_arguments = {
|
|
434
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
435
|
-
}
|
|
436
|
-
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
|
|
437
|
-
|
|
438
|
-
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
|
|
439
|
-
request,
|
|
440
|
-
stream=False,
|
|
441
|
-
**kwargs
|
|
442
|
-
)
|
|
443
|
-
|
|
444
|
-
response = pipeline_response.http_response
|
|
445
|
-
|
|
446
|
-
if response.status_code not in [202]:
|
|
447
|
-
map_error(status_code=response.status_code, response=response, error_map=error_map)
|
|
448
|
-
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
|
|
449
|
-
raise HttpResponseError(response=response, model=error)
|
|
450
|
-
|
|
451
|
-
response_headers = {}
|
|
452
|
-
response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
if cls:
|
|
456
|
-
return cls(pipeline_response, None, response_headers)
|
|
457
|
-
|
|
458
|
-
_cancel_health_job_initial.metadata = {'url': "/entities/health/jobs/{jobId}"} # type: ignore
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
@distributed_trace_async
|
|
462
|
-
async def begin_cancel_health_job(
|
|
463
|
-
self,
|
|
464
|
-
job_id: str,
|
|
465
|
-
**kwargs: Any
|
|
466
|
-
) -> AsyncLROPoller[None]:
|
|
467
|
-
"""Cancel healthcare prediction job.
|
|
468
|
-
|
|
469
|
-
Cancel healthcare prediction job.
|
|
470
|
-
|
|
471
|
-
:param job_id: Job ID. Required.
|
|
472
|
-
:type job_id: str
|
|
473
|
-
:keyword callable cls: A custom type or function that will be passed the direct response
|
|
474
|
-
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
|
|
475
|
-
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
|
|
476
|
-
for this operation to not poll, or pass in your own initialized polling object for a personal
|
|
477
|
-
polling strategy.
|
|
478
|
-
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
|
|
479
|
-
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
|
|
480
|
-
Retry-After header is present.
|
|
481
|
-
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
|
|
482
|
-
:rtype: ~azure.core.polling.AsyncLROPoller[None]
|
|
483
|
-
:raises ~azure.core.exceptions.HttpResponseError:
|
|
484
|
-
"""
|
|
485
|
-
_headers = kwargs.pop("headers", {}) or {}
|
|
486
|
-
_params = kwargs.pop("params", {}) or {}
|
|
487
|
-
|
|
488
|
-
cls = kwargs.pop('cls', None) # type: ClsType[None]
|
|
489
|
-
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
|
|
490
|
-
lro_delay = kwargs.pop(
|
|
491
|
-
'polling_interval',
|
|
492
|
-
self._config.polling_interval
|
|
493
|
-
)
|
|
494
|
-
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
|
|
495
|
-
if cont_token is None:
|
|
496
|
-
raw_result = await self._cancel_health_job_initial( # type: ignore
|
|
497
|
-
job_id=job_id,
|
|
498
|
-
cls=lambda x,y,z: x,
|
|
499
|
-
headers=_headers,
|
|
500
|
-
params=_params,
|
|
501
|
-
**kwargs
|
|
502
|
-
)
|
|
503
|
-
kwargs.pop('error_map', None)
|
|
504
|
-
|
|
505
|
-
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
|
|
506
|
-
if cls:
|
|
507
|
-
return cls(pipeline_response, None, {})
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
path_format_arguments = {
|
|
511
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
512
|
-
}
|
|
513
|
-
|
|
514
|
-
if polling is True:
|
|
515
|
-
polling_method = cast(AsyncPollingMethod, AsyncLROBasePolling(
|
|
516
|
-
lro_delay,
|
|
517
|
-
|
|
518
|
-
path_format_arguments=path_format_arguments,
|
|
519
|
-
**kwargs
|
|
520
|
-
)) # type: AsyncPollingMethod
|
|
521
|
-
elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
|
|
522
|
-
else: polling_method = polling
|
|
523
|
-
if cont_token:
|
|
524
|
-
return AsyncLROPoller.from_continuation_token(
|
|
525
|
-
polling_method=polling_method,
|
|
526
|
-
continuation_token=cont_token,
|
|
527
|
-
client=self._client,
|
|
528
|
-
deserialization_callback=get_long_running_output
|
|
529
|
-
)
|
|
530
|
-
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
|
531
|
-
|
|
532
|
-
begin_cancel_health_job.metadata = {'url': "/entities/health/jobs/{jobId}"} # type: ignore
|
|
533
|
-
|
|
534
|
-
async def _health_initial(
|
|
535
|
-
self,
|
|
536
|
-
documents: List[_models.MultiLanguageInput],
|
|
537
|
-
model_version: Optional[str] = None,
|
|
538
|
-
string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
|
|
539
|
-
logging_opt_out: Optional[bool] = None,
|
|
540
|
-
**kwargs: Any
|
|
541
|
-
) -> Optional[_models.HealthcareJobState]:
|
|
542
|
-
error_map = {
|
|
543
|
-
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
|
|
544
|
-
}
|
|
545
|
-
error_map.update(kwargs.pop('error_map', {}) or {})
|
|
546
|
-
|
|
547
|
-
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
|
|
548
|
-
_params = kwargs.pop("params", {}) or {}
|
|
549
|
-
|
|
550
|
-
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
|
|
551
|
-
cls = kwargs.pop('cls', None) # type: ClsType[Optional[_models.HealthcareJobState]]
|
|
552
|
-
|
|
553
|
-
_input = _models.MultiLanguageBatchInput(documents=documents)
|
|
554
|
-
_json = self._serialize.body(_input, 'MultiLanguageBatchInput')
|
|
555
|
-
|
|
556
|
-
request = build_health_request(
|
|
557
|
-
model_version=model_version,
|
|
558
|
-
string_index_type=string_index_type,
|
|
559
|
-
logging_opt_out=logging_opt_out,
|
|
560
|
-
content_type=content_type,
|
|
561
|
-
json=_json,
|
|
562
|
-
template_url=self._health_initial.metadata['url'],
|
|
563
|
-
headers=_headers,
|
|
564
|
-
params=_params,
|
|
565
|
-
)
|
|
566
|
-
request = _convert_request(request)
|
|
567
|
-
path_format_arguments = {
|
|
568
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
569
|
-
}
|
|
570
|
-
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
|
|
571
|
-
|
|
572
|
-
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
|
|
573
|
-
request,
|
|
574
|
-
stream=False,
|
|
575
|
-
**kwargs
|
|
576
|
-
)
|
|
577
|
-
|
|
578
|
-
response = pipeline_response.http_response
|
|
579
|
-
|
|
580
|
-
if response.status_code not in [200, 202]:
|
|
581
|
-
map_error(status_code=response.status_code, response=response, error_map=error_map)
|
|
582
|
-
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
|
|
583
|
-
raise HttpResponseError(response=response, model=error)
|
|
584
|
-
|
|
585
|
-
deserialized = None
|
|
586
|
-
response_headers = {}
|
|
587
|
-
if response.status_code == 200:
|
|
588
|
-
deserialized = self._deserialize('HealthcareJobState', pipeline_response)
|
|
589
|
-
|
|
590
|
-
if response.status_code == 202:
|
|
591
|
-
response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
if cls:
|
|
595
|
-
return cls(pipeline_response, deserialized, response_headers)
|
|
596
|
-
|
|
597
|
-
return deserialized
|
|
598
|
-
|
|
599
|
-
_health_initial.metadata = {'url': "/entities/health/jobs"} # type: ignore
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
@distributed_trace_async
|
|
603
|
-
async def begin_health(
|
|
604
|
-
self,
|
|
605
|
-
documents: List[_models.MultiLanguageInput],
|
|
606
|
-
model_version: Optional[str] = None,
|
|
607
|
-
string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
|
|
608
|
-
logging_opt_out: Optional[bool] = None,
|
|
609
|
-
**kwargs: Any
|
|
610
|
-
) -> AsyncAnalyzeHealthcareEntitiesLROPoller[_models.HealthcareJobState]:
|
|
611
|
-
"""Submit healthcare analysis job.
|
|
612
|
-
|
|
613
|
-
Start a healthcare analysis job to recognize healthcare related entities (drugs, conditions,
|
|
614
|
-
symptoms, etc) and their relations.
|
|
615
|
-
|
|
616
|
-
:param documents: The set of documents to process as part of this batch. Required.
|
|
617
|
-
:type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
|
|
618
|
-
:param model_version: (Optional) This value indicates which model will be used for scoring. If
|
|
619
|
-
a model-version is not specified, the API should default to the latest, non-preview version.
|
|
620
|
-
Default value is None.
|
|
621
|
-
:type model_version: str
|
|
622
|
-
:param string_index_type: (Optional) Specifies the method used to interpret string offsets.
|
|
623
|
-
Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
|
|
624
|
-
see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
|
|
625
|
-
"UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
|
|
626
|
-
:type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
|
|
627
|
-
:param logging_opt_out: (Optional) If set to true, you opt-out of having your text input logged
|
|
628
|
-
for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
|
|
629
|
-
allow for troubleshooting issues in providing you with the Text Analytics natural language
|
|
630
|
-
processing functions. Setting this parameter to true, disables input logging and may limit our
|
|
631
|
-
ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
|
|
632
|
-
notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
|
|
633
|
-
principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
|
|
634
|
-
:type logging_opt_out: bool
|
|
635
|
-
:keyword callable cls: A custom type or function that will be passed the direct response
|
|
636
|
-
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
|
|
637
|
-
:keyword polling: By default, your polling method will be
|
|
638
|
-
AsyncAnalyzeHealthcareEntitiesLROPollingMethod. Pass in False for this operation to not poll,
|
|
639
|
-
or pass in your own initialized polling object for a personal polling strategy.
|
|
640
|
-
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
|
|
641
|
-
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
|
|
642
|
-
Retry-After header is present.
|
|
643
|
-
:return: An instance of AsyncAnalyzeHealthcareEntitiesLROPoller that returns either
|
|
644
|
-
HealthcareJobState or the result of cls(response)
|
|
645
|
-
:rtype:
|
|
646
|
-
~.....aio._lro_async.AsyncAnalyzeHealthcareEntitiesLROPoller[~azure.ai.textanalytics.v3_1.models.HealthcareJobState]
|
|
647
|
-
:raises ~azure.core.exceptions.HttpResponseError:
|
|
648
|
-
"""
|
|
649
|
-
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
|
|
650
|
-
_params = kwargs.pop("params", {}) or {}
|
|
651
|
-
|
|
652
|
-
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
|
|
653
|
-
cls = kwargs.pop('cls', None) # type: ClsType[_models.HealthcareJobState]
|
|
654
|
-
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
|
|
655
|
-
lro_delay = kwargs.pop(
|
|
656
|
-
'polling_interval',
|
|
657
|
-
self._config.polling_interval
|
|
658
|
-
)
|
|
659
|
-
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
|
|
660
|
-
if cont_token is None:
|
|
661
|
-
raw_result = await self._health_initial( # type: ignore
|
|
662
|
-
documents=documents,
|
|
663
|
-
model_version=model_version,
|
|
664
|
-
string_index_type=string_index_type,
|
|
665
|
-
logging_opt_out=logging_opt_out,
|
|
666
|
-
content_type=content_type,
|
|
667
|
-
cls=lambda x,y,z: x,
|
|
668
|
-
headers=_headers,
|
|
669
|
-
params=_params,
|
|
670
|
-
**kwargs
|
|
671
|
-
)
|
|
672
|
-
kwargs.pop('error_map', None)
|
|
673
|
-
|
|
674
|
-
def get_long_running_output(pipeline_response):
|
|
675
|
-
deserialized = self._deserialize('HealthcareJobState', pipeline_response)
|
|
676
|
-
if cls:
|
|
677
|
-
return cls(pipeline_response, deserialized, {})
|
|
678
|
-
return deserialized
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
path_format_arguments = {
|
|
682
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
683
|
-
}
|
|
684
|
-
|
|
685
|
-
if polling is True:
|
|
686
|
-
polling_method = cast(AsyncPollingMethod, AsyncAnalyzeHealthcareEntitiesLROPollingMethod(
|
|
687
|
-
lro_delay,
|
|
688
|
-
|
|
689
|
-
path_format_arguments=path_format_arguments,
|
|
690
|
-
**kwargs
|
|
691
|
-
)) # type: AsyncPollingMethod
|
|
692
|
-
elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
|
|
693
|
-
else: polling_method = polling
|
|
694
|
-
if cont_token:
|
|
695
|
-
return AsyncAnalyzeHealthcareEntitiesLROPoller.from_continuation_token(
|
|
696
|
-
polling_method=polling_method,
|
|
697
|
-
continuation_token=cont_token,
|
|
698
|
-
client=self._client,
|
|
699
|
-
deserialization_callback=get_long_running_output
|
|
700
|
-
)
|
|
701
|
-
return AsyncAnalyzeHealthcareEntitiesLROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
|
702
|
-
|
|
703
|
-
begin_health.metadata = {'url': "/entities/health/jobs"} # type: ignore
|
|
704
|
-
|
|
705
|
-
@distributed_trace_async
|
|
706
|
-
async def entities_recognition_general(
|
|
707
|
-
self,
|
|
708
|
-
documents: List[_models.MultiLanguageInput],
|
|
709
|
-
model_version: Optional[str] = None,
|
|
710
|
-
show_stats: Optional[bool] = None,
|
|
711
|
-
logging_opt_out: Optional[bool] = None,
|
|
712
|
-
string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
|
|
713
|
-
**kwargs: Any
|
|
714
|
-
) -> _models.EntitiesResult:
|
|
715
|
-
"""Named Entity Recognition.
|
|
716
|
-
|
|
717
|
-
The API returns a list of general named entities in a given document. For the list of supported
|
|
718
|
-
entity types, check :code:`<a href="https://aka.ms/taner">Supported Entity Types in Text
|
|
719
|
-
Analytics API</a>`. See the :code:`<a href="https://aka.ms/talangs">Supported languages in Text
|
|
720
|
-
Analytics API</a>` for the list of enabled languages.
|
|
721
|
-
|
|
722
|
-
:param documents: The set of documents to process as part of this batch. Required.
|
|
723
|
-
:type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
|
|
724
|
-
:param model_version: (Optional) This value indicates which model will be used for scoring. If
|
|
725
|
-
a model-version is not specified, the API should default to the latest, non-preview version.
|
|
726
|
-
Default value is None.
|
|
727
|
-
:type model_version: str
|
|
728
|
-
:param show_stats: (Optional) if set to true, response will contain request and document level
|
|
729
|
-
statistics. Default value is None.
|
|
730
|
-
:type show_stats: bool
|
|
731
|
-
:param logging_opt_out: (Optional) If set to true, you opt-out of having your text input logged
|
|
732
|
-
for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
|
|
733
|
-
allow for troubleshooting issues in providing you with the Text Analytics natural language
|
|
734
|
-
processing functions. Setting this parameter to true, disables input logging and may limit our
|
|
735
|
-
ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
|
|
736
|
-
notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
|
|
737
|
-
principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
|
|
738
|
-
:type logging_opt_out: bool
|
|
739
|
-
:param string_index_type: (Optional) Specifies the method used to interpret string offsets.
|
|
740
|
-
Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
|
|
741
|
-
see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
|
|
742
|
-
"UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
|
|
743
|
-
:type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
|
|
744
|
-
:keyword callable cls: A custom type or function that will be passed the direct response
|
|
745
|
-
:return: EntitiesResult or the result of cls(response)
|
|
746
|
-
:rtype: ~azure.ai.textanalytics.v3_1.models.EntitiesResult
|
|
747
|
-
:raises ~azure.core.exceptions.HttpResponseError:
|
|
748
|
-
"""
|
|
749
|
-
error_map = {
|
|
750
|
-
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
|
|
751
|
-
}
|
|
752
|
-
error_map.update(kwargs.pop('error_map', {}) or {})
|
|
753
|
-
|
|
754
|
-
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
|
|
755
|
-
_params = kwargs.pop("params", {}) or {}
|
|
756
|
-
|
|
757
|
-
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
|
|
758
|
-
cls = kwargs.pop('cls', None) # type: ClsType[_models.EntitiesResult]
|
|
759
|
-
|
|
760
|
-
_input = _models.MultiLanguageBatchInput(documents=documents)
|
|
761
|
-
_json = self._serialize.body(_input, 'MultiLanguageBatchInput')
|
|
762
|
-
|
|
763
|
-
request = build_entities_recognition_general_request(
|
|
764
|
-
model_version=model_version,
|
|
765
|
-
show_stats=show_stats,
|
|
766
|
-
logging_opt_out=logging_opt_out,
|
|
767
|
-
string_index_type=string_index_type,
|
|
768
|
-
content_type=content_type,
|
|
769
|
-
json=_json,
|
|
770
|
-
template_url=self.entities_recognition_general.metadata['url'],
|
|
771
|
-
headers=_headers,
|
|
772
|
-
params=_params,
|
|
773
|
-
)
|
|
774
|
-
request = _convert_request(request)
|
|
775
|
-
path_format_arguments = {
|
|
776
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
777
|
-
}
|
|
778
|
-
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
|
|
779
|
-
|
|
780
|
-
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
|
|
781
|
-
request,
|
|
782
|
-
stream=False,
|
|
783
|
-
**kwargs
|
|
784
|
-
)
|
|
785
|
-
|
|
786
|
-
response = pipeline_response.http_response
|
|
787
|
-
|
|
788
|
-
if response.status_code not in [200]:
|
|
789
|
-
map_error(status_code=response.status_code, response=response, error_map=error_map)
|
|
790
|
-
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
|
|
791
|
-
raise HttpResponseError(response=response, model=error)
|
|
792
|
-
|
|
793
|
-
deserialized = self._deserialize('EntitiesResult', pipeline_response)
|
|
794
|
-
|
|
795
|
-
if cls:
|
|
796
|
-
return cls(pipeline_response, deserialized, {})
|
|
797
|
-
|
|
798
|
-
return deserialized
|
|
799
|
-
|
|
800
|
-
entities_recognition_general.metadata = {'url': "/entities/recognition/general"} # type: ignore
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
@distributed_trace_async
|
|
804
|
-
async def entities_recognition_pii(
|
|
805
|
-
self,
|
|
806
|
-
documents: List[_models.MultiLanguageInput],
|
|
807
|
-
model_version: Optional[str] = None,
|
|
808
|
-
show_stats: Optional[bool] = None,
|
|
809
|
-
logging_opt_out: Optional[bool] = None,
|
|
810
|
-
domain: Optional[str] = None,
|
|
811
|
-
string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
|
|
812
|
-
pii_categories: Optional[List[Union[str, "_models.PiiCategory"]]] = None,
|
|
813
|
-
**kwargs: Any
|
|
814
|
-
) -> _models.PiiResult:
|
|
815
|
-
"""Entities containing personal information.
|
|
816
|
-
|
|
817
|
-
The API returns a list of entities with personal information (\"SSN\", \"Bank Account\" etc) in
|
|
818
|
-
the document. For the list of supported entity types, check :code:`<a
|
|
819
|
-
href="https://aka.ms/tanerpii">Supported Entity Types in Text Analytics API</a>`. See the
|
|
820
|
-
:code:`<a href="https://aka.ms/talangs">Supported languages in Text Analytics API</a>` for the
|
|
821
|
-
list of enabled languages.
|
|
822
|
-
|
|
823
|
-
:param documents: The set of documents to process as part of this batch. Required.
|
|
824
|
-
:type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
|
|
825
|
-
:param model_version: (Optional) This value indicates which model will be used for scoring. If
|
|
826
|
-
a model-version is not specified, the API should default to the latest, non-preview version.
|
|
827
|
-
Default value is None.
|
|
828
|
-
:type model_version: str
|
|
829
|
-
:param show_stats: (Optional) if set to true, response will contain request and document level
|
|
830
|
-
statistics. Default value is None.
|
|
831
|
-
:type show_stats: bool
|
|
832
|
-
:param logging_opt_out: (Optional) If set to true, you opt-out of having your text input logged
|
|
833
|
-
for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
|
|
834
|
-
allow for troubleshooting issues in providing you with the Text Analytics natural language
|
|
835
|
-
processing functions. Setting this parameter to true, disables input logging and may limit our
|
|
836
|
-
ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
|
|
837
|
-
notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
|
|
838
|
-
principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
|
|
839
|
-
:type logging_opt_out: bool
|
|
840
|
-
:param domain: (Optional) if specified, will set the PII domain to include only a subset of the
|
|
841
|
-
entity categories. Possible values include: 'PHI', 'none'. Default value is None.
|
|
842
|
-
:type domain: str
|
|
843
|
-
:param string_index_type: (Optional) Specifies the method used to interpret string offsets.
|
|
844
|
-
Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
|
|
845
|
-
see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
|
|
846
|
-
"UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
|
|
847
|
-
:type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
|
|
848
|
-
:param pii_categories: (Optional) describes the PII categories to return. Default value is
|
|
849
|
-
None.
|
|
850
|
-
:type pii_categories: list[str or ~azure.ai.textanalytics.v3_1.models.PiiCategory]
|
|
851
|
-
:keyword callable cls: A custom type or function that will be passed the direct response
|
|
852
|
-
:return: PiiResult or the result of cls(response)
|
|
853
|
-
:rtype: ~azure.ai.textanalytics.v3_1.models.PiiResult
|
|
854
|
-
:raises ~azure.core.exceptions.HttpResponseError:
|
|
855
|
-
"""
|
|
856
|
-
error_map = {
|
|
857
|
-
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
|
|
858
|
-
}
|
|
859
|
-
error_map.update(kwargs.pop('error_map', {}) or {})
|
|
860
|
-
|
|
861
|
-
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
|
|
862
|
-
_params = kwargs.pop("params", {}) or {}
|
|
863
|
-
|
|
864
|
-
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
|
|
865
|
-
cls = kwargs.pop('cls', None) # type: ClsType[_models.PiiResult]
|
|
866
|
-
|
|
867
|
-
_input = _models.MultiLanguageBatchInput(documents=documents)
|
|
868
|
-
_json = self._serialize.body(_input, 'MultiLanguageBatchInput')
|
|
869
|
-
|
|
870
|
-
request = build_entities_recognition_pii_request(
|
|
871
|
-
model_version=model_version,
|
|
872
|
-
show_stats=show_stats,
|
|
873
|
-
logging_opt_out=logging_opt_out,
|
|
874
|
-
domain=domain,
|
|
875
|
-
string_index_type=string_index_type,
|
|
876
|
-
pii_categories=pii_categories,
|
|
877
|
-
content_type=content_type,
|
|
878
|
-
json=_json,
|
|
879
|
-
template_url=self.entities_recognition_pii.metadata['url'],
|
|
880
|
-
headers=_headers,
|
|
881
|
-
params=_params,
|
|
882
|
-
)
|
|
883
|
-
request = _convert_request(request)
|
|
884
|
-
path_format_arguments = {
|
|
885
|
-
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
|
|
886
|
-
}
|
|
887
|
-
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
|
|
888
|
-
|
|
889
|
-
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
|
|
890
|
-
request,
|
|
891
|
-
stream=False,
|
|
892
|
-
**kwargs
|
|
893
|
-
)
|
|
894
|
-
|
|
895
|
-
response = pipeline_response.http_response
|
|
896
|
-
|
|
897
|
-
if response.status_code not in [200]:
|
|
898
|
-
map_error(status_code=response.status_code, response=response, error_map=error_map)
|
|
899
|
-
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
|
|
900
|
-
raise HttpResponseError(response=response, model=error)
|
|
901
|
-
|
|
902
|
-
deserialized = self._deserialize('PiiResult', pipeline_response)
|
|
903
|
-
|
|
904
|
-
if cls:
|
|
905
|
-
return cls(pipeline_response, deserialized, {})
|
|
906
|
-
|
|
907
|
-
return deserialized
|
|
908
|
-
|
|
909
|
-
entities_recognition_pii.metadata = {'url': "/entities/recognition/pii"} # type: ignore
|
|
910
|
-
|
|
911
|
-
|
|
 912 | -     @distributed_trace_async
 913 | -     async def entities_linking(
 914 | -         self,
 915 | -         documents: List[_models.MultiLanguageInput],
 916 | -         model_version: Optional[str] = None,
 917 | -         show_stats: Optional[bool] = None,
 918 | -         logging_opt_out: Optional[bool] = None,
 919 | -         string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
 920 | -         **kwargs: Any
 921 | -     ) -> _models.EntityLinkingResult:
 922 | -         """Linked entities from a well known knowledge base.
 923 | -
 924 | -         The API returns a list of recognized entities with links to a well known knowledge base. See
 925 | -         the :code:`<a href="https://aka.ms/talangs">Supported languages in Text Analytics API</a>` for
 926 | -         the list of enabled languages.
 927 | -
 928 | -         :param documents: The set of documents to process as part of this batch. Required.
 929 | -         :type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
 930 | -         :param model_version: (Optional) This value indicates which model will be used for scoring. If
 931 | -          a model-version is not specified, the API should default to the latest, non-preview version.
 932 | -          Default value is None.
 933 | -         :type model_version: str
 934 | -         :param show_stats: (Optional) if set to true, response will contain request and document level
 935 | -          statistics. Default value is None.
 936 | -         :type show_stats: bool
 937 | -         :param logging_opt_out: (Optional) If set to true, you opt-out of having your text input logged
 938 | -          for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
 939 | -          allow for troubleshooting issues in providing you with the Text Analytics natural language
 940 | -          processing functions. Setting this parameter to true, disables input logging and may limit our
 941 | -          ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
 942 | -          notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
 943 | -          principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
 944 | -         :type logging_opt_out: bool
 945 | -         :param string_index_type: (Optional) Specifies the method used to interpret string offsets.
 946 | -          Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
 947 | -          see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
 948 | -          "UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
 949 | -         :type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
 950 | -         :keyword callable cls: A custom type or function that will be passed the direct response
 951 | -         :return: EntityLinkingResult or the result of cls(response)
 952 | -         :rtype: ~azure.ai.textanalytics.v3_1.models.EntityLinkingResult
 953 | -         :raises ~azure.core.exceptions.HttpResponseError:
 954 | -         """
 955 | -         error_map = {
 956 | -             401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
 957 | -         }
 958 | -         error_map.update(kwargs.pop('error_map', {}) or {})
 959 | -
 960 | -         _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
 961 | -         _params = kwargs.pop("params", {}) or {}
 962 | -
 963 | -         content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
 964 | -         cls = kwargs.pop('cls', None) # type: ClsType[_models.EntityLinkingResult]
 965 | -
 966 | -         _input = _models.MultiLanguageBatchInput(documents=documents)
 967 | -         _json = self._serialize.body(_input, 'MultiLanguageBatchInput')
 968 | -
 969 | -         request = build_entities_linking_request(
 970 | -             model_version=model_version,
 971 | -             show_stats=show_stats,
 972 | -             logging_opt_out=logging_opt_out,
 973 | -             string_index_type=string_index_type,
 974 | -             content_type=content_type,
 975 | -             json=_json,
 976 | -             template_url=self.entities_linking.metadata['url'],
 977 | -             headers=_headers,
 978 | -             params=_params,
 979 | -         )
 980 | -         request = _convert_request(request)
 981 | -         path_format_arguments = {
 982 | -             "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
 983 | -         }
 984 | -         request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
 985 | -
 986 | -         pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
 987 | -             request,
 988 | -             stream=False,
 989 | -             **kwargs
 990 | -         )
 991 | -
 992 | -         response = pipeline_response.http_response
 993 | -
 994 | -         if response.status_code not in [200]:
 995 | -             map_error(status_code=response.status_code, response=response, error_map=error_map)
 996 | -             error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
 997 | -             raise HttpResponseError(response=response, model=error)
 998 | -
 999 | -         deserialized = self._deserialize('EntityLinkingResult', pipeline_response)
1000 | -
1001 | -         if cls:
1002 | -             return cls(pipeline_response, deserialized, {})
1003 | -
1004 | -         return deserialized
1005 | -
1006 | -     entities_linking.metadata = {'url': "/entities/linking"} # type: ignore
1007 | -
1008 | -
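A minimal sketch of calling the removed async `entities_linking` operation, including the documented `cls` keyword that the deleted code invokes as `cls(pipeline_response, deserialized, {})`. The `client` object, the model import path, and the response field names are assumptions; only the parameter names come from the deleted signature above.

```python
# Hypothetical sketch: the removed entities_linking operation with a cls callback.
from typing import Any


async def link_entities(client: Any) -> None:
    # Assumed import path for the 5.x v3.1 models; may differ per installed version.
    from azure.ai.textanalytics._generated.v3_1.models import MultiLanguageInput

    documents = [
        MultiLanguageInput(id="1", text="Old Faithful is a geyser at Yellowstone Park.", language="en"),
    ]
    result = await client.entities_linking(
        documents,
        show_stats=True,                     # request/document statistics, per the docstring
        string_index_type="Utf16CodeUnit",   # offsets measured in UTF-16 code units
        # The deleted code calls cls(pipeline_response, deserialized, {}) when provided:
        cls=lambda pipeline_response, deserialized, headers: deserialized,
    )
    for doc in result.documents:             # assumed EntityLinkingResult shape
        for entity in doc.entities:
            print(entity.name, entity.url, entity.data_source)
```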
1009 | -     @distributed_trace_async
1010 | -     async def key_phrases(
1011 | -         self,
1012 | -         documents: List[_models.MultiLanguageInput],
1013 | -         model_version: Optional[str] = None,
1014 | -         show_stats: Optional[bool] = None,
1015 | -         logging_opt_out: Optional[bool] = None,
1016 | -         **kwargs: Any
1017 | -     ) -> _models.KeyPhraseResult:
1018 | -         """Key Phrases.
1019 | -
1020 | -         The API returns a list of strings denoting the key phrases in the input text. See the :code:`<a
1021 | -         href="https://aka.ms/talangs">Supported languages in Text Analytics API</a>` for the list of
1022 | -         enabled languages.
1023 | -
1024 | -         :param documents: The set of documents to process as part of this batch. Required.
1025 | -         :type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
1026 | -         :param model_version: (Optional) This value indicates which model will be used for scoring. If
1027 | -          a model-version is not specified, the API should default to the latest, non-preview version.
1028 | -          Default value is None.
1029 | -         :type model_version: str
1030 | -         :param show_stats: (Optional) if set to true, response will contain request and document level
1031 | -          statistics. Default value is None.
1032 | -         :type show_stats: bool
1033 | -         :param logging_opt_out: (Optional) If set to true, you opt-out of having your text input logged
1034 | -          for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
1035 | -          allow for troubleshooting issues in providing you with the Text Analytics natural language
1036 | -          processing functions. Setting this parameter to true, disables input logging and may limit our
1037 | -          ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
1038 | -          notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
1039 | -          principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
1040 | -         :type logging_opt_out: bool
1041 | -         :keyword callable cls: A custom type or function that will be passed the direct response
1042 | -         :return: KeyPhraseResult or the result of cls(response)
1043 | -         :rtype: ~azure.ai.textanalytics.v3_1.models.KeyPhraseResult
1044 | -         :raises ~azure.core.exceptions.HttpResponseError:
1045 | -         """
1046 | -         error_map = {
1047 | -             401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
1048 | -         }
1049 | -         error_map.update(kwargs.pop('error_map', {}) or {})
1050 | -
1051 | -         _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
1052 | -         _params = kwargs.pop("params", {}) or {}
1053 | -
1054 | -         content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
1055 | -         cls = kwargs.pop('cls', None) # type: ClsType[_models.KeyPhraseResult]
1056 | -
1057 | -         _input = _models.MultiLanguageBatchInput(documents=documents)
1058 | -         _json = self._serialize.body(_input, 'MultiLanguageBatchInput')
1059 | -
1060 | -         request = build_key_phrases_request(
1061 | -             model_version=model_version,
1062 | -             show_stats=show_stats,
1063 | -             logging_opt_out=logging_opt_out,
1064 | -             content_type=content_type,
1065 | -             json=_json,
1066 | -             template_url=self.key_phrases.metadata['url'],
1067 | -             headers=_headers,
1068 | -             params=_params,
1069 | -         )
1070 | -         request = _convert_request(request)
1071 | -         path_format_arguments = {
1072 | -             "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
1073 | -         }
1074 | -         request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
1075 | -
1076 | -         pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
1077 | -             request,
1078 | -             stream=False,
1079 | -             **kwargs
1080 | -         )
1081 | -
1082 | -         response = pipeline_response.http_response
1083 | -
1084 | -         if response.status_code not in [200]:
1085 | -             map_error(status_code=response.status_code, response=response, error_map=error_map)
1086 | -             error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
1087 | -             raise HttpResponseError(response=response, model=error)
1088 | -
1089 | -         deserialized = self._deserialize('KeyPhraseResult', pipeline_response)
1090 | -
1091 | -         if cls:
1092 | -             return cls(pipeline_response, deserialized, {})
1093 | -
1094 | -         return deserialized
1095 | -
1096 | -     key_phrases.metadata = {'url': "/keyPhrases"} # type: ignore
1097 | -
1098 | -
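A minimal sketch of the removed async `key_phrases` operation. The parameter names mirror the deleted signature above; the client object, the model import path, and the `key_phrases` response field are assumptions about the 5.x generated v3.1 layer.

```python
# Hypothetical sketch: the removed key_phrases operation over a small batch.
from typing import Any


async def extract_key_phrases(client: Any) -> None:
    # Assumed import path for the 5.x v3.1 models.
    from azure.ai.textanalytics._generated.v3_1.models import MultiLanguageInput

    documents = [
        MultiLanguageInput(id="1", text="The food was delicious and the staff were wonderful.", language="en"),
        MultiLanguageInput(id="2", text="El servicio fue excelente.", language="es"),
    ]
    result = await client.key_phrases(
        documents,
        logging_opt_out=True,  # opt out of the 48-hour troubleshooting log described above
    )
    for doc in result.documents:
        print(doc.id, doc.key_phrases)  # field names assumed from the v3.1 response model
```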
1099 | -     @distributed_trace_async
1100 | -     async def languages(
1101 | -         self,
1102 | -         documents: List[_models.LanguageInput],
1103 | -         model_version: Optional[str] = None,
1104 | -         show_stats: Optional[bool] = None,
1105 | -         logging_opt_out: Optional[bool] = None,
1106 | -         **kwargs: Any
1107 | -     ) -> _models.LanguageResult:
1108 | -         """Detect Language.
1109 | -
1110 | -         The API returns the detected language and a numeric score between 0 and 1. Scores close to 1
1111 | -         indicate 100% certainty that the identified language is true. See the :code:`<a
1112 | -         href="https://aka.ms/talangs">Supported languages in Text Analytics API</a>` for the list of
1113 | -         enabled languages.
1114 | -
1115 | -         :param documents: Required.
1116 | -         :type documents: list[~azure.ai.textanalytics.v3_1.models.LanguageInput]
1117 | -         :param model_version: (Optional) This value indicates which model will be used for scoring. If
1118 | -          a model-version is not specified, the API should default to the latest, non-preview version.
1119 | -          Default value is None.
1120 | -         :type model_version: str
1121 | -         :param show_stats: (Optional) if set to true, response will contain request and document level
1122 | -          statistics. Default value is None.
1123 | -         :type show_stats: bool
1124 | -         :param logging_opt_out: (Optional) If set to true, you opt-out of having your text input logged
1125 | -          for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
1126 | -          allow for troubleshooting issues in providing you with the Text Analytics natural language
1127 | -          processing functions. Setting this parameter to true, disables input logging and may limit our
1128 | -          ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
1129 | -          notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
1130 | -          principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
1131 | -         :type logging_opt_out: bool
1132 | -         :keyword callable cls: A custom type or function that will be passed the direct response
1133 | -         :return: LanguageResult or the result of cls(response)
1134 | -         :rtype: ~azure.ai.textanalytics.v3_1.models.LanguageResult
1135 | -         :raises ~azure.core.exceptions.HttpResponseError:
1136 | -         """
1137 | -         error_map = {
1138 | -             401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
1139 | -         }
1140 | -         error_map.update(kwargs.pop('error_map', {}) or {})
1141 | -
1142 | -         _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
1143 | -         _params = kwargs.pop("params", {}) or {}
1144 | -
1145 | -         content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
1146 | -         cls = kwargs.pop('cls', None) # type: ClsType[_models.LanguageResult]
1147 | -
1148 | -         _input = _models.LanguageBatchInput(documents=documents)
1149 | -         _json = self._serialize.body(_input, 'LanguageBatchInput')
1150 | -
1151 | -         request = build_languages_request(
1152 | -             model_version=model_version,
1153 | -             show_stats=show_stats,
1154 | -             logging_opt_out=logging_opt_out,
1155 | -             content_type=content_type,
1156 | -             json=_json,
1157 | -             template_url=self.languages.metadata['url'],
1158 | -             headers=_headers,
1159 | -             params=_params,
1160 | -         )
1161 | -         request = _convert_request(request)
1162 | -         path_format_arguments = {
1163 | -             "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
1164 | -         }
1165 | -         request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
1166 | -
1167 | -         pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
1168 | -             request,
1169 | -             stream=False,
1170 | -             **kwargs
1171 | -         )
1172 | -
1173 | -         response = pipeline_response.http_response
1174 | -
1175 | -         if response.status_code not in [200]:
1176 | -             map_error(status_code=response.status_code, response=response, error_map=error_map)
1177 | -             error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
1178 | -             raise HttpResponseError(response=response, model=error)
1179 | -
1180 | -         deserialized = self._deserialize('LanguageResult', pipeline_response)
1181 | -
1182 | -         if cls:
1183 | -             return cls(pipeline_response, deserialized, {})
1184 | -
1185 | -         return deserialized
1186 | -
1187 | -     languages.metadata = {'url': "/languages"} # type: ignore
1188 | -
1189 | -
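A minimal sketch of the removed async `languages` (Detect Language) operation. Note that, per the deleted signature, it takes `LanguageInput` rather than `MultiLanguageInput`; the `country_hint` field and the response shape are assumptions about the v3.1 contract, as is the import path.

```python
# Hypothetical sketch: the removed languages operation (language detection).
from typing import Any


async def detect_language(client: Any) -> None:
    # Assumed import path and field names for the 5.x v3.1 LanguageInput model.
    from azure.ai.textanalytics._generated.v3_1.models import LanguageInput

    documents = [
        LanguageInput(id="1", text="Ce document est rédigé en français.", country_hint="FR"),
    ]
    result = await client.languages(documents, show_stats=True)
    for doc in result.documents:
        detected = doc.detected_language          # assumed response field
        print(doc.id, detected.name, detected.confidence_score)
```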
1190 | -     @distributed_trace_async
1191 | -     async def sentiment(
1192 | -         self,
1193 | -         documents: List[_models.MultiLanguageInput],
1194 | -         model_version: Optional[str] = None,
1195 | -         show_stats: Optional[bool] = None,
1196 | -         logging_opt_out: Optional[bool] = None,
1197 | -         opinion_mining: Optional[bool] = None,
1198 | -         string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None,
1199 | -         **kwargs: Any
1200 | -     ) -> _models.SentimentResponse:
1201 | -         """Sentiment.
1202 | -
1203 | -         The API returns a detailed sentiment analysis for the input text. The analysis is done in
1204 | -         multiple levels of granularity, start from the a document level, down to sentence and key terms
1205 | -         (targets and assessments).
1206 | -
1207 | -         :param documents: The set of documents to process as part of this batch. Required.
1208 | -         :type documents: list[~azure.ai.textanalytics.v3_1.models.MultiLanguageInput]
1209 | -         :param model_version: (Optional) This value indicates which model will be used for scoring. If
1210 | -          a model-version is not specified, the API should default to the latest, non-preview version.
1211 | -          Default value is None.
1212 | -         :type model_version: str
1213 | -         :param show_stats: (Optional) if set to true, response will contain request and document level
1214 | -          statistics. Default value is None.
1215 | -         :type show_stats: bool
1216 | -         :param logging_opt_out: (Optional) If set to true, you opt-out of having your text input logged
1217 | -          for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to
1218 | -          allow for troubleshooting issues in providing you with the Text Analytics natural language
1219 | -          processing functions. Setting this parameter to true, disables input logging and may limit our
1220 | -          ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy
1221 | -          notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI
1222 | -          principles at https://www.microsoft.com/en-us/ai/responsible-ai. Default value is None.
1223 | -         :type logging_opt_out: bool
1224 | -         :param opinion_mining: (Optional) if set to true, response will contain not only sentiment
1225 | -          prediction but also opinion mining (aspect-based sentiment analysis) results. Default value is
1226 | -          None.
1227 | -         :type opinion_mining: bool
1228 | -         :param string_index_type: (Optional) Specifies the method used to interpret string offsets.
1229 | -          Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information
1230 | -          see https://aka.ms/text-analytics-offsets. Known values are: "TextElement_v8",
1231 | -          "UnicodeCodePoint", and "Utf16CodeUnit". Default value is None.
1232 | -         :type string_index_type: str or ~azure.ai.textanalytics.v3_1.models.StringIndexType
1233 | -         :keyword callable cls: A custom type or function that will be passed the direct response
1234 | -         :return: SentimentResponse or the result of cls(response)
1235 | -         :rtype: ~azure.ai.textanalytics.v3_1.models.SentimentResponse
1236 | -         :raises ~azure.core.exceptions.HttpResponseError:
1237 | -         """
1238 | -         error_map = {
1239 | -             401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
1240 | -         }
1241 | -         error_map.update(kwargs.pop('error_map', {}) or {})
1242 | -
1243 | -         _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
1244 | -         _params = kwargs.pop("params", {}) or {}
1245 | -
1246 | -         content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: str
1247 | -         cls = kwargs.pop('cls', None) # type: ClsType[_models.SentimentResponse]
1248 | -
1249 | -         _input = _models.MultiLanguageBatchInput(documents=documents)
1250 | -         _json = self._serialize.body(_input, 'MultiLanguageBatchInput')
1251 | -
1252 | -         request = build_sentiment_request(
1253 | -             model_version=model_version,
1254 | -             show_stats=show_stats,
1255 | -             logging_opt_out=logging_opt_out,
1256 | -             opinion_mining=opinion_mining,
1257 | -             string_index_type=string_index_type,
1258 | -             content_type=content_type,
1259 | -             json=_json,
1260 | -             template_url=self.sentiment.metadata['url'],
1261 | -             headers=_headers,
1262 | -             params=_params,
1263 | -         )
1264 | -         request = _convert_request(request)
1265 | -         path_format_arguments = {
1266 | -             "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
1267 | -         }
1268 | -         request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
1269 | -
1270 | -         pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
1271 | -             request,
1272 | -             stream=False,
1273 | -             **kwargs
1274 | -         )
1275 | -
1276 | -         response = pipeline_response.http_response
1277 | -
1278 | -         if response.status_code not in [200]:
1279 | -             map_error(status_code=response.status_code, response=response, error_map=error_map)
1280 | -             error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
1281 | -             raise HttpResponseError(response=response, model=error)
1282 | -
1283 | -         deserialized = self._deserialize('SentimentResponse', pipeline_response)
1284 | -
1285 | -         if cls:
1286 | -             return cls(pipeline_response, deserialized, {})
1287 | -
1288 | -         return deserialized
1289 | -
1290 | -     sentiment.metadata = {'url': "/sentiment"} # type: ignore
1291 | -
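A minimal sketch of the removed async `sentiment` operation with opinion mining enabled. Only the parameter names and the documented `string_index_type` values come from the deleted code above; the client object, the model import path, and the response shape are assumptions about the 5.x generated v3.1 layer.

```python
# Hypothetical sketch: the removed sentiment operation with opinion mining.
from typing import Any


async def analyze_sentiment(client: Any) -> None:
    # Assumed import path for the 5.x v3.1 models.
    from azure.ai.textanalytics._generated.v3_1.models import MultiLanguageInput

    documents = [
        MultiLanguageInput(id="1", text="The rooms were clean but the wifi was unusable.", language="en"),
    ]
    result = await client.sentiment(
        documents,
        opinion_mining=True,                   # also return per-sentence targets/assessments
        string_index_type="TextElement_v8",    # the documented default indexing scheme
    )
    for doc in result.documents:               # assumed SentimentResponse shape
        print(doc.id, doc.sentiment, doc.confidence_scores)
        for sentence in doc.sentences:
            print(" ", sentence.sentiment, sentence.text)
```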