azure-ai-textanalytics 5.3.0b2__py3-none-any.whl → 6.0.0b1__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of azure-ai-textanalytics might be problematic.
- azure/ai/textanalytics/__init__.py +26 -193
- azure/ai/textanalytics/_client.py +111 -0
- azure/ai/textanalytics/_configuration.py +73 -0
- azure/ai/textanalytics/{_generated/v2022_05_01/operations → _operations}/__init__.py +13 -8
- azure/ai/textanalytics/_operations/_operations.py +716 -0
- azure/ai/textanalytics/{_generated/v2022_05_01/models → _operations}/_patch.py +8 -6
- azure/ai/textanalytics/_patch.py +350 -0
- azure/ai/textanalytics/{_generated/aio → _utils}/__init__.py +1 -5
- azure/ai/textanalytics/_utils/model_base.py +1237 -0
- azure/ai/textanalytics/{_generated/_serialization.py → _utils/serialization.py} +640 -616
- azure/ai/textanalytics/{_generated/v2022_05_01/aio/_vendor.py → _utils/utils.py} +10 -12
- azure/ai/textanalytics/_version.py +8 -7
- azure/ai/textanalytics/aio/__init__.py +25 -14
- azure/ai/textanalytics/aio/_client.py +115 -0
- azure/ai/textanalytics/aio/_configuration.py +75 -0
- azure/ai/textanalytics/{_generated/v2022_10_01_preview/aio/operations → aio/_operations}/__init__.py +13 -8
- azure/ai/textanalytics/aio/_operations/_operations.py +623 -0
- azure/ai/textanalytics/{_generated/v2022_05_01 → aio/_operations}/_patch.py +8 -6
- azure/ai/textanalytics/aio/_patch.py +344 -0
- azure/ai/textanalytics/models/__init__.py +402 -0
- azure/ai/textanalytics/models/_enums.py +1979 -0
- azure/ai/textanalytics/models/_models.py +6641 -0
- azure/ai/textanalytics/{_generated/v2022_05_01/aio → models}/_patch.py +8 -6
- azure/ai/textanalytics/py.typed +1 -0
- {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/METADATA +668 -403
- azure_ai_textanalytics-6.0.0b1.dist-info/RECORD +29 -0
- {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/WHEEL +1 -1
- azure/ai/textanalytics/_base_client.py +0 -113
- azure/ai/textanalytics/_check.py +0 -22
- azure/ai/textanalytics/_dict_mixin.py +0 -57
- azure/ai/textanalytics/_generated/__init__.py +0 -16
- azure/ai/textanalytics/_generated/_configuration.py +0 -70
- azure/ai/textanalytics/_generated/_operations_mixin.py +0 -795
- azure/ai/textanalytics/_generated/_text_analytics_client.py +0 -126
- azure/ai/textanalytics/_generated/_version.py +0 -8
- azure/ai/textanalytics/_generated/aio/_configuration.py +0 -66
- azure/ai/textanalytics/_generated/aio/_operations_mixin.py +0 -776
- azure/ai/textanalytics/_generated/aio/_text_analytics_client.py +0 -124
- azure/ai/textanalytics/_generated/models.py +0 -8
- azure/ai/textanalytics/_generated/v2022_05_01/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_05_01/_configuration.py +0 -72
- azure/ai/textanalytics/_generated/v2022_05_01/_text_analytics_client.py +0 -100
- azure/ai/textanalytics/_generated/v2022_05_01/_vendor.py +0 -45
- azure/ai/textanalytics/_generated/v2022_05_01/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_05_01/aio/_configuration.py +0 -71
- azure/ai/textanalytics/_generated/v2022_05_01/aio/_text_analytics_client.py +0 -97
- azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_patch.py +0 -121
- azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_text_analytics_client_operations.py +0 -603
- azure/ai/textanalytics/_generated/v2022_05_01/models/__init__.py +0 -281
- azure/ai/textanalytics/_generated/v2022_05_01/models/_models_py3.py +0 -5722
- azure/ai/textanalytics/_generated/v2022_05_01/models/_text_analytics_client_enums.py +0 -439
- azure/ai/textanalytics/_generated/v2022_05_01/operations/_patch.py +0 -120
- azure/ai/textanalytics/_generated/v2022_05_01/operations/_text_analytics_client_operations.py +0 -744
- azure/ai/textanalytics/_generated/v2022_10_01_preview/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_configuration.py +0 -72
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_text_analytics_client.py +0 -100
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_vendor.py +0 -45
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_configuration.py +0 -71
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_text_analytics_client.py +0 -97
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_vendor.py +0 -27
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_patch.py +0 -121
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_text_analytics_client_operations.py +0 -603
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/__init__.py +0 -405
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_models_py3.py +0 -8420
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_patch.py +0 -486
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_text_analytics_client_enums.py +0 -729
- azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_patch.py +0 -120
- azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_text_analytics_client_operations.py +0 -744
- azure/ai/textanalytics/_generated/v3_0/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_0/_configuration.py +0 -66
- azure/ai/textanalytics/_generated/v3_0/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_0/_text_analytics_client.py +0 -96
- azure/ai/textanalytics/_generated/v3_0/_vendor.py +0 -33
- azure/ai/textanalytics/_generated/v3_0/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_0/aio/_configuration.py +0 -65
- azure/ai/textanalytics/_generated/v3_0/aio/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_0/aio/_text_analytics_client.py +0 -93
- azure/ai/textanalytics/_generated/v3_0/aio/_vendor.py +0 -27
- azure/ai/textanalytics/_generated/v3_0/aio/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_0/aio/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_0/aio/operations/_text_analytics_client_operations.py +0 -428
- azure/ai/textanalytics/_generated/v3_0/models/__init__.py +0 -81
- azure/ai/textanalytics/_generated/v3_0/models/_models_py3.py +0 -1467
- azure/ai/textanalytics/_generated/v3_0/models/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_0/models/_text_analytics_client_enums.py +0 -58
- azure/ai/textanalytics/_generated/v3_0/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_0/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_0/operations/_text_analytics_client_operations.py +0 -604
- azure/ai/textanalytics/_generated/v3_1/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_1/_configuration.py +0 -66
- azure/ai/textanalytics/_generated/v3_1/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_1/_text_analytics_client.py +0 -98
- azure/ai/textanalytics/_generated/v3_1/_vendor.py +0 -45
- azure/ai/textanalytics/_generated/v3_1/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_1/aio/_configuration.py +0 -65
- azure/ai/textanalytics/_generated/v3_1/aio/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_1/aio/_text_analytics_client.py +0 -95
- azure/ai/textanalytics/_generated/v3_1/aio/_vendor.py +0 -27
- azure/ai/textanalytics/_generated/v3_1/aio/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_1/aio/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_1/aio/operations/_text_analytics_client_operations.py +0 -1291
- azure/ai/textanalytics/_generated/v3_1/models/__init__.py +0 -205
- azure/ai/textanalytics/_generated/v3_1/models/_models_py3.py +0 -3976
- azure/ai/textanalytics/_generated/v3_1/models/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_1/models/_text_analytics_client_enums.py +0 -367
- azure/ai/textanalytics/_generated/v3_1/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_1/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_1/operations/_text_analytics_client_operations.py +0 -1709
- azure/ai/textanalytics/_lro.py +0 -553
- azure/ai/textanalytics/_models.py +0 -3158
- azure/ai/textanalytics/_policies.py +0 -66
- azure/ai/textanalytics/_request_handlers.py +0 -104
- azure/ai/textanalytics/_response_handlers.py +0 -583
- azure/ai/textanalytics/_text_analytics_client.py +0 -2081
- azure/ai/textanalytics/_user_agent.py +0 -8
- azure/ai/textanalytics/_validate.py +0 -113
- azure/ai/textanalytics/aio/_base_client_async.py +0 -98
- azure/ai/textanalytics/aio/_lro_async.py +0 -503
- azure/ai/textanalytics/aio/_response_handlers_async.py +0 -94
- azure/ai/textanalytics/aio/_text_analytics_client_async.py +0 -2077
- azure_ai_textanalytics-5.3.0b2.dist-info/RECORD +0 -115
- {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info/licenses}/LICENSE +0 -0
- {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/top_level.txt +0 -0
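The removals above cover both the per-API-version _generated packages and the hand-written convenience layer (_text_analytics_client.py, _lro.py, _models.py and their aio counterparts) that made up the 5.3.x client, while 6.0.0b1 ships a single regenerated client (_client.py, _operations/, models/, _utils/). For orientation, here is a minimal sketch of the async 5.3.x surface that sat on top of the deleted generated code shown below; the method and action names (begin_analyze_actions, RecognizeEntitiesAction) come from the 5.3.x public API and are assumptions relative to this diff, which only records file-level changes.

# Sketch only (5.3.x public API, not the new 6.0.0b1 surface): driving the async client
# whose machinery (aio/_lro_async.py, aio/_text_analytics_client_async.py) is removed here.
import asyncio

from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import RecognizeEntitiesAction
from azure.ai.textanalytics.aio import TextAnalyticsClient


async def main() -> None:
    # Placeholder endpoint and key for a Language resource.
    client = TextAnalyticsClient(
        "https://<resource>.cognitiveservices.azure.com/", AzureKeyCredential("<key>")
    )
    async with client:
        # Returns the AsyncAnalyzeActionsLROPoller defined in the removed aio/_lro_async.py.
        poller = await client.begin_analyze_actions(
            ["Microsoft was founded by Bill Gates and Paul Allen."],
            actions=[RecognizeEntitiesAction()],
        )
        # The poller result pages over per-document lists of action results.
        async for document_results in await poller.result():
            for action_result in document_results:
                if not action_result.is_error:
                    for entity in action_result.entities:
                        print(entity.text, entity.category)


asyncio.run(main())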
@@ -1,603 +0,0 @@
-# pylint: disable=too-many-lines
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
-
-from .....aio._lro_async import AsyncAnalyzeActionsLROPoller, AsyncAnalyzeActionsLROPollingMethod
-from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
-from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.polling.async_base_polling import AsyncLROBasePolling
-from azure.core.rest import HttpRequest
-from azure.core.tracing.decorator_async import distributed_trace_async
-from azure.core.utils import case_insensitive_dict
-
-from ... import models as _models
-from ..._vendor import _convert_request
-from ...operations._text_analytics_client_operations import build_analyze_text_cancel_job_request, build_analyze_text_job_status_request, build_analyze_text_request, build_analyze_text_submit_job_request
-from .._vendor import MixinABC
-T = TypeVar('T')
-ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
-
-class TextAnalyticsClientOperationsMixin(MixinABC):
-
-    @overload
-    async def analyze_text(
-        self,
-        body: _models.AnalyzeTextTask,
-        show_stats: Optional[bool] = None,
-        *,
-        content_type: str = "application/json",
-        **kwargs: Any
-    ) -> _models.AnalyzeTextTaskResult:
-        """Request text analysis over a collection of documents.
-
-        Submit a collection of text documents for analysis. Specify a single unique task to be
-        executed immediately.
-
-        :param body: Collection of documents to analyze and a single task to execute. Required.
-        :type body: ~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextTask
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: AnalyzeTextTaskResult or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextTaskResult
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def analyze_text(
-        self,
-        body: IO,
-        show_stats: Optional[bool] = None,
-        *,
-        content_type: str = "application/json",
-        **kwargs: Any
-    ) -> _models.AnalyzeTextTaskResult:
-        """Request text analysis over a collection of documents.
-
-        Submit a collection of text documents for analysis. Specify a single unique task to be
-        executed immediately.
-
-        :param body: Collection of documents to analyze and a single task to execute. Required.
-        :type body: IO
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: AnalyzeTextTaskResult or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextTaskResult
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-
-    @distributed_trace_async
-    async def analyze_text(
-        self,
-        body: Union[_models.AnalyzeTextTask, IO],
-        show_stats: Optional[bool] = None,
-        **kwargs: Any
-    ) -> _models.AnalyzeTextTaskResult:
-        """Request text analysis over a collection of documents.
-
-        Submit a collection of text documents for analysis. Specify a single unique task to be
-        executed immediately.
-
-        :param body: Collection of documents to analyze and a single task to execute. Is either a model
-         type or a IO type. Required.
-        :type body: ~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextTask or IO
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
-         Default value is None.
-        :paramtype content_type: str
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: AnalyzeTextTaskResult or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextTaskResult
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
-        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-10-01-preview"))  # type: str
-        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
-        cls = kwargs.pop('cls', None)  # type: ClsType[_models.AnalyzeTextTaskResult]
-
-        content_type = content_type or "application/json"
-        _json = None
-        _content = None
-        if isinstance(body, (IO, bytes)):
-            _content = body
-        else:
-            _json = self._serialize.body(body, 'AnalyzeTextTask')
-
-        request = build_analyze_text_request(
-            show_stats=show_stats,
-            api_version=api_version,
-            content_type=content_type,
-            json=_json,
-            content=_content,
-            template_url=self.analyze_text.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
-
-        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        deserialized = self._deserialize('AnalyzeTextTaskResult', pipeline_response)
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})
-
-        return deserialized
-
-    analyze_text.metadata = {'url': "/:analyze-text"}  # type: ignore
-
-
-    async def _analyze_text_submit_job_initial(
-        self,
-        body: Union[_models.AnalyzeTextJobsInput, IO],
-        **kwargs: Any
-    ) -> Optional[_models.AnalyzeTextJobState]:
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
-        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-10-01-preview"))  # type: str
-        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
-        cls = kwargs.pop('cls', None)  # type: ClsType[Optional[_models.AnalyzeTextJobState]]
-
-        content_type = content_type or "application/json"
-        _json = None
-        _content = None
-        if isinstance(body, (IO, bytes)):
-            _content = body
-        else:
-            _json = self._serialize.body(body, 'AnalyzeTextJobsInput')
-
-        request = build_analyze_text_submit_job_request(
-            api_version=api_version,
-            content_type=content_type,
-            json=_json,
-            content=_content,
-            template_url=self._analyze_text_submit_job_initial.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
-
-        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200, 202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        deserialized = None
-        response_headers = {}
-        if response.status_code == 200:
-            deserialized = self._deserialize('AnalyzeTextJobState', pipeline_response)
-
-        if response.status_code == 202:
-            response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
-
-
-        if cls:
-            return cls(pipeline_response, deserialized, response_headers)
-
-        return deserialized
-
-    _analyze_text_submit_job_initial.metadata = {'url': "/analyze-text/jobs"}  # type: ignore
-
-
-    @overload
-    async def begin_analyze_text_submit_job(
-        self,
-        body: _models.AnalyzeTextJobsInput,
-        *,
-        content_type: str = "application/json",
-        **kwargs: Any
-    ) -> AsyncAnalyzeActionsLROPoller[_models.AnalyzeTextJobState]:
-        """Submit text analysis job.
-
-        Submit a collection of text documents for analysis. Specify one or more unique tasks to be
-        executed as a long-running operation.
-
-        :param body: Collection of documents to analyze and one or more tasks to execute. Required.
-        :type body: ~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextJobsInput
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
-        :keyword polling: By default, your polling method will be AsyncAnalyzeActionsLROPollingMethod.
-         Pass in False for this operation to not poll, or pass in your own initialized polling object
-         for a personal polling strategy.
-        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
-        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
-         Retry-After header is present.
-        :return: An instance of AsyncAnalyzeActionsLROPoller that returns either AnalyzeTextJobState or
-         the result of cls(response)
-        :rtype:
-         ~.....aio._lro_async.AsyncAnalyzeActionsLROPoller[~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextJobState]
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def begin_analyze_text_submit_job(
-        self,
-        body: IO,
-        *,
-        content_type: str = "application/json",
-        **kwargs: Any
-    ) -> AsyncAnalyzeActionsLROPoller[_models.AnalyzeTextJobState]:
-        """Submit text analysis job.
-
-        Submit a collection of text documents for analysis. Specify one or more unique tasks to be
-        executed as a long-running operation.
-
-        :param body: Collection of documents to analyze and one or more tasks to execute. Required.
-        :type body: IO
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
-        :keyword polling: By default, your polling method will be AsyncAnalyzeActionsLROPollingMethod.
-         Pass in False for this operation to not poll, or pass in your own initialized polling object
-         for a personal polling strategy.
-        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
-        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
-         Retry-After header is present.
-        :return: An instance of AsyncAnalyzeActionsLROPoller that returns either AnalyzeTextJobState or
-         the result of cls(response)
-        :rtype:
-         ~.....aio._lro_async.AsyncAnalyzeActionsLROPoller[~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextJobState]
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-
-    @distributed_trace_async
-    async def begin_analyze_text_submit_job(
-        self,
-        body: Union[_models.AnalyzeTextJobsInput, IO],
-        **kwargs: Any
-    ) -> AsyncAnalyzeActionsLROPoller[_models.AnalyzeTextJobState]:
-        """Submit text analysis job.
-
-        Submit a collection of text documents for analysis. Specify one or more unique tasks to be
-        executed as a long-running operation.
-
-        :param body: Collection of documents to analyze and one or more tasks to execute. Is either a
-         model type or a IO type. Required.
-        :type body: ~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextJobsInput or IO
-        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
-         Default value is None.
-        :paramtype content_type: str
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
-        :keyword polling: By default, your polling method will be AsyncAnalyzeActionsLROPollingMethod.
-         Pass in False for this operation to not poll, or pass in your own initialized polling object
-         for a personal polling strategy.
-        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
-        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
-         Retry-After header is present.
-        :return: An instance of AsyncAnalyzeActionsLROPoller that returns either AnalyzeTextJobState or
-         the result of cls(response)
-        :rtype:
-         ~.....aio._lro_async.AsyncAnalyzeActionsLROPoller[~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextJobState]
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
-        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
-        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-10-01-preview"))  # type: str
-        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
-        cls = kwargs.pop('cls', None)  # type: ClsType[_models.AnalyzeTextJobState]
-        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
-        lro_delay = kwargs.pop(
-            'polling_interval',
-            self._config.polling_interval
-        )
-        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
-        if cont_token is None:
-            raw_result = await self._analyze_text_submit_job_initial(  # type: ignore
-                body=body,
-                api_version=api_version,
-                content_type=content_type,
-                cls=lambda x,y,z: x,
-                headers=_headers,
-                params=_params,
-                **kwargs
-            )
-        kwargs.pop('error_map', None)
-
-        def get_long_running_output(pipeline_response):
-            deserialized = self._deserialize('AnalyzeTextJobState', pipeline_response)
-            if cls:
-                return cls(pipeline_response, deserialized, {})
-            return deserialized
-
-
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-
-        if polling is True:
-            polling_method = cast(AsyncPollingMethod, AsyncAnalyzeActionsLROPollingMethod(
-                lro_delay,
-
-                path_format_arguments=path_format_arguments,
-                **kwargs
-            ))  # type: AsyncPollingMethod
-        elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
-        else: polling_method = polling
-        if cont_token:
-            return AsyncAnalyzeActionsLROPoller.from_continuation_token(
-                polling_method=polling_method,
-                continuation_token=cont_token,
-                client=self._client,
-                deserialization_callback=get_long_running_output
-            )
-        return AsyncAnalyzeActionsLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-
-    begin_analyze_text_submit_job.metadata = {'url': "/analyze-text/jobs"}  # type: ignore
-
-    @distributed_trace_async
-    async def analyze_text_job_status(
-        self,
-        job_id: str,
-        show_stats: Optional[bool] = None,
-        top: Optional[int] = None,
-        skip: Optional[int] = None,
-        **kwargs: Any
-    ) -> _models.AnalyzeTextJobState:
-        """Get analysis status and results.
-
-        Get the status of an analysis job. A job may consist of one or more tasks. Once all tasks are
-        succeeded, the job will transition to the succeeded state and results will be available for
-        each task.
-
-        :param job_id: Job ID. Required.
-        :type job_id: str
-        :param show_stats: (Optional) if set to true, response will contain request and document level
-         statistics. Default value is None.
-        :type show_stats: bool
-        :param top: The maximum number of resources to return from the collection. Default value is
-         None.
-        :type top: int
-        :param skip: An offset into the collection of the first resource to be returned. Default value
-         is None.
-        :type skip: int
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: AnalyzeTextJobState or the result of cls(response)
-        :rtype: ~azure.ai.textanalytics.v2022_10_01_preview.models.AnalyzeTextJobState
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = kwargs.pop("headers", {}) or {}
-        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
-        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-10-01-preview"))  # type: str
-        cls = kwargs.pop('cls', None)  # type: ClsType[_models.AnalyzeTextJobState]
-
-
-        request = build_analyze_text_job_status_request(
-            job_id=job_id,
-            show_stats=show_stats,
-            top=top,
-            skip=skip,
-            api_version=api_version,
-            template_url=self.analyze_text_job_status.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
-
-        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        deserialized = self._deserialize('AnalyzeTextJobState', pipeline_response)
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})
-
-        return deserialized
-
-    analyze_text_job_status.metadata = {'url': "/analyze-text/jobs/{jobId}"}  # type: ignore
-
-
-    async def _analyze_text_cancel_job_initial(  # pylint: disable=inconsistent-return-statements
-        self,
-        job_id: str,
-        **kwargs: Any
-    ) -> None:
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}) or {})
-
-        _headers = kwargs.pop("headers", {}) or {}
-        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
-        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-10-01-preview"))  # type: str
-        cls = kwargs.pop('cls', None)  # type: ClsType[None]
-
-
-        request = build_analyze_text_cancel_job_request(
-            job_id=job_id,
-            api_version=api_version,
-            template_url=self._analyze_text_cancel_job_initial.metadata['url'],
-            headers=_headers,
-            params=_params,
-        )
-        request = _convert_request(request)
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
-
-        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
-            request,
-            stream=False,
-            **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [202]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
-            raise HttpResponseError(response=response, model=error)
-
-        response_headers = {}
-        response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
-
-
-        if cls:
-            return cls(pipeline_response, None, response_headers)
-
-    _analyze_text_cancel_job_initial.metadata = {'url': "/analyze-text/jobs/{jobId}:cancel"}  # type: ignore
-
-
-    @distributed_trace_async
-    async def begin_analyze_text_cancel_job(
-        self,
-        job_id: str,
-        **kwargs: Any
-    ) -> AsyncLROPoller[None]:
-        """Cancel a long-running Text Analysis job.
-
-        Cancel a long-running Text Analysis job.
-
-        :param job_id: Job ID. Required.
-        :type job_id: str
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
-        :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
-         for this operation to not poll, or pass in your own initialized polling object for a personal
-         polling strategy.
-        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
-        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
-         Retry-After header is present.
-        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
-        :rtype: ~azure.core.polling.AsyncLROPoller[None]
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        _headers = kwargs.pop("headers", {}) or {}
-        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
-        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-10-01-preview"))  # type: str
-        cls = kwargs.pop('cls', None)  # type: ClsType[None]
-        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
-        lro_delay = kwargs.pop(
-            'polling_interval',
-            self._config.polling_interval
-        )
-        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
-        if cont_token is None:
-            raw_result = await self._analyze_text_cancel_job_initial(  # type: ignore
-                job_id=job_id,
-                api_version=api_version,
-                cls=lambda x,y,z: x,
-                headers=_headers,
-                params=_params,
-                **kwargs
-            )
-        kwargs.pop('error_map', None)
-
-        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
-            if cls:
-                return cls(pipeline_response, None, {})
-
-
-        path_format_arguments = {
-            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
-        }
-
-        if polling is True:
-            polling_method = cast(AsyncPollingMethod, AsyncLROBasePolling(
-                lro_delay,
-
-                path_format_arguments=path_format_arguments,
-                **kwargs
-            ))  # type: AsyncPollingMethod
-        elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
-        else: polling_method = polling
-        if cont_token:
-            return AsyncLROPoller.from_continuation_token(
-                polling_method=polling_method,
-                continuation_token=cont_token,
-                client=self._client,
-                deserialization_callback=get_long_running_output
-            )
-        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-
-    begin_analyze_text_cancel_job.metadata = {'url': "/analyze-text/jobs/{jobId}:cancel"}  # type: ignore