azure-ai-textanalytics 5.3.0b1__py3-none-any.whl → 6.0.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of azure-ai-textanalytics might be problematic.
- azure/ai/textanalytics/__init__.py +26 -197
- azure/ai/textanalytics/_client.py +111 -0
- azure/ai/textanalytics/_configuration.py +73 -0
- azure/ai/textanalytics/{_generated/v2022_05_01/operations → _operations}/__init__.py +13 -8
- azure/ai/textanalytics/_operations/_operations.py +716 -0
- azure/ai/textanalytics/{_generated/v2022_05_01/models → _operations}/_patch.py +8 -6
- azure/ai/textanalytics/_patch.py +350 -0
- azure/ai/textanalytics/{_generated/aio → _utils}/__init__.py +1 -5
- azure/ai/textanalytics/_utils/model_base.py +1237 -0
- azure/ai/textanalytics/{_generated/_serialization.py → _utils/serialization.py} +640 -616
- azure/ai/textanalytics/{_generated/v2022_05_01/aio/_vendor.py → _utils/utils.py} +10 -12
- azure/ai/textanalytics/_version.py +8 -7
- azure/ai/textanalytics/aio/__init__.py +25 -14
- azure/ai/textanalytics/aio/_client.py +115 -0
- azure/ai/textanalytics/aio/_configuration.py +75 -0
- azure/ai/textanalytics/{_generated/v2022_10_01_preview/aio/operations → aio/_operations}/__init__.py +13 -8
- azure/ai/textanalytics/aio/_operations/_operations.py +623 -0
- azure/ai/textanalytics/{_generated/v2022_05_01 → aio/_operations}/_patch.py +8 -6
- azure/ai/textanalytics/aio/_patch.py +344 -0
- azure/ai/textanalytics/models/__init__.py +402 -0
- azure/ai/textanalytics/models/_enums.py +1979 -0
- azure/ai/textanalytics/models/_models.py +6641 -0
- azure/ai/textanalytics/{_generated/v2022_05_01/aio → models}/_patch.py +8 -6
- azure/ai/textanalytics/py.typed +1 -0
- {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/METADATA +755 -319
- azure_ai_textanalytics-6.0.0b1.dist-info/RECORD +29 -0
- {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/WHEEL +1 -1
- azure/ai/textanalytics/_base_client.py +0 -111
- azure/ai/textanalytics/_check.py +0 -22
- azure/ai/textanalytics/_dict_mixin.py +0 -54
- azure/ai/textanalytics/_generated/__init__.py +0 -16
- azure/ai/textanalytics/_generated/_configuration.py +0 -70
- azure/ai/textanalytics/_generated/_operations_mixin.py +0 -795
- azure/ai/textanalytics/_generated/_text_analytics_client.py +0 -126
- azure/ai/textanalytics/_generated/_version.py +0 -8
- azure/ai/textanalytics/_generated/aio/_configuration.py +0 -66
- azure/ai/textanalytics/_generated/aio/_operations_mixin.py +0 -776
- azure/ai/textanalytics/_generated/aio/_text_analytics_client.py +0 -124
- azure/ai/textanalytics/_generated/models.py +0 -8
- azure/ai/textanalytics/_generated/v2022_05_01/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_05_01/_configuration.py +0 -72
- azure/ai/textanalytics/_generated/v2022_05_01/_text_analytics_client.py +0 -100
- azure/ai/textanalytics/_generated/v2022_05_01/_vendor.py +0 -45
- azure/ai/textanalytics/_generated/v2022_05_01/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_05_01/aio/_configuration.py +0 -71
- azure/ai/textanalytics/_generated/v2022_05_01/aio/_text_analytics_client.py +0 -97
- azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_patch.py +0 -121
- azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_text_analytics_client_operations.py +0 -603
- azure/ai/textanalytics/_generated/v2022_05_01/models/__init__.py +0 -281
- azure/ai/textanalytics/_generated/v2022_05_01/models/_models_py3.py +0 -5722
- azure/ai/textanalytics/_generated/v2022_05_01/models/_text_analytics_client_enums.py +0 -439
- azure/ai/textanalytics/_generated/v2022_05_01/operations/_patch.py +0 -120
- azure/ai/textanalytics/_generated/v2022_05_01/operations/_text_analytics_client_operations.py +0 -744
- azure/ai/textanalytics/_generated/v2022_10_01_preview/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_configuration.py +0 -72
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_text_analytics_client.py +0 -100
- azure/ai/textanalytics/_generated/v2022_10_01_preview/_vendor.py +0 -45
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_configuration.py +0 -71
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_text_analytics_client.py +0 -97
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_vendor.py +0 -27
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_patch.py +0 -121
- azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_text_analytics_client_operations.py +0 -603
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/__init__.py +0 -407
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_models_py3.py +0 -8462
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_patch.py +0 -72
- azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_text_analytics_client_enums.py +0 -730
- azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_patch.py +0 -120
- azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_text_analytics_client_operations.py +0 -744
- azure/ai/textanalytics/_generated/v3_0/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_0/_configuration.py +0 -66
- azure/ai/textanalytics/_generated/v3_0/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_0/_text_analytics_client.py +0 -96
- azure/ai/textanalytics/_generated/v3_0/_vendor.py +0 -33
- azure/ai/textanalytics/_generated/v3_0/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_0/aio/_configuration.py +0 -65
- azure/ai/textanalytics/_generated/v3_0/aio/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_0/aio/_text_analytics_client.py +0 -93
- azure/ai/textanalytics/_generated/v3_0/aio/_vendor.py +0 -27
- azure/ai/textanalytics/_generated/v3_0/aio/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_0/aio/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_0/aio/operations/_text_analytics_client_operations.py +0 -428
- azure/ai/textanalytics/_generated/v3_0/models/__init__.py +0 -81
- azure/ai/textanalytics/_generated/v3_0/models/_models_py3.py +0 -1467
- azure/ai/textanalytics/_generated/v3_0/models/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_0/models/_text_analytics_client_enums.py +0 -58
- azure/ai/textanalytics/_generated/v3_0/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_0/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_0/operations/_text_analytics_client_operations.py +0 -604
- azure/ai/textanalytics/_generated/v3_1/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_1/_configuration.py +0 -66
- azure/ai/textanalytics/_generated/v3_1/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_1/_text_analytics_client.py +0 -98
- azure/ai/textanalytics/_generated/v3_1/_vendor.py +0 -45
- azure/ai/textanalytics/_generated/v3_1/aio/__init__.py +0 -20
- azure/ai/textanalytics/_generated/v3_1/aio/_configuration.py +0 -65
- azure/ai/textanalytics/_generated/v3_1/aio/_patch.py +0 -31
- azure/ai/textanalytics/_generated/v3_1/aio/_text_analytics_client.py +0 -95
- azure/ai/textanalytics/_generated/v3_1/aio/_vendor.py +0 -27
- azure/ai/textanalytics/_generated/v3_1/aio/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_1/aio/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_1/aio/operations/_text_analytics_client_operations.py +0 -1291
- azure/ai/textanalytics/_generated/v3_1/models/__init__.py +0 -205
- azure/ai/textanalytics/_generated/v3_1/models/_models_py3.py +0 -3976
- azure/ai/textanalytics/_generated/v3_1/models/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_1/models/_text_analytics_client_enums.py +0 -367
- azure/ai/textanalytics/_generated/v3_1/operations/__init__.py +0 -18
- azure/ai/textanalytics/_generated/v3_1/operations/_patch.py +0 -19
- azure/ai/textanalytics/_generated/v3_1/operations/_text_analytics_client_operations.py +0 -1709
- azure/ai/textanalytics/_lro.py +0 -552
- azure/ai/textanalytics/_models.py +0 -3142
- azure/ai/textanalytics/_policies.py +0 -66
- azure/ai/textanalytics/_request_handlers.py +0 -104
- azure/ai/textanalytics/_response_handlers.py +0 -580
- azure/ai/textanalytics/_text_analytics_client.py +0 -1802
- azure/ai/textanalytics/_user_agent.py +0 -8
- azure/ai/textanalytics/_validate.py +0 -113
- azure/ai/textanalytics/aio/_base_client_async.py +0 -95
- azure/ai/textanalytics/aio/_lro_async.py +0 -501
- azure/ai/textanalytics/aio/_response_handlers_async.py +0 -94
- azure/ai/textanalytics/aio/_text_analytics_client_async.py +0 -1800
- azure_ai_textanalytics-5.3.0b1.dist-info/RECORD +0 -115
- {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info/licenses}/LICENSE +0 -0
- {azure_ai_textanalytics-5.3.0b1.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/top_level.txt +0 -0
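
The removals at the end of this list (the hand-written `_text_analytics_client.py`, `_models.py`, `_lro.py`, the response handlers, and the entire multi-api-version `_generated/` tree) together with the additions at the top (`_client.py`, `_operations/`, `_utils/`, and a public `models/` subpackage) indicate that 6.0.0b1 replaces the 5.x hand-authored surface with a regenerated, single-api-version client. A minimal sketch of what the new import surface likely looks like, inferred from these paths and from the operations code below; the public client name `TextAnalysisClient` is an assumption based on `TextAnalysisClientConfiguration` and is not confirmed by this diff:

```python
# Hypothetical usage sketch for the 6.0.0b1 layout shown in this diff; verify names
# against the released azure/ai/textanalytics/__init__.py before depending on them.
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalysisClient       # assumed public client (see _client.py/_patch.py)
from azure.ai.textanalytics.models import AnalyzeTextInput  # models now live in a dedicated subpackage

client = TextAnalysisClient(
    endpoint="https://<your-language-resource>.cognitiveservices.azure.com",
    credential=AzureKeyCredential("<api-key>"),
)
```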
azure/ai/textanalytics/_operations/_operations.py

@@ -0,0 +1,716 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from collections.abc import MutableMapping  # pylint:disable=import-error
from io import IOBase
import json
from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload

from azure.core import PipelineClient
from azure.core.exceptions import (
    ClientAuthenticationError,
    HttpResponseError,
    ResourceExistsError,
    ResourceNotFoundError,
    ResourceNotModifiedError,
    StreamClosedError,
    StreamConsumedError,
    map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict

from .. import models as _models
from .._configuration import TextAnalysisClientConfiguration
from .._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize
from .._utils.serialization import Serializer
from .._utils.utils import ClientMixinABC

JSON = MutableMapping[str, Any]
_Unset: Any = object()
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]

_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False


def build_text_analysis_analyze_text_request(*, show_stats: Optional[bool] = None, **kwargs: Any) -> HttpRequest:
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/:analyze-text"

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if show_stats is not None:
        _params["showStats"] = _SERIALIZER.query("show_stats", show_stats, "bool")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)


def build_text_analysis_get_job_status_request(  # pylint: disable=name-too-long
    job_id: str,
    *,
    show_stats: Optional[bool] = None,
    top: Optional[int] = None,
    skip: Optional[int] = None,
    **kwargs: Any
) -> HttpRequest:
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/analyze-text/jobs/{jobId}"
    path_format_arguments = {
        "jobId": _SERIALIZER.url("job_id", job_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if show_stats is not None:
        _params["showStats"] = _SERIALIZER.query("show_stats", show_stats, "bool")
    if top is not None:
        _params["top"] = _SERIALIZER.query("top", top, "int")
    if skip is not None:
        _params["skip"] = _SERIALIZER.query("skip", skip, "int")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_text_analysis_analyze_text_job_request(**kwargs: Any) -> HttpRequest:  # pylint: disable=name-too-long
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview"))
    # Construct URL
    _url = "/analyze-text/jobs"

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)


def build_text_analysis_cancel_job_request(job_id: str, **kwargs: Any) -> HttpRequest:
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview"))
    # Construct URL
    _url = "/analyze-text/jobs/{jobId}:cancel"
    path_format_arguments = {
        "jobId": _SERIALIZER.url("job_id", job_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    return HttpRequest(method="POST", url=_url, params=_params, **kwargs)


class _TextAnalysisClientOperationsMixin(
    ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], TextAnalysisClientConfiguration]
):

    @overload
    def analyze_text(
        self,
        body: _models.AnalyzeTextInput,
        *,
        show_stats: Optional[bool] = None,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AnalyzeTextResult:
        """Request text analysis over a collection of documents.

        :param body: The input documents to analyze. Required.
        :type body: ~azure.ai.textanalytics.models.AnalyzeTextInput
        :keyword show_stats: (Optional) if set to true, response will contain request and document
         level statistics. Default value is None.
        :paramtype show_stats: bool
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: AnalyzeTextResult. The AnalyzeTextResult is compatible with MutableMapping
        :rtype: ~azure.ai.textanalytics.models.AnalyzeTextResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def analyze_text(
        self, body: JSON, *, show_stats: Optional[bool] = None, content_type: str = "application/json", **kwargs: Any
    ) -> _models.AnalyzeTextResult:
        """Request text analysis over a collection of documents.

        :param body: The input documents to analyze. Required.
        :type body: JSON
        :keyword show_stats: (Optional) if set to true, response will contain request and document
         level statistics. Default value is None.
        :paramtype show_stats: bool
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: AnalyzeTextResult. The AnalyzeTextResult is compatible with MutableMapping
        :rtype: ~azure.ai.textanalytics.models.AnalyzeTextResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def analyze_text(
        self,
        body: IO[bytes],
        *,
        show_stats: Optional[bool] = None,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AnalyzeTextResult:
        """Request text analysis over a collection of documents.

        :param body: The input documents to analyze. Required.
        :type body: IO[bytes]
        :keyword show_stats: (Optional) if set to true, response will contain request and document
         level statistics. Default value is None.
        :paramtype show_stats: bool
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: AnalyzeTextResult. The AnalyzeTextResult is compatible with MutableMapping
        :rtype: ~azure.ai.textanalytics.models.AnalyzeTextResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def analyze_text(
        self,
        body: Union[_models.AnalyzeTextInput, JSON, IO[bytes]],
        *,
        show_stats: Optional[bool] = None,
        **kwargs: Any
    ) -> _models.AnalyzeTextResult:
        """Request text analysis over a collection of documents.

        :param body: The input documents to analyze. Is one of the following types: AnalyzeTextInput,
         JSON, IO[bytes] Required.
        :type body: ~azure.ai.textanalytics.models.AnalyzeTextInput or JSON or IO[bytes]
        :keyword show_stats: (Optional) if set to true, response will contain request and document
         level statistics. Default value is None.
        :paramtype show_stats: bool
        :return: AnalyzeTextResult. The AnalyzeTextResult is compatible with MutableMapping
        :rtype: ~azure.ai.textanalytics.models.AnalyzeTextResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.AnalyzeTextResult] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _content = None
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_text_analysis_analyze_text_request(
            show_stats=show_stats,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.AnalyzeTextResult, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def get_job_status(
        self,
        job_id: str,
        *,
        show_stats: Optional[bool] = None,
        top: Optional[int] = None,
        skip: Optional[int] = None,
        **kwargs: Any
    ) -> _models.AnalyzeTextOperationState:
        """Get analysis status and results.

        Get the status of an analysis job. A job can consist of one or more tasks. After all tasks
        succeed, the job transitions to the succeeded state and results are available for each task.

        :param job_id: job ID. Required.
        :type job_id: str
        :keyword show_stats: (Optional) if set to true, response will contain request and document
         level statistics. Default value is None.
        :paramtype show_stats: bool
        :keyword top: The maximum number of resources to return from the collection. Default value is
         None.
        :paramtype top: int
        :keyword skip: An offset into the collection of the first resource to be returned. Default
         value is None.
        :paramtype skip: int
        :return: AnalyzeTextOperationState. The AnalyzeTextOperationState is compatible with
         MutableMapping
        :rtype: ~azure.ai.textanalytics.models.AnalyzeTextOperationState
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.AnalyzeTextOperationState] = kwargs.pop("cls", None)

        _request = build_text_analysis_get_job_status_request(
            job_id=job_id,
            show_stats=show_stats,
            top=top,
            skip=skip,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.AnalyzeTextOperationState, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    def _analyze_text_job_initial(
        self,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        text_input: _models.MultiLanguageTextInput = _Unset,
        actions: list[_models.AnalyzeTextOperationAction] = _Unset,
        display_name: Optional[str] = None,
        default_language: Optional[str] = None,
        cancel_after: Optional[float] = None,
        **kwargs: Any
    ) -> Iterator[bytes]:
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)

        if body is _Unset:
            if text_input is _Unset:
                raise TypeError("missing required argument: text_input")
            if actions is _Unset:
                raise TypeError("missing required argument: actions")
            body = {
                "analysisInput": text_input,
                "cancelAfter": cancel_after,
                "defaultLanguage": default_language,
                "displayName": display_name,
                "tasks": actions,
            }
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_text_analysis_analyze_text_job_request(
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = True
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [202]:
            try:
                response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location"))

        deserialized = response.iter_bytes()

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore

    @overload
    def _begin_analyze_text_job(
        self,
        *,
        text_input: _models.MultiLanguageTextInput,
        actions: list[_models.AnalyzeTextOperationAction],
        content_type: str = "application/json",
        display_name: Optional[str] = None,
        default_language: Optional[str] = None,
        cancel_after: Optional[float] = None,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Submit a collection of text documents for analysis. Specify one or more unique tasks to be
        executed as a long-running operation.

        :keyword text_input: Contains the input to be analyzed. Required.
        :paramtype text_input: ~azure.ai.textanalytics.models.MultiLanguageTextInput
        :keyword actions: List of tasks to be performed as part of the LRO. Required.
        :paramtype actions: list[~azure.ai.textanalytics.models.AnalyzeTextOperationAction]
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword display_name: Name for the task. Default value is None.
        :paramtype display_name: str
        :keyword default_language: Default language to use for records requesting automatic language
         detection. Default value is None.
        :paramtype default_language: str
        :keyword cancel_after: Optional duration in seconds after which the job will be canceled if not
         completed. Default value is None.
        :paramtype cancel_after: float
        :return: An instance of LROPoller that returns None
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def _begin_analyze_text_job(
        self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> LROPoller[None]:
        """Submit a collection of text documents for analysis. Specify one or more unique tasks to be
        executed as a long-running operation.

        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of LROPoller that returns None
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def _begin_analyze_text_job(
        self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> LROPoller[None]:
        """Submit a collection of text documents for analysis. Specify one or more unique tasks to be
        executed as a long-running operation.

        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of LROPoller that returns None
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def _begin_analyze_text_job(
        self,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        text_input: _models.MultiLanguageTextInput = _Unset,
        actions: list[_models.AnalyzeTextOperationAction] = _Unset,
        display_name: Optional[str] = None,
        default_language: Optional[str] = None,
        cancel_after: Optional[float] = None,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Submit a collection of text documents for analysis. Specify one or more unique tasks to be
        executed as a long-running operation.

        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword text_input: Contains the input to be analyzed. Required.
        :paramtype text_input: ~azure.ai.textanalytics.models.MultiLanguageTextInput
        :keyword actions: List of tasks to be performed as part of the LRO. Required.
        :paramtype actions: list[~azure.ai.textanalytics.models.AnalyzeTextOperationAction]
        :keyword display_name: Name for the task. Default value is None.
        :paramtype display_name: str
        :keyword default_language: Default language to use for records requesting automatic language
         detection. Default value is None.
        :paramtype default_language: str
        :keyword cancel_after: Optional duration in seconds after which the job will be canceled if not
         completed. Default value is None.
        :paramtype cancel_after: float
        :return: An instance of LROPoller that returns None
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            raw_result = self._analyze_text_job_initial(
                body=body,
                text_input=text_input,
                actions=actions,
                display_name=display_name,
                default_language=default_language,
                cancel_after=cancel_after,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }

        if polling is True:
            polling_method: PollingMethod = cast(
                PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    def _cancel_job_initial(self, job_id: str, **kwargs: Any) -> Iterator[bytes]:
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)

        _request = build_text_analysis_cancel_job_request(
            job_id=job_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = True
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [202]:
            try:
                response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location"))

        deserialized = response.iter_bytes()

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def begin_cancel_job(self, job_id: str, **kwargs: Any) -> LROPoller[None]:
        """Cancel a long-running Text Analysis job.

        Cancel a long-running Text Analysis job.

        :param job_id: The job ID to cancel. Required.
        :type job_id: str
        :return: An instance of LROPoller that returns None
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            raw_result = self._cancel_job_initial(
                job_id=job_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs
            )
            raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }

        if polling is True:
            polling_method: PollingMethod = cast(
                PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
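
The mixin above defines the full synchronous surface of the regenerated operations: a single `analyze_text` call, a `get_job_status` poll, and a `begin_cancel_job` LRO, while job submission (`_begin_analyze_text_job`) stays private and is presumably re-exposed by the hand-written `_patch.py` (+350 lines, not shown in this hunk). A hedged sketch of driving these operations with the client from the earlier sketch; the JSON payload follows the documented Language `analyze-text` request shape rather than anything shown in this diff, so treat its keys as assumptions:

```python
# Sketch only: exercises the operations defined by _TextAnalysisClientOperationsMixin.
# The "kind"/"analysisInput" payload shape is an assumption taken from the Language
# REST API docs, not from this diff; the method signatures themselves are from above.
body = {
    "kind": "EntityRecognition",
    "analysisInput": {
        "documents": [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates."}]
    },
}

result = client.analyze_text(body, show_stats=True)  # JSON overload -> AnalyzeTextResult
print(dict(result))                                  # result models are MutableMapping-compatible

# Poll a previously submitted analysis job, then cancel it via the LRO.
state = client.get_job_status("<job-id>", show_stats=True, top=10)
print(dict(state))                                   # AnalyzeTextOperationState as a mapping

poller = client.begin_cancel_job("<job-id>")         # returns LROPoller[None]
poller.wait()
```

As in other azure-core clients, `begin_cancel_job` also accepts `polling=False` or a `continuation_token` keyword to skip or resume polling, per the branches in the method body above.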