azure-storage-blob 12.26.0b1__py3-none-any.whl → 12.27.0__py3-none-any.whl
This diff shows the changes between the two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- azure/storage/blob/__init__.py +6 -5
- azure/storage/blob/_blob_client.py +59 -38
- azure/storage/blob/_blob_client.pyi +780 -0
- azure/storage/blob/_blob_client_helpers.py +4 -3
- azure/storage/blob/_blob_service_client.py +57 -17
- azure/storage/blob/_blob_service_client.pyi +182 -0
- azure/storage/blob/_container_client.py +47 -22
- azure/storage/blob/_container_client.pyi +380 -0
- azure/storage/blob/_deserialize.py +1 -1
- azure/storage/blob/_download.py +7 -7
- azure/storage/blob/_encryption.py +177 -184
- azure/storage/blob/_generated/_azure_blob_storage.py +3 -2
- azure/storage/blob/_generated/_configuration.py +2 -2
- azure/storage/blob/_generated/_utils/__init__.py +6 -0
- azure/storage/blob/_generated/{_serialization.py → _utils/serialization.py} +4 -22
- azure/storage/blob/_generated/aio/_azure_blob_storage.py +3 -2
- azure/storage/blob/_generated/aio/_configuration.py +2 -2
- azure/storage/blob/_generated/aio/operations/_append_blob_operations.py +6 -10
- azure/storage/blob/_generated/aio/operations/_blob_operations.py +35 -39
- azure/storage/blob/_generated/aio/operations/_block_blob_operations.py +9 -13
- azure/storage/blob/_generated/aio/operations/_container_operations.py +20 -24
- azure/storage/blob/_generated/aio/operations/_page_blob_operations.py +13 -17
- azure/storage/blob/_generated/aio/operations/_service_operations.py +10 -14
- azure/storage/blob/_generated/models/_models_py3.py +30 -9
- azure/storage/blob/_generated/operations/_append_blob_operations.py +11 -15
- azure/storage/blob/_generated/operations/_blob_operations.py +60 -64
- azure/storage/blob/_generated/operations/_block_blob_operations.py +16 -20
- azure/storage/blob/_generated/operations/_container_operations.py +39 -43
- azure/storage/blob/_generated/operations/_page_blob_operations.py +23 -27
- azure/storage/blob/_generated/operations/_service_operations.py +19 -23
- azure/storage/blob/_lease.py +3 -2
- azure/storage/blob/_lease.pyi +81 -0
- azure/storage/blob/_list_blobs_helper.py +1 -1
- azure/storage/blob/_quick_query_helper.py +3 -3
- azure/storage/blob/_serialize.py +1 -0
- azure/storage/blob/_shared/__init__.py +7 -7
- azure/storage/blob/_shared/authentication.py +49 -32
- azure/storage/blob/_shared/avro/avro_io.py +44 -42
- azure/storage/blob/_shared/avro/avro_io_async.py +42 -41
- azure/storage/blob/_shared/avro/datafile.py +24 -21
- azure/storage/blob/_shared/avro/datafile_async.py +15 -15
- azure/storage/blob/_shared/avro/schema.py +196 -217
- azure/storage/blob/_shared/base_client.py +79 -70
- azure/storage/blob/_shared/base_client_async.py +53 -68
- azure/storage/blob/_shared/constants.py +1 -1
- azure/storage/blob/_shared/models.py +94 -92
- azure/storage/blob/_shared/parser.py +3 -3
- azure/storage/blob/_shared/policies.py +186 -147
- azure/storage/blob/_shared/policies_async.py +58 -69
- azure/storage/blob/_shared/request_handlers.py +50 -45
- azure/storage/blob/_shared/response_handlers.py +54 -45
- azure/storage/blob/_shared/shared_access_signature.py +65 -73
- azure/storage/blob/_shared/uploads.py +56 -49
- azure/storage/blob/_shared/uploads_async.py +70 -58
- azure/storage/blob/_version.py +1 -1
- azure/storage/blob/aio/__init__.py +8 -10
- azure/storage/blob/aio/_blob_client_async.py +81 -48
- azure/storage/blob/aio/_blob_client_async.pyi +763 -0
- azure/storage/blob/aio/_blob_service_client_async.py +54 -15
- azure/storage/blob/aio/_blob_service_client_async.pyi +187 -0
- azure/storage/blob/aio/_container_client_async.py +55 -26
- azure/storage/blob/aio/_container_client_async.pyi +384 -0
- azure/storage/blob/aio/_download_async.py +15 -11
- azure/storage/blob/aio/_lease_async.py +3 -2
- azure/storage/blob/aio/_lease_async.pyi +81 -0
- azure/storage/blob/aio/_quick_query_helper_async.py +3 -3
- {azure_storage_blob-12.26.0b1.dist-info → azure_storage_blob-12.27.0.dist-info}/METADATA +18 -6
- azure_storage_blob-12.27.0.dist-info/RECORD +94 -0
- {azure_storage_blob-12.26.0b1.dist-info → azure_storage_blob-12.27.0.dist-info}/WHEEL +1 -1
- azure_storage_blob-12.26.0b1.dist-info/RECORD +0 -85
- {azure_storage_blob-12.26.0b1.dist-info → azure_storage_blob-12.27.0.dist-info/licenses}/LICENSE +0 -0
- {azure_storage_blob-12.26.0b1.dist-info → azure_storage_blob-12.27.0.dist-info}/top_level.txt +0 -0
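
The per-line diff rendered below covers azure/storage/blob/_shared/policies.py (listed above as +186 -147), the shared pipeline-policy module. This release also moves the package from the 12.26.0b1 beta to the stable 12.27.0 wheel; a quick, standard-library-only way to confirm which release is installed after upgrading:

```python
# Verify the upgrade took effect (e.g. after `pip install azure-storage-blob==12.27.0`).
from importlib.metadata import version

print(version("azure-storage-blob"))  # expected: 12.27.0
```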
@@ -28,18 +28,18 @@ from azure.core.pipeline.policies import (
     HTTPPolicy,
     NetworkTraceLoggingPolicy,
     RequestHistory,
-    SansIOHTTPPolicy
+    SansIOHTTPPolicy,
 )

 from .authentication import AzureSigningError, StorageHttpChallenge
 from .constants import DEFAULT_OAUTH_SCOPE
-from .models import LocationMode
+from .models import LocationMode, StorageErrorCode

 if TYPE_CHECKING:
     from azure.core.credentials import TokenCredential
     from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
         PipelineRequest,
-        PipelineResponse
+        PipelineResponse,
     )

@@ -48,14 +48,14 @@ _LOGGER = logging.getLogger(__name__)

 def encode_base64(data):
     if isinstance(data, str):
-        data = data.encode(
+        data = data.encode("utf-8")
     encoded = base64.b64encode(data)
-    return encoded.decode(
+    return encoded.decode("utf-8")


 # Are we out of retries?
 def is_exhausted(settings):
-    retry_counts = (settings[
+    retry_counts = (settings["total"], settings["connect"], settings["read"], settings["status"])
     retry_counts = list(filter(None, retry_counts))
     if not retry_counts:
         return False
@@ -63,8 +63,8 @@ def is_exhausted(settings):


 def retry_hook(settings, **kwargs):
-    if settings[
-        settings[
+    if settings["hook"]:
+        settings["hook"](retry_count=settings["count"] - 1, location_mode=settings["mode"], **kwargs)


 # Is this method/status code retryable? (Based on allowlists and control
@@ -72,7 +72,7 @@ def retry_hook(settings, **kwargs):
 # respect the Retry-After header, whether this header is present, and
 # whether the returned status code is on the list of status codes to
 # be retried upon on the presence of the aforementioned header)
-def is_retry(response, mode):
+def is_retry(response, mode):  # pylint: disable=too-many-return-statements
     status = response.http_response.status_code
     if 300 <= status < 500:
         # An exception occurred, but in most cases it was expected. Examples could
@@ -83,6 +83,14 @@ def is_retry(response, mode):
         if status == 408:
             # Response code 408 is a timeout and should be retried.
             return True
+        if status >= 400:
+            error_code = response.http_response.headers.get("x-ms-copy-source-error-code")
+            if error_code in [
+                StorageErrorCode.OPERATION_TIMED_OUT,
+                StorageErrorCode.INTERNAL_ERROR,
+                StorageErrorCode.SERVER_BUSY,
+            ]:
+                return True
         return False
     if status >= 500:
         # Response codes above 500 with the exception of 501 Not Implemented and
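
The added block above is the notable behavioral change in this hunk: a 4xx response whose x-ms-copy-source-error-code header reports a transient error on the copy source is now treated as retryable. A minimal sketch of that decision, using illustrative names rather than the SDK's internals:

```python
# Sketch only: mirrors the new header check in is_retry. The set values are the
# service's StorageErrorCode strings; the helper name here is made up.
RETRYABLE_COPY_SOURCE_ERRORS = {"OperationTimedOut", "InternalError", "ServerBusy"}

def copy_source_error_is_retryable(status_code: int, headers: dict) -> bool:
    if status_code < 400:
        return False
    return headers.get("x-ms-copy-source-error-code") in RETRYABLE_COPY_SOURCE_ERRORS

print(copy_source_error_is_retryable(409, {"x-ms-copy-source-error-code": "ServerBusy"}))              # True
print(copy_source_error_is_retryable(409, {"x-ms-copy-source-error-code": "CannotVerifyCopySource"}))  # False
```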
@@ -95,40 +103,39 @@ def is_retry(response, mode):

 def is_checksum_retry(response):
     # retry if invalid content md5
-    if response.context.get(
-        computed_md5 = response.http_request.headers.get(
+    if response.context.get("validate_content", False) and response.http_response.headers.get("content-md5"):
+        computed_md5 = response.http_request.headers.get("content-md5", None) or encode_base64(
+            StorageContentValidation.get_content_md5(response.http_response.body())
+        )
+        if response.http_response.headers["content-md5"] != computed_md5:
             return True
     return False


 def urljoin(base_url, stub_url):
     parsed = urlparse(base_url)
-    parsed = parsed._replace(path=parsed.path +
+    parsed = parsed._replace(path=parsed.path + "/" + stub_url)
     return parsed.geturl()


 class QueueMessagePolicy(SansIOHTTPPolicy):

     def on_request(self, request):
-        message_id = request.context.options.pop(
+        message_id = request.context.options.pop("queue_message_id", None)
         if message_id:
-            request.http_request.url = urljoin(
-                request.http_request.url,
-                message_id)
+            request.http_request.url = urljoin(request.http_request.url, message_id)


 class StorageHeadersPolicy(HeadersPolicy):
-    request_id_header_name =
+    request_id_header_name = "x-ms-client-request-id"

     def on_request(self, request: "PipelineRequest") -> None:
         super(StorageHeadersPolicy, self).on_request(request)
         current_time = format_date_time(time())
-        request.http_request.headers[
+        request.http_request.headers["x-ms-date"] = current_time

-        custom_id = request.context.options.pop(
-        request.http_request.headers[
+        custom_id = request.context.options.pop("client_request_id", None)
+        request.http_request.headers["x-ms-client-request-id"] = custom_id or str(uuid.uuid1())

     # def on_response(self, request, response):
     #     # raise exception if the echoed client request id from the service is not identical to the one we sent
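
For context, StorageHeadersPolicy stamps a fresh date and a client request id on every outgoing request; the two header values are built roughly like this (a standalone sketch, not the policy itself):

```python
# Standalone illustration of the two headers set above.
import uuid
from time import time
from wsgiref.handlers import format_date_time

headers = {
    "x-ms-date": format_date_time(time()),        # RFC 1123 timestamp
    "x-ms-client-request-id": str(uuid.uuid1()),  # a caller-supplied id would win if provided
}
print(headers)
```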
@@ -153,7 +160,7 @@ class StorageHosts(SansIOHTTPPolicy):
         super(StorageHosts, self).__init__()

     def on_request(self, request: "PipelineRequest") -> None:
-        request.context.options[
+        request.context.options["hosts"] = self.hosts
         parsed_url = urlparse(request.http_request.url)

         # Detect what location mode we're currently requesting with
@@ -163,10 +170,10 @@ class StorageHosts(SansIOHTTPPolicy):
                 location_mode = key

         # See if a specific location mode has been specified, and if so, redirect
-        use_location = request.context.options.pop(
+        use_location = request.context.options.pop("use_location", None)
         if use_location:
             # Lock retries to the specific location
-            request.context.options[
+            request.context.options["retry_to_secondary"] = False
             if use_location not in self.hosts:
                 raise ValueError(f"Attempting to use undefined host location {use_location}")
             if use_location != location_mode:
@@ -175,7 +182,7 @@ class StorageHosts(SansIOHTTPPolicy):
                 request.http_request.url = updated.geturl()
                 location_mode = use_location

-        request.context.options[
+        request.context.options["location_mode"] = location_mode


 class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
@@ -200,19 +207,19 @@ class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
             try:
                 log_url = http_request.url
                 query_params = http_request.query
-                if
-                    log_url = log_url.replace(query_params[
+                if "sig" in query_params:
+                    log_url = log_url.replace(query_params["sig"], "sig=*****")
                 _LOGGER.debug("Request URL: %r", log_url)
                 _LOGGER.debug("Request method: %r", http_request.method)
                 _LOGGER.debug("Request headers:")
                 for header, value in http_request.headers.items():
-                    if header.lower() ==
-                        value =
-                    elif header.lower() ==
+                    if header.lower() == "authorization":
+                        value = "*****"
+                    elif header.lower() == "x-ms-copy-source" and "sig" in value:
                         # take the url apart and scrub away the signed signature
                         scheme, netloc, path, params, query, fragment = urlparse(value)
                         parsed_qs = dict(parse_qsl(query))
-                        parsed_qs[
+                        parsed_qs["sig"] = "*****"

                         # the SAS needs to be put back together
                         value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
@@ -242,11 +249,11 @@ class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
                 # We don't want to log binary data if the response is a file.
                 _LOGGER.debug("Response content:")
                 pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
-                header = response.http_response.headers.get(
+                header = response.http_response.headers.get("content-disposition")
                 resp_content_type = response.http_response.headers.get("content-type", "")

                 if header and pattern.match(header):
-                    filename = header.partition(
+                    filename = header.partition("=")[2]
                     _LOGGER.debug("File attachments: %s", filename)
                 elif resp_content_type.endswith("octet-stream"):
                     _LOGGER.debug("Body contains binary data.")
@@ -268,11 +275,11 @@ class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
 class StorageRequestHook(SansIOHTTPPolicy):

     def __init__(self, **kwargs):
-        self._request_callback = kwargs.get(
+        self._request_callback = kwargs.get("raw_request_hook")
         super(StorageRequestHook, self).__init__()

     def on_request(self, request: "PipelineRequest") -> None:
-        request_callback = request.context.options.pop(
+        request_callback = request.context.options.pop("raw_request_hook", self._request_callback)
         if request_callback:
             request_callback(request)

@@ -280,49 +287,50 @@ class StorageRequestHook(SansIOHTTPPolicy):
 class StorageResponseHook(HTTPPolicy):

     def __init__(self, **kwargs):
-        self._response_callback = kwargs.get(
+        self._response_callback = kwargs.get("raw_response_hook")
         super(StorageResponseHook, self).__init__()

     def send(self, request: "PipelineRequest") -> "PipelineResponse":
         # Values could be 0
-        data_stream_total = request.context.get(
+        data_stream_total = request.context.get("data_stream_total")
         if data_stream_total is None:
-            data_stream_total = request.context.options.pop(
-        download_stream_current = request.context.get(
+            data_stream_total = request.context.options.pop("data_stream_total", None)
+        download_stream_current = request.context.get("download_stream_current")
         if download_stream_current is None:
-            download_stream_current = request.context.options.pop(
-        upload_stream_current = request.context.get(
+            download_stream_current = request.context.options.pop("download_stream_current", None)
+        upload_stream_current = request.context.get("upload_stream_current")
         if upload_stream_current is None:
-            upload_stream_current = request.context.options.pop(
+            upload_stream_current = request.context.options.pop("upload_stream_current", None)

-        response_callback = request.context.get(
+        response_callback = request.context.get("response_callback") or request.context.options.pop(
+            "raw_response_hook", self._response_callback
+        )

         response = self.next.send(request)

-        will_retry = is_retry(response, request.context.options.get(
+        will_retry = is_retry(response, request.context.options.get("mode")) or is_checksum_retry(response)
         # Auth error could come from Bearer challenge, in which case this request will be made again
         is_auth_error = response.http_response.status_code == 401
         should_update_counts = not (will_retry or is_auth_error)

         if should_update_counts and download_stream_current is not None:
-            download_stream_current += int(response.http_response.headers.get(
+            download_stream_current += int(response.http_response.headers.get("Content-Length", 0))
             if data_stream_total is None:
-                content_range = response.http_response.headers.get(
+                content_range = response.http_response.headers.get("Content-Range")
                 if content_range:
-                    data_stream_total = int(content_range.split(
+                    data_stream_total = int(content_range.split(" ", 1)[1].split("/", 1)[1])
                 else:
                     data_stream_total = download_stream_current
         elif should_update_counts and upload_stream_current is not None:
-            upload_stream_current += int(response.http_request.headers.get(
+            upload_stream_current += int(response.http_request.headers.get("Content-Length", 0))
         for pipeline_obj in [request, response]:
-            if hasattr(pipeline_obj,
-                pipeline_obj.context[
-                pipeline_obj.context[
-                pipeline_obj.context[
+            if hasattr(pipeline_obj, "context"):
+                pipeline_obj.context["data_stream_total"] = data_stream_total
+                pipeline_obj.context["download_stream_current"] = download_stream_current
+                pipeline_obj.context["upload_stream_current"] = upload_stream_current
         if response_callback:
             response_callback(response)
-            request.context[
+            request.context["response_callback"] = response_callback
         return response

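
One small detail worth calling out from the progress-tracking block above: when the total download size is unknown, it is recovered from the Content-Range header. The same split expression, with an illustrative header value:

```python
# How the total size is extracted from a Content-Range header such as
# "bytes 0-1023/4096" (mirrors the expression in the hunk above).
content_range = "bytes 0-1023/4096"
data_stream_total = int(content_range.split(" ", 1)[1].split("/", 1)[1])
print(data_stream_total)  # 4096
```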
@@ -332,7 +340,8 @@ class StorageContentValidation(SansIOHTTPPolicy):

     This will overwrite any headers already defined in the request.
     """
+
+    header_name = "Content-MD5"

     def __init__(self, **kwargs: Any) -> None:  # pylint: disable=unused-argument
         super(StorageContentValidation, self).__init__()
@@ -342,10 +351,10 @@ class StorageContentValidation(SansIOHTTPPolicy):
         # Since HTTP does not differentiate between no content and empty content,
         # we have to perform a None check.
         data = data or b""
-        md5 = hashlib.md5()
+        md5 = hashlib.md5()  # nosec
         if isinstance(data, bytes):
             md5.update(data)
-        elif hasattr(data,
+        elif hasattr(data, "read"):
             pos = 0
             try:
                 pos = data.tell()
@@ -363,22 +372,25 @@ class StorageContentValidation(SansIOHTTPPolicy):
         return md5.digest()

     def on_request(self, request: "PipelineRequest") -> None:
-        validate_content = request.context.options.pop(
-        if validate_content and request.http_request.method !=
+        validate_content = request.context.options.pop("validate_content", False)
+        if validate_content and request.http_request.method != "GET":
             computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
             request.http_request.headers[self.header_name] = computed_md5
-            request.context[
-            request.context[
+            request.context["validate_content_md5"] = computed_md5
+            request.context["validate_content"] = validate_content

     def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
-        if response.context.get(
-            computed_md5 = request.context.get(
+        if response.context.get("validate_content", False) and response.http_response.headers.get("content-md5"):
+            computed_md5 = request.context.get("validate_content_md5") or encode_base64(
+                StorageContentValidation.get_content_md5(response.http_response.body())
+            )
+            if response.http_response.headers["content-md5"] != computed_md5:
+                raise AzureError(
+                    (
+                        f"MD5 mismatch. Expected value is '{response.http_response.headers['content-md5']}', "
+                        f"computed value is '{computed_md5}'."
+                    ),
+                    response=response.http_response,
                 )

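
The reworked on_response above is what enforces validate_content=True on downloads: it compares the service's content-md5 header against an MD5 computed over the returned body and raises AzureError on mismatch. A self-contained sketch of that comparison (not the policy itself):

```python
# Sketch of the validate_content check: base64-encoded MD5 of the payload
# versus the value the service reported.
import base64
import hashlib

def content_md5(data: bytes) -> str:
    return base64.b64encode(hashlib.md5(data).digest()).decode("utf-8")  # nosec

body = b"hello, blob"
service_header = content_md5(body)                   # what the service would send
assert content_md5(body) == service_header           # intact download passes
assert content_md5(b"corrupted") != service_header   # a mismatch would raise AzureError
```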
@@ -399,33 +411,41 @@ class StorageRetryPolicy(HTTPPolicy):
     """Whether the secondary endpoint should be retried."""

     def __init__(self, **kwargs: Any) -> None:
-        self.total_retries = kwargs.pop(
-        self.connect_retries = kwargs.pop(
-        self.read_retries = kwargs.pop(
-        self.status_retries = kwargs.pop(
-        self.retry_to_secondary = kwargs.pop(
+        self.total_retries = kwargs.pop("retry_total", 10)
+        self.connect_retries = kwargs.pop("retry_connect", 3)
+        self.read_retries = kwargs.pop("retry_read", 3)
+        self.status_retries = kwargs.pop("retry_status", 3)
+        self.retry_to_secondary = kwargs.pop("retry_to_secondary", False)
         super(StorageRetryPolicy, self).__init__()

     def _set_next_host_location(self, settings: Dict[str, Any], request: "PipelineRequest") -> None:
         """
         A function which sets the next host location on the request, if applicable.

-        :param Dict[str, Any]
+        :param Dict[str, Any] settings: The configurable values pertaining to the next host location.
         :param PipelineRequest request: A pipeline request object.
         """
-        if settings[
+        if settings["hosts"] and all(settings["hosts"].values()):
             url = urlparse(request.url)
             # If there's more than one possible location, retry to the alternative
-            if settings[
-                settings[
+            if settings["mode"] == LocationMode.PRIMARY:
+                settings["mode"] = LocationMode.SECONDARY
             else:
-                settings[
-            updated = url._replace(netloc=settings[
+                settings["mode"] = LocationMode.PRIMARY
+            updated = url._replace(netloc=settings["hosts"].get(settings["mode"]))
             request.url = updated.geturl()

     def configure_retries(self, request: "PipelineRequest") -> Dict[str, Any]:
+        """
+        Configure the retry settings for the request.
+
+        :param request: A pipeline request object.
+        :type request: ~azure.core.pipeline.PipelineRequest
+        :return: A dictionary containing the retry settings.
+        :rtype: Dict[str, Any]
+        """
         body_position = None
-        if hasattr(request.http_request.body,
+        if hasattr(request.http_request.body, "read"):
             try:
                 body_position = request.http_request.body.tell()
             except (AttributeError, UnsupportedOperation):
@@ -433,129 +453,140 @@ class StorageRetryPolicy(HTTPPolicy):
                 pass
         options = request.context.options
         return {
+            "total": options.pop("retry_total", self.total_retries),
+            "connect": options.pop("retry_connect", self.connect_retries),
+            "read": options.pop("retry_read", self.read_retries),
+            "status": options.pop("retry_status", self.status_retries),
+            "retry_secondary": options.pop("retry_to_secondary", self.retry_to_secondary),
+            "mode": options.pop("location_mode", LocationMode.PRIMARY),
+            "hosts": options.pop("hosts", None),
+            "hook": options.pop("retry_hook", None),
+            "body_position": body_position,
+            "count": 0,
+            "history": [],
         }

     def get_backoff_time(self, settings: Dict[str, Any]) -> float:  # pylint: disable=unused-argument
-        """
+        """Formula for computing the current backoff.
         Should be calculated by child class.

         :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
-        :
+        :return: The backoff time.
         :rtype: float
         """
         return 0

     def sleep(self, settings, transport):
+        """Sleep for the backoff time.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the sleep operation.
+        :param transport: The transport to use for sleeping.
+        :type transport:
+            ~azure.core.pipeline.transport.AsyncioBaseTransport or
+            ~azure.core.pipeline.transport.BaseTransport
+        """
         backoff = self.get_backoff_time(settings)
         if not backoff or backoff < 0:
             return
         transport.sleep(backoff)

     def increment(
-        self,
+        self,
+        settings: Dict[str, Any],
         request: "PipelineRequest",
         response: Optional["PipelineResponse"] = None,
-        error: Optional[AzureError] = None
+        error: Optional[AzureError] = None,
     ) -> bool:
         """Increment the retry counters.

         :param Dict[str, Any] settings: The configurable values pertaining to the increment operation.
-        :param
-        :
-        :param
+        :param request: A pipeline request object.
+        :type request: ~azure.core.pipeline.PipelineRequest
+        :param response: A pipeline response object.
+        :type response: ~azure.core.pipeline.PipelineResponse or None
+        :param error: An error encountered during the request, or
            None if the response was received successfully.
-        :
+        :type error: ~azure.core.exceptions.AzureError or None
+        :return: Whether the retry attempts are exhausted.
         :rtype: bool
         """
-        settings[
+        settings["total"] -= 1

         if error and isinstance(error, ServiceRequestError):
             # Errors when we're fairly sure that the server did not receive the
             # request, so it should be safe to retry.
-            settings[
-            settings[
+            settings["connect"] -= 1
+            settings["history"].append(RequestHistory(request, error=error))

         elif error and isinstance(error, ServiceResponseError):
             # Errors that occur after the request has been started, so we should
             # assume that the server began processing it.
-            settings[
-            settings[
+            settings["read"] -= 1
+            settings["history"].append(RequestHistory(request, error=error))

         else:
             # Incrementing because of a server error like a 500 in
             # status_forcelist and a the given method is in the allowlist
             if response:
-                settings[
-                settings[
+                settings["status"] -= 1
+                settings["history"].append(RequestHistory(request, http_response=response))

         if not is_exhausted(settings):
-            if request.method not in [
+            if request.method not in ["PUT"] and settings["retry_secondary"]:
                 self._set_next_host_location(settings, request)

             # rewind the request body if it is a stream
-            if request.body and hasattr(request.body,
+            if request.body and hasattr(request.body, "read"):
                 # no position was saved, then retry would not work
-                if settings[
+                if settings["body_position"] is None:
                     return False
                 try:
                     # attempt to rewind the body to the initial position
-                    request.body.seek(settings[
+                    request.body.seek(settings["body_position"], SEEK_SET)
                 except (UnsupportedOperation, ValueError):
                     # if body is not seekable, then retry would not work
                     return False
-            settings[
+            settings["count"] += 1
             return True
         return False

     def send(self, request):
+        """Send the request with retry logic.
+
+        :param request: A pipeline request object.
+        :type request: ~azure.core.pipeline.PipelineRequest
+        :return: A pipeline response object.
+        :rtype: ~azure.core.pipeline.PipelineResponse
+        """
         retries_remaining = True
         response = None
         retry_settings = self.configure_retries(request)
         while retries_remaining:
             try:
                 response = self.next.send(request)
-                if is_retry(response, retry_settings[
+                if is_retry(response, retry_settings["mode"]) or is_checksum_retry(response):
                     retries_remaining = self.increment(
-                        retry_settings,
-                        response=response.http_response)
+                        retry_settings, request=request.http_request, response=response.http_response
+                    )
                     if retries_remaining:
                         retry_hook(
-                            retry_settings,
-                            response=response.http_response,
-                            error=None)
+                            retry_settings, request=request.http_request, response=response.http_response, error=None
+                        )
                     self.sleep(retry_settings, request.context.transport)
                     continue
                 break
             except AzureError as err:
                 if isinstance(err, AzureSigningError):
                     raise
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
+                retries_remaining = self.increment(retry_settings, request=request.http_request, error=err)
                 if retries_remaining:
-                    retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
+                    retry_hook(retry_settings, request=request.http_request, response=None, error=err)
                 self.sleep(retry_settings, request.context.transport)
                 continue
             raise err
-        if retry_settings[
-            response.context[
-            response.http_response.location_mode = retry_settings[
+        if retry_settings["history"]:
+            response.context["history"] = retry_settings["history"]
+        response.http_response.location_mode = retry_settings["mode"]
         return response

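
The counters consumed by configure_retries above map to keyword arguments that the storage clients accept, so retry behaviour can be tuned per client. A sketch of overriding them (the kwarg names are taken from the options popped above; the connection string is a placeholder):

```python
# Sketch: tuning the values StorageRetryPolicy.configure_retries reads.
from azure.storage.blob import BlobServiceClient

client = BlobServiceClient.from_connection_string(
    "<connection-string>",       # placeholder
    retry_total=5,               # overall retry budget
    retry_connect=2,             # connection-error retries
    retry_read=2,                # read-error retries
    retry_status=2,              # status-code retries
    retry_to_secondary=True,     # allow reads to fail over to the secondary endpoint
)
```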
@@ -571,12 +602,13 @@ class ExponentialRetry(StorageRetryPolicy):
     """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""

     def __init__(
-        self,
+        self,
+        initial_backoff: int = 15,
         increment_base: int = 3,
         retry_total: int = 3,
         retry_to_secondary: bool = False,
         random_jitter_range: int = 3,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> None:
         """
         Constructs an Exponential retry object. The initial_backoff is used for
@@ -601,21 +633,20 @@ class ExponentialRetry(StorageRetryPolicy):
         self.initial_backoff = initial_backoff
         self.increment_base = increment_base
         self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+        super(ExponentialRetry, self).__init__(retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)

     def get_backoff_time(self, settings: Dict[str, Any]) -> float:
         """
         Calculates how long to sleep before retrying.

-        :param Dict[str, Any]
-        :
+        :param Dict[str, Any] settings: The configurable values pertaining to get backoff time.
+        :return:
             A float indicating how long to wait before retrying the request,
             or None to indicate no retry should be performed.
         :rtype: float
         """
         random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings[
+        backoff = self.initial_backoff + (0 if settings["count"] == 0 else pow(self.increment_base, settings["count"]))
         random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
         random_range_end = backoff + self.random_jitter_range
         return random_generator.uniform(random_range_start, random_range_end)
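
With the defaults above (initial_backoff=15, increment_base=3), the pre-jitter wait grows as 15, 18, 24, 42, ... seconds across attempts; the result is then jittered by ±random_jitter_range. A small illustration of the same formula:

```python
# Reproduces the pre-jitter part of ExponentialRetry.get_backoff_time.
def exponential_backoff(count: int, initial_backoff: int = 15, increment_base: int = 3) -> int:
    return initial_backoff + (0 if count == 0 else increment_base ** count)

print([exponential_backoff(c) for c in range(4)])  # [15, 18, 24, 42]
```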
@@ -630,11 +661,12 @@ class LinearRetry(StorageRetryPolicy):
     """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""

     def __init__(
-        self,
+        self,
+        backoff: int = 15,
         retry_total: int = 3,
         retry_to_secondary: bool = False,
         random_jitter_range: int = 3,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> None:
         """
         Constructs a Linear retry object.
@@ -653,15 +685,14 @@ class LinearRetry(StorageRetryPolicy):
         """
         self.backoff = backoff
         self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+        super(LinearRetry, self).__init__(retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)

     def get_backoff_time(self, settings: Dict[str, Any]) -> float:
         """
         Calculates how long to sleep before retrying.

-        :param Dict[str, Any]
-        :
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :return:
             A float indicating how long to wait before retrying the request,
             or None to indicate no retry should be performed.
         :rtype: float
@@ -669,19 +700,27 @@ class LinearRetry(StorageRetryPolicy):
         random_generator = random.Random()
         # the backoff interval normally does not change, however there is the possibility
         # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range
-            if self.backoff > self.random_jitter_range else 0
+        random_range_start = self.backoff - self.random_jitter_range if self.backoff > self.random_jitter_range else 0
         random_range_end = self.backoff + self.random_jitter_range
         return random_generator.uniform(random_range_start, random_range_end)


 class StorageBearerTokenCredentialPolicy(BearerTokenCredentialPolicy):
-    """
+    """Custom Bearer token credential policy for following Storage Bearer challenges"""

     def __init__(self, credential: "TokenCredential", audience: str, **kwargs: Any) -> None:
         super(StorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)

     def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
+        """Handle the challenge from the service and authorize the request.
+
+        :param request: The request object.
+        :type request: ~azure.core.pipeline.PipelineRequest
+        :param response: The response object.
+        :type response: ~azure.core.pipeline.PipelineResponse
+        :return: True if the request was authorized, False otherwise.
+        :rtype: bool
+        """
         try:
             auth_header = response.http_response.headers.get("WWW-Authenticate")
             challenge = StorageHttpChallenge(auth_header)