azure-storage-blob 12.19.1__py3-none-any.whl → 12.20.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- azure/storage/blob/__init__.py +7 -5
- azure/storage/blob/_blob_client.py +12 -4
- azure/storage/blob/_blob_service_client.py +4 -3
- azure/storage/blob/_container_client.py +28 -12
- azure/storage/blob/_download.py +3 -3
- azure/storage/blob/_encryption.py +254 -165
- azure/storage/blob/_generated/_azure_blob_storage.py +21 -3
- azure/storage/blob/_generated/_configuration.py +4 -11
- azure/storage/blob/_generated/_serialization.py +41 -49
- azure/storage/blob/_generated/aio/_azure_blob_storage.py +23 -3
- azure/storage/blob/_generated/aio/_configuration.py +4 -11
- azure/storage/blob/_generated/aio/operations/_append_blob_operations.py +24 -58
- azure/storage/blob/_generated/aio/operations/_blob_operations.py +123 -306
- azure/storage/blob/_generated/aio/operations/_block_blob_operations.py +37 -86
- azure/storage/blob/_generated/aio/operations/_container_operations.py +98 -289
- azure/storage/blob/_generated/aio/operations/_page_blob_operations.py +51 -150
- azure/storage/blob/_generated/aio/operations/_service_operations.py +49 -125
- azure/storage/blob/_generated/models/_models_py3.py +31 -31
- azure/storage/blob/_generated/operations/_append_blob_operations.py +25 -59
- azure/storage/blob/_generated/operations/_blob_operations.py +123 -306
- azure/storage/blob/_generated/operations/_block_blob_operations.py +39 -88
- azure/storage/blob/_generated/operations/_container_operations.py +100 -291
- azure/storage/blob/_generated/operations/_page_blob_operations.py +52 -151
- azure/storage/blob/_generated/operations/_service_operations.py +50 -126
- azure/storage/blob/_models.py +3 -4
- azure/storage/blob/_serialize.py +1 -0
- azure/storage/blob/_shared/authentication.py +1 -1
- azure/storage/blob/_shared/avro/avro_io.py +0 -6
- azure/storage/blob/_shared/avro/avro_io_async.py +0 -6
- azure/storage/blob/_shared/avro/datafile.py +0 -4
- azure/storage/blob/_shared/avro/datafile_async.py +0 -4
- azure/storage/blob/_shared/avro/schema.py +4 -4
- azure/storage/blob/_shared/base_client.py +72 -87
- azure/storage/blob/_shared/base_client_async.py +115 -27
- azure/storage/blob/_shared/models.py +112 -20
- azure/storage/blob/_shared/parser.py +7 -6
- azure/storage/blob/_shared/policies.py +96 -66
- azure/storage/blob/_shared/policies_async.py +48 -21
- azure/storage/blob/_shared/response_handlers.py +14 -16
- azure/storage/blob/_shared/shared_access_signature.py +2 -3
- azure/storage/blob/_shared_access_signature.py +37 -27
- azure/storage/blob/_upload_helpers.py +4 -7
- azure/storage/blob/_version.py +1 -1
- azure/storage/blob/aio/__init__.py +2 -2
- azure/storage/blob/aio/_blob_client_async.py +16 -5
- azure/storage/blob/aio/_blob_service_client_async.py +3 -1
- azure/storage/blob/aio/_container_client_async.py +25 -8
- azure/storage/blob/aio/_download_async.py +9 -9
- azure/storage/blob/aio/_encryption_async.py +72 -0
- azure/storage/blob/aio/_upload_helpers.py +8 -10
- {azure_storage_blob-12.19.1.dist-info → azure_storage_blob-12.20.0b1.dist-info}/METADATA +9 -9
- azure_storage_blob-12.20.0b1.dist-info/RECORD +81 -0
- {azure_storage_blob-12.19.1.dist-info → azure_storage_blob-12.20.0b1.dist-info}/WHEEL +1 -1
- azure/storage/blob/_generated/py.typed +0 -1
- azure_storage_blob-12.19.1.dist-info/RECORD +0 -81
- {azure_storage_blob-12.19.1.dist-info → azure_storage_blob-12.20.0b1.dist-info}/LICENSE +0 -0
- {azure_storage_blob-12.19.1.dist-info → azure_storage_blob-12.20.0b1.dist-info}/top_level.txt +0 -0
azure/storage/blob/_shared/models.py

@@ -5,8 +5,11 @@
 # --------------------------------------------------------------------------
 # pylint: disable=too-many-instance-attributes
 from enum import Enum
+from typing import Optional
 
 from azure.core import CaseInsensitiveEnumMeta
+from azure.core.configuration import Configuration
+from azure.core.pipeline.policies import UserAgentPolicy
 
 
 def get_enum_value(value):
@@ -269,7 +272,17 @@ class ResourceTypes(object):
         files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
     """
 
-    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
+    service: bool = False
+    container: bool = False
+    object: bool = False
+    _str: str
+
+    def __init__(
+        self,
+        service: bool = False,
+        container: bool = False,
+        object: bool = False  # pylint: disable=redefined-builtin
+    ) -> None:
         self.service = service
         self.container = container
         self.object = object
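The flags on ResourceTypes are now declared as typed class attributes alongside an expanded __init__; runtime behavior is unchanged. A minimal, illustrative sketch of constructing the same value two ways:

    from azure.storage.blob import ResourceTypes

    # Keyword construction and string parsing produce equivalent values
    rt = ResourceTypes(service=True, object=True)
    rt_parsed = ResourceTypes.from_string('so')  # 's' = service, 'c' = container, 'o' = object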
@@ -344,9 +357,34 @@ class AccountSasPermissions(object):
         To enable permanent delete on the blob is permitted.
         Valid for Object resource type of Blob only.
     """
-    def __init__(self, read=False, write=False, delete=False, delete_previous_version=False,
-                 list=False,  # pylint: disable=redefined-builtin
-                 add=False, create=False, update=False, process=False, **kwargs):
+
+    read: bool = False
+    write: bool = False
+    delete: bool = False
+    delete_previous_version: bool = False
+    list: bool = False
+    add: bool = False
+    create: bool = False
+    update: bool = False
+    process: bool = False
+    tag: bool = False
+    filter_by_tags: bool = False
+    set_immutability_policy: bool = False
+    permanent_delete: bool = False
+
+    def __init__(
+        self,
+        read: bool = False,
+        write: bool = False,
+        delete: bool = False,
+        list: bool = False,  # pylint: disable=redefined-builtin
+        add: bool = False,
+        create: bool = False,
+        update: bool = False,
+        process: bool = False,
+        delete_previous_version: bool = False,
+        **kwargs
+    ) -> None:
         self.read = read
         self.write = write
         self.delete = delete
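AccountSasPermissions and ResourceTypes both feed the account-level SAS helper. A hedged sketch of typical use (account name and key are placeholders):

    from datetime import datetime, timedelta
    from azure.storage.blob import (
        AccountSasPermissions,
        ResourceTypes,
        generate_account_sas,
    )

    # Grant one hour of read/list access at the container and object level
    sas_token = generate_account_sas(
        account_name="<storage-account>",   # placeholder
        account_key="<account-key>",        # placeholder
        resource_types=ResourceTypes(container=True, object=True),
        permission=AccountSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )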
@@ -423,7 +461,11 @@ class Services(object):
         Access for the `~azure.storage.fileshare.ShareServiceClient`
     """
 
-    def __init__(self, blob=False, queue=False, fileshare=False):
+    blob: bool = False
+    queue: bool = False
+    fileshare: bool = False
+
+    def __init__(self, blob: bool = False, queue: bool = False, fileshare: bool = False):
         self.blob = blob
         self.queue = queue
         self.fileshare = fileshare
@@ -463,22 +505,23 @@ class UserDelegationKey(object):
 
     The fields are saved as simple strings since the user does not have to interact with this object;
     to generate an identify SAS, the user can simply pass it to the right API.
-
-    :ivar str signed_oid:
-        Object ID of this token.
-    :ivar str signed_tid:
-        Tenant ID of the tenant that issued this token.
-    :ivar str signed_start:
-        The datetime this token becomes valid.
-    :ivar str signed_expiry:
-        The datetime this token expires.
-    :ivar str signed_service:
-        What service this key is valid for.
-    :ivar str signed_version:
-        The version identifier of the REST service that created this token.
-    :ivar str value:
-        The user delegation key.
     """
+
+    signed_oid: Optional[str] = None
+    """Object ID of this token."""
+    signed_tid: Optional[str] = None
+    """Tenant ID of the tenant that issued this token."""
+    signed_start: Optional[str] = None
+    """The datetime this token becomes valid."""
+    signed_expiry: Optional[str] = None
+    """The datetime this token expires."""
+    signed_service: Optional[str] = None
+    """What service this key is valid for."""
+    signed_version: Optional[str] = None
+    """The version identifier of the REST service that created this token."""
+    value: Optional[str] = None
+    """The user delegation key."""
+
     def __init__(self):
         self.signed_oid = None
         self.signed_tid = None
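A user delegation key is fetched from the service and then handed to the SAS helpers. A minimal sketch, assuming token_credential is an already-constructed Azure AD credential and the account, container, and blob names are placeholders:

    from datetime import datetime, timedelta
    from azure.storage.blob import BlobServiceClient, BlobSasPermissions, generate_blob_sas

    service = BlobServiceClient(
        "https://<storage-account>.blob.core.windows.net", credential=token_credential)
    key = service.get_user_delegation_key(
        key_start_time=datetime.utcnow(),
        key_expiry_time=datetime.utcnow() + timedelta(hours=1),
    )
    sas_token = generate_blob_sas(
        account_name="<storage-account>",
        container_name="<container>",
        blob_name="<blob>",
        user_delegation_key=key,
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )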
@@ -487,3 +530,52 @@ class UserDelegationKey(object):
         self.signed_service = None
         self.signed_version = None
         self.value = None
+
+
+class StorageConfiguration(Configuration):
+    """
+    Specifies the configurable values used in Azure Storage.
+
+    :param int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :param int copy_polling_interval: The interval in seconds for polling copy operations.
+    :param int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob.
+    :param bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :param int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_chunk_upload_threshold: The max size for a single put operation.
+    :param int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+        the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+    :param int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :param int max_range_size: The max range size for file upload.
+
+    """
+
+    max_single_put_size: int
+    copy_polling_interval: int
+    max_block_size: int
+    min_large_block_upload_threshold: int
+    use_byte_buffer: bool
+    max_page_size: int
+    min_large_chunk_upload_threshold: int
+    max_single_get_size: int
+    max_chunk_get_size: int
+    max_range_size: int
+    user_agent_policy: UserAgentPolicy
+
+    def __init__(self, **kwargs):
+        super(StorageConfiguration, self).__init__(**kwargs)
+        self.max_single_put_size = kwargs.pop('max_single_put_size', 64 * 1024 * 1024)
+        self.copy_polling_interval = 15
+        self.max_block_size = kwargs.pop('max_block_size', 4 * 1024 * 1024)
+        self.min_large_block_upload_threshold = kwargs.get('min_large_block_upload_threshold', 4 * 1024 * 1024 + 1)
+        self.use_byte_buffer = kwargs.pop('use_byte_buffer', False)
+        self.max_page_size = kwargs.pop('max_page_size', 4 * 1024 * 1024)
+        self.min_large_chunk_upload_threshold = kwargs.pop('min_large_chunk_upload_threshold', 100 * 1024 * 1024 + 1)
+        self.max_single_get_size = kwargs.pop('max_single_get_size', 32 * 1024 * 1024)
+        self.max_chunk_get_size = kwargs.pop('max_chunk_get_size', 4 * 1024 * 1024)
+        self.max_range_size = kwargs.pop('max_range_size', 4 * 1024 * 1024)
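StorageConfiguration is populated from keyword arguments that the public clients accept and pass straight through. A hedged sketch of overriding a few of the transfer-tuning defaults documented above (the connection string is a placeholder):

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>",
        container_name="<container>",
        blob_name="<blob>",
        max_single_put_size=8 * 1024 * 1024,    # upload in one PUT up to 8MB
        max_block_size=4 * 1024 * 1024,         # 4MB blocks for chunked uploads
        max_single_get_size=32 * 1024 * 1024,   # first GET fetches up to 32MB
    )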
azure/storage/blob/_shared/parser.py

@@ -6,6 +6,7 @@
 
 import sys
 from datetime import datetime, timezone
+from typing import Optional
 
 EPOCH_AS_FILETIME = 116444736000000000  # January 1, 1970 as MS filetime
 HUNDREDS_OF_NANOSECONDS = 10000000
@@ -20,10 +21,10 @@ else:
     _str = str
 
 
-def _to_utc_datetime(value):
+def _to_utc_datetime(value: datetime) -> str:
     return value.strftime('%Y-%m-%dT%H:%M:%SZ')
 
-def _rfc_1123_to_datetime(rfc_1123: str) -> datetime:
+def _rfc_1123_to_datetime(rfc_1123: str) -> Optional[datetime]:
     """Converts an RFC 1123 date string to a UTC datetime.
 
     :param str rfc_1123: The time and date in RFC 1123 format.
@@ -35,7 +36,7 @@ def _rfc_1123_to_datetime(rfc_1123: str) -> datetime:
 
     return datetime.strptime(rfc_1123, "%a, %d %b %Y %H:%M:%S %Z")
 
-def _filetime_to_datetime(filetime: str) -> datetime:
+def _filetime_to_datetime(filetime: str) -> Optional[datetime]:
     """Converts an MS filetime string to a UTC datetime. "0" indicates None.
     If parsing MS Filetime fails, tries RFC 1123 as backup.
 
@@ -48,11 +49,11 @@ def _filetime_to_datetime(filetime: str) -> datetime:
 
     # Try to convert to MS Filetime
     try:
-
-        if int(filetime) == 0:
+        temp_filetime = int(filetime)
+        if temp_filetime == 0:
             return None
 
-        return datetime.fromtimestamp((int(filetime) - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc)
+        return datetime.fromtimestamp((temp_filetime - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc)
     except ValueError:
         pass
 
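The FILETIME conversion above is plain arithmetic: subtract the 1970 epoch expressed in 100ns ticks, then divide by the ticks-per-second constant. A self-contained illustration:

    from datetime import datetime, timezone

    EPOCH_AS_FILETIME = 116444736000000000  # January 1, 1970 as MS filetime
    HUNDREDS_OF_NANOSECONDS = 10000000      # 100ns ticks per second

    # 60 seconds past the Unix epoch, expressed as an MS filetime string
    filetime = "116444736600000000"
    dt = datetime.fromtimestamp(
        (int(filetime) - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc)
    assert dt == datetime(1970, 1, 1, 0, 1, tzinfo=timezone.utc)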
azure/storage/blob/_shared/policies.py

@@ -6,29 +6,22 @@
 
 import base64
 import hashlib
-import re
-import random
-from time import time
-from io import SEEK_SET, UnsupportedOperation
 import logging
+import random
+import re
 import uuid
-from typing import Any, TYPE_CHECKING
-from wsgiref.handlers import format_date_time
-try:
-    from urllib.parse import (
-        urlparse,
+from io import SEEK_SET, UnsupportedOperation
+from time import time
+from typing import Any, Dict, Optional, TYPE_CHECKING
+from urllib.parse import (
     parse_qsl,
-    urlunparse,
     urlencode,
-    )
-except ImportError:
-    from urllib import urlencode  # type: ignore
-    from urlparse import (  # type: ignore
     urlparse,
-    parse_qsl,
     urlunparse,
-    )
+)
+from wsgiref.handlers import format_date_time
 
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
 from azure.core.pipeline.policies import (
     BearerTokenCredentialPolicy,
     HeadersPolicy,
@@ -37,7 +30,6 @@ from azure.core.pipeline.policies import (
     RequestHistory,
     SansIOHTTPPolicy,
 )
-from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
 
 from .authentication import StorageHttpChallenge
 from .constants import DEFAULT_OAUTH_SCOPE
@@ -50,7 +42,10 @@ except NameError:
 
 if TYPE_CHECKING:
     from azure.core.credentials import TokenCredential
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
+    from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
+        PipelineRequest,
+        PipelineResponse
+    )
 
 
 _LOGGER = logging.getLogger(__name__)
@@ -128,8 +123,7 @@ class QueueMessagePolicy(SansIOHTTPPolicy):
 class StorageHeadersPolicy(HeadersPolicy):
     request_id_header_name = 'x-ms-client-request-id'
 
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
+    def on_request(self, request: "PipelineRequest") -> None:
         super(StorageHeadersPolicy, self).on_request(request)
         current_time = format_date_time(time())
         request.http_request.headers['x-ms-date'] = current_time
@@ -159,8 +153,7 @@ class StorageHosts(SansIOHTTPPolicy):
         self.hosts = hosts
         super(StorageHosts, self).__init__()
 
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
+    def on_request(self, request: "PipelineRequest") -> None:
         request.context.options['hosts'] = self.hosts
         parsed_url = urlparse(request.http_request.url)
 
@@ -191,12 +184,12 @@ class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
 
     This accepts both global configuration, and per-request level with "enable_http_logger"
     """
-    def __init__(self, logging_enable=False, **kwargs):
+
+    def __init__(self, logging_enable: bool = False, **kwargs) -> None:
         self.logging_body = kwargs.pop("logging_body", False)
         super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs)
 
-    def on_request(self, request):
-        # type: (PipelineRequest, Any) -> None
+    def on_request(self, request: "PipelineRequest") -> None:
         http_request = request.http_request
         options = request.context.options
         self.logging_body = self.logging_body or options.pop("logging_body", False)
@@ -236,8 +229,7 @@ class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
         except Exception as err:  # pylint: disable=broad-except
             _LOGGER.debug("Failed to log request: %r", err)
 
-    def on_response(self, request, response):
-        # type: (PipelineRequest, PipelineResponse, Any) -> None
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
         if response.context.pop("logging_enable", self.enable_http_logger):
             if not _LOGGER.isEnabledFor(logging.DEBUG):
                 return
@@ -280,8 +272,7 @@ class StorageRequestHook(SansIOHTTPPolicy):
         self._request_callback = kwargs.get('raw_request_hook')
         super(StorageRequestHook, self).__init__()
 
-    def on_request(self, request):
-        # type: (PipelineRequest, **Any) -> PipelineResponse
+    def on_request(self, request: "PipelineRequest") -> None:
         request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
         if request_callback:
             request_callback(request)
@@ -293,8 +284,7 @@ class StorageResponseHook(HTTPPolicy):
         self._response_callback = kwargs.get('raw_response_hook')
         super(StorageResponseHook, self).__init__()
 
-    def send(self, request):
-        # type: (PipelineRequest) -> PipelineResponse
+    def send(self, request: "PipelineRequest") -> "PipelineResponse":
         # Values could be 0
         data_stream_total = request.context.get('data_stream_total')
         if data_stream_total is None:
@@ -327,9 +317,10 @@ class StorageResponseHook(HTTPPolicy):
         elif should_update_counts and upload_stream_current is not None:
             upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
         for pipeline_obj in [request, response]:
-            pipeline_obj.context['data_stream_total'] = data_stream_total
-            pipeline_obj.context['download_stream_current'] = download_stream_current
-            pipeline_obj.context['upload_stream_current'] = upload_stream_current
+            if hasattr(pipeline_obj, 'context'):
+                pipeline_obj.context['data_stream_total'] = data_stream_total
+                pipeline_obj.context['download_stream_current'] = download_stream_current
+                pipeline_obj.context['upload_stream_current'] = upload_stream_current
         if response_callback:
             response_callback(response)
             request.context['response_callback'] = response_callback
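These counters are what a raw_response_hook callback can read to track transfer progress. A hedged sketch, assuming blob_client and data already exist:

    def on_progress(response):
        current = response.context.get('upload_stream_current')
        total = response.context.get('data_stream_total')
        if current is not None and total:
            print(f"uploaded {current} of {total} bytes")

    blob_client.upload_blob(data, overwrite=True, raw_response_hook=on_progress)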
@@ -344,7 +335,7 @@ class StorageContentValidation(SansIOHTTPPolicy):
     """
     header_name = 'Content-MD5'
 
-    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+    def __init__(self, **kwargs: Any) -> None:  # pylint: disable=unused-argument
         super(StorageContentValidation, self).__init__()
 
     @staticmethod
|
@@ -372,8 +363,7 @@ class StorageContentValidation(SansIOHTTPPolicy):
|
|
372
363
|
|
373
364
|
return md5.digest()
|
374
365
|
|
375
|
-
def on_request(self, request):
|
376
|
-
# type: (PipelineRequest, Any) -> None
|
366
|
+
def on_request(self, request: "PipelineRequest") -> None:
|
377
367
|
validate_content = request.context.options.pop('validate_content', False)
|
378
368
|
if validate_content and request.http_request.method != 'GET':
|
379
369
|
computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
|
@@ -381,7 +371,7 @@ class StorageContentValidation(SansIOHTTPPolicy):
             request.context['validate_content_md5'] = computed_md5
         request.context['validate_content'] = validate_content
 
-    def on_response(self, request, response):
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
         if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
             computed_md5 = request.context.get('validate_content_md5') or \
                 encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
@@ -398,7 +388,18 @@ class StorageRetryPolicy(HTTPPolicy):
     The base class for Exponential and Linear retries containing shared code.
     """
 
-    def __init__(self, **kwargs):
+    total_retries: int
+    """The max number of retries."""
+    connect_retries: int
+    """The max number of connect retries."""
+    retry_read: int
+    """The max number of read retries."""
+    retry_status: int
+    """The max number of status retries."""
+    retry_to_secondary: bool
+    """Whether the secondary endpoint should be retried."""
+
+    def __init__(self, **kwargs: Any) -> None:
         self.total_retries = kwargs.pop('retry_total', 10)
         self.connect_retries = kwargs.pop('retry_connect', 3)
         self.read_retries = kwargs.pop('retry_read', 3)
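The kwargs popped here are the same per-client retry knobs the public clients accept. A hedged sketch (the connection string is a placeholder):

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string(
        "<connection-string>",
        retry_total=5,             # maps to StorageRetryPolicy.total_retries
        retry_connect=2,
        retry_read=2,
        retry_status=2,
        retry_to_secondary=False,  # opt in only for RA-GRS accounts
    )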
@@ -406,11 +407,11 @@ class StorageRetryPolicy(HTTPPolicy):
         self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
         super(StorageRetryPolicy, self).__init__()
 
-    def _set_next_host_location(self, settings, request):
+    def _set_next_host_location(self, settings: Dict[str, Any], request: "PipelineRequest") -> None:
         """
         A function which sets the next host location on the request, if applicable.
 
-        :param
+        :param Dict[str, Any]] settings: The configurable values pertaining to the next host location.
         :param PipelineRequest request: A pipeline request object.
         """
         if settings['hosts'] and all(settings['hosts'].values()):
@@ -423,7 +424,7 @@ class StorageRetryPolicy(HTTPPolicy):
             updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
             request.url = updated.geturl()
 
-    def configure_retries(self, request):
+    def configure_retries(self, request: "PipelineRequest") -> Dict[str, Any]:
         body_position = None
         if hasattr(request.http_request.body, 'read'):
             try:
@@ -446,11 +447,11 @@ class StorageRetryPolicy(HTTPPolicy):
             'history': []
         }
 
-    def get_backoff_time(self, settings):  # pylint: disable=unused-argument
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:  # pylint: disable=unused-argument
         """ Formula for computing the current backoff.
         Should be calculated by child class.
 
-        :param
+        :param Dict[str, Any]] settings: The configurable values pertaining to the backoff time.
         :returns: The backoff time.
         :rtype: float
         """
|
@@ -462,15 +463,20 @@ class StorageRetryPolicy(HTTPPolicy):
|
|
462
463
|
return
|
463
464
|
transport.sleep(backoff)
|
464
465
|
|
465
|
-
def increment(
|
466
|
+
def increment(
|
467
|
+
self, settings: Dict[str, Any],
|
468
|
+
request: "PipelineRequest",
|
469
|
+
response: Optional["PipelineResponse"] = None,
|
470
|
+
error: Optional[AzureError] = None
|
471
|
+
) -> bool:
|
466
472
|
"""Increment the retry counters.
|
467
473
|
|
468
|
-
:param
|
469
|
-
:param
|
470
|
-
:param
|
474
|
+
:param Dict[str, Any]] settings: The configurable values pertaining to the increment operation.
|
475
|
+
:param PipelineRequest request: A pipeline request object.
|
476
|
+
:param Optional[PipelineResponse] response: A pipeline response object.
|
471
477
|
:param error: An error encountered during the request, or
|
472
478
|
None if the response was received successfully.
|
473
|
-
:
|
479
|
+
:type error: Optional[AzureError]
|
474
480
|
:returns: Whether the retry attempts are exhausted.
|
475
481
|
:rtype: bool
|
476
482
|
"""
|
@@ -556,9 +562,23 @@ class StorageRetryPolicy(HTTPPolicy):
 class ExponentialRetry(StorageRetryPolicy):
     """Exponential retry."""
 
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, retry_to_secondary=False,
-                 random_jitter_range=3, **kwargs):
-        '''
+    initial_backoff: int
+    """The initial backoff interval, in seconds, for the first retry."""
+    increment_base: int
+    """The base, in seconds, to increment the initial_backoff by after the
+    first retry."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, initial_backoff: int = 15,
+        increment_base: int = 3,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
         Constructs an Exponential retry object. The initial_backoff is used for
         the first retry. Subsequent retries are retried after initial_backoff +
         increment_power^retry_count seconds.
@@ -568,7 +588,7 @@ class ExponentialRetry(StorageRetryPolicy):
         :param int increment_base:
             The base, in seconds, to increment the initial_backoff by after the
             first retry.
-        :param int
+        :param int retry_total:
             The maximum number of retry attempts.
         :param bool retry_to_secondary:
             Whether the request should be retried to secondary, if able. This should
@@ -577,22 +597,22 @@ class ExponentialRetry(StorageRetryPolicy):
         :param int random_jitter_range:
             A number in seconds which indicates a range to jitter/randomize for the back-off interval.
             For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
-        '''
+        """
         self.initial_backoff = initial_backoff
         self.increment_base = increment_base
         self.random_jitter_range = random_jitter_range
         super(ExponentialRetry, self).__init__(
             retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
 
-    def get_backoff_time(self, settings):
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
         """
         Calculates how long to sleep before retrying.
 
-        :param
+        :param Dict[str, Any]] settings: The configurable values pertaining to get backoff time.
         :returns:
-            An integer indicating how long to wait before retrying the request,
+            A float indicating how long to wait before retrying the request,
             or None to indicate no retry should be performed.
-        :rtype:
+        :rtype: float
         """
         random_generator = random.Random()
         backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
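The formula reads directly off the code above: a fixed initial backoff, plus increment_base raised to the retry count, jittered by random_jitter_range. A standalone sketch (the clamp of the jitter window at zero is an assumption consistent with the parameter's description, not code shown in this hunk):

    import random

    def exponential_backoff(count: int, initial_backoff: int = 15,
                            increment_base: int = 3, random_jitter_range: int = 3) -> float:
        # initial_backoff on the first try, then initial_backoff + increment_base**count
        backoff = initial_backoff + (0 if count == 0 else pow(increment_base, count))
        # assumed jitter window: [backoff - range, backoff + range], floored at 0
        start = backoff - random_jitter_range if backoff > random_jitter_range else 0
        return random.Random().uniform(start, backoff + random_jitter_range)

    # Before jitter: retry 0 sleeps 15s, retry 1 sleeps 15 + 3**1 = 18s, retry 2 sleeps 15 + 3**2 = 24s.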
@@ -604,13 +624,24 @@ class ExponentialRetry(StorageRetryPolicy):
 class LinearRetry(StorageRetryPolicy):
     """Linear retry."""
 
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+    initial_backoff: int
+    """The backoff interval, in seconds, between retries."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, backoff: int = 15,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
         """
         Constructs a Linear retry object.
 
         :param int backoff:
             The backoff interval, in seconds, between retries.
-        :param int
+        :param int retry_total:
             The maximum number of retry attempts.
         :param bool retry_to_secondary:
             Whether the request should be retried to secondary, if able. This should
@@ -625,15 +656,15 @@ class LinearRetry(StorageRetryPolicy):
         super(LinearRetry, self).__init__(
             retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
 
-    def get_backoff_time(self, settings):
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
         """
         Calculates how long to sleep before retrying.
 
-        :param
+        :param Dict[str, Any]] settings: The configurable values pertaining to the backoff time.
         :returns:
-            An integer indicating how long to wait before retrying the request,
+            A float indicating how long to wait before retrying the request,
             or None to indicate no retry should be performed.
-        :rtype:
+        :rtype: float
         """
         random_generator = random.Random()
         # the backoff interval normally does not change, however there is the possibility
@@ -650,8 +681,7 @@ class StorageBearerTokenCredentialPolicy(BearerTokenCredentialPolicy):
     def __init__(self, credential: "TokenCredential", audience: str, **kwargs: Any) -> None:
         super(StorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)
 
-    def on_challenge(self, request, response):
-        # type: (PipelineRequest, PipelineResponse) -> bool
+    def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
         try:
             auth_header = response.http_response.headers.get("WWW-Authenticate")
             challenge = StorageHttpChallenge(auth_header)