azure-storage-blob 12.25.0b1__py3-none-any.whl → 12.26.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- azure/storage/blob/__init__.py +3 -2
- azure/storage/blob/_blob_client.py +94 -41
- azure/storage/blob/_blob_client_helpers.py +19 -4
- azure/storage/blob/_blob_service_client.py +16 -13
- azure/storage/blob/_container_client.py +25 -22
- azure/storage/blob/_deserialize.py +1 -1
- azure/storage/blob/_download.py +7 -7
- azure/storage/blob/_encryption.py +177 -184
- azure/storage/blob/_generated/_azure_blob_storage.py +1 -1
- azure/storage/blob/_generated/_configuration.py +2 -2
- azure/storage/blob/_generated/_serialization.py +3 -3
- azure/storage/blob/_generated/aio/_azure_blob_storage.py +1 -1
- azure/storage/blob/_generated/aio/_configuration.py +2 -2
- azure/storage/blob/_generated/aio/operations/_append_blob_operations.py +5 -4
- azure/storage/blob/_generated/aio/operations/_blob_operations.py +5 -25
- azure/storage/blob/_generated/aio/operations/_block_blob_operations.py +9 -7
- azure/storage/blob/_generated/aio/operations/_container_operations.py +1 -19
- azure/storage/blob/_generated/aio/operations/_page_blob_operations.py +5 -10
- azure/storage/blob/_generated/aio/operations/_service_operations.py +1 -8
- azure/storage/blob/_generated/models/__init__.py +2 -0
- azure/storage/blob/_generated/models/_azure_blob_storage_enums.py +6 -0
- azure/storage/blob/_generated/operations/_append_blob_operations.py +12 -9
- azure/storage/blob/_generated/operations/_blob_operations.py +32 -49
- azure/storage/blob/_generated/operations/_block_blob_operations.py +21 -13
- azure/storage/blob/_generated/operations/_container_operations.py +19 -37
- azure/storage/blob/_generated/operations/_page_blob_operations.py +17 -19
- azure/storage/blob/_generated/operations/_service_operations.py +9 -17
- azure/storage/blob/_lease.py +1 -0
- azure/storage/blob/_quick_query_helper.py +20 -24
- azure/storage/blob/_serialize.py +1 -0
- azure/storage/blob/_shared/__init__.py +7 -7
- azure/storage/blob/_shared/authentication.py +49 -32
- azure/storage/blob/_shared/avro/avro_io.py +45 -43
- azure/storage/blob/_shared/avro/avro_io_async.py +42 -41
- azure/storage/blob/_shared/avro/datafile.py +24 -21
- azure/storage/blob/_shared/avro/datafile_async.py +15 -15
- azure/storage/blob/_shared/avro/schema.py +196 -217
- azure/storage/blob/_shared/base_client.py +87 -61
- azure/storage/blob/_shared/base_client_async.py +58 -51
- azure/storage/blob/_shared/constants.py +1 -1
- azure/storage/blob/_shared/models.py +93 -92
- azure/storage/blob/_shared/parser.py +3 -3
- azure/storage/blob/_shared/policies.py +176 -145
- azure/storage/blob/_shared/policies_async.py +59 -70
- azure/storage/blob/_shared/request_handlers.py +51 -47
- azure/storage/blob/_shared/response_handlers.py +49 -45
- azure/storage/blob/_shared/shared_access_signature.py +67 -71
- azure/storage/blob/_shared/uploads.py +56 -49
- azure/storage/blob/_shared/uploads_async.py +72 -61
- azure/storage/blob/_shared_access_signature.py +3 -1
- azure/storage/blob/_version.py +1 -1
- azure/storage/blob/aio/__init__.py +3 -2
- azure/storage/blob/aio/_blob_client_async.py +241 -44
- azure/storage/blob/aio/_blob_service_client_async.py +13 -11
- azure/storage/blob/aio/_container_client_async.py +28 -25
- azure/storage/blob/aio/_download_async.py +16 -12
- azure/storage/blob/aio/_lease_async.py +1 -0
- azure/storage/blob/aio/_quick_query_helper_async.py +194 -0
- {azure_storage_blob-12.25.0b1.dist-info → azure_storage_blob-12.26.0.dist-info}/METADATA +7 -7
- azure_storage_blob-12.26.0.dist-info/RECORD +85 -0
- {azure_storage_blob-12.25.0b1.dist-info → azure_storage_blob-12.26.0.dist-info}/WHEEL +1 -1
- azure_storage_blob-12.25.0b1.dist-info/RECORD +0 -84
- {azure_storage_blob-12.25.0b1.dist-info → azure_storage_blob-12.26.0.dist-info}/LICENSE +0 -0
- {azure_storage_blob-12.25.0b1.dist-info → azure_storage_blob-12.26.0.dist-info}/top_level.txt +0 -0
azure/storage/blob/_shared/shared_access_signature.py

@@ -11,42 +11,45 @@ from .parser import _to_utc_datetime
 from .constants import X_MS_VERSION
 from . import sign_string, url_quote
 
+
 # cspell:ignoreRegExp rsc.
 # cspell:ignoreRegExp s..?id
 class QueryStringConstants(object):
-    SIGNED_SIGNATURE = 'sig'
-    SIGNED_PERMISSION = 'sp'
-    SIGNED_START = 'st'
-    SIGNED_EXPIRY = 'se'
-    SIGNED_RESOURCE = 'sr'
-    SIGNED_IDENTIFIER = 'si'
-    SIGNED_IP = 'sip'
-    SIGNED_PROTOCOL = 'spr'
-    SIGNED_VERSION = 'sv'
-    SIGNED_CACHE_CONTROL = 'rscc'
-    SIGNED_CONTENT_DISPOSITION = 'rscd'
-    SIGNED_CONTENT_ENCODING = 'rsce'
-    SIGNED_CONTENT_LANGUAGE = 'rscl'
-    SIGNED_CONTENT_TYPE = 'rsct'
-    START_PK = 'spk'
-    START_RK = 'srk'
-    END_PK = 'epk'
-    END_RK = 'erk'
-    SIGNED_RESOURCE_TYPES = 'srt'
-    SIGNED_SERVICES = 'ss'
-    SIGNED_OID = 'skoid'
-    SIGNED_TID = 'sktid'
-    SIGNED_KEY_START = 'skt'
-    SIGNED_KEY_EXPIRY = 'ske'
-    SIGNED_KEY_SERVICE = 'sks'
-    SIGNED_KEY_VERSION = 'skv'
-    SIGNED_ENCRYPTION_SCOPE = 'ses'
+    SIGNED_SIGNATURE = "sig"
+    SIGNED_PERMISSION = "sp"
+    SIGNED_START = "st"
+    SIGNED_EXPIRY = "se"
+    SIGNED_RESOURCE = "sr"
+    SIGNED_IDENTIFIER = "si"
+    SIGNED_IP = "sip"
+    SIGNED_PROTOCOL = "spr"
+    SIGNED_VERSION = "sv"
+    SIGNED_CACHE_CONTROL = "rscc"
+    SIGNED_CONTENT_DISPOSITION = "rscd"
+    SIGNED_CONTENT_ENCODING = "rsce"
+    SIGNED_CONTENT_LANGUAGE = "rscl"
+    SIGNED_CONTENT_TYPE = "rsct"
+    START_PK = "spk"
+    START_RK = "srk"
+    END_PK = "epk"
+    END_RK = "erk"
+    SIGNED_RESOURCE_TYPES = "srt"
+    SIGNED_SERVICES = "ss"
+    SIGNED_OID = "skoid"
+    SIGNED_TID = "sktid"
+    SIGNED_KEY_START = "skt"
+    SIGNED_KEY_EXPIRY = "ske"
+    SIGNED_KEY_SERVICE = "sks"
+    SIGNED_KEY_VERSION = "skv"
+    SIGNED_ENCRYPTION_SCOPE = "ses"
+    SIGNED_KEY_DELEGATED_USER_TID = "skdutid"
+    SIGNED_DELEGATED_USER_OID = "sduoid"
 
     # for ADLS
-    SIGNED_AUTHORIZED_OID = 'saoid'
-    SIGNED_UNAUTHORIZED_OID = 'suoid'
-    SIGNED_CORRELATION_ID = 'scid'
-    SIGNED_DIRECTORY_DEPTH = 'sdd'
+    SIGNED_AUTHORIZED_OID = "saoid"
+    SIGNED_UNAUTHORIZED_OID = "suoid"
+    SIGNED_CORRELATION_ID = "scid"
+    SIGNED_DIRECTORY_DEPTH = "sdd"
 
     @staticmethod
     def to_list():
@@ -78,6 +81,8 @@ class QueryStringConstants(object):
             QueryStringConstants.SIGNED_KEY_SERVICE,
             QueryStringConstants.SIGNED_KEY_VERSION,
             QueryStringConstants.SIGNED_ENCRYPTION_SCOPE,
+            QueryStringConstants.SIGNED_KEY_DELEGATED_USER_TID,
+            QueryStringConstants.SIGNED_DELEGATED_USER_OID,
             # for ADLS
             QueryStringConstants.SIGNED_AUTHORIZED_OID,
             QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
@@ -87,38 +92,30 @@ class QueryStringConstants(object):
 
 
 class SharedAccessSignature(object):
-    '''
+    """
     Provides a factory for creating account access
     signature tokens with an account name and account key. Users can either
     use the factory or can construct the appropriate service and use the
     generate_*_shared_access_signature method directly.
-    '''
+    """
 
     def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
-        '''
+        """
         :param str account_name:
             The storage account name used to generate the shared access signatures.
         :param str account_key:
            The access key to generate the shares access signatures.
         :param str x_ms_version:
            The service version used to generate the shared access signatures.
-        '''
+        """
         self.account_name = account_name
         self.account_key = account_key
         self.x_ms_version = x_ms_version
 
     def generate_account(
-        self, services,
-        resource_types,
-        permission,
-        expiry,
-        start=None,
-        ip=None,
-        protocol=None,
-        sts_hook=None,
-        **kwargs
+        self, services, resource_types, permission, expiry, start=None, ip=None, protocol=None, sts_hook=None, **kwargs
     ) -> str:
-        '''
+        """
         Generates a shared access signature for the account.
         Use the returned signature with the sas_token parameter of the service
         or to create a new account object.
@@ -165,9 +162,9 @@ class SharedAccessSignature(object):
             For debugging purposes only. If provided, the hook is called with the string to sign
             that was used to generate the SAS.
         :type sts_hook: Optional[Callable[[str], None]]
-        :
+        :return: The generated SAS token for the account.
         :rtype: str
-        '''
+        """
         sas = _SharedAccessHelper()
         sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
         sas.add_account(services, resource_types)
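SharedAccessSignature is the internal engine; the package's public entry point for account-level tokens is generate_account_sas. A minimal usage sketch (the account name and key are placeholders, the expiry is illustrative):

```python
from datetime import datetime, timedelta, timezone

from azure.storage.blob import AccountSasPermissions, ResourceTypes, generate_account_sas

sas_token = generate_account_sas(
    account_name="myaccount",
    account_key="<account-key>",
    resource_types=ResourceTypes(service=True, container=True, object=True),
    permission=AccountSasPermissions(read=True, list=True),
    expiry=datetime.now(timezone.utc) + timedelta(hours=1),
)
# The token can then be passed as the credential of a BlobServiceClient.
```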
@@ -190,7 +187,7 @@ class _SharedAccessHelper(object):
         self.query_dict[name] = str(val) if val is not None else None
 
     def add_encryption_scope(self, **kwargs):
-        self._add_query(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE, kwargs.pop('encryption_scope', None))
+        self._add_query(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE, kwargs.pop("encryption_scope", None))
 
     def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
         if isinstance(start, date):
@@ -216,11 +213,9 @@ class _SharedAccessHelper(object):
         self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
         self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
 
-    def add_override_response_headers(self, cache_control,
-                                      content_disposition,
-                                      content_encoding,
-                                      content_language,
-                                      content_type):
+    def add_override_response_headers(
+        self, cache_control, content_disposition, content_encoding, content_language, content_type
+    ):
         self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
         self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
         self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
@@ -229,24 +224,25 @@ class _SharedAccessHelper(object):
 
     def add_account_signature(self, account_name, account_key):
         def get_value_to_append(query):
-            return_value = self.query_dict.get(query) or ''
-            return return_value + '\n'
-
-        string_to_sign = \
-            (account_name + '\n' +
-             get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
-             get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
-             get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
-             get_value_to_append(QueryStringConstants.SIGNED_START) +
-             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
-             get_value_to_append(QueryStringConstants.SIGNED_IP) +
-             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
-             get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
-             get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE))
-
-        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
-                        sign_string(account_key, string_to_sign))
+            return_value = self.query_dict.get(query) or ""
+            return return_value + "\n"
+
+        string_to_sign = (
+            account_name
+            + "\n"
+            + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION)
+            + get_value_to_append(QueryStringConstants.SIGNED_SERVICES)
+            + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES)
+            + get_value_to_append(QueryStringConstants.SIGNED_START)
+            + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY)
+            + get_value_to_append(QueryStringConstants.SIGNED_IP)
+            + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL)
+            + get_value_to_append(QueryStringConstants.SIGNED_VERSION)
+            + get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE)
+        )
+
+        self._add_query(QueryStringConstants.SIGNED_SIGNATURE, sign_string(account_key, string_to_sign))
         self.string_to_sign = string_to_sign
 
     def get_token(self) -> str:
-        return '&'.join([f'{n}={url_quote(v)}' for n, v in self.query_dict.items() if v is not None])
+        return "&".join([f"{n}={url_quote(v)}" for n, v in self.query_dict.items() if v is not None])
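add_account_signature builds a newline-delimited string-to-sign and hands it to sign_string. For reference, Azure account SAS signatures are HMAC-SHA256 digests of that string keyed with the base64-decoded account key; a self-contained sketch with fabricated inputs (the field ordering mirrors the code above, the key and values are fake):

```python
import base64
import hashlib
import hmac

def sign(account_key_b64: str, string_to_sign: str) -> str:
    # HMAC-SHA256 over the UTF-8 string-to-sign, keyed with the base64-decoded account key.
    digest = hmac.new(
        base64.b64decode(account_key_b64),
        string_to_sign.encode("utf-8"),
        hashlib.sha256,
    ).digest()
    return base64.b64encode(digest).decode("utf-8")

# Fabricated fields: account name, permissions, services, resource types, start, expiry,
# IP range, protocol, version, encryption scope. Empty fields stay empty, and every field
# is newline-terminated, matching get_value_to_append above.
fields = ["myaccount", "rl", "b", "sco", "", "2026-01-01T00:00:00Z", "", "https", "2025-11-05", ""]
string_to_sign = "\n".join(fields) + "\n"

fake_key = base64.b64encode(b"not-a-real-account-key").decode("utf-8")
print(sign(fake_key, string_to_sign))
```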
azure/storage/blob/_shared/uploads.py

@@ -12,7 +12,7 @@ from threading import Lock
 
 from azure.core.tracing.common import with_current_context
 
-from .import encode_base64, url_quote
+from . import encode_base64, url_quote
 from .request_handlers import get_length
 from .response_handlers import return_response_headers
 
@@ -41,20 +41,21 @@ def _parallel_uploads(executor, uploader, pending, running):
 
 
 def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        validate_content=None,
-        progress_hook=None,
-        **kwargs):
+    service=None,
+    uploader_class=None,
+    total_size=None,
+    chunk_size=None,
+    max_concurrency=None,
+    stream=None,
+    validate_content=None,
+    progress_hook=None,
+    **kwargs,
+):
 
     parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
+    if parallel and "modified_access_conditions" in kwargs:
         # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
+        kwargs["modified_access_conditions"] = None
 
     uploader = uploader_class(
         service=service,
@@ -64,7 +65,8 @@ def upload_data_chunks(
         parallel=parallel,
         validate_content=validate_content,
         progress_hook=progress_hook,
-        **kwargs)
+        **kwargs,
+    )
     if parallel:
         with futures.ThreadPoolExecutor(max_concurrency) as executor:
             upload_tasks = uploader.get_chunk_streams()
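When max_concurrency > 1, upload_data_chunks fans the chunk uploads out over a ThreadPoolExecutor (and, as the earlier hunk shows, drops modified_access_conditions, since access conditions and parallel writes do not mix). A stripped-down sketch of the same fan-out pattern; upload_one and the chunking loop are illustrative stand-ins, not SDK internals:

```python
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO

def upload_one(index: int, data: bytes) -> int:
    # Stand-in for a single stage_block / upload_pages / append_block call.
    return len(data)

def upload_in_chunks(stream, chunk_size: int, max_concurrency: int) -> int:
    chunks, index = [], 0
    while True:
        data = stream.read(chunk_size)
        if not data:
            break
        chunks.append((index, data))
        index += 1
    if max_concurrency > 1:
        # Parallel path: each chunk upload runs on a worker thread.
        with ThreadPoolExecutor(max_concurrency) as executor:
            uploaded = list(executor.map(lambda args: upload_one(*args), chunks))
    else:
        uploaded = [upload_one(i, d) for i, d in chunks]
    return sum(uploaded)

print(upload_in_chunks(BytesIO(b"x" * 10_000), chunk_size=4096, max_concurrency=4))  # 10000
```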
@@ -81,18 +83,19 @@ def upload_substream_blocks(
 
 
 def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        progress_hook=None,
-        **kwargs):
+    service=None,
+    uploader_class=None,
+    total_size=None,
+    chunk_size=None,
+    max_concurrency=None,
+    stream=None,
+    progress_hook=None,
+    **kwargs,
+):
     parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
+    if parallel and "modified_access_conditions" in kwargs:
         # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
+        kwargs["modified_access_conditions"] = None
     uploader = uploader_class(
         service=service,
         total_size=total_size,
@@ -100,7 +103,8 @@ def upload_substream_blocks(
         stream=stream,
         parallel=parallel,
         progress_hook=progress_hook,
-        **kwargs)
+        **kwargs,
+    )
 
     if parallel:
         with futures.ThreadPoolExecutor(max_concurrency) as executor:
@@ -120,15 +124,17 @@ def upload_substream_blocks(
 class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
 
     def __init__(
-            self, service,
-            total_size,
-            chunk_size,
-            stream,
-            parallel,
-            encryptor=None,
-            padder=None,
-            progress_hook=None,
-            **kwargs):
+        self,
+        service,
+        total_size,
+        chunk_size,
+        stream,
+        parallel,
+        encryptor=None,
+        padder=None,
+        progress_hook=None,
+        **kwargs,
+    ):
         self.service = service
         self.total_size = total_size
         self.chunk_size = chunk_size
@@ -253,7 +259,7 @@ class BlockBlobChunkUploader(_ChunkUploader):
 
     def _upload_chunk(self, chunk_offset, chunk_data):
         # TODO: This is incorrect, but works with recording.
-        index = f'{chunk_offset:032d}'
+        index = f"{chunk_offset:032d}"
         block_id = encode_base64(url_quote(encode_base64(index)))
         self.service.stage_block(
             block_id,
@@ -261,20 +267,20 @@ class BlockBlobChunkUploader(_ChunkUploader):
             chunk_data,
             data_stream_total=self.total_size,
             upload_stream_current=self.progress_total,
-            **self.request_options
+            **self.request_options,
         )
         return index, block_id
 
     def _upload_substream_block(self, index, block_stream):
         try:
-            block_id = f'BlockId{(index//self.chunk_size):05}'
+            block_id = f"BlockId{(index//self.chunk_size):05}"
             self.service.stage_block(
                 block_id,
                 len(block_stream),
                 block_stream,
                 data_stream_total=self.total_size,
                 upload_stream_current=self.progress_total,
-                **self.request_options
+                **self.request_options,
             )
         finally:
             block_stream.close()
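BlockBlobChunkUploader stages each chunk under a generated block id, and the caller later commits the accumulated block list. The same flow through the public BlobClient API looks roughly like this (the connection string, container, and blob names are placeholders):

```python
import uuid

from azure.storage.blob import BlobBlock, BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="big.bin"
)

block_list = []
for chunk in (b"first chunk", b"second chunk"):
    block_id = str(uuid.uuid4())               # any unique string id
    blob.stage_block(block_id=block_id, data=chunk)
    block_list.append(BlobBlock(block_id=block_id))

blob.commit_block_list(block_list)             # assembles the staged blocks into the blob
```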
@@ -302,11 +308,11 @@ class PageBlobChunkUploader(_ChunkUploader):
             cls=return_response_headers,
             data_stream_total=self.total_size,
             upload_stream_current=self.progress_total,
-            **self.request_options
+            **self.request_options,
         )
 
-        if not self.parallel and self.request_options.get('modified_access_conditions'):
-            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+        if not self.parallel and self.request_options.get("modified_access_conditions"):
+            self.request_options["modified_access_conditions"].if_match = self.response_headers["etag"]
 
     def _upload_substream_block(self, index, block_stream):
         pass
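The if_match/etag bookkeeping above is how the uploader enforces optimistic concurrency internally when running sequentially. Through the public API the same condition is expressed with the etag and match_condition keyword arguments; a minimal sketch (client setup values are placeholders):

```python
from azure.core import MatchConditions
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="notes.txt"
)

props = blob.get_blob_properties()
blob.upload_blob(
    b"updated content",
    overwrite=True,
    etag=props.etag,                              # succeed only if the blob is unchanged
    match_condition=MatchConditions.IfNotModified,
)
```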
@@ -326,19 +332,20 @@ class AppendBlobChunkUploader(_ChunkUploader):
                 cls=return_response_headers,
                 data_stream_total=self.total_size,
                 upload_stream_current=self.progress_total,
-                **self.request_options
+                **self.request_options,
             )
             self.current_length = int(self.response_headers["blob_append_offset"])
         else:
-            self.request_options['append_position_access_conditions'].append_position = \
+            self.request_options["append_position_access_conditions"].append_position = (
                 self.current_length + chunk_offset
+            )
             self.response_headers = self.service.append_block(
                 body=chunk_data,
                 content_length=len(chunk_data),
                 cls=return_response_headers,
                 data_stream_total=self.total_size,
                 upload_stream_current=self.progress_total,
-                **self.request_options
+                **self.request_options,
             )
 
     def _upload_substream_block(self, index, block_stream):
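AppendBlobChunkUploader tracks blob_append_offset and append-position conditions between chunks; through the public API the service handles the offset bookkeeping unless you opt into append-position conditions yourself. A minimal append-blob sketch (connection string and names are placeholders):

```python
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="logs", blob_name="app.log"
)

blob.create_append_blob()              # appends are only valid on an append blob
blob.append_block(b"first line\n")
blob.append_block(b"second line\n")
```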
@@ -356,11 +363,11 @@ class DataLakeFileChunkUploader(_ChunkUploader):
             cls=return_response_headers,
             data_stream_total=self.total_size,
             upload_stream_current=self.progress_total,
-            **self.request_options
+            **self.request_options,
         )
 
-        if not self.parallel and self.request_options.get('modified_access_conditions'):
-            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+        if not self.parallel and self.request_options.get("modified_access_conditions"):
+            self.request_options["modified_access_conditions"].if_match = self.response_headers["etag"]
 
     def _upload_substream_block(self, index, block_stream):
         try:
@@ -371,7 +378,7 @@ class DataLakeFileChunkUploader(_ChunkUploader):
                 cls=return_response_headers,
                 data_stream_total=self.total_size,
                 upload_stream_current=self.progress_total,
-                **self.request_options
+                **self.request_options,
             )
         finally:
             block_stream.close()
@@ -388,9 +395,9 @@ class FileChunkUploader(_ChunkUploader):
             length,
             data_stream_total=self.total_size,
             upload_stream_current=self.progress_total,
-            **self.request_options
+            **self.request_options,
         )
-        return f'bytes={chunk_offset}-{chunk_end}', response
+        return f"bytes={chunk_offset}-{chunk_end}", response
 
     # TODO: Implement this method.
     def _upload_substream_block(self, index, block_stream):