azure-storage-blob 12.25.0b1__py3-none-any.whl → 12.26.0__py3-none-any.whl

This diff shows the changes between two publicly released package versions, as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (64)
  1. azure/storage/blob/__init__.py +3 -2
  2. azure/storage/blob/_blob_client.py +94 -41
  3. azure/storage/blob/_blob_client_helpers.py +19 -4
  4. azure/storage/blob/_blob_service_client.py +16 -13
  5. azure/storage/blob/_container_client.py +25 -22
  6. azure/storage/blob/_deserialize.py +1 -1
  7. azure/storage/blob/_download.py +7 -7
  8. azure/storage/blob/_encryption.py +177 -184
  9. azure/storage/blob/_generated/_azure_blob_storage.py +1 -1
  10. azure/storage/blob/_generated/_configuration.py +2 -2
  11. azure/storage/blob/_generated/_serialization.py +3 -3
  12. azure/storage/blob/_generated/aio/_azure_blob_storage.py +1 -1
  13. azure/storage/blob/_generated/aio/_configuration.py +2 -2
  14. azure/storage/blob/_generated/aio/operations/_append_blob_operations.py +5 -4
  15. azure/storage/blob/_generated/aio/operations/_blob_operations.py +5 -25
  16. azure/storage/blob/_generated/aio/operations/_block_blob_operations.py +9 -7
  17. azure/storage/blob/_generated/aio/operations/_container_operations.py +1 -19
  18. azure/storage/blob/_generated/aio/operations/_page_blob_operations.py +5 -10
  19. azure/storage/blob/_generated/aio/operations/_service_operations.py +1 -8
  20. azure/storage/blob/_generated/models/__init__.py +2 -0
  21. azure/storage/blob/_generated/models/_azure_blob_storage_enums.py +6 -0
  22. azure/storage/blob/_generated/operations/_append_blob_operations.py +12 -9
  23. azure/storage/blob/_generated/operations/_blob_operations.py +32 -49
  24. azure/storage/blob/_generated/operations/_block_blob_operations.py +21 -13
  25. azure/storage/blob/_generated/operations/_container_operations.py +19 -37
  26. azure/storage/blob/_generated/operations/_page_blob_operations.py +17 -19
  27. azure/storage/blob/_generated/operations/_service_operations.py +9 -17
  28. azure/storage/blob/_lease.py +1 -0
  29. azure/storage/blob/_quick_query_helper.py +20 -24
  30. azure/storage/blob/_serialize.py +1 -0
  31. azure/storage/blob/_shared/__init__.py +7 -7
  32. azure/storage/blob/_shared/authentication.py +49 -32
  33. azure/storage/blob/_shared/avro/avro_io.py +45 -43
  34. azure/storage/blob/_shared/avro/avro_io_async.py +42 -41
  35. azure/storage/blob/_shared/avro/datafile.py +24 -21
  36. azure/storage/blob/_shared/avro/datafile_async.py +15 -15
  37. azure/storage/blob/_shared/avro/schema.py +196 -217
  38. azure/storage/blob/_shared/base_client.py +87 -61
  39. azure/storage/blob/_shared/base_client_async.py +58 -51
  40. azure/storage/blob/_shared/constants.py +1 -1
  41. azure/storage/blob/_shared/models.py +93 -92
  42. azure/storage/blob/_shared/parser.py +3 -3
  43. azure/storage/blob/_shared/policies.py +176 -145
  44. azure/storage/blob/_shared/policies_async.py +59 -70
  45. azure/storage/blob/_shared/request_handlers.py +51 -47
  46. azure/storage/blob/_shared/response_handlers.py +49 -45
  47. azure/storage/blob/_shared/shared_access_signature.py +67 -71
  48. azure/storage/blob/_shared/uploads.py +56 -49
  49. azure/storage/blob/_shared/uploads_async.py +72 -61
  50. azure/storage/blob/_shared_access_signature.py +3 -1
  51. azure/storage/blob/_version.py +1 -1
  52. azure/storage/blob/aio/__init__.py +3 -2
  53. azure/storage/blob/aio/_blob_client_async.py +241 -44
  54. azure/storage/blob/aio/_blob_service_client_async.py +13 -11
  55. azure/storage/blob/aio/_container_client_async.py +28 -25
  56. azure/storage/blob/aio/_download_async.py +16 -12
  57. azure/storage/blob/aio/_lease_async.py +1 -0
  58. azure/storage/blob/aio/_quick_query_helper_async.py +194 -0
  59. {azure_storage_blob-12.25.0b1.dist-info → azure_storage_blob-12.26.0.dist-info}/METADATA +7 -7
  60. azure_storage_blob-12.26.0.dist-info/RECORD +85 -0
  61. {azure_storage_blob-12.25.0b1.dist-info → azure_storage_blob-12.26.0.dist-info}/WHEEL +1 -1
  62. azure_storage_blob-12.25.0b1.dist-info/RECORD +0 -84
  63. {azure_storage_blob-12.25.0b1.dist-info → azure_storage_blob-12.26.0.dist-info}/LICENSE +0 -0
  64. {azure_storage_blob-12.25.0b1.dist-info → azure_storage_blob-12.26.0.dist-info}/top_level.txt +0 -0
azure/storage/blob/_shared/uploads_async.py
@@ -4,16 +4,15 @@
 # license information.
 # --------------------------------------------------------------------------

-import asyncio
+import asyncio # pylint: disable=do-not-import-asyncio
 import inspect
 import threading
-from asyncio import Lock
 from io import UnsupportedOperation
 from itertools import islice
 from math import ceil
 from typing import AsyncGenerator, Union

-from .import encode_base64, url_quote
+from . import encode_base64, url_quote
 from .request_handlers import get_length
 from .response_handlers import return_response_headers
 from .uploads import SubStream, IterStreamer # pylint: disable=unused-import
@@ -60,19 +59,20 @@ async def _parallel_uploads(uploader, pending, running):


 async def upload_data_chunks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        progress_hook=None,
-        **kwargs):
+    service=None,
+    uploader_class=None,
+    total_size=None,
+    chunk_size=None,
+    max_concurrency=None,
+    stream=None,
+    progress_hook=None,
+    **kwargs,
+):

     parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
+    if parallel and "modified_access_conditions" in kwargs:
         # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
+        kwargs["modified_access_conditions"] = None

     uploader = uploader_class(
         service=service,
@@ -81,7 +81,8 @@ async def upload_data_chunks(
         stream=stream,
         parallel=parallel,
         progress_hook=progress_hook,
-        **kwargs)
+        **kwargs,
+    )

     if parallel:
         upload_tasks = uploader.get_chunk_streams()
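Both chunk-upload entry points clear `modified_access_conditions` whenever `max_concurrency > 1`. Conditional headers such as `if_match` are pinned to a single blob state, and the serial code paths below refresh the ETag after every committed chunk; with several chunks in flight at once there is no single ETag to pin against, so the condition is dropped. A minimal stand-alone sketch of that guard (the function name here is hypothetical, not the library's API):

    # Hypothetical stand-alone version of the guard above.
    def strip_conditions_for_parallel(max_concurrency, **kwargs):
        if max_concurrency > 1 and "modified_access_conditions" in kwargs:
            # Access conditions do not work with parallelism.
            kwargs["modified_access_conditions"] = None
        return kwargs

    options = strip_conditions_for_parallel(4, modified_access_conditions="if-match placeholder")
    assert options["modified_access_conditions"] is None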
@@ -105,18 +106,19 @@


 async def upload_substream_blocks(
-        service=None,
-        uploader_class=None,
-        total_size=None,
-        chunk_size=None,
-        max_concurrency=None,
-        stream=None,
-        progress_hook=None,
-        **kwargs):
+    service=None,
+    uploader_class=None,
+    total_size=None,
+    chunk_size=None,
+    max_concurrency=None,
+    stream=None,
+    progress_hook=None,
+    **kwargs,
+):
     parallel = max_concurrency > 1
-    if parallel and 'modified_access_conditions' in kwargs:
+    if parallel and "modified_access_conditions" in kwargs:
         # Access conditions do not work with parallelism
-        kwargs['modified_access_conditions'] = None
+        kwargs["modified_access_conditions"] = None
     uploader = uploader_class(
         service=service,
         total_size=total_size,
@@ -124,13 +126,13 @@ async def upload_substream_blocks(
         stream=stream,
         parallel=parallel,
         progress_hook=progress_hook,
-        **kwargs)
+        **kwargs,
+    )

     if parallel:
         upload_tasks = uploader.get_substream_blocks()
         running_futures = [
-            asyncio.ensure_future(uploader.process_substream_block(u))
-            for u in islice(upload_tasks, 0, max_concurrency)
+            asyncio.ensure_future(uploader.process_substream_block(u)) for u in islice(upload_tasks, 0, max_concurrency)
         ]
         range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
     else:
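The parallel path seeds at most `max_concurrency` futures from the task iterator with `islice`, then hands the remaining tasks to `_parallel_uploads`, which keeps the window full as futures complete. `_parallel_uploads` itself is outside this diff, so the refill loop below is a sketch of the pattern implied by the call site, not the library's exact code:

    import asyncio
    from itertools import islice

    async def parallel_window(func, pending, running):
        # Start a replacement task each time one completes, keeping the
        # number of in-flight tasks at (or below) the initial window size.
        results = []
        while running:
            done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
            results.extend(task.result() for task in done)
            running.update(asyncio.ensure_future(func(item)) for item in islice(pending, len(done)))
        return results

    async def main():
        async def upload(i):
            await asyncio.sleep(0.01)
            return i

        tasks = iter(range(10))
        running = {asyncio.ensure_future(upload(i)) for i in islice(tasks, 3)}  # window of 3
        print(sorted(await parallel_window(upload, tasks, running)))

    asyncio.run(main())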
@@ -145,15 +147,17 @@ async def upload_substream_blocks(
 class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes

     def __init__(
-            self, service,
-            total_size,
-            chunk_size,
-            stream,
-            parallel,
-            encryptor=None,
-            padder=None,
-            progress_hook=None,
-            **kwargs):
+        self,
+        service,
+        total_size,
+        chunk_size,
+        stream,
+        parallel,
+        encryptor=None,
+        padder=None,
+        progress_hook=None,
+        **kwargs,
+    ):
         self.service = service
         self.total_size = total_size
         self.chunk_size = chunk_size
@@ -165,7 +169,7 @@ class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes

         # Progress feedback
         self.progress_total = 0
-        self.progress_lock = Lock() if parallel else None
+        self.progress_lock = asyncio.Lock() if parallel else None
         self.progress_hook = progress_hook

         # Encryption
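The lock swap (`Lock()` to `asyncio.Lock()`) is behavior-preserving; it simply goes through the module attribute now that the bare `import asyncio` carries the `do-not-import-asyncio` suppression. The lock exists to serialize progress updates when chunks upload concurrently, since every task adds its bytes to the shared `progress_total`. A reduced sketch of that pattern (names simplified):

    import asyncio

    class ProgressTracker:
        """Sketch of _ChunkUploader's progress accounting."""

        def __init__(self, parallel):
            self.progress_total = 0
            # Only parallel uploads need a lock; a serial upload never races.
            self.progress_lock = asyncio.Lock() if parallel else None

        async def update(self, length):
            if self.progress_lock:
                async with self.progress_lock:  # serialize concurrent chunk tasks
                    self.progress_total += length
            else:
                self.progress_total += length

    async def main():
        tracker = ProgressTracker(parallel=True)
        await asyncio.gather(*(tracker.update(4 * 1024 * 1024) for _ in range(8)))
        print(tracker.progress_total)  # 8 chunks * 4 MiB

    asyncio.run(main())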
@@ -179,7 +183,7 @@ class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
     async def get_chunk_streams(self):
         index = 0
         while True:
-            data = b''
+            data = b""
             read_size = self.chunk_size

             # Buffer until we either reach the end of the stream or get a whole chunk.
@@ -190,12 +194,12 @@ class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
                 if inspect.isawaitable(temp):
                     temp = await temp
                 if not isinstance(temp, bytes):
-                    raise TypeError('Blob data should be of type bytes.')
+                    raise TypeError("Blob data should be of type bytes.")
                 data += temp or b""

                 # We have read an empty string and so are at the end
                 # of the buffer or we have read a full chunk.
-                if temp == b'' or len(data) == self.chunk_size:
+                if temp == b"" or len(data) == self.chunk_size:
                     break

             if len(data) == self.chunk_size:
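The buffering loop exists because a single `read()` on a raw or non-seekable stream may legally return fewer bytes than requested; only an empty read means end-of-stream. A simplified synchronous sketch of the same rule:

    import io

    def read_full_chunk(stream, chunk_size):
        """Buffer reads until a whole chunk is collected or the stream ends."""
        data = b""
        while True:
            temp = stream.read(chunk_size - len(data))
            if not isinstance(temp, bytes):
                raise TypeError("Blob data should be of type bytes.")
            data += temp or b""
            # An empty read means end of stream; a full buffer means a whole chunk.
            if temp == b"" or len(data) == chunk_size:
                return data

    stream = io.BytesIO(b"x" * 10)
    print(len(read_full_chunk(stream, 4)))  # 4
    print(len(read_full_chunk(stream, 4)))  # 4
    print(len(read_full_chunk(stream, 4)))  # 2 -- trailing partial chunk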
@@ -274,13 +278,13 @@ class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
 class BlockBlobChunkUploader(_ChunkUploader):

     def __init__(self, *args, **kwargs):
-        kwargs.pop('modified_access_conditions', None)
+        kwargs.pop("modified_access_conditions", None)
         super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
         self.current_length = None

     async def _upload_chunk(self, chunk_offset, chunk_data):
         # TODO: This is incorrect, but works with recording.
-        index = f'{chunk_offset:032d}'
+        index = f"{chunk_offset:032d}"
         block_id = encode_base64(url_quote(encode_base64(index)))
         await self.service.stage_block(
             block_id,
@@ -288,19 +292,21 @@ class BlockBlobChunkUploader(_ChunkUploader):
             body=chunk_data,
             data_stream_total=self.total_size,
             upload_stream_current=self.progress_total,
-            **self.request_options)
+            **self.request_options,
+        )
         return index, block_id

     async def _upload_substream_block(self, index, block_stream):
         try:
-            block_id = f'BlockId{(index//self.chunk_size):05}'
+            block_id = f"BlockId{(index//self.chunk_size):05}"
             await self.service.stage_block(
                 block_id,
                 len(block_stream),
                 block_stream,
                 data_stream_total=self.total_size,
                 upload_stream_current=self.progress_total,
-                **self.request_options)
+                **self.request_options,
+            )
         finally:
             block_stream.close()
         return block_id
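The staged block IDs are built from the zero-padded chunk offset: the Blob service requires block IDs to be valid Base64 and, within one blob, all of the same encoded length, which `{chunk_offset:032d}` guarantees (the double-encode-plus-quote is, per the TODO above, a test-recording quirk rather than a service requirement). A sketch of the ID construction, assuming `encode_base64` from `.._shared` simply Base64-encodes to `str`:

    import base64
    from urllib.parse import quote as url_quote

    def encode_base64(data):
        # Assumed behavior of the shared encode_base64 helper: bytes/str in, base64 str out.
        if isinstance(data, str):
            data = data.encode("utf-8")
        return base64.b64encode(data).decode("utf-8")

    def make_block_id(chunk_offset):
        index = f"{chunk_offset:032d}"  # fixed width keeps every ID the same length
        return encode_base64(url_quote(encode_base64(index)))

    print(make_block_id(0))
    print(make_block_id(4 * 1024 * 1024))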
@@ -312,7 +318,7 @@ class PageBlobChunkUploader(_ChunkUploader):
         # read until non-zero byte is encountered
         # if reached the end without returning, then chunk_data is all 0's
         for each_byte in chunk_data:
-            if each_byte not in [0, b'\x00']:
+            if each_byte not in [0, b"\x00"]:
                 return False
         return True

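Page blobs are sparse: ranges that are never written read back as zeros, so skipping an all-zero chunk saves a round trip without changing the downloaded bytes. Iterating a `bytes` object yields `int`s in Python 3, hence the `0` check; the `b"\x00"` alternative keeps the test working for inputs that iterate to one-byte slices instead. A stand-alone copy of the check:

    def is_chunk_empty(chunk_data):
        # Return True only when every byte is zero; such pages need not be uploaded.
        for each_byte in chunk_data:
            if each_byte not in (0, b"\x00"):
                return False
        return True

    print(is_chunk_empty(b"\x00" * 512))            # True -> page upload skipped
    print(is_chunk_empty(b"\x00" * 511 + b"\x01"))  # False -> page is uploaded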
@@ -320,7 +326,7 @@ class PageBlobChunkUploader(_ChunkUploader):
         # avoid uploading the empty pages
         if not self._is_chunk_empty(chunk_data):
             chunk_end = chunk_offset + len(chunk_data) - 1
-            content_range = f'bytes={chunk_offset}-{chunk_end}'
+            content_range = f"bytes={chunk_offset}-{chunk_end}"
             computed_md5 = None
             self.response_headers = await self.service.upload_pages(
                 body=chunk_data,
@@ -330,10 +336,11 @@
                 cls=return_response_headers,
                 data_stream_total=self.total_size,
                 upload_stream_current=self.progress_total,
-                **self.request_options)
+                **self.request_options,
+            )

-        if not self.parallel and self.request_options.get('modified_access_conditions'):
-            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+        if not self.parallel and self.request_options.get("modified_access_conditions"):
+            self.request_options["modified_access_conditions"].if_match = self.response_headers["etag"]

     async def _upload_substream_block(self, index, block_stream):
         pass
@@ -353,18 +360,21 @@ class AppendBlobChunkUploader(_ChunkUploader):
                 cls=return_response_headers,
                 data_stream_total=self.total_size,
                 upload_stream_current=self.progress_total,
-                **self.request_options)
-            self.current_length = int(self.response_headers['blob_append_offset'])
+                **self.request_options,
+            )
+            self.current_length = int(self.response_headers["blob_append_offset"])
         else:
-            self.request_options['append_position_access_conditions'].append_position = \
+            self.request_options["append_position_access_conditions"].append_position = (
                 self.current_length + chunk_offset
+            )
             self.response_headers = await self.service.append_block(
                 body=chunk_data,
                 content_length=len(chunk_data),
                 cls=return_response_headers,
                 data_stream_total=self.total_size,
                 upload_stream_current=self.progress_total,
-                **self.request_options)
+                **self.request_options,
+            )

     async def _upload_substream_block(self, index, block_stream):
         pass
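For append blobs the first committed chunk records its `blob_append_offset`; later chunks send `append_position = current_length + chunk_offset`, so the service rejects any block that would land at an unexpected offset, for example after an interleaved writer appends. A worked illustration with hypothetical numbers:

    # Hypothetical walk-through of the append-position arithmetic.
    current_length = 1024            # offset where this upload's first chunk landed
    chunk_size = 4 * 1024 * 1024

    for chunk_offset in (chunk_size, 2 * chunk_size, 3 * chunk_size):
        append_position = current_length + chunk_offset
        # The service fails the append unless the blob is exactly this long,
        # which catches interleaved writers and out-of-order commits.
        print(f"chunk at offset {chunk_offset} requires append_position={append_position}")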
@@ -380,11 +390,11 @@ class DataLakeFileChunkUploader(_ChunkUploader):
             cls=return_response_headers,
             data_stream_total=self.total_size,
             upload_stream_current=self.progress_total,
-            **self.request_options
+            **self.request_options,
         )

-        if not self.parallel and self.request_options.get('modified_access_conditions'):
-            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+        if not self.parallel and self.request_options.get("modified_access_conditions"):
+            self.request_options["modified_access_conditions"].if_match = self.response_headers["etag"]

     async def _upload_substream_block(self, index, block_stream):
         try:
@@ -395,7 +405,7 @@ class DataLakeFileChunkUploader(_ChunkUploader):
                 cls=return_response_headers,
                 data_stream_total=self.total_size,
                 upload_stream_current=self.progress_total,
-                **self.request_options
+                **self.request_options,
             )
         finally:
             block_stream.close()
@@ -412,9 +422,9 @@ class FileChunkUploader(_ChunkUploader):
             length,
             data_stream_total=self.total_size,
             upload_stream_current=self.progress_total,
-            **self.request_options
+            **self.request_options,
         )
-        range_id = f'bytes={chunk_offset}-{chunk_end}'
+        range_id = f"bytes={chunk_offset}-{chunk_end}"
         return range_id, response

     # TODO: Implement this method.
@@ -422,10 +432,11 @@
         pass


-class AsyncIterStreamer():
+class AsyncIterStreamer:
     """
     File-like streaming object for AsyncGenerators.
     """
+
     def __init__(self, generator: AsyncGenerator[Union[bytes, str], None], encoding: str = "UTF-8"):
         self.iterator = generator.__aiter__()
         self.leftover = b""
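`AsyncIterStreamer` adapts an `AsyncGenerator` to the file-like `read(size)` interface the chunk uploaders expect, carrying unread bytes over in `self.leftover`. The method bodies beyond `__init__` are not in this diff, so the `read` below is a sketch of the general shape under that assumption:

    import asyncio
    from typing import AsyncGenerator, Union

    class AsyncIterStreamerSketch:
        """File-like read(size) over an async generator, buffering leftovers."""

        def __init__(self, generator: AsyncGenerator[Union[bytes, str], None], encoding: str = "UTF-8"):
            self.iterator = generator.__aiter__()
            self.leftover = b""
            self.encoding = encoding

        async def read(self, size: int) -> bytes:
            data = self.leftover
            while len(data) < size:
                try:
                    chunk = await self.iterator.__anext__()
                except StopAsyncIteration:
                    break
                data += chunk.encode(self.encoding) if isinstance(chunk, str) else chunk
            self.leftover = data[size:]  # keep any overshoot for the next read
            return data[:size]

    async def main():
        async def gen():
            for piece in (b"abc", b"defg", b"hi"):
                yield piece

        s = AsyncIterStreamerSketch(gen())
        print(await s.read(4), await s.read(4), await s.read(4))  # b'abcd' b'efgh' b'i'

    asyncio.run(main())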
azure/storage/blob/_shared_access_signature.py
@@ -311,7 +311,9 @@ class _BlobSharedAccessHelper(_SharedAccessHelper):
                 self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) +
                 self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) +
                 self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) +
-                self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID))
+                self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID) +
+                self.get_value_to_append(QueryStringConstants.SIGNED_KEY_DELEGATED_USER_TID) +
+                self.get_value_to_append(QueryStringConstants.SIGNED_DELEGATED_USER_OID))
         else:
             string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER)

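The user-delegation string-to-sign gains two fields, the delegated user tenant ID (`SIGNED_KEY_DELEGATED_USER_TID`) and delegated user object ID (`SIGNED_DELEGATED_USER_OID`); how they are populated is internal and not shown in this diff. For context, a hedged sketch of minting a user-delegation SAS with the existing public API (placeholders left as-is):

    from datetime import datetime, timedelta, timezone
    from azure.identity import DefaultAzureCredential
    from azure.storage.blob import BlobSasPermissions, BlobServiceClient, generate_blob_sas

    service = BlobServiceClient(
        "https://<account>.blob.core.windows.net", credential=DefaultAzureCredential()
    )
    now = datetime.now(timezone.utc)
    delegation_key = service.get_user_delegation_key(
        key_start_time=now, key_expiry_time=now + timedelta(hours=1)
    )

    sas = generate_blob_sas(
        account_name="<account>",
        container_name="<container>",
        blob_name="<blob>",
        user_delegation_key=delegation_key,  # signs with the delegation key, not the account key
        permission=BlobSasPermissions(read=True),
        expiry=now + timedelta(hours=1),
    )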
azure/storage/blob/_version.py
@@ -4,4 +4,4 @@
 # license information.
 # --------------------------------------------------------------------------

-VERSION = "12.25.0b1"
+VERSION = "12.26.0"
azure/storage/blob/aio/__init__.py
@@ -73,7 +73,7 @@ async def upload_blob_to_url(
         entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
     :keyword str encoding:
         Encoding to use if text is supplied as input. Defaults to UTF-8.
-    :returns: Blob-updated property dict (Etag and last modified)
+    :return: Blob-updated property dict (Etag and last modified)
     :rtype: dict[str, Any]
     """
     async with BlobClient.from_blob_url(blob_url, credential=credential) as client:
@@ -102,7 +102,7 @@
     :param output:
         Where the data should be downloaded to. This could be either a file path to write to,
         or an open IO handle to write to.
-    :type output: str or writable stream
+    :type output: str or IO
     :param credential:
         The credentials with which to authenticate. This is optional if the
         blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
@@ -139,6 +139,7 @@
         blob. Also note that if enabled, the memory-efficient upload algorithm
         will not be used, because computing the MD5 hash requires buffering
         entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :return: None
     :rtype: None
     """
    overwrite = kwargs.pop('overwrite', False)
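Both convenience helpers open a temporary `BlobClient` from the URL, so they suit one-shot transfers. A short usage sketch of the async variants touched above (the URL and SAS token are placeholders):

    import asyncio
    from azure.storage.blob.aio import download_blob_from_url, upload_blob_to_url

    async def main():
        url = "https://<account>.blob.core.windows.net/<container>/<blob>"
        sas = "<sas-token>"
        # Upload bytes, then download the blob back to a local file.
        await upload_blob_to_url(url, b"hello, blob", credential=sas)
        await download_blob_from_url(url, "local_copy.bin", credential=sas, overwrite=True)

    asyncio.run(main())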