tracktolib 0.66.1.tar.gz → 0.66.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tracktolib
3
- Version: 0.66.1
3
+ Version: 0.66.2
4
4
  Summary: Utility library for python
5
5
  Keywords: utility
6
6
  Author-email: julien.brayere@tracktor.fr
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "tracktolib"
3
- version = "0.66.1"
3
+ version = "0.66.2"
4
4
  authors = [
5
5
  { email = "julien.brayere@tracktor.fr" }
6
6
  ]
@@ -108,7 +108,7 @@ pythonPlatform = "Linux"
108
108
 
109
109
  [tool.commitizen]
110
110
  name = "cz_conventional_commits"
111
- version = "0.66.1"
111
+ version = "0.66.2"
112
112
  tag_format = "$version"
113
113
  version_files = [
114
114
  "pyproject.toml:version"
@@ -6,8 +6,8 @@ from pathlib import Path
6
6
  import http
7
7
  import xml.etree.ElementTree as ET
8
8
  from contextlib import asynccontextmanager
9
- from dataclasses import dataclass
10
- from typing import AsyncIterator, Callable, Literal, Self, TypedDict
9
+ from dataclasses import dataclass, field
10
+ from typing import AsyncIterator, Callable, Literal, Self, TypedDict, Unpack
11
11
 
12
12
  try:
13
13
  import botocore.client
@@ -38,7 +38,10 @@ __all__ = (
38
38
  "s3_file_upload",
39
39
  "S3MultipartUpload",
40
40
  "S3Object",
41
+ "S3ObjectParams",
41
42
  "UploadPart",
43
+ "build_s3_headers",
44
+ "build_s3_presigned_params",
42
45
  )
43
46
 
44
47
  ACL = Literal[
@@ -51,6 +54,112 @@ ACL = Literal[
51
54
  "bucket-owner-full-control",
52
55
  ]
53
56
 
57
+ StorageClass = Literal[
58
+ "STANDARD",
59
+ "REDUCED_REDUNDANCY",
60
+ "STANDARD_IA",
61
+ "ONEZONE_IA",
62
+ "INTELLIGENT_TIERING",
63
+ "GLACIER",
64
+ "DEEP_ARCHIVE",
65
+ "OUTPOSTS",
66
+ "GLACIER_IR",
67
+ "EXPRESS_ONEZONE",
68
+ ]
69
+
70
+ ServerSideEncryption = Literal["AES256", "aws:kms", "aws:kms:dsse"]
71
+
72
+
73
+ class S3ObjectParams(TypedDict, total=False):
74
+ """
75
+ Parameters for S3 object uploads (PutObject, CreateMultipartUpload).
76
+
77
+ See:
78
+ - https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
79
+ - https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
80
+ """
81
+
82
+ acl: ACL | None
83
+ content_type: str | None
84
+ content_disposition: str | None
85
+ content_encoding: str | None
86
+ content_language: str | None
87
+ cache_control: str | None
88
+ storage_class: StorageClass | None
89
+ server_side_encryption: ServerSideEncryption | None
90
+ sse_kms_key_id: str | None
91
+ tagging: str | None # URL-encoded key=value pairs
92
+ metadata: dict[str, str] | None # User-defined metadata (x-amz-meta-*)
93
+
94
+
95
+ def build_s3_headers(params: S3ObjectParams) -> dict[str, str]:
96
+ """
97
+ Build S3 request headers from S3ObjectParams.
98
+
99
+ Returns a dict of HTTP headers to include in the request.
100
+ """
101
+ headers: dict[str, str] = {}
102
+
103
+ if (acl := params.get("acl")) is not None:
104
+ headers["x-amz-acl"] = acl
105
+ if (content_type := params.get("content_type")) is not None:
106
+ headers["Content-Type"] = content_type
107
+ if (content_disposition := params.get("content_disposition")) is not None:
108
+ headers["Content-Disposition"] = content_disposition
109
+ if (content_encoding := params.get("content_encoding")) is not None:
110
+ headers["Content-Encoding"] = content_encoding
111
+ if (content_language := params.get("content_language")) is not None:
112
+ headers["Content-Language"] = content_language
113
+ if (cache_control := params.get("cache_control")) is not None:
114
+ headers["Cache-Control"] = cache_control
115
+ if (storage_class := params.get("storage_class")) is not None:
116
+ headers["x-amz-storage-class"] = storage_class
117
+ if (sse := params.get("server_side_encryption")) is not None:
118
+ headers["x-amz-server-side-encryption"] = sse
119
+ if (sse_kms_key_id := params.get("sse_kms_key_id")) is not None:
120
+ headers["x-amz-server-side-encryption-aws-kms-key-id"] = sse_kms_key_id
121
+ if (tagging := params.get("tagging")) is not None:
122
+ headers["x-amz-tagging"] = tagging
123
+ if (metadata := params.get("metadata")) is not None:
124
+ for key, value in metadata.items():
125
+ headers[f"x-amz-meta-{key}"] = value
126
+
127
+ return headers
128
+
129
+
130
+ def build_s3_presigned_params(bucket: str, key: str, params: S3ObjectParams) -> dict:
131
+ """
132
+ Build parameters dict for botocore generate_presigned_url.
133
+
134
+ Maps S3ObjectParams to the Params dict expected by botocore.
135
+ """
136
+ presigned_params: dict = {"Bucket": bucket, "Key": key}
137
+
138
+ if (acl := params.get("acl")) is not None:
139
+ presigned_params["ACL"] = acl
140
+ if (content_type := params.get("content_type")) is not None:
141
+ presigned_params["ContentType"] = content_type
142
+ if (content_disposition := params.get("content_disposition")) is not None:
143
+ presigned_params["ContentDisposition"] = content_disposition
144
+ if (content_encoding := params.get("content_encoding")) is not None:
145
+ presigned_params["ContentEncoding"] = content_encoding
146
+ if (content_language := params.get("content_language")) is not None:
147
+ presigned_params["ContentLanguage"] = content_language
148
+ if (cache_control := params.get("cache_control")) is not None:
149
+ presigned_params["CacheControl"] = cache_control
150
+ if (storage_class := params.get("storage_class")) is not None:
151
+ presigned_params["StorageClass"] = storage_class
152
+ if (sse := params.get("server_side_encryption")) is not None:
153
+ presigned_params["ServerSideEncryption"] = sse
154
+ if (sse_kms_key_id := params.get("sse_kms_key_id")) is not None:
155
+ presigned_params["SSEKMSKeyId"] = sse_kms_key_id
156
+ if (tagging := params.get("tagging")) is not None:
157
+ presigned_params["Tagging"] = tagging
158
+ if (metadata := params.get("metadata")) is not None:
159
+ presigned_params["Metadata"] = metadata
160
+
161
+ return presigned_params
162
+
54
163
 
55
164
  @dataclass
56
165
  class S3Session:
@@ -82,13 +191,13 @@ class S3Session:
82
191
  secret_key: str
83
192
  region: str
84
193
  s3_config: Config | None = None
85
- _s3_client: botocore.client.BaseClient | None = None
86
- _http_client: niquests.AsyncSession | None = None
194
+ s3_client: botocore.client.BaseClient | None = None
195
+ http_client: niquests.AsyncSession = field(default_factory=niquests.AsyncSession)
87
196
 
88
197
  def __post_init__(self):
89
- if self._s3_client is None:
198
+ if self.s3_client is None:
90
199
  session = botocore.session.Session()
91
- self._s3_client = session.create_client(
200
+ self.s3_client = session.create_client(
92
201
  "s3",
93
202
  endpoint_url=self.endpoint_url,
94
203
  region_name=self.region,
@@ -96,20 +205,12 @@ class S3Session:
96
205
  aws_secret_access_key=self.secret_key,
97
206
  config=self.s3_config,
98
207
  )
99
- if self._http_client is None:
100
- self._http_client = niquests.AsyncSession()
101
208
 
102
209
  @property
103
- def s3_client(self) -> botocore.client.BaseClient:
104
- if self._s3_client is None:
105
- raise ValueError("s3_client is not initialized")
106
- return self._s3_client
107
-
108
- @property
109
- def http_client(self) -> niquests.AsyncSession:
110
- if self._http_client is None:
111
- raise ValueError("http_client is not initialized")
112
- return self._http_client
210
+ def _s3(self) -> botocore.client.BaseClient:
211
+ if self.s3_client is None:
212
+ raise ValueError("s3_client not initialized")
213
+ return self.s3_client
113
214
 
114
215
  async def __aenter__(self) -> Self:
115
216
  await self.http_client.__aenter__()
@@ -117,15 +218,15 @@ class S3Session:
117
218
 
118
219
  async def __aexit__(self, exc_type, exc_val, exc_tb):
119
220
  await self.http_client.__aexit__(exc_type, exc_val, exc_tb)
120
- self.s3_client.close()
221
+ self._s3.close()
121
222
 
122
223
  async def delete_object(self, bucket: str, key: str) -> niquests.Response:
123
224
  """Delete an object from S3."""
124
- return await s3_delete_object(self.s3_client, self.http_client, bucket, key)
225
+ return await s3_delete_object(self._s3, self.http_client, bucket, key)
125
226
 
126
227
  async def delete_objects(self, bucket: str, keys: list[str]) -> list[niquests.Response]:
127
228
  """Delete multiple objects from S3."""
128
- return await s3_delete_objects(self.s3_client, self.http_client, bucket, keys)
229
+ return await s3_delete_objects(self._s3, self.http_client, bucket, keys)
129
230
 
130
231
  def list_files(
131
232
  self,
@@ -139,7 +240,7 @@ class S3Session:
139
240
  ) -> AsyncIterator[S3Object]:
140
241
  """List files in an S3 bucket with a given prefix."""
141
242
  return s3_list_files(
142
- self.s3_client,
243
+ self._s3,
143
244
  self.http_client,
144
245
  bucket,
145
246
  prefix,
@@ -149,19 +250,21 @@ class S3Session:
149
250
  starting_token=starting_token,
150
251
  )
151
252
 
152
- async def put_object(self, bucket: str, key: str, data: bytes, *, acl: ACL | None = "private") -> niquests.Response:
253
+ async def put_object(
254
+ self, bucket: str, key: str, data: bytes, **kwargs: Unpack[S3ObjectParams]
255
+ ) -> niquests.Response:
153
256
  """Upload an object to S3."""
154
- return await s3_put_object(self.s3_client, self.http_client, bucket, key, data, acl=acl)
257
+ return await s3_put_object(self._s3, self.http_client, bucket, key, data, **kwargs)
155
258
 
156
259
  async def upload_file(
157
- self, bucket: str, file: Path, path: str, *, acl: ACL | None = "private"
260
+ self, bucket: str, file: Path, path: str, **kwargs: Unpack[S3ObjectParams]
158
261
  ) -> niquests.Response:
159
262
  """Upload a file to S3."""
160
- return await s3_upload_file(self.s3_client, self.http_client, bucket, file, path, acl=acl)
263
+ return await s3_upload_file(self._s3, self.http_client, bucket, file, path, **kwargs)
161
264
 
162
265
  async def get_object(self, bucket: str, key: str) -> bytes | None:
163
266
  """Download an object from S3."""
164
- return await s3_get_object(self.s3_client, self.http_client, bucket, key)
267
+ return await s3_get_object(self._s3, self.http_client, bucket, key)
165
268
 
166
269
  async def download_file(
167
270
  self,
@@ -171,27 +274,29 @@ class S3Session:
171
274
  chunk_size: int = 1024 * 1024,
172
275
  ) -> AsyncIterator[bytes]:
173
276
  """Download a file from S3 with streaming support."""
174
- async for chunk in s3_download_file(self.s3_client, self.http_client, bucket, key, chunk_size=chunk_size):
277
+ async for chunk in s3_download_file(self._s3, self.http_client, bucket, key, chunk_size=chunk_size):
175
278
  if on_chunk:
176
279
  on_chunk(chunk)
177
280
  yield chunk
178
281
 
179
- def multipart_upload(self, bucket: str, key: str, *, expires_in: int = 3600):
282
+ def multipart_upload(self, bucket: str, key: str, *, expires_in: int = 3600, **kwargs: Unpack[S3ObjectParams]):
180
283
  """Create a multipart upload context manager."""
181
- return s3_multipart_upload(self.s3_client, self.http_client, bucket, key, expires_in=expires_in)
284
+ return s3_multipart_upload(self._s3, self.http_client, bucket, key, expires_in=expires_in, **kwargs)
182
285
 
183
286
  async def file_upload(
184
287
  self,
185
288
  bucket: str,
186
289
  key: str,
187
290
  data: AsyncIterator[bytes],
291
+ *,
188
292
  min_part_size: int = 5 * 1024 * 1024,
189
293
  on_chunk_received: Callable[[bytes], None] | None = None,
190
294
  content_length: int | None = None,
295
+ **kwargs: Unpack[S3ObjectParams],
191
296
  ) -> None:
192
297
  """Upload a file to S3 using streaming (multipart for large files)."""
193
298
  return await s3_file_upload(
194
- self.s3_client,
299
+ self._s3,
195
300
  self.http_client,
196
301
  bucket,
197
302
  key,
@@ -199,6 +304,7 @@ class S3Session:
199
304
  min_part_size=min_part_size,
200
305
  on_chunk_received=on_chunk_received,
201
306
  content_length=content_length,
307
+ **kwargs,
202
308
  )
203
309
 
204
310
 
@@ -320,22 +426,22 @@ async def s3_put_object(
320
426
  bucket: str,
321
427
  key: str,
322
428
  data: bytes,
323
- *,
324
- acl: ACL | None = "private",
429
+ **kwargs: Unpack[S3ObjectParams],
325
430
  ) -> niquests.Response:
326
- """Upload an object to S3 using presigned URL."""
327
- params: dict = {
328
- "Bucket": bucket,
329
- "Key": key,
330
- }
331
- if acl is not None:
332
- params["ACL"] = acl
431
+ """
432
+ Upload an object to S3 using presigned URL.
433
+
434
+ See: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
435
+ """
436
+ obj_params: S3ObjectParams = kwargs
437
+ presigned_params = build_s3_presigned_params(bucket, key, obj_params)
438
+ headers = build_s3_headers(obj_params)
333
439
 
334
440
  url = s3.generate_presigned_url(
335
441
  ClientMethod="put_object",
336
- Params=params,
442
+ Params=presigned_params,
337
443
  )
338
- resp = (await client.put(url, data=data)).raise_for_status()
444
+ resp = (await client.put(url, data=data, headers=headers if headers else None)).raise_for_status()
339
445
  return resp
340
446
 
341
447
 
@@ -345,14 +451,13 @@ async def s3_upload_file(
345
451
  bucket: str,
346
452
  file: Path,
347
453
  path: str,
348
- *,
349
- acl: ACL | None = "private",
454
+ **kwargs: Unpack[S3ObjectParams],
350
455
  ) -> niquests.Response:
351
456
  """
352
457
  Upload a file to S3 using presigned URL.
353
458
  This is a convenience wrapper around s3_put_object that reads the file content.
354
459
  """
355
- return await s3_put_object(s3, client, bucket, path, file.read_bytes(), acl=acl)
460
+ return await s3_put_object(s3, client, bucket, path, file.read_bytes(), **kwargs)
356
461
 
357
462
 
358
463
  async def s3_get_object(
@@ -400,17 +505,26 @@ async def s3_create_multipart_upload(
400
505
  *,
401
506
  expires_in: int = 3600,
402
507
  generate_presigned_url: Callable[..., str] | None = None,
508
+ **kwargs: Unpack[S3ObjectParams],
403
509
  ) -> str:
404
- """Initiate a multipart upload and return the UploadId."""
510
+ """
511
+ Initiate a multipart upload and return the UploadId.
512
+
513
+ See: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
514
+ """
515
+ obj_params: S3ObjectParams = kwargs
516
+ headers = build_s3_headers(obj_params)
517
+
405
518
  if generate_presigned_url is not None:
406
519
  url = generate_presigned_url("create_multipart_upload")
407
520
  else:
521
+ presigned_params = build_s3_presigned_params(bucket, key, obj_params)
408
522
  url = s3.generate_presigned_url(
409
523
  ClientMethod="create_multipart_upload",
410
- Params={"Bucket": bucket, "Key": key},
524
+ Params=presigned_params,
411
525
  ExpiresIn=expires_in,
412
526
  )
413
- resp = (await client.post(url)).raise_for_status()
527
+ resp = (await client.post(url, headers=headers if headers else None)).raise_for_status()
414
528
  if resp.content is None:
415
529
  raise ValueError("Empty response from create_multipart_upload")
416
530
  api_version = s3.meta.service_model.api_version
@@ -430,8 +544,10 @@ async def s3_multipart_upload(
430
544
  key: str,
431
545
  *,
432
546
  expires_in: int = 3600,
547
+ **kwargs: Unpack[S3ObjectParams],
433
548
  ) -> AsyncIterator[S3MultipartUpload]:
434
549
  """Async context manager for S3 multipart upload with automatic cleanup."""
550
+ obj_params: S3ObjectParams = kwargs
435
551
  upload_id: str | None = None
436
552
  _part_number: int = 1
437
553
  _parts: list[UploadPart] = []
@@ -474,14 +590,16 @@ async def s3_multipart_upload(
474
590
  return _part
475
591
 
476
592
  def _generate_presigned_url(method: str, **params):
477
- return s3.generate_presigned_url(
478
- ClientMethod=method, Params={"Bucket": bucket, "Key": key, **params}, ExpiresIn=expires_in
479
- )
593
+ if method == "create_multipart_upload":
594
+ _params = {**build_s3_presigned_params(bucket, key, obj_params), **params}
595
+ else:
596
+ _params = {"Bucket": bucket, "Key": key, **params}
597
+ return s3.generate_presigned_url(ClientMethod=method, Params=_params, ExpiresIn=expires_in)
480
598
 
481
599
  async def fetch_create() -> str:
482
600
  nonlocal upload_id
483
601
  upload_id = await s3_create_multipart_upload(
484
- s3, client, bucket, key, expires_in=expires_in, generate_presigned_url=_generate_presigned_url
602
+ s3, client, bucket, key, expires_in=expires_in, generate_presigned_url=_generate_presigned_url, **kwargs
485
603
  )
486
604
  return upload_id
487
605
 
@@ -508,10 +626,12 @@ async def s3_file_upload(
508
626
  bucket: str,
509
627
  key: str,
510
628
  data: AsyncIterator[bytes],
629
+ *,
511
630
  # 5MB minimum for S3 parts
512
631
  min_part_size: int = 5 * 1024 * 1024,
513
632
  on_chunk_received: Callable[[bytes], None] | None = None,
514
633
  content_length: int | None = None,
634
+ **kwargs: Unpack[S3ObjectParams],
515
635
  ) -> None:
516
636
  """
517
637
  Upload a file to S3 from an async byte stream.
@@ -527,10 +647,10 @@ async def s3_file_upload(
527
647
  _data += chunk
528
648
  if on_chunk_received:
529
649
  on_chunk_received(chunk)
530
- await s3_put_object(s3, client, bucket=bucket, key=key, data=_data, acl=None)
650
+ await s3_put_object(s3, client, bucket=bucket, key=key, data=_data, **kwargs)
531
651
  return
532
652
 
533
- async with s3_multipart_upload(s3, client, bucket=bucket, key=key) as mpart:
653
+ async with s3_multipart_upload(s3, client, bucket=bucket, key=key, **kwargs) as mpart:
534
654
  await mpart.fetch_create()
535
655
  has_uploaded_parts = False
536
656
  async for chunk in get_stream_chunk(data, min_size=min_part_size):
@@ -540,7 +660,7 @@ async def s3_file_upload(
540
660
  if not has_uploaded_parts:
541
661
  # No parts uploaded yet, abort multipart and use single PUT
542
662
  await mpart.fetch_abort()
543
- await s3_put_object(s3, client, bucket=bucket, key=key, data=chunk, acl=None)
663
+ await s3_put_object(s3, client, bucket=bucket, key=key, data=chunk, **kwargs)
544
664
  else:
545
665
  # Parts already uploaded, upload final chunk as last part (S3 allows last part to be smaller)
546
666
  await mpart.upload_part(chunk)
File without changes