sibi-dst 2025.1.6__py3-none-any.whl → 2025.1.7__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
@@ -3,11 +3,12 @@ from __future__ import annotations
  from ._df_helper import DfHelper
  from ._parquet_artifact import ParquetArtifact
  from ._parquet_reader import ParquetReader
- from ._artifact_updater_multi_wrapper import ArtifactUpdaterMultiWrapperThreaded
+ from ._artifact_updater_multi_wrapper import ArtifactUpdaterMultiWrapperThreaded, ArtifactUpdaterMultiWrapperAsync

  __all__ = [
      'DfHelper',
      'ParquetArtifact',
      'ParquetReader',
      'ArtifactUpdaterMultiWrapperThreaded',
+     'ArtifactUpdaterMultiWrapperAsync',
  ]
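
In sibi_dst/df_helper/__init__.py the new asyncio-based wrapper is imported and exported alongside the threaded one. A minimal import sketch (class and module names are taken from this diff; the rest is illustrative):

    # Assumes sibi-dst >= 2025.1.7; 2025.1.6 only exported the threaded wrapper.
    from sibi_dst.df_helper import (
        ArtifactUpdaterMultiWrapperThreaded,  # threaded implementation, unchanged
        ArtifactUpdaterMultiWrapperAsync,     # asyncio implementation, newly exported
    )
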
@@ -1,11 +1,8 @@
- import datetime
  import time
- import random
  from concurrent.futures import ThreadPoolExecutor, as_completed
- from typing import Any, Callable, Dict, List, Optional, Type, ClassVar
-
- from sibi_dst.utils import ManagedResource
+ from typing import Any, Callable, Dict, List, Optional, Type

+ from sibi_dst.utils import ManagedResource

  class ArtifactUpdaterMultiWrapperThreaded(ManagedResource):
      """
@@ -190,242 +187,234 @@ class ArtifactUpdaterMultiWrapperThreaded(ManagedResource):
          return "\n".join(lines)


- # import asyncio
- # import logging
- # import datetime
- # import random
- # from typing import Any, Callable, Dict, List, Optional, Type
- #
- # from sibi_dst.utils import Logger
- #
- #
- # class ArtifactUpdaterMultiWrapperAsync:
- #     """
- #     Simplified wrapper that updates artifacts concurrently using an asyncio.Semaphore.
- #
- #     Features:
- #       - Caps concurrency at max_workers via semaphore
- #       - Optionally prioritises tasks via a priority function or static method on artifact classes
- #       - Tracks per-artifact completion times
- #       - Configurable retry/backoff strategy
- #       - Optional metrics integration
- #       - Thread-safe within a single asyncio loop
- #
- #     Usage:
- #         wrapper = ArtifactUpdaterMultiWrapper(
- #             wrapped_classes={
- #                 'mydata': [DataArtifactA, DataArtifactB],
- #             },
- #             max_workers=4,
- #             retry_attempts=3,
- #             update_timeout_seconds=600,
- #             backoff_base=2,
- #             backoff_max=60,
- #             backoff_jitter=0.1,
- #             priority_fn=None, # or custom
- #             metrics_client=None,
- #             debug=True,
- #             logger=None,
- #             artifact_class_kwargs={
- #                 'fs': my_fs,
- #                 'parquet_storage_path': 's3://bucket/data',
- #                 'logger': my_logger,
- #                 'debug': True,
- #             }
- #         )
- #         await wrapper.update_data('mydata', period='ytd', overwrite=True)
- #     """
- #     def __init__(
- #         self,
- #         wrapped_classes: Dict[str, List[Type]],
- #         *,
- #         max_workers: int = 3,
- #         retry_attempts: int = 3,
- #         update_timeout_seconds: int = 600,
- #         backoff_base: int = 2,
- #         backoff_max: Optional[int] = 60,
- #         backoff_jitter: float = 0.1,
- #         priority_fn: Optional[Callable[[Type], int]] = None,
- #         metrics_client: Any = None,
- #         debug: bool = False,
- #         logger: Optional[logging.Logger] = None,
- #         artifact_class_kwargs: Optional[Dict[str, Any]] = None,
- #     ) -> None:
- #         self.wrapped_classes = wrapped_classes
- #         self.max_workers = max_workers
- #         self.retry_attempts = retry_attempts
- #         self.update_timeout_seconds = update_timeout_seconds
- #         self.backoff_base = backoff_base
- #         self.backoff_max = backoff_max
- #         self.backoff_jitter = backoff_jitter
- #         self.priority_fn = priority_fn
- #         self.metrics_client = metrics_client
- #
- #         self.debug = debug
- #         self.logger = logger or Logger.default_logger(
- #             logger_name=self.__class__.__name__,
- #             log_level=Logger.DEBUG if debug else Logger.INFO
- #         )
- #
- #         # Default artifact init kwargs
- #         today = datetime.datetime.today() + datetime.timedelta(days=1)
- #         default_kwargs = {
- #             'parquet_start_date': today.strftime('%Y-%m-%d'),
- #             'parquet_end_date': today.strftime('%Y-%m-%d'),
- #             'logger': self.logger,
- #             'debug': self.debug,
- #         }
- #         self.artifact_class_kwargs = artifact_class_kwargs or default_kwargs.copy()
- #
- #         # State
- #         self.completion_times: Dict[str, float] = {}
- #         self.failed: List[str] = []
- #         self.original_classes: List[Type] = []
- #
- #     def get_artifact_classes(self, data_type: str) -> List[Type]:
- #         """
- #         Retrieve artifact classes by data type.
- #         """
- #         self.logger.info(f"Fetching artifact classes for '{data_type}'")
- #         if data_type not in self.wrapped_classes:
- #             raise ValueError(f"Unsupported data type: {data_type}")
- #         classes = self.wrapped_classes[data_type]
- #         self.logger.info(f"Found {len(classes)} artifact classes for '{data_type}'")
- #         return classes
- #
- #     def estimate_priority(self, artifact_cls: Type) -> int:
- #         """
- #         Determine task priority for ordering. Lower values run first.
- #         """
- #         name = artifact_cls.__name__
- #         if self.priority_fn:
- #             try:
- #                 pr = self.priority_fn(artifact_cls)
- #                 self.logger.debug(f"priority_fn for {name}: {pr}")
- #                 return pr
- #             except Exception as e:
- #                 self.logger.warning(f"priority_fn error for {name}: {e}")
- #         try:
- #             fs = self.artifact_class_kwargs.get('fs')
- #             path = self.artifact_class_kwargs.get('parquet_storage_path')
- #             pr=1
- #             if hasattr(artifact_cls, 'get_size_estimate'):
- #                 pr = artifact_cls.get_size_estimate(fs, path)
- #             self.logger.debug(f"Estimated priority for {name}: {pr}")
- #             return pr
- #         except Exception:
- #             return 1
- #
- #     async def _bounded_update(self, artifact_cls: Type, sem: asyncio.Semaphore, **update_kwargs) -> None:
- #         """
- #         Wrap update_artifact in a semaphore slot to limit concurrency.
- #         """
- #         async with sem:
- #             name = artifact_cls.__name__
- #             start = asyncio.get_event_loop().time()
- #             self.logger.info(f"Starting update for {name}")
- #             try:
- #                 for attempt in range(1, self.retry_attempts + 1):
- #                     try:
- #                         artifact = await asyncio.to_thread(
- #                             artifact_cls, **self.artifact_class_kwargs
- #                         )
- #                         await asyncio.wait_for(
- #                             asyncio.to_thread(
- #                                 artifact.update_parquet, **update_kwargs
- #                             ),
- #                             timeout=self.update_timeout_seconds
- #                         )
- #                         duration = asyncio.get_event_loop().time() - start
- #                         self.completion_times[name] = duration
- #                         self.logger.info(f"✅ {name} updated in {duration:.2f}s (attempt {attempt})")
- #                         if self.metrics_client:
- #                             self.metrics_client.increment('task_succeeded')
- #                         return
- #                     except asyncio.TimeoutError:
- #                         self.logger.warning(f"Timeout on {name}, attempt {attempt}")
- #                     except Exception as e:
- #                         self.logger.error(f"Error on {name} attempt {attempt}: {e}")
- #
- #                     delay = min(self.backoff_base ** (attempt - 1), self.backoff_max)
- #                     delay *= 1 + random.uniform(0, self.backoff_jitter)
- #                     self.logger.info(f"Sleeping {delay:.1f}s before retrying {name}")
- #                     await asyncio.sleep(delay)
- #
- #             except asyncio.CancelledError:
- #                 self.logger.warning(f"{name} update cancelled")
- #                 raise
- #
- #             # permanent failure
- #             self.logger.error(f"✖️ {name} permanently failed after {self.retry_attempts} attempts")
- #             if self.metrics_client:
- #                 self.metrics_client.increment('task_failed')
- #             self.failed.append(name)
- #
- #     async def update_data(self, data_type: str, **kwargs: Any) -> None:
- #         """
- #         Entry point to update all artifacts of a given type concurrently.
- #         """
- #         self.logger.info(f"Starting update_data for '{data_type}' with kwargs={kwargs}")
- #
- #         # RESET STATE
- #         self.completion_times.clear()
- #         self.failed.clear()
- #         self.original_classes = self.get_artifact_classes(data_type)
- #
- #         # NON-DESTRUCTIVE SORTING
- #         ordered = sorted(self.original_classes, key=self.estimate_priority)
- #
- #         sem = asyncio.Semaphore(self.max_workers)
- #         tasks = [
- #             asyncio.create_task(self._bounded_update(cls, sem, **kwargs))
- #             for cls in ordered
- #         ]
- #
- #         try:
- #             for coro in asyncio.as_completed(tasks):
- #                 await coro
- #         except asyncio.CancelledError:
- #             self.logger.warning("update_data was cancelled—aborting remaining retries")
- #             for t in tasks:
- #                 t.cancel()
- #             raise
- #         finally:
- #             total = len(self.original_classes)
- #             completed = len(self.completion_times)
- #             failed = len(self.failed)
- #             self.logger.info(f"All artifacts processed: total={total}, completed={completed}, failed={failed}")
- #
- #     def get_update_status(self) -> Dict[str, Any]:
- #         """
- #         Returns summary status including completion times.
- #         """
- #         total = len(self.original_classes)
- #         completed = set(self.completion_times.keys())
- #         failed = set(self.failed)
- #         pending = {cls.__name__ for cls in self.original_classes} - completed - failed
- #
- #         return {
- #             'total': total,
- #             'completed': list(completed),
- #             'failed': list(failed),
- #             'pending': list(pending),
- #             'completion_times': self.completion_times,
- #         }
- #
- #     @staticmethod
- #     def format_status_table(status: Dict[str, Any]) -> str:
- #         """
- #         Formats the status dict into a readable table.
- #         """
- #         lines = [
- #             f"Total: {status['total']}",
- #             f"Completed: {len(status['completed'])} {status['completed']}",
- #             f"Failed: {len(status['failed'])} {status['failed']}",
- #             f"Pending: {len(status['pending'])} {status['pending']}",
- #             "",
- #             "Per-artifact timings:"
- #         ]
- #         for name, dur in status['completion_times'].items():
- #             lines.append(f" {name}: {dur:.2f}s")
- #         return "\n".join(lines)
+ import asyncio
+ import datetime
+ import random
+ from typing import Any, Callable, Dict, List, Optional, Type
+
+ class ArtifactUpdaterMultiWrapperAsync(ManagedResource):
+     """
+     Simplified wrapper that updates artifacts concurrently using an asyncio.Semaphore.
+
+     Features:
+       - Caps concurrency at max_workers via semaphore
+       - Optionally prioritises tasks via a priority function or static method on artifact classes
+       - Tracks per-artifact completion times
+       - Configurable retry/backoff strategy
+       - Optional metrics integration
+       - Thread-safe within a single asyncio loop
+
+     Usage:
+         wrapper = ArtifactUpdaterMultiWrapper(
+             wrapped_classes={
+                 'mydata': [DataArtifactA, DataArtifactB],
+             },
+             max_workers=4,
+             retry_attempts=3,
+             update_timeout_seconds=600,
+             backoff_base=2,
+             backoff_max=60,
+             backoff_jitter=0.1,
+             priority_fn=None, # or custom
+             metrics_client=None,
+             debug=True,
+             logger=None,
+             artifact_class_kwargs={
+                 'fs': my_fs,
+                 'parquet_storage_path': 's3://bucket/data',
+                 'logger': my_logger,
+                 'debug': True,
+             }
+         )
+         await wrapper.update_data('mydata', period='ytd', overwrite=True)
+     """
+     def __init__(
+         self,
+         wrapped_classes: Dict[str, List[Type]],
+         *,
+         max_workers: int = 3,
+         retry_attempts: int = 3,
+         update_timeout_seconds: int = 600,
+         backoff_base: int = 2,
+         backoff_max: Optional[int] = 60,
+         backoff_jitter: float = 0.1,
+         priority_fn: Optional[Callable[[Type], int]] = None,
+         metrics_client: Any = None,
+         artifact_class_kwargs: Optional[Dict[str, Any]] = None,
+         **kwargs: Dict[str, Any]
+     ) -> None:
+         super().__init__(**kwargs)
+         self.wrapped_classes = wrapped_classes
+         self.max_workers = max_workers
+         self.retry_attempts = retry_attempts
+         self.update_timeout_seconds = update_timeout_seconds
+         self.backoff_base = backoff_base
+         self.backoff_max = backoff_max
+         self.backoff_jitter = backoff_jitter
+         self.priority_fn = priority_fn
+         self.metrics_client = metrics_client
+
+         # Default artifact init kwargs
+         today = datetime.datetime.today() + datetime.timedelta(days=1)
+         default_kwargs = {
+             'parquet_start_date': today.strftime('%Y-%m-%d'),
+             'parquet_end_date': today.strftime('%Y-%m-%d'),
+             'logger': self.logger,
+             'debug': self.debug,
+             'fs': self.fs,
+             'verbose': self.verbose,
+         }
+         self.artifact_class_kwargs = artifact_class_kwargs or default_kwargs.copy()
+
+         # State
+         self.completion_times: Dict[str, float] = {}
+         self.failed: List[str] = []
+         self.original_classes: List[Type] = []
+
+     def get_artifact_classes(self, data_type: str) -> List[Type]:
+         """
+         Retrieve artifact classes by data type.
+         """
+         self.logger.info(f"Fetching artifact classes for '{data_type}'")
+         if data_type not in self.wrapped_classes:
+             raise ValueError(f"Unsupported data type: {data_type}")
+         classes = self.wrapped_classes[data_type]
+         self.logger.info(f"Found {len(classes)} artifact classes for '{data_type}'")
+         return classes
+
+     def estimate_priority(self, artifact_cls: Type) -> int:
+         """
+         Determine task priority for ordering. Lower values run first.
+         """
+         name = artifact_cls.__name__
+         if self.priority_fn:
+             try:
+                 pr = self.priority_fn(artifact_cls)
+                 self.logger.debug(f"priority_fn for {name}: {pr}")
+                 return pr
+             except Exception as e:
+                 self.logger.warning(f"priority_fn error for {name}: {e}")
+         try:
+             fs = self.artifact_class_kwargs.get('fs')
+             path = self.artifact_class_kwargs.get('parquet_storage_path')
+             pr=1
+             if hasattr(artifact_cls, 'get_size_estimate'):
+                 pr = artifact_cls.get_size_estimate(fs, path)
+             self.logger.debug(f"Estimated priority for {name}: {pr}")
+             return pr
+         except Exception:
+             return 1
+
+     async def _bounded_update(self, artifact_cls: Type, sem: asyncio.Semaphore, **update_kwargs) -> None:
+         """
+         Wrap update_artifact in a semaphore slot to limit concurrency.
+         """
+         async with sem:
+             name = artifact_cls.__name__
+             start = asyncio.get_event_loop().time()
+             self.logger.info(f"Starting update for {name}")
+             try:
+                 for attempt in range(1, self.retry_attempts + 1):
+                     try:
+                         artifact = await asyncio.to_thread(
+                             artifact_cls, **self.artifact_class_kwargs
+                         )
+                         await asyncio.wait_for(
+                             asyncio.to_thread(
+                                 artifact.update_parquet, **update_kwargs
+                             ),
+                             timeout=self.update_timeout_seconds
+                         )
+                         duration = asyncio.get_event_loop().time() - start
+                         self.completion_times[name] = duration
+                         self.logger.info(f"✅ {name} updated in {duration:.2f}s (attempt {attempt})")
+                         if self.metrics_client:
+                             self.metrics_client.increment('task_succeeded')
+                         return
+                     except asyncio.TimeoutError:
+                         self.logger.warning(f"Timeout on {name}, attempt {attempt}")
+                     except Exception as e:
+                         self.logger.error(f"Error on {name} attempt {attempt}: {e}")
+
+                     delay = min(self.backoff_base ** (attempt - 1), self.backoff_max)
+                     delay *= 1 + random.uniform(0, self.backoff_jitter)
+                     self.logger.info(f"Sleeping {delay:.1f}s before retrying {name}")
+                     await asyncio.sleep(delay)
+
+             except asyncio.CancelledError:
+                 self.logger.warning(f"{name} update cancelled")
+                 raise
+
+             # permanent failure
+             self.logger.error(f"✖️ {name} permanently failed after {self.retry_attempts} attempts")
+             if self.metrics_client:
+                 self.metrics_client.increment('task_failed')
+             self.failed.append(name)
+
+     async def update_data(self, data_type: str, **kwargs: Any) -> None:
+         """
+         Entry point to update all artifacts of a given type concurrently.
+         """
+         self.logger.info(f"Starting update_data for '{data_type}' with kwargs={kwargs}")
+
+         # RESET STATE
+         self.completion_times.clear()
+         self.failed.clear()
+         self.original_classes = self.get_artifact_classes(data_type)
+
+         # NON-DESTRUCTIVE SORTING
+         ordered = sorted(self.original_classes, key=self.estimate_priority)
+
+         sem = asyncio.Semaphore(self.max_workers)
+         tasks = [
+             asyncio.create_task(self._bounded_update(cls, sem, **kwargs))
+             for cls in ordered
+         ]
+
+         try:
+             for coro in asyncio.as_completed(tasks):
+                 await coro
+         except asyncio.CancelledError:
+             self.logger.warning("update_data was cancelled—aborting remaining retries")
+             for t in tasks:
+                 t.cancel()
+             raise
+         finally:
+             total = len(self.original_classes)
+             completed = len(self.completion_times)
+             failed = len(self.failed)
+             self.logger.info(f"All artifacts processed: total={total}, completed={completed}, failed={failed}")

+     def get_update_status(self) -> Dict[str, Any]:
+         """
+         Returns summary status including completion times.
+         """
+         total = len(self.original_classes)
+         completed = set(self.completion_times.keys())
+         failed = set(self.failed)
+         pending = {cls.__name__ for cls in self.original_classes} - completed - failed
+
+         return {
+             'total': total,
+             'completed': list(completed),
+             'failed': list(failed),
+             'pending': list(pending),
+             'completion_times': self.completion_times,
+         }
+
+     @staticmethod
+     def format_status_table(status: Dict[str, Any]) -> str:
+         """
+         Formats the status dict into a readable table.
+         """
+         lines = [
+             f"Total: {status['total']}",
+             f"Completed: {len(status['completed'])} {status['completed']}",
+             f"Failed: {len(status['failed'])} {status['failed']}",
+             f"Pending: {len(status['pending'])} {status['pending']}",
+             "",
+             "Per-artifact timings:"
+         ]
+         for name, dur in status['completion_times'].items():
+             lines.append(f" {name}: {dur:.2f}s")
+         return "\n".join(lines)
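
This hunk in _artifact_updater_multi_wrapper.py removes the commented-out asyncio prototype and adds a live ArtifactUpdaterMultiWrapperAsync that subclasses ManagedResource: the explicit debug/logger parameters are gone, logger, debug, fs and verbose are picked up from the base class via **kwargs, and the default artifact_class_kwargs now forward 'fs' and 'verbose' as well. A minimal usage sketch, assuming ManagedResource accepts fs and debug keyword arguments and that artifact classes expose update_parquet; the stub artifact below is a placeholder, not part of sibi-dst:

    import asyncio
    import fsspec

    from sibi_dst.df_helper import ArtifactUpdaterMultiWrapperAsync

    class DataArtifactA:                 # hypothetical stand-in for a real artifact class
        def __init__(self, **kwargs): ...
        def update_parquet(self, **kwargs): ...

    fs = fsspec.filesystem("file")       # any fsspec filesystem

    wrapper = ArtifactUpdaterMultiWrapperAsync(
        wrapped_classes={"mydata": [DataArtifactA]},
        max_workers=2,
        retry_attempts=2,
        update_timeout_seconds=60,
        fs=fs,                           # consumed by ManagedResource via **kwargs (assumed)
        debug=True,
    )

    async def main() -> None:
        await wrapper.update_data("mydata", overwrite=True)   # kwargs reach update_parquet
        print(wrapper.format_status_table(wrapper.get_update_status()))

    asyncio.run(main())

Concurrency is still capped by the semaphore, and each attempt runs the blocking update_parquet call in a worker thread via asyncio.to_thread, so the event loop stays responsive while retries back off exponentially with jitter.
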
@@ -1,4 +1,3 @@
- import logging
  from typing import Optional, ClassVar, Dict

  import dask.dataframe as dd
@@ -47,15 +46,13 @@ class ParquetReader(DfHelper):
          'backend': 'parquet'
      }

-     def __init__(self, filesystem_type="file", filesystem_options=None, **kwargs):
+     def __init__(self, **kwargs):
          self.config = {
              **self.DEFAULT_CONFIG,
              **kwargs,
          }
-         self.df: Optional[dd.DataFrame] = None
-         #self.debug = self.config.setdefault('debug', False)
-         #self.logger = self.config.setdefault('logger', Logger.default_logger(logger_name=self.__class__.__name__))
-         #self.logger.set_level(logging.DEBUG if self.debug else logging.INFO)
+         super().__init__(**self.config)
+
          self.parquet_storage_path = self.config.setdefault('parquet_storage_path', None)
          if self.parquet_storage_path is None:
              raise ValueError('parquet_storage_path must be set')
@@ -67,19 +64,9 @@ class ParquetReader(DfHelper):
          if self.parquet_end_date is None:
              raise ValueError('parquet_end_date must be set')

-         # Filesystem setup
-         #self.filesystem_type = filesystem_type
-         #self.filesystem_options = filesystem_options or {}
-         #self.fs = self.config.setdefault('fs', None)
-         #if self.fs is None:
-         #    self.fs = fsspec.filesystem(self.filesystem_type, **self.filesystem_options)
-         #self.config.setdefault('fs', self.fs)
-
          if not self.directory_exists():
              raise ValueError(f"{self.parquet_storage_path} does not exist")

-         super().__init__(**self.config)
-
      def load(self, **kwargs):
          self.df = super().load(**kwargs)
          return self.df
@@ -89,5 +76,4 @@ class ParquetReader(DfHelper):
              info = self.fs.info(self.parquet_storage_path)
              return info['type'] == 'directory'
          except FileNotFoundError:
-             return False
-
+             return False
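
In _parquet_reader.py the constructor loses the filesystem_type/filesystem_options parameters and the commented-out fsspec wiring: configuration, including the filesystem, now flows through **kwargs into DfHelper via super().__init__(**self.config), which runs before the storage path and dates are validated. A minimal construction sketch, assuming DfHelper accepts an fsspec filesystem under the fs key (parquet_start_date is assumed to be required symmetrically with the parquet_end_date check shown above; the path is illustrative and must already exist):

    import fsspec

    from sibi_dst.df_helper import ParquetReader

    fs = fsspec.filesystem("file")

    reader = ParquetReader(
        parquet_storage_path="/data/warehouse/orders",  # checked via fs.info; a missing path raises ValueError
        parquet_start_date="2025-01-01",
        parquet_end_date="2025-01-31",
        fs=fs,                                          # replaces the old filesystem_type/filesystem_options pair
    )
    df = reader.load()   # delegates to DfHelper.load and stores the result on reader.df
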
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sibi-dst
- Version: 2025.1.6
+ Version: 2025.1.7
  Summary: Data Science Toolkit
  Author: Luis Valverde
  Author-email: lvalverdeb@gmail.com
@@ -1,9 +1,9 @@
  sibi_dst/__init__.py,sha256=j8lZpGCJlxlLgEgeIMxZnWdqJ0g3MCs7-gsnbvPn_KY,285
- sibi_dst/df_helper/__init__.py,sha256=VJE1qvKO-7QsFADZxSY5s4LVoWnPKfz0rP3nYO2ljhA,358
- sibi_dst/df_helper/_artifact_updater_multi_wrapper.py,sha256=xPVc_Ub-ALjjQ8mjgtvWM_lH2_bAY6rk_wShIoaJuT8,18134
+ sibi_dst/df_helper/__init__.py,sha256=Jur_MO8RGPkVw0CS3XH5YIWv-d922DC_FwRDTvHHV6Y,432
+ sibi_dst/df_helper/_artifact_updater_multi_wrapper.py,sha256=10EkCYEfoWwTQbS-ahYWo6TvbtNXM8p0UqqDu0gTuyI,17426
  sibi_dst/df_helper/_df_helper.py,sha256=iBoWz2iVgLzQ3hA1EwllL62dkraKamRx2sXseu30FVI,11914
  sibi_dst/df_helper/_parquet_artifact.py,sha256=dCvUA2bytv0wY0pFI8lxbcLwXlgGpHndS36iKfEmjLw,14310
- sibi_dst/df_helper/_parquet_reader.py,sha256=zR6CK32725gha349eO9-2EisAS-hQpyMJApw_O2Syrc,3825
+ sibi_dst/df_helper/_parquet_reader.py,sha256=m98C0TZRroOXvVc2LpEuElrJnquGlR81E1gjI7v1hi4,3102
  sibi_dst/df_helper/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sibi_dst/df_helper/backends/http/__init__.py,sha256=d1pfgYxbiYg7E0Iw8RbJ7xfqIfJShqqTBQQGU_S6OOo,105
  sibi_dst/df_helper/backends/http/_http_config.py,sha256=eGPFdqZ5M3Tscqx2P93B6XoBEEzlmdt7yNg7PXUQnNQ,4726
@@ -71,6 +71,6 @@ sibi_dst/v2/df_helper/core/_params_config.py,sha256=DYx2drDz3uF-lSPzizPkchhy-kxR
  sibi_dst/v2/df_helper/core/_query_config.py,sha256=Y8LVSyaKuVkrPluRDkQoOwuXHQxner1pFWG3HPfnDHM,441
  sibi_dst/v2/utils/__init__.py,sha256=6H4cvhqTiFufnFPETBF0f8beVVMpfJfvUs6Ne0TQZNY,58
  sibi_dst/v2/utils/log_utils.py,sha256=rfk5VsLAt-FKpv6aPTC1FToIPiyrnHAFFBAkHme24po,4123
- sibi_dst-2025.1.6.dist-info/METADATA,sha256=foBHcYsPg3CS4ClxzFKi2ZP9CLjLmhE36lDE2dOv-MU,2610
- sibi_dst-2025.1.6.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- sibi_dst-2025.1.6.dist-info/RECORD,,
+ sibi_dst-2025.1.7.dist-info/METADATA,sha256=AaJunhF_PdvxT9KpA0mVzQhoY1sAZoe5HTA9ClabYPs,2610
+ sibi_dst-2025.1.7.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+ sibi_dst-2025.1.7.dist-info/RECORD,,