sibi-dst 2025.1.10__py3-none-any.whl → 2025.1.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sibi_dst/df_helper/_df_helper.py +3 -0
- sibi_dst/df_helper/backends/sqlalchemy/_db_connection.py +2 -4
- sibi_dst/utils/date_utils.py +133 -134
- sibi_dst/v2/df_helper/backends/sqlmodel/_io_dask.py +4 -3
- {sibi_dst-2025.1.10.dist-info → sibi_dst-2025.1.11.dist-info}/METADATA +1 -1
- {sibi_dst-2025.1.10.dist-info → sibi_dst-2025.1.11.dist-info}/RECORD +7 -7
- {sibi_dst-2025.1.10.dist-info → sibi_dst-2025.1.11.dist-info}/WHEEL +0 -0
sibi_dst/df_helper/_df_helper.py
CHANGED
@@ -68,6 +68,9 @@ class ParquetBackend(BaseBackend):
             df = self.helper.backend_parquet.load_files()
             if options and df is not None:
                 df = FilterHandler('dask', logger=self.logger, debug=False).apply_filters(df, filters=options)
+
+            df = df.persist()
+
             self.total_records = len(df) or -1  # If df is empty, set total_records to -1
             return self.total_records, df
         except Exception as e:
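Note on the change above: the added df.persist() materializes the filtered Dask graph once, so the len(df) on the next line does not re-run the whole filter pipeline, and neither does any later compute(). A minimal illustrative sketch of the pattern, not code from the package:

import dask.dataframe as dd
import pandas as pd

# Lazy pipeline: nothing executes until a count or compute() forces it.
df = dd.from_pandas(pd.DataFrame({"x": range(1000)}), npartitions=4)
df = df[df["x"] % 2 == 0]

df = df.persist()               # run the graph once; keep partitions in memory
total_records = len(df) or -1   # counts the persisted partitions cheaply
result = df.compute()           # reuses persisted data instead of recomputing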
sibi_dst/df_helper/backends/sqlalchemy/_db_connection.py
CHANGED
@@ -15,7 +15,7 @@ from sqlalchemy.engine import url as sqlalchemy_url
 from sqlalchemy.engine import Engine
 from sqlalchemy.exc import OperationalError, SQLAlchemyError
 from sqlalchemy.orm import sessionmaker, Session
-from sqlalchemy.pool import QueuePool, NullPool, StaticPool
+from sqlalchemy.pool import QueuePool, NullPool, StaticPool, Pool
 
 # Assuming these are your project's internal modules
 from sibi_dst.utils import Logger
@@ -54,7 +54,7 @@ class SqlAlchemyConnectionConfig(BaseModel):
     pool_timeout: int = int(os.environ.get("DB_POOL_TIMEOUT", 30))
     pool_recycle: int = int(os.environ.get("DB_POOL_RECYCLE", 1800))
     pool_pre_ping: bool = True
-    poolclass: Type[
+    poolclass: Type[Pool] = QueuePool
 
     # --- Internal & Runtime State ---
     model: Optional[Type[Any]] = None
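The field above is now annotated as Type[Pool] with a QueuePool default, so any SQLAlchemy pool class validates. A minimal Pydantic sketch of that behavior (PoolConfig is a hypothetical stand-in for the much larger SqlAlchemyConnectionConfig):

from typing import Type

from pydantic import BaseModel
from sqlalchemy.pool import NullPool, Pool, QueuePool

class PoolConfig(BaseModel):
    poolclass: Type[Pool] = QueuePool  # Pydantic checks issubclass(value, Pool)

PoolConfig()                     # defaults to QueuePool
PoolConfig(poolclass=NullPool)   # e.g. disable pooling for short-lived scripts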
@@ -195,7 +195,6 @@ class SqlAlchemyConnectionConfig(BaseModel):
         wrapper = self._engine_registry.get(self._engine_key_instance)
         if wrapper:
             wrapper['active_connections'] += 1
-            # self.logger.debug(f"Connection checked out. Active: {self.active_connections}")
 
     def _on_checkin(self, *args) -> None:
         """Event listener for when a connection is returned to the pool."""
@@ -203,7 +202,6 @@ class SqlAlchemyConnectionConfig(BaseModel):
         wrapper = self._engine_registry.get(self._engine_key_instance)
         if wrapper:
             wrapper['active_connections'] = max(0, wrapper['active_connections'] - 1)
-            # self.logger.debug(f"Connection checked in. Active: {self.active_connections}")
 
     @property
     def active_connections(self) -> int:
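For context, _on_checkout and _on_checkin are pool event listeners that maintain the active_connections counter; the change only drops their per-event debug logging. A self-contained sketch of the same counting pattern (the sqlite engine and stats dict are illustrative, not the package's code):

from sqlalchemy import create_engine, event

engine = create_engine("sqlite://")
stats = {"active_connections": 0}

@event.listens_for(engine, "checkout")
def on_checkout(dbapi_conn, connection_record, connection_proxy):
    # Fires when a connection is taken from the pool.
    stats["active_connections"] += 1

@event.listens_for(engine, "checkin")
def on_checkin(dbapi_conn, connection_record):
    # Fires when a connection is returned; clamp at zero like the package does.
    stats["active_connections"] = max(0, stats["active_connections"] - 1)

with engine.connect():
    assert stats["active_connections"] == 1
assert stats["active_connections"] == 0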
sibi_dst/utils/date_utils.py
CHANGED
@@ -4,7 +4,7 @@ from typing import Union, Tuple, Callable, Dict, Optional
 import fsspec
 import numpy as np
 import pandas as pd
-
+import dask.dataframe as dd
 from .log_utils import Logger
 
 
@@ -305,154 +305,153 @@ class FileAgeChecker:
             raise ValueError(f"Unsupported modification time format for {file_path}") from e
 
 
+# --- Vectorized Helper Functions ---
+# These replace the slow, row-by-row .apply() logic. They operate
+# on entire DataFrame partitions for maximum efficiency.
+
+def _vectorized_busday_count(
+        partition: pd.DataFrame,
+        begin_col: str,
+        end_col: str,
+        holidays: list
+) -> pd.Series:
+    """Vectorized function to count business days on a DataFrame partition."""
+    if partition.empty:
+        return pd.Series([], dtype=float)
+
+    # Convert entire columns to datetime at once, coercing errors to NaT
+    start_dates = pd.to_datetime(partition[begin_col], errors='coerce').dt.date
+    end_dates = pd.to_datetime(partition[end_col], errors='coerce').dt.date
+
+    # Create a result series filled with NaN to handle rows with invalid dates
+    result = pd.Series(np.nan, index=partition.index, dtype=float)
+
+    # Create a boolean mask for valid, non-NaT date pairs
+    valid_mask = pd.notna(start_dates) & pd.notna(end_dates)
+
+    # Perform the vectorized calculation only on the valid subset of dates
+    result.loc[valid_mask] = np.busday_count(
+        start_dates[valid_mask],
+        end_dates[valid_mask],
+        holidays=holidays
+    )
+    return result
+
+
+def _vectorized_sla_end_date(
+        partition: pd.DataFrame,
+        start_col: str,
+        n_days_col: str,
+        holidays: list
+) -> pd.Series:
+    """Vectorized function to calculate the SLA end date on a DataFrame partition."""
+    if partition.empty:
+        return pd.Series([], dtype='datetime64[ns]')
+
+    start_dates = pd.to_datetime(partition[start_col], errors='coerce').dt.date
+    sla_days = partition[n_days_col]
+
+    # Create a result series filled with NaT for rows with invalid start dates
+    result = pd.Series(pd.NaT, index=partition.index, dtype='datetime64[ns]')
+
+    # Create a boolean mask for valid start dates and sla_days
+    valid_mask = pd.notna(start_dates) & pd.notna(sla_days)
+
+    # Perform the vectorized calculation only on the valid subset
+    result.loc[valid_mask] = np.busday_offset(
+        start_dates[valid_mask],
+        sla_days[valid_mask].astype(int),  # Ensure days are integers
+        roll='forward',
+        holidays=holidays
+    )
+    return result
+
+
+# --- Refactored BusinessDays Class ---
+
 class BusinessDays:
     """
-
-
-    business days, modifying dates by adding business days, and applying these
-    operations to Dask DataFrames.
-
-    :ivar logger: Logger instance for logging error, warning, and debug messages.
-    :type logger: logging.Logger
-    :ivar HOLIDAY_LIST: Dictionary mapping years to lists of holiday dates.
-    :type HOLIDAY_LIST: dict
-    :ivar bd_cal: Numpy busdaycalendar object containing holidays and week mask.
-    :type bd_cal: numpy.busdaycalendar
-    :ivar holidays: Array of holiday dates used by the business day calendar.
-    :type holidays: numpy.ndarray
-    :ivar week_mask: Boolean array indicating working days within a week.
-    :type week_mask: numpy.ndarray
+    Business days calculations with a custom holiday list.
+    Supports scalar and efficient, vectorized Dask DataFrame operations.
     """
 
-    def __init__(self, holiday_list, logger):
-        """
-        Initialize a BusinessDays object with a given holiday list.
-        """
+    def __init__(self, holiday_list: dict[str, list[str]], logger) -> None:
         self.logger = logger
         self.HOLIDAY_LIST = holiday_list
-        bd_holidays = [day for year in self.HOLIDAY_LIST for day in self.HOLIDAY_LIST[year]]
-        self.bd_cal = np.busdaycalendar(holidays=bd_holidays, weekmask="1111100")
-        self.holidays = self.bd_cal.holidays
-        self.week_mask = self.bd_cal.weekmask
 
-
-
-
-        """
-        try:
-            begin_date = pd.to_datetime(begin_date)
-            end_date = pd.to_datetime(end_date)
-        except Exception as e:
-            raise ValueError(f"Invalid date format: {e}")
-
-        years = [str(year) for year in range(begin_date.year, end_date.year + 1)]
-        if not all(year in self.HOLIDAY_LIST for year in years):
-            raise ValueError("Not all years in date range are in the holiday list")
-
-        return np.busday_count(
-            begin_date.strftime("%Y-%m-%d"),
-            end_date.strftime("%Y-%m-%d"),
-            busdaycal=self.bd_cal,
-        )
+        # Flatten and store as tuple for determinism
+        bd_holidays = [day for year in self.HOLIDAY_LIST for day in self.HOLIDAY_LIST[year]]
+        self.holidays = tuple(bd_holidays)
 
-    def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # Apply the function using map_partitions
-        df[result_col] = df.map_partitions(
-            apply_business_days,
-            holidays,
-            weekmask,
-            meta=(result_col, "int64"),
+    def get_business_days_count(
+            self,
+            begin_date: str | datetime.date | pd.Timestamp,
+            end_date: str | datetime.date | pd.Timestamp,
+    ) -> int:
+        """Scalar method to count business days between two dates."""
+        begin = pd.to_datetime(begin_date)
+        end = pd.to_datetime(end_date)
+        return int(np.busday_count(begin.date(), end.date(), holidays=list(self.holidays)))
+
+    def calc_business_days_from_df(
+            self,
+            df: dd.DataFrame,
+            begin_date_col: str,
+            end_date_col: str,
+            result_col: str = "business_days",
+    ) -> dd.DataFrame:
+        """Calculates business days between two columns in a Dask DataFrame."""
+        missing = {begin_date_col, end_date_col} - set(df.columns)
+        if missing:
+            self.logger.error(f"Missing columns: {missing}")
+            raise ValueError("Required columns are missing from DataFrame")
+
+        return df.assign(
+            **{result_col: df.map_partitions(
+                _vectorized_busday_count,
+                begin_col=begin_date_col,
+                end_col=end_date_col,
+                holidays=list(self.holidays),
+                meta=(result_col, 'f8')  # f8 is float64
+            )}
         )
 
-
-
-
-
-
-        """
-
-            start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
-        except ValueError:
-            raise ValueError("Date should be a string in the format YYYY-MM-DD")
-
-        if str(start_date.year) not in self.HOLIDAY_LIST:
-            self.logger.warning(f"Year {start_date.year} is not in the holiday list")
-
+    def add_business_days(
+            self,
+            start_date: str | datetime.date | pd.Timestamp,
+            n_days: int,
+    ) -> np.datetime64:
+        """Scalar method to add N business days to a start date."""
+        start = pd.to_datetime(start_date)
         return np.busday_offset(
-
+            start.date(),
             n_days,
-            roll=
-
+            roll='forward',
+            holidays=list(self.holidays),
         )
 
-    def calc_sla_end_date(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        )
-
-        # Define a wrapper for partition-wise operation
-        def apply_sla_end_date(partition, holidays, weekmask):
-            return partition.apply(
-                calculate_sla_end_date, axis=1, holidays=holidays, weekmask=weekmask
-            )
-
-        # Apply the function using map_partitions
-        df[result_col] = df.map_partitions(
-            apply_sla_end_date,
-            holidays,
-            weekmask,
-            meta=(result_col, "object"),
+    def calc_sla_end_date(
+            self,
+            df: dd.DataFrame,
+            start_date_col: str,
+            n_days_col: str,
+            result_col: str = "sla_end_date",
+    ) -> dd.DataFrame:
+        """Calculates an SLA end date column for a Dask DataFrame."""
+        missing = {start_date_col, n_days_col} - set(df.columns)
+        if missing:
+            self.logger.error(f"Missing columns: {missing}")
+            raise ValueError("Required columns are missing from DataFrame")
+
+        return df.assign(
+            **{result_col: df.map_partitions(
+                _vectorized_sla_end_date,
+                start_col=start_date_col,
+                n_days_col=n_days_col,
+                holidays=list(self.holidays),
+                meta=(result_col, 'datetime64[ns]')
+            )}
         )
-
-        return df
 # Class enhancements
 # DateUtils.register_period('next_week', lambda: (datetime.date.today() + datetime.timedelta(days=7),
 #                                                 datetime.date.today() + datetime.timedelta(days=13)))
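Usage sketch for the refactored class above: the scalar helpers answer one-off questions, while the Dask methods push the vectorized helpers through map_partitions with an explicit meta. The holiday data and logger below are invented placeholders:

import logging

import dask.dataframe as dd
import pandas as pd

from sibi_dst.utils.date_utils import BusinessDays  # module shown in this diff

bd = BusinessDays(
    holiday_list={"2025": ["2025-01-01", "2025-12-25"]},  # sample holidays
    logger=logging.getLogger("demo"),
)

# Scalar paths
print(bd.get_business_days_count("2025-01-02", "2025-01-08"))  # e.g. 4
print(bd.add_business_days("2025-01-03", 3))

# Vectorized Dask path: one partition-wide computation instead of row-wise apply
ddf = dd.from_pandas(pd.DataFrame({
    "opened":   ["2025-01-02", "2025-01-10"],
    "closed":   ["2025-01-08", "2025-01-20"],
    "sla_days": [5, 10],
}), npartitions=1)
ddf = bd.calc_business_days_from_df(ddf, "opened", "closed")
ddf = bd.calc_sla_end_date(ddf, "opened", "sla_days")
print(ddf.compute())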
sibi_dst/v2/df_helper/backends/sqlmodel/_io_dask.py
CHANGED
@@ -1,7 +1,8 @@
 import itertools
 import dask.dataframe as dd
 import pandas as pd
-
+
+#from sqlmodel import create_engine, Session, select
 from sibi_dst.v2.df_helper.core import FilterHandler
 from sibi_dst.v2.utils import Logger
 
@@ -116,7 +117,7 @@ class SQLModelDask:
             return dask_df
 
         except Exception as e:
-            self.logger.error(f"Error executing query: {str(e)}")
-            self.logger.error(self.query)
+            self.logger.error(f"_io_dask:Error executing query: {str(e)}")
+            self.logger.error(f"_io_dask:{self.query}")
             # In case of error, return an empty Dask DataFrame with the expected columns.
             return dd.from_pandas(pd.DataFrame(columns=ordered_columns), npartitions=1)
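The rewritten error path above tags log lines with an _io_dask: prefix and still returns an empty, correctly shaped frame, so downstream code sees the expected columns. A small sketch of that fallback shape (ordered_columns is a placeholder):

import dask.dataframe as dd
import pandas as pd

ordered_columns = ["id", "name"]  # placeholder for the query's column order
# Empty pandas frame with the right columns -> one-partition Dask frame that
# downstream filters and joins can consume without KeyErrors.
empty = dd.from_pandas(pd.DataFrame(columns=ordered_columns), npartitions=1)
print(empty.columns.tolist())  # ['id', 'name']
print(len(empty))              # 0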
{sibi_dst-2025.1.10.dist-info → sibi_dst-2025.1.11.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 sibi_dst/__init__.py,sha256=j8lZpGCJlxlLgEgeIMxZnWdqJ0g3MCs7-gsnbvPn_KY,285
 sibi_dst/df_helper/__init__.py,sha256=Jur_MO8RGPkVw0CS3XH5YIWv-d922DC_FwRDTvHHV6Y,432
 sibi_dst/df_helper/_artifact_updater_multi_wrapper.py,sha256=pSSw3N_ZNZCZHAiChbsF_ECyCmz0L2xCgvt9srHtPOM,17575
-sibi_dst/df_helper/_df_helper.py,sha256=
+sibi_dst/df_helper/_df_helper.py,sha256=PNoN0nlzRwo_4JiaVyzmOM--LRrsJ0jB9pZqDi_kkRA,12917
 sibi_dst/df_helper/_parquet_artifact.py,sha256=dCvUA2bytv0wY0pFI8lxbcLwXlgGpHndS36iKfEmjLw,14310
 sibi_dst/df_helper/_parquet_reader.py,sha256=m98C0TZRroOXvVc2LpEuElrJnquGlR81E1gjI7v1hi4,3102
 sibi_dst/df_helper/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -11,7 +11,7 @@ sibi_dst/df_helper/backends/parquet/__init__.py,sha256=esWJ9aSuYC26d-T01z9dPrJ1u
 sibi_dst/df_helper/backends/parquet/_filter_handler.py,sha256=TvDf0RXta7mwJv11GNQttYJsXgFf2XDj4oLIjt4xTzA,5219
 sibi_dst/df_helper/backends/parquet/_parquet_options.py,sha256=FWExRRTlhGrOhGPyzL1tucxgoHa3nJenLLs87I2gs-I,11776
 sibi_dst/df_helper/backends/sqlalchemy/__init__.py,sha256=LjWm9B7CweTvlvFOgB90XjSe0lVLILAIYMWKPkFXFm8,265
-sibi_dst/df_helper/backends/sqlalchemy/_db_connection.py,sha256=
+sibi_dst/df_helper/backends/sqlalchemy/_db_connection.py,sha256=ycjnkhD1lWMKnLFy1bycle__jbfaWH6oI7m9ymX59c4,10783
 sibi_dst/df_helper/backends/sqlalchemy/_io_dask.py,sha256=NqBSHqeYv_1vHt6J0tez0GdMwKrP_sIRcXYXu869ZkY,13313
 sibi_dst/df_helper/backends/sqlalchemy/_load_from_db.py,sha256=ibxeVqpIEsSVusP2bgcd1MNV_wJIoNgXwacltUbwTas,3194
 sibi_dst/df_helper/backends/sqlalchemy/_sql_model_builder.py,sha256=d_-ip-dQnWOlM8btCjoywAXpaiSuN6AaavkTGJsVQfY,3576
@@ -39,7 +39,7 @@ sibi_dst/utils/credentials.py,sha256=cHJPPsmVyijqbUQIq7WWPe-lIallA-mI5RAy3YUuRME
 sibi_dst/utils/data_from_http_source.py,sha256=AcpKNsqTgN2ClNwuhgUpuNCx62r5_DdsAiKY8vcHEBA,1867
 sibi_dst/utils/data_utils.py,sha256=MqbwXk33BuANWeKKmsabHouhb8GZswSmbM-VetWWE-M,10357
 sibi_dst/utils/data_wrapper.py,sha256=9aYXorbrqDX53NVJ5oUnNQy6FbXYhs5osxzeMcdZpC4,9609
-sibi_dst/utils/date_utils.py,sha256=
+sibi_dst/utils/date_utils.py,sha256=T0uXNIG2IQfgs0AyQNsF9S6-cTujtA4GDC1IalvZVSU,18040
 sibi_dst/utils/df_utils.py,sha256=TzIAUCLbgOn3bvCFvzkc1S9YU-OlZTImdCj-88dtg8g,11401
 sibi_dst/utils/file_utils.py,sha256=Z99CZ_4nPDIaZqbCfzzUDfAYJjSudWDj-mwEO8grhbc,1253
 sibi_dst/utils/filepath_generator.py,sha256=-HHO0U-PR8fysDDFwnWdHRlgqksh_RkmgBZLWv9hM7s,6669
@@ -62,7 +62,7 @@ sibi_dst/v2/df_helper/backends/sqlalchemy/_load_from_db.py,sha256=jhgN0OO5Sk1zQF
 sibi_dst/v2/df_helper/backends/sqlalchemy/_model_builder.py,sha256=jX_mQAzl_6xdh7CTYw4uvUIX2wMp3NzXMlfbC5alOzs,13632
 sibi_dst/v2/df_helper/backends/sqlmodel/__init__.py,sha256=LcwJjVVxxrnVZalWqnz5m7r77i9tmJR0-U2k8eSQ-m8,249
 sibi_dst/v2/df_helper/backends/sqlmodel/_db_connection.py,sha256=n3CDbda0OY3X7eTeu_PR2KcZ5hYyEJL7Hroo8yQkjG8,15435
-sibi_dst/v2/df_helper/backends/sqlmodel/_io_dask.py,sha256=
+sibi_dst/v2/df_helper/backends/sqlmodel/_io_dask.py,sha256=VyhSGZGSN0gpsGhHHpY07NkmeAvPmMyQi3ewAaE79VM,5446
 sibi_dst/v2/df_helper/backends/sqlmodel/_load_from_db.py,sha256=FIs6UrNxdJ7eDHDvTv-cJuybIue2-oCRedhW-MNe7CU,6285
 sibi_dst/v2/df_helper/backends/sqlmodel/_model_builder.py,sha256=k0dnMLkLMMvkDYDYWkGFgibW5UD8pJgB3YrEg_R7pj8,13556
 sibi_dst/v2/df_helper/core/__init__.py,sha256=rZhBh32Rgcxj4MBii-KsYVJQmrT9egiWKXk68gWKblo,197
@@ -71,6 +71,6 @@ sibi_dst/v2/df_helper/core/_params_config.py,sha256=DYx2drDz3uF-lSPzizPkchhy-kxR
 sibi_dst/v2/df_helper/core/_query_config.py,sha256=Y8LVSyaKuVkrPluRDkQoOwuXHQxner1pFWG3HPfnDHM,441
 sibi_dst/v2/utils/__init__.py,sha256=6H4cvhqTiFufnFPETBF0f8beVVMpfJfvUs6Ne0TQZNY,58
 sibi_dst/v2/utils/log_utils.py,sha256=rfk5VsLAt-FKpv6aPTC1FToIPiyrnHAFFBAkHme24po,4123
-sibi_dst-2025.1.
-sibi_dst-2025.1.
-sibi_dst-2025.1.
+sibi_dst-2025.1.11.dist-info/METADATA,sha256=7iwn7RFfaDF_9dfpWvnNl2Al_8NHWu7l8vGhzO9BAac,2611
+sibi_dst-2025.1.11.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+sibi_dst-2025.1.11.dist-info/RECORD,,
{sibi_dst-2025.1.10.dist-info → sibi_dst-2025.1.11.dist-info}/WHEEL
File without changes