sibi-dst 2025.1.10-py3-none-any.whl → 2025.1.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/sibi_dst/df_helper/_df_helper.py
+++ b/sibi_dst/df_helper/_df_helper.py
@@ -68,6 +68,9 @@ class ParquetBackend(BaseBackend):
             df = self.helper.backend_parquet.load_files()
             if options and df is not None:
                 df = FilterHandler('dask', logger=self.logger, debug=False).apply_filters(df, filters=options)
+
+            df = df.persist()
+
             self.total_records = len(df) or -1  # If df is empty, set total_records to -1
             return self.total_records, df
         except Exception as e:
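
Note on the new df.persist() call: Dask frames are lazy, so without it the load-and-filter graph above would be re-executed for len(df) and again for every downstream computation. persist() materializes the filtered partitions in memory once, and both the record count and later operations reuse them. A minimal sketch of the effect, using a made-up frame rather than the package's parquet loader:

    import dask.dataframe as dd
    import pandas as pd

    ddf = dd.from_pandas(pd.DataFrame({"n": range(100)}), npartitions=4)
    ddf = ddf[ddf["n"] % 2 == 0]    # lazy filter, standing in for apply_filters()
    ddf = ddf.persist()             # compute once and cache the partitions
    total_records = len(ddf) or -1  # len() now reuses the persisted result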
--- a/sibi_dst/df_helper/backends/sqlalchemy/_db_connection.py
+++ b/sibi_dst/df_helper/backends/sqlalchemy/_db_connection.py
@@ -15,7 +15,7 @@ from sqlalchemy.engine import url as sqlalchemy_url
 from sqlalchemy.engine import Engine
 from sqlalchemy.exc import OperationalError, SQLAlchemyError
 from sqlalchemy.orm import sessionmaker, Session
-from sqlalchemy.pool import QueuePool, NullPool, StaticPool
+from sqlalchemy.pool import QueuePool, NullPool, StaticPool, Pool
 
 # Assuming these are your project's internal modules
 from sibi_dst.utils import Logger
@@ -54,7 +54,7 @@ class SqlAlchemyConnectionConfig(BaseModel):
     pool_timeout: int = int(os.environ.get("DB_POOL_TIMEOUT", 30))
     pool_recycle: int = int(os.environ.get("DB_POOL_RECYCLE", 1800))
     pool_pre_ping: bool = True
-    poolclass: Type[QueuePool] = QueuePool
+    poolclass: Type[Pool] = QueuePool
 
     # --- Internal & Runtime State ---
     model: Optional[Type[Any]] = None
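
The widened annotation matters because QueuePool, NullPool, and StaticPool are all subclasses of sqlalchemy.pool.Pool: under Type[QueuePool], assigning any other pool class was a validation error even though SQLAlchemy accepts it. A standalone sketch of the fix (a hypothetical model, not the package's own):

    from typing import Type
    from pydantic import BaseModel
    from sqlalchemy.pool import Pool, QueuePool, NullPool

    class PoolConfig(BaseModel):
        poolclass: Type[Pool] = QueuePool  # any Pool subclass now validates

    PoolConfig(poolclass=NullPool)  # rejected under the old Type[QueuePool]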
@@ -195,7 +195,6 @@ class SqlAlchemyConnectionConfig(BaseModel):
         wrapper = self._engine_registry.get(self._engine_key_instance)
         if wrapper:
             wrapper['active_connections'] += 1
-            # self.logger.debug(f"Connection checked out. Active: {self.active_connections}")
 
     def _on_checkin(self, *args) -> None:
         """Event listener for when a connection is returned to the pool."""
@@ -203,7 +202,6 @@ class SqlAlchemyConnectionConfig(BaseModel):
         wrapper = self._engine_registry.get(self._engine_key_instance)
         if wrapper:
             wrapper['active_connections'] = max(0, wrapper['active_connections'] - 1)
-            # self.logger.debug(f"Connection checked in. Active: {self.active_connections}")
 
     @property
     def active_connections(self) -> int:
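
For context, _on_checkout and _on_checkin are SQLAlchemy pool event listeners; the counter they maintain backs the active_connections property. Their registration is outside this diff, but a hedged sketch of how such listeners are typically attached:

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite://")
    stats = {"active": 0}

    @event.listens_for(engine, "checkout")
    def on_checkout(dbapi_conn, conn_record, conn_proxy):
        stats["active"] += 1  # a connection left the pool

    @event.listens_for(engine, "checkin")
    def on_checkin(dbapi_conn, conn_record):
        stats["active"] = max(0, stats["active"] - 1)  # it came back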
--- a/sibi_dst/utils/date_utils.py
+++ b/sibi_dst/utils/date_utils.py
@@ -4,7 +4,7 @@ from typing import Union, Tuple, Callable, Dict, Optional
 import fsspec
 import numpy as np
 import pandas as pd
-
+import dask.dataframe as dd
 from .log_utils import Logger
 
 
@@ -305,154 +305,153 @@ class FileAgeChecker:
305
305
  raise ValueError(f"Unsupported modification time format for {file_path}") from e
306
306
 
307
307
 
308
+ # --- Vectorized Helper Functions ---
309
+ # These replace the slow, row-by-row .apply() logic. They operate
310
+ # on entire DataFrame partitions for maximum efficiency.
311
+
312
+ def _vectorized_busday_count(
313
+ partition: pd.DataFrame,
314
+ begin_col: str,
315
+ end_col: str,
316
+ holidays: list
317
+ ) -> pd.Series:
318
+ """Vectorized function to count business days on a DataFrame partition."""
319
+ if partition.empty:
320
+ return pd.Series([], dtype=float)
321
+
322
+ # Convert entire columns to datetime at once, coercing errors to NaT
323
+ start_dates = pd.to_datetime(partition[begin_col], errors='coerce').dt.date
324
+ end_dates = pd.to_datetime(partition[end_col], errors='coerce').dt.date
325
+
326
+ # Create a result series filled with NaN to handle rows with invalid dates
327
+ result = pd.Series(np.nan, index=partition.index, dtype=float)
328
+
329
+ # Create a boolean mask for valid, non-NaT date pairs
330
+ valid_mask = pd.notna(start_dates) & pd.notna(end_dates)
331
+
332
+ # Perform the vectorized calculation only on the valid subset of dates
333
+ result.loc[valid_mask] = np.busday_count(
334
+ start_dates[valid_mask],
335
+ end_dates[valid_mask],
336
+ holidays=holidays
337
+ )
338
+ return result
339
+
340
+
341
+ def _vectorized_sla_end_date(
342
+ partition: pd.DataFrame,
343
+ start_col: str,
344
+ n_days_col: str,
345
+ holidays: list
346
+ ) -> pd.Series:
347
+ """Vectorized function to calculate the SLA end date on a DataFrame partition."""
348
+ if partition.empty:
349
+ return pd.Series([], dtype='datetime64[ns]')
350
+
351
+ start_dates = pd.to_datetime(partition[start_col], errors='coerce').dt.date
352
+ sla_days = partition[n_days_col]
353
+
354
+ # Create a result series filled with NaT for rows with invalid start dates
355
+ result = pd.Series(pd.NaT, index=partition.index, dtype='datetime64[ns]')
356
+
357
+ # Create a boolean mask for valid start dates and sla_days
358
+ valid_mask = pd.notna(start_dates) & pd.notna(sla_days)
359
+
360
+ # Perform the vectorized calculation only on the valid subset
361
+ result.loc[valid_mask] = np.busday_offset(
362
+ start_dates[valid_mask],
363
+ sla_days[valid_mask].astype(int), # Ensure days are integers
364
+ roll='forward',
365
+ holidays=holidays
366
+ )
367
+ return result
368
+
369
+
370
+ # --- Refactored BusinessDays Class ---
371
+
308
372
  class BusinessDays:
309
373
  """
310
- Provides functionality for handling business days calculations with a custom
311
- holiday list. The class includes methods for calculating the number of
312
- business days, modifying dates by adding business days, and applying these
313
- operations to Dask DataFrames.
314
-
315
- :ivar logger: Logger instance for logging error, warning, and debug messages.
316
- :type logger: logging.Logger
317
- :ivar HOLIDAY_LIST: Dictionary mapping years to lists of holiday dates.
318
- :type HOLIDAY_LIST: dict
319
- :ivar bd_cal: Numpy busdaycalendar object containing holidays and week mask.
320
- :type bd_cal: numpy.busdaycalendar
321
- :ivar holidays: Array of holiday dates used by the business day calendar.
322
- :type holidays: numpy.ndarray
323
- :ivar week_mask: Boolean array indicating working days within a week.
324
- :type week_mask: numpy.ndarray
374
+ Business days calculations with a custom holiday list.
375
+ Supports scalar and efficient, vectorized Dask DataFrame operations.
325
376
  """
326
377
 
327
- def __init__(self, holiday_list, logger):
328
- """
329
- Initialize a BusinessDays object with a given holiday list.
330
- """
378
+ def __init__(self, holiday_list: dict[str, list[str]], logger) -> None:
331
379
  self.logger = logger
332
380
  self.HOLIDAY_LIST = holiday_list
333
- bd_holidays = [day for year in self.HOLIDAY_LIST for day in self.HOLIDAY_LIST[year]]
334
- self.bd_cal = np.busdaycalendar(holidays=bd_holidays, weekmask="1111100")
335
- self.holidays = self.bd_cal.holidays
336
- self.week_mask = self.bd_cal.weekmask
337
381
 
338
- def get_business_days_count(self, begin_date, end_date):
339
- """
340
- Calculate the number of business days between two dates.
341
- """
342
- try:
343
- begin_date = pd.to_datetime(begin_date)
344
- end_date = pd.to_datetime(end_date)
345
- except Exception as e:
346
- raise ValueError(f"Invalid date format: {e}")
347
-
348
- years = [str(year) for year in range(begin_date.year, end_date.year + 1)]
349
- if not all(year in self.HOLIDAY_LIST for year in years):
350
- raise ValueError("Not all years in date range are in the holiday list")
351
-
352
- return np.busday_count(
353
- begin_date.strftime("%Y-%m-%d"),
354
- end_date.strftime("%Y-%m-%d"),
355
- busdaycal=self.bd_cal,
356
- )
382
+ # Flatten and store as tuple for determinism
383
+ bd_holidays = [day for year in self.HOLIDAY_LIST for day in self.HOLIDAY_LIST[year]]
384
+ self.holidays = tuple(bd_holidays)
357
385
 
358
- def calc_business_days_from_df(self, df, begin_date_col, end_date_col, result_col="business_days"):
359
- """
360
- Add a column to a Dask DataFrame with the number of business days between two date columns.
361
- """
362
- if not all(col in df.columns for col in [begin_date_col, end_date_col]):
363
- self.logger.error("Column names not found in DataFrame")
364
- raise ValueError("Required columns are missing")
365
-
366
- # Extract holidays and weekmask to recreate the busdaycalendar
367
- holidays = self.bd_cal.holidays
368
- weekmask = self.bd_cal.weekmask
369
-
370
- # Define a function to calculate business days
371
- def calculate_business_days(row, holidays, weekmask):
372
- begin_date = pd.to_datetime(row[begin_date_col])
373
- end_date = pd.to_datetime(row[end_date_col])
374
- if pd.isna(begin_date) or pd.isna(end_date):
375
- return np.nan
376
- busdaycal = np.busdaycalendar(holidays=holidays, weekmask=weekmask)
377
- return np.busday_count(
378
- begin_date.strftime("%Y-%m-%d"),
379
- end_date.strftime("%Y-%m-%d"),
380
- busdaycal=busdaycal,
381
- )
382
-
383
- # Define a wrapper function for partition-wise operations
384
- def apply_business_days(partition, holidays, weekmask):
385
- return partition.apply(
386
- calculate_business_days, axis=1, holidays=holidays, weekmask=weekmask
387
- )
388
-
389
- # Apply the function using map_partitions
390
- df[result_col] = df.map_partitions(
391
- apply_business_days,
392
- holidays,
393
- weekmask,
394
- meta=(result_col, "int64"),
386
+ def get_business_days_count(
387
+ self,
388
+ begin_date: str | datetime.date | pd.Timestamp,
389
+ end_date: str | datetime.date | pd.Timestamp,
390
+ ) -> int:
391
+ """Scalar method to count business days between two dates."""
392
+ begin = pd.to_datetime(begin_date)
393
+ end = pd.to_datetime(end_date)
394
+ return int(np.busday_count(begin.date(), end.date(), holidays=list(self.holidays)))
395
+
396
+ def calc_business_days_from_df(
397
+ self,
398
+ df: dd.DataFrame,
399
+ begin_date_col: str,
400
+ end_date_col: str,
401
+ result_col: str = "business_days",
402
+ ) -> dd.DataFrame:
403
+ """Calculates business days between two columns in a Dask DataFrame."""
404
+ missing = {begin_date_col, end_date_col} - set(df.columns)
405
+ if missing:
406
+ self.logger.error(f"Missing columns: {missing}")
407
+ raise ValueError("Required columns are missing from DataFrame")
408
+
409
+ return df.assign(
410
+ **{result_col: df.map_partitions(
411
+ _vectorized_busday_count,
412
+ begin_col=begin_date_col,
413
+ end_col=end_date_col,
414
+ holidays=list(self.holidays),
415
+ meta=(result_col, 'f8') # f8 is float64
416
+ )}
395
417
  )
396
418
 
397
- return df
398
-
399
- def add_business_days(self, start_date, n_days):
400
- """
401
- Add n_days business days to start_date.
402
- """
403
- try:
404
- start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
405
- except ValueError:
406
- raise ValueError("Date should be a string in the format YYYY-MM-DD")
407
-
408
- if str(start_date.year) not in self.HOLIDAY_LIST:
409
- self.logger.warning(f"Year {start_date.year} is not in the holiday list")
410
-
419
+ def add_business_days(
420
+ self,
421
+ start_date: str | datetime.date | pd.Timestamp,
422
+ n_days: int,
423
+ ) -> np.datetime64:
424
+ """Scalar method to add N business days to a start date."""
425
+ start = pd.to_datetime(start_date)
411
426
  return np.busday_offset(
412
- start_date.strftime("%Y-%m-%d"),
427
+ start.date(),
413
428
  n_days,
414
- roll="forward",
415
- busdaycal=self.bd_cal,
429
+ roll='forward',
430
+ holidays=list(self.holidays),
416
431
  )
417
432
 
418
- def calc_sla_end_date(self, df, start_date_col, n_days_col, result_col="sla_end_date"):
419
- """
420
- Add a column to a Dask DataFrame with SLA end dates based on start date and SLA days.
421
- """
422
- if not all(col in df.columns for col in [start_date_col, n_days_col]):
423
- raise ValueError("Column names not found in DataFrame")
424
-
425
- # Extract holidays and weekmask to recreate the busdaycalendar
426
- holidays = self.bd_cal.holidays
427
- weekmask = self.bd_cal.weekmask
428
-
429
- # Define a function to calculate SLA end dates
430
- def calculate_sla_end_date(row, holidays, weekmask):
431
- start_date = pd.to_datetime(row[start_date_col])
432
- n_days = row[n_days_col]
433
- busdaycal = np.busdaycalendar(holidays=holidays, weekmask=weekmask)
434
- return np.busday_offset(
435
- start_date.strftime("%Y-%m-%d"),
436
- n_days,
437
- roll="forward",
438
- busdaycal=busdaycal,
439
- )
440
-
441
- # Define a wrapper for partition-wise operation
442
- def apply_sla_end_date(partition, holidays, weekmask):
443
- return partition.apply(
444
- calculate_sla_end_date, axis=1, holidays=holidays, weekmask=weekmask
445
- )
446
-
447
- # Apply the function using map_partitions
448
- df[result_col] = df.map_partitions(
449
- apply_sla_end_date,
450
- holidays,
451
- weekmask,
452
- meta=(result_col, "object"),
433
+ def calc_sla_end_date(
434
+ self,
435
+ df: dd.DataFrame,
436
+ start_date_col: str,
437
+ n_days_col: str,
438
+ result_col: str = "sla_end_date",
439
+ ) -> dd.DataFrame:
440
+ """Calculates an SLA end date column for a Dask DataFrame."""
441
+ missing = {start_date_col, n_days_col} - set(df.columns)
442
+ if missing:
443
+ self.logger.error(f"Missing columns: {missing}")
444
+ raise ValueError("Required columns are missing from DataFrame")
445
+
446
+ return df.assign(
447
+ **{result_col: df.map_partitions(
448
+ _vectorized_sla_end_date,
449
+ start_col=start_date_col,
450
+ n_days_col=n_days_col,
451
+ holidays=list(self.holidays),
452
+ meta=(result_col, 'datetime64[ns]')
453
+ )}
453
454
  )
454
-
455
- return df
456
455
  # Class enhancements
457
456
  # DateUtils.register_period('next_week', lambda: (datetime.date.today() + datetime.timedelta(days=7),
458
457
  # datetime.date.today() + datetime.timedelta(days=13)))
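
A usage sketch for the refactored class (holiday dates and column names are illustrative; map_partitions forwards the keyword arguments to the vectorized helpers):

    import logging
    import dask.dataframe as dd
    import pandas as pd

    bd = BusinessDays({"2025": ["2025-01-01", "2025-12-25"]},
                      logger=logging.getLogger("bd"))

    pdf = pd.DataFrame({"opened": ["2025-01-02", "2025-01-06"],
                        "closed": ["2025-01-10", "2025-01-13"]})
    ddf = dd.from_pandas(pdf, npartitions=1)
    ddf = bd.calc_business_days_from_df(ddf, "opened", "closed")
    print(ddf.compute())  # adds a float64 'business_days' column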
--- a/sibi_dst/v2/df_helper/backends/sqlmodel/_io_dask.py
+++ b/sibi_dst/v2/df_helper/backends/sqlmodel/_io_dask.py
@@ -1,7 +1,8 @@
 import itertools
 import dask.dataframe as dd
 import pandas as pd
-from sqlmodel import create_engine, Session, select
+
+#from sqlmodel import create_engine, Session, select
 from sibi_dst.v2.df_helper.core import FilterHandler
 from sibi_dst.v2.utils import Logger
 
@@ -116,7 +117,7 @@ class SQLModelDask:
             return dask_df
 
         except Exception as e:
-            self.logger.error(f"Error executing query: {str(e)}")
-            self.logger.error(self.query)
+            self.logger.error(f"_io_dask:Error executing query: {str(e)}")
+            self.logger.error(f"_io_dask:{self.query}")
             # In case of error, return an empty Dask DataFrame with the expected columns.
             return dd.from_pandas(pd.DataFrame(columns=ordered_columns), npartitions=1)
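
The two changed lines tag the error output with an "_io_dask:" prefix so the failing module is identifiable in shared logs; the surrounding fallback then returns an empty, correctly shaped frame. A sketch of that pattern in isolation (ordered_columns is illustrative):

    import dask.dataframe as dd
    import pandas as pd

    ordered_columns = ["id", "name"]
    # Empty frame that still carries the expected schema, so callers
    # can keep chaining operations after a failed query.
    empty = dd.from_pandas(pd.DataFrame(columns=ordered_columns), npartitions=1)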
--- a/sibi_dst-2025.1.10.dist-info/METADATA
+++ b/sibi_dst-2025.1.11.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sibi-dst
-Version: 2025.1.10
+Version: 2025.1.11
 Summary: Data Science Toolkit
 Author: Luis Valverde
 Author-email: lvalverdeb@gmail.com
--- a/sibi_dst-2025.1.10.dist-info/RECORD
+++ b/sibi_dst-2025.1.11.dist-info/RECORD
@@ -1,7 +1,7 @@
 sibi_dst/__init__.py,sha256=j8lZpGCJlxlLgEgeIMxZnWdqJ0g3MCs7-gsnbvPn_KY,285
 sibi_dst/df_helper/__init__.py,sha256=Jur_MO8RGPkVw0CS3XH5YIWv-d922DC_FwRDTvHHV6Y,432
 sibi_dst/df_helper/_artifact_updater_multi_wrapper.py,sha256=pSSw3N_ZNZCZHAiChbsF_ECyCmz0L2xCgvt9srHtPOM,17575
-sibi_dst/df_helper/_df_helper.py,sha256=BbpP0BOLDGCOE8oAxqP5ODN_HqYohQcGsh-8Dx2-sks,12885
+sibi_dst/df_helper/_df_helper.py,sha256=PNoN0nlzRwo_4JiaVyzmOM--LRrsJ0jB9pZqDi_kkRA,12917
 sibi_dst/df_helper/_parquet_artifact.py,sha256=dCvUA2bytv0wY0pFI8lxbcLwXlgGpHndS36iKfEmjLw,14310
 sibi_dst/df_helper/_parquet_reader.py,sha256=m98C0TZRroOXvVc2LpEuElrJnquGlR81E1gjI7v1hi4,3102
 sibi_dst/df_helper/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -11,7 +11,7 @@ sibi_dst/df_helper/backends/parquet/__init__.py,sha256=esWJ9aSuYC26d-T01z9dPrJ1u
 sibi_dst/df_helper/backends/parquet/_filter_handler.py,sha256=TvDf0RXta7mwJv11GNQttYJsXgFf2XDj4oLIjt4xTzA,5219
 sibi_dst/df_helper/backends/parquet/_parquet_options.py,sha256=FWExRRTlhGrOhGPyzL1tucxgoHa3nJenLLs87I2gs-I,11776
 sibi_dst/df_helper/backends/sqlalchemy/__init__.py,sha256=LjWm9B7CweTvlvFOgB90XjSe0lVLILAIYMWKPkFXFm8,265
-sibi_dst/df_helper/backends/sqlalchemy/_db_connection.py,sha256=AOYvWw1vxd1CwXpIakQNFln7PHzFLfp7oaOsGaG0UN8,10961
+sibi_dst/df_helper/backends/sqlalchemy/_db_connection.py,sha256=ycjnkhD1lWMKnLFy1bycle__jbfaWH6oI7m9ymX59c4,10783
 sibi_dst/df_helper/backends/sqlalchemy/_io_dask.py,sha256=NqBSHqeYv_1vHt6J0tez0GdMwKrP_sIRcXYXu869ZkY,13313
 sibi_dst/df_helper/backends/sqlalchemy/_load_from_db.py,sha256=ibxeVqpIEsSVusP2bgcd1MNV_wJIoNgXwacltUbwTas,3194
 sibi_dst/df_helper/backends/sqlalchemy/_sql_model_builder.py,sha256=d_-ip-dQnWOlM8btCjoywAXpaiSuN6AaavkTGJsVQfY,3576
@@ -39,7 +39,7 @@ sibi_dst/utils/credentials.py,sha256=cHJPPsmVyijqbUQIq7WWPe-lIallA-mI5RAy3YUuRME
 sibi_dst/utils/data_from_http_source.py,sha256=AcpKNsqTgN2ClNwuhgUpuNCx62r5_DdsAiKY8vcHEBA,1867
 sibi_dst/utils/data_utils.py,sha256=MqbwXk33BuANWeKKmsabHouhb8GZswSmbM-VetWWE-M,10357
 sibi_dst/utils/data_wrapper.py,sha256=9aYXorbrqDX53NVJ5oUnNQy6FbXYhs5osxzeMcdZpC4,9609
-sibi_dst/utils/date_utils.py,sha256=8fwPpOYqSdM3nHeNykh7Ftk-uPdFa44cEAy5S8iUNw4,18667
+sibi_dst/utils/date_utils.py,sha256=T0uXNIG2IQfgs0AyQNsF9S6-cTujtA4GDC1IalvZVSU,18040
 sibi_dst/utils/df_utils.py,sha256=TzIAUCLbgOn3bvCFvzkc1S9YU-OlZTImdCj-88dtg8g,11401
 sibi_dst/utils/file_utils.py,sha256=Z99CZ_4nPDIaZqbCfzzUDfAYJjSudWDj-mwEO8grhbc,1253
 sibi_dst/utils/filepath_generator.py,sha256=-HHO0U-PR8fysDDFwnWdHRlgqksh_RkmgBZLWv9hM7s,6669
@@ -62,7 +62,7 @@ sibi_dst/v2/df_helper/backends/sqlalchemy/_load_from_db.py,sha256=jhgN0OO5Sk1zQF
 sibi_dst/v2/df_helper/backends/sqlalchemy/_model_builder.py,sha256=jX_mQAzl_6xdh7CTYw4uvUIX2wMp3NzXMlfbC5alOzs,13632
 sibi_dst/v2/df_helper/backends/sqlmodel/__init__.py,sha256=LcwJjVVxxrnVZalWqnz5m7r77i9tmJR0-U2k8eSQ-m8,249
 sibi_dst/v2/df_helper/backends/sqlmodel/_db_connection.py,sha256=n3CDbda0OY3X7eTeu_PR2KcZ5hYyEJL7Hroo8yQkjG8,15435
-sibi_dst/v2/df_helper/backends/sqlmodel/_io_dask.py,sha256=wVgNPo5V75aLtlZr_SIQ-yteyXq-Rg93eMfR8JCfkSo,5422
+sibi_dst/v2/df_helper/backends/sqlmodel/_io_dask.py,sha256=VyhSGZGSN0gpsGhHHpY07NkmeAvPmMyQi3ewAaE79VM,5446
 sibi_dst/v2/df_helper/backends/sqlmodel/_load_from_db.py,sha256=FIs6UrNxdJ7eDHDvTv-cJuybIue2-oCRedhW-MNe7CU,6285
 sibi_dst/v2/df_helper/backends/sqlmodel/_model_builder.py,sha256=k0dnMLkLMMvkDYDYWkGFgibW5UD8pJgB3YrEg_R7pj8,13556
 sibi_dst/v2/df_helper/core/__init__.py,sha256=rZhBh32Rgcxj4MBii-KsYVJQmrT9egiWKXk68gWKblo,197
@@ -71,6 +71,6 @@ sibi_dst/v2/df_helper/core/_params_config.py,sha256=DYx2drDz3uF-lSPzizPkchhy-kxR
 sibi_dst/v2/df_helper/core/_query_config.py,sha256=Y8LVSyaKuVkrPluRDkQoOwuXHQxner1pFWG3HPfnDHM,441
 sibi_dst/v2/utils/__init__.py,sha256=6H4cvhqTiFufnFPETBF0f8beVVMpfJfvUs6Ne0TQZNY,58
 sibi_dst/v2/utils/log_utils.py,sha256=rfk5VsLAt-FKpv6aPTC1FToIPiyrnHAFFBAkHme24po,4123
-sibi_dst-2025.1.10.dist-info/METADATA,sha256=8vs8tux9EiNETH_j1d-2JMDfWfhN7DysoBAa9HtJk1w,2611
-sibi_dst-2025.1.10.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-sibi_dst-2025.1.10.dist-info/RECORD,,
+sibi_dst-2025.1.11.dist-info/METADATA,sha256=7iwn7RFfaDF_9dfpWvnNl2Al_8NHWu7l8vGhzO9BAac,2611
+sibi_dst-2025.1.11.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+sibi_dst-2025.1.11.dist-info/RECORD,,