dragon-ml-toolbox 3.3.0__py3-none-any.whl → 3.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dragon-ml-toolbox might be problematic.
- {dragon_ml_toolbox-3.3.0.dist-info → dragon_ml_toolbox-3.5.0.dist-info}/METADATA +1 -1
- {dragon_ml_toolbox-3.3.0.dist-info → dragon_ml_toolbox-3.5.0.dist-info}/RECORD +9 -9
- ml_tools/ETL_engineering.py +88 -76
- ml_tools/VIF_factor.py +2 -2
- ml_tools/utilities.py +52 -6
- {dragon_ml_toolbox-3.3.0.dist-info → dragon_ml_toolbox-3.5.0.dist-info}/WHEEL +0 -0
- {dragon_ml_toolbox-3.3.0.dist-info → dragon_ml_toolbox-3.5.0.dist-info}/licenses/LICENSE +0 -0
- {dragon_ml_toolbox-3.3.0.dist-info → dragon_ml_toolbox-3.5.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
- {dragon_ml_toolbox-3.3.0.dist-info → dragon_ml_toolbox-3.5.0.dist-info}/top_level.txt +0 -0
{dragon_ml_toolbox-3.3.0.dist-info → dragon_ml_toolbox-3.5.0.dist-info}/RECORD CHANGED
@@ -1,6 +1,6 @@
-dragon_ml_toolbox-3.
-dragon_ml_toolbox-3.
-ml_tools/ETL_engineering.py,sha256=
+dragon_ml_toolbox-3.5.0.dist-info/licenses/LICENSE,sha256=2uUFNy7D0TLgHim1K5s3DIJ4q_KvxEXVilnU20cWliY,1066
+dragon_ml_toolbox-3.5.0.dist-info/licenses/LICENSE-THIRD-PARTY.md,sha256=6cfpIeQ6D4Mcs10nkogQrkVyq1T7i2qXjjNHFoUMOyE,1892
+ml_tools/ETL_engineering.py,sha256=9dojA9RqE7bq1A70tegsERszoqll7UmcJelVeQeefik,39519
 ml_tools/GUI_tools.py,sha256=uFx6zIrQZzDPSTtOSHz8ptz-fxZiQz-lXHcrqwuYV_E,20385
 ml_tools/MICE_imputation.py,sha256=ed-YeQkEAeHxTNkWIHs09T4YeYNF0aqAnrUTcdIEp9E,11372
 ml_tools/ML_callbacks.py,sha256=gHZk-lyzAax6iEtG26zHuoobdAZCFJ6BmI6pWoXkOrw,13189
@@ -9,7 +9,7 @@ ml_tools/ML_trainer.py,sha256=zRs3crz_z4B285iJhmY7m4AFwnvvq4urOyl4zDuCLtA,14456
 ml_tools/ML_tutorial.py,sha256=-9tJO9ISPxEjRINVaF_Bu7tiiJ2W3zznQ4gNlZeP1HQ,12238
 ml_tools/PSO_optimization.py,sha256=RCvIFGyf28voo2mpbRKC6LfDzKslzY-aYoPwgv9F4Bg,25458
 ml_tools/RNN_forecast.py,sha256=IZLcPs3by0Chei7ill_Grjxs7BBUnzau0Oavi3dWiyE,1886
-ml_tools/VIF_factor.py,sha256=
+ml_tools/VIF_factor.py,sha256=4b3HmrrolN7ZIAo16TWwLlExqj_xaa8MxbkXD1xPCys,10295
 ml_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ml_tools/_particle_swarm_optimization.py,sha256=b_eNNkA89Y40hj76KauivT8KLScH1B9wF2IXptOqkOw,22220
 ml_tools/_pytorch_models.py,sha256=bpWZsrSwCvHJQkR6UfoPpElsMv9AvmiNErNHC8NYB_I,10132
@@ -18,8 +18,8 @@ ml_tools/datasetmaster.py,sha256=N-uwfzWnl_qnoAqjbfS98I1pVNra5u6rhKLdWbFIReA,301
 ml_tools/ensemble_learning.py,sha256=PPtBBLgLvaYOdY-MlcjXuxWWXf3JQavLNEysFgzjc_s,37470
 ml_tools/handle_excel.py,sha256=lwds7rDLlGSCWiWGI7xNg-Z7kxAepogp0lstSFa0590,12949
 ml_tools/logger.py,sha256=jC4Q2OqmDm8ZO9VpuZqBSWdXryqaJvLscqVJ6caNMOk,6009
-ml_tools/utilities.py,sha256=
-dragon_ml_toolbox-3.
-dragon_ml_toolbox-3.
-dragon_ml_toolbox-3.
-dragon_ml_toolbox-3.
+ml_tools/utilities.py,sha256=7cVWXjdxgSoIbZunuxJEOnJDSYp29liYsZexbrVDabs,23132
+dragon_ml_toolbox-3.5.0.dist-info/METADATA,sha256=yxz0lVEDn3k40f3nZk_pocW8WCOXifF6ItuUMJ8LFsM,3273
+dragon_ml_toolbox-3.5.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dragon_ml_toolbox-3.5.0.dist-info/top_level.txt,sha256=wm-oxax3ciyez6VoO4zsFd-gSok2VipYXnbg3TH9PtU,9
+dragon_ml_toolbox-3.5.0.dist-info/RECORD,,
ml_tools/ETL_engineering.py CHANGED
@@ -2,7 +2,6 @@ import polars as pl
 import re
 from typing import Literal, Union, Optional, Any, Callable, List, Dict, Tuple
 from .utilities import _script_info
-import pandas as pd
 from .logger import _LOGGER
 
 
@@ -24,124 +23,137 @@ __all__ = [
 ]
 
 ########## EXTRACT and CLEAN ##########
-
 class ColumnCleaner:
     """
-
-
+    A configuration object that defines cleaning rules for a single Polars DataFrame column.
+
+    This class holds a dictionary of regex-to-replacement rules, the target column name,
+    and the case-sensitivity setting. It is intended to be used with the DataFrameCleaner.
 
     Notes:
-
-
-
-
+    - Define rules from most specific to more general to create a fallback system.
+    - Beware of chain replacements (rules matching strings that have already been
+      changed by a previous rule in the same cleaner).
+
     Args:
+        column_name (str):
+            The name of the column to be cleaned.
         rules (Dict[str, str]):
             A dictionary of regex patterns to replacement strings. Can use
-            backreferences
+            backreferences (e.g., r'$1 $2') for captured groups. Note that Polars
+            uses a '$' prefix for backreferences.
         case_insensitive (bool):
-            If True, regex matching ignores case.
+            If True (default), regex matching ignores case.
+
+    ## Usage Example
+
+    ```python
+    phone_rules = {
+        # Matches (123) 456-7890 and reformats to 123-456-7890
+        r'\((\d{3})\)\s*(\d{3})-(\d{4})': r'$1-$2-$3'
+    }
+
+    phone_cleaner = ColumnCleaner(column_name='phone_number', rules=phone_rules)
+
+    # This object would then be passed to a DataFrameCleaner.
+    ```
     """
-    def __init__(self, rules: Dict[str, str], case_insensitive: bool = True):
+    def __init__(self, column_name: str, rules: Dict[str, str], case_insensitive: bool = True):
+        if not isinstance(column_name, str) or not column_name:
+            raise TypeError("The 'column_name' must be a non-empty string.")
         if not isinstance(rules, dict):
             raise TypeError("The 'rules' argument must be a dictionary.")
 
-        # Validate regex
+        # Validate each regex pattern for correctness
         for pattern in rules.keys():
             try:
                 re.compile(pattern)
             except re.error as e:
                 raise ValueError(f"Invalid regex pattern '{pattern}': {e}") from e
 
+        self.column_name = column_name
         self.rules = rules
         self.case_insensitive = case_insensitive
 
-    def clean(self, series: pd.Series) -> pd.Series:
-        """
-        Applies the standardization rules sequentially to the provided Series.
-
-        Args:
-            series (pd.Series): The pandas Series to clean.
-
-        Returns:
-            pd.Series: A new Series with the regex replacements applied.
-        """
-        cleaned_series = series.astype(str)
-
-        # Set the regex flags based on the case_insensitive setting
-        flags = re.IGNORECASE if self.case_insensitive else 0
-
-        # Sequentially apply each regex rule
-        for pattern, replacement in self.rules.items():
-            cleaned_series = cleaned_series.str.replace(
-                pattern,
-                replacement,
-                regex=True,
-                flags=flags
-            )
-
-        return cleaned_series
-
 
 class DataFrameCleaner:
     """
-    Orchestrates
-
-
-
-
-    - Write separate, specific rules for each case. Don't combine patterns with an "OR".
-    - Define rules from most specific to more general to create a fallback system.
-    - Beware of chain replacements (rules matching strings that have already been changed by a previous rule).
+    Orchestrates cleaning multiple columns in a Polars DataFrame.
+
+    This class takes a list of ColumnCleaner objects and applies their defined
+    rules to the corresponding columns of a DataFrame using high-performance
+    Polars expressions.
 
     Args:
-
-        A
-
+        cleaners (List[ColumnCleaner]):
+            A list of ColumnCleaner configuration objects.
+
+    Raises:
+        TypeError: If 'cleaners' is not a list or contains non-ColumnCleaner objects.
+        ValueError: If multiple ColumnCleaner objects target the same column.
     """
-    def __init__(self,
-        if not isinstance(
-            raise TypeError("The '
-
-
-
+    def __init__(self, cleaners: List[ColumnCleaner]):
+        if not isinstance(cleaners, list):
+            raise TypeError("The 'cleaners' argument must be a list of ColumnCleaner objects.")
+
+        seen_columns = set()
+        for cleaner in cleaners:
+            if not isinstance(cleaner, ColumnCleaner):
                 raise TypeError(
-                    f"
-                    f"
+                    f"All items in 'cleaners' list must be ColumnCleaner objects, "
+                    f"but found an object of type {type(cleaner).__name__}."
                 )
-
-
-
+            if cleaner.column_name in seen_columns:
+                raise ValueError(
+                    f"Duplicate ColumnCleaner found for column '{cleaner.column_name}'. "
+                    "Each column should only have one cleaner."
+                )
+            seen_columns.add(cleaner.column_name)
 
-
+        self.cleaners = cleaners
+
+    def clean(self, df: pl.DataFrame) -> pl.DataFrame:
         """
-        Applies all defined cleaning rules to the DataFrame.
+        Applies all defined cleaning rules to the Polars DataFrame.
 
         Args:
-            df (
+            df (pl.DataFrame): The Polars DataFrame to clean.
 
         Returns:
-
+            pl.DataFrame: A new, cleaned Polars DataFrame.
+
+        Raises:
+            ValueError: If any columns specified in the cleaners are not found
+                in the input DataFrame.
         """
-        rule_columns =
+        rule_columns = {c.column_name for c in self.cleaners}
         df_columns = set(df.columns)
-
         missing_columns = rule_columns - df_columns
-
+
         if missing_columns:
-            # Report all missing columns in a single, clear error message
             raise ValueError(
-                f"The following columns specified in
+                f"The following columns specified in cleaning rules "
                 f"were not found in the DataFrame: {sorted(list(missing_columns))}"
            )
+
+        df_cleaned = df.clone()
 
-        #
-
-
-
-        #
-
-
+        # Build and apply a series of expressions for each column
+        for cleaner in self.cleaners:
+            col_name = cleaner.column_name
+
+            # Start with the column, cast to String for replacement operations
+            col_expr = pl.col(col_name).cast(pl.String)
+
+            # Sequentially chain 'replace_all' expressions for each rule
+            for pattern, replacement in cleaner.rules.items():
+                final_pattern = f"(?i){pattern}" if cleaner.case_insensitive else pattern
+                col_expr = col_expr.str.replace_all(final_pattern, replacement)
+
+            # Execute the expression chain for the column
+            df_cleaned = df_cleaned.with_columns(col_expr.alias(col_name))
+
+        print(f"Cleaned {len(self.cleaners)} columns.")
 
         return df_cleaned
 
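In 3.5.0, ColumnCleaner is therefore a pure configuration object (the pandas-based clean() method is removed) and DataFrameCleaner applies every rule through chained Polars replace_all expressions. A minimal end-to-end sketch of the new API; the column names and data here are illustrative, not from the package:

```python
import polars as pl

from ml_tools.ETL_engineering import ColumnCleaner, DataFrameCleaner

df = pl.DataFrame({
    "phone_number": ["(123) 456-7890", "(555) 867-5309"],
    "status": ["ACTIVE", "Inactive"],
})

cleaners = [
    # Polars uses '$1', '$2', ... for regex backreferences.
    ColumnCleaner(
        column_name="phone_number",
        rules={r"\((\d{3})\)\s*(\d{3})-(\d{4})": r"$1-$2-$3"},
    ),
    # Most specific pattern first, per the class Notes; matching is
    # case-insensitive by default, so 'ACTIVE' and 'Inactive' both normalize.
    ColumnCleaner(
        column_name="status",
        rules={r"\binactive\b": "inactive", r"\bactive\b": "active"},
    ),
]

cleaned = DataFrameCleaner(cleaners=cleaners).clean(df)  # prints "Cleaned 2 columns."
print(cleaned)
```

Because each column's rules fold into a single expression chain, one with_columns call per column performs all replacements inside Polars, and the input DataFrame is left untouched since clean() works on a clone.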
ml_tools/VIF_factor.py CHANGED
@@ -35,7 +35,7 @@ def compute_vif(
     Args:
         df (pd.DataFrame): The input DataFrame.
         use_columns (list[str] | None): Optional list of columns to include. Defaults to all numeric columns.
-        ignore_columns (list[str] | None): Optional list of columns to exclude from the VIF computation. Skipped if `
+        ignore_columns (list[str] | None): Optional list of columns to exclude from the VIF computation. Skipped if `use_columns` is provided.
         max_features_to_plot (int): Adjust the number of features shown in the plot.
         save_dir (str | Path | None): Directory to save the plot as SVG. If None, the plot is not saved.
         filename (str | None): Optional filename for saving the plot. Defaults to "VIF_plot.svg".
@@ -194,7 +194,7 @@ def compute_vif_multi(input_directory: Union[str, Path],
     output_plot_directory (str | Path): Save plots to this directory.
     output_dataset_directory (str | Path | None): If provided, saves new CSV files to this directory.
     use_columns (list[str] | None): Optional list of columns to include. Defaults to all numeric columns.
-    ignore_columns (list[str] | None): Optional list of columns to exclude from the VIF computation. Skipped if `
+    ignore_columns (list[str] | None): Optional list of columns to exclude from the VIF computation. Skipped if `use_columns` is provided.
     max_features_to_plot (int): Adjust the number of features shown in the plot.
     fontsize (int): Base fontsize to scale title and labels on hte plot.
 
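The restored sentence encodes a precedence rule: when use_columns is supplied, ignore_columns is skipped because the include-list already fixes the feature set. A hedged sketch of the two call styles; it assumes compute_vif takes the DataFrame as its first argument (as the docstring order suggests) and it ignores the function's return value and plot output:

```python
import pandas as pd

from ml_tools.VIF_factor import compute_vif

df = pd.DataFrame({
    "x1": [1.0, 2.0, 3.0, 4.0, 5.0],
    "x2": [2.1, 3.9, 6.2, 7.8, 10.1],  # nearly collinear with x1
    "group": [0, 0, 1, 1, 1],          # an identifier, not a feature
})

# Exclude 'group' from the VIF computation.
compute_vif(df, ignore_columns=["group"])

# 'ignore_columns' is skipped when 'use_columns' is provided:
# here 'x2' stays in the computation despite appearing in both lists.
compute_vif(df, use_columns=["x1", "x2"], ignore_columns=["x2"])
```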
ml_tools/utilities.py CHANGED
@@ -24,7 +24,8 @@ __all__ = [
     "threshold_binary_values_batch",
     "serialize_object",
     "deserialize_object",
-    "distribute_datasets_by_target"
+    "distribute_datasets_by_target",
+    "train_dataset_orchestrator"
 ]
 
 
@@ -497,7 +498,7 @@ def threshold_binary_values_batch(
     return np.hstack([cont_part, bin_part])
 
 
-def serialize_object(obj: Any, save_dir: Union[str,Path], filename: str, verbose: bool=True, raise_on_error: bool=False) ->
+def serialize_object(obj: Any, save_dir: Union[str,Path], filename: str, verbose: bool=True, raise_on_error: bool=False) -> None:
     """
     Serializes a Python object using joblib; suitable for Python built-ins, numpy, and pandas.
 
@@ -505,9 +506,6 @@ def serialize_object(obj: Any, save_dir: Union[str,Path], filename: str, verbose
         obj (Any) : The Python object to serialize.
         save_dir (str | Path) : Directory path where the serialized object will be saved.
         filename (str) : Name for the output file, extension will be appended if needed.
-
-    Returns:
-        (Path | None) : The full file path where the object was saved if successful; otherwise, None.
     """
     try:
         save_path = make_fullpath(save_dir, make=True)
@@ -526,7 +524,7 @@ def serialize_object(obj: Any, save_dir: Union[str,Path], filename: str, verbose
     else:
         if verbose:
             print(f"✅ Object of type '{type(obj)}' saved to '{full_path}'")
-        return
+        return None
 
 
 def deserialize_object(filepath: Union[str,Path], verbose: bool=True, raise_on_error: bool=True) -> Optional[Any]:
@@ -597,6 +595,54 @@ def distribute_datasets_by_target(
         yield target, subset
 
 
+def train_dataset_orchestrator(list_of_dirs: list[Union[str,Path]],
+                               target_columns: list[str],
+                               save_dir: Union[str,Path],
+                               safe_mode: bool=False):
+    """
+    Orchestrates the creation of single-target datasets from multiple directories each with a variable number of CSV datasets.
+
+    This function iterates through a list of directories, finds all CSV files,
+    and splits each dataframe based on the provided target columns. Each resulting
+    single-target dataframe is then saved to a specified directory.
+
+    Parameters
+    ----------
+    list_of_dirs : list[str | Path]
+        A list of directory paths where the source CSV files are located.
+    target_columns : list[str]
+        A list of column names to be used as targets for splitting the datasets.
+    save_dir : str | Path
+        The directory where the newly created single-target datasets will be saved.
+    safe_mode : bool
+        If True, prefixes the saved filename with the source directory name to prevent overwriting files with the same name from different sources.
+    """
+    all_dir_paths: list[Path] = list()
+    for dir in list_of_dirs:
+        dir_path = make_fullpath(dir)
+        if not dir_path.is_dir():
+            raise IOError(f"'{dir}' is not a directory.")
+        all_dir_paths.append(dir_path)
+
+    # main loop
+    total_saved = 0
+    for df_dir in all_dir_paths:
+        for df_name, df_path in list_csv_paths(df_dir).items():
+            try:
+                for target_name, df in distribute_datasets_by_target(df_or_path=df_path, target_columns=target_columns, verbose=False):
+                    if safe_mode:
+                        filename = df_dir.name + '_' + target_name + '_' + df_name
+                    else:
+                        filename = target_name + '_' + df_name
+                    save_dataframe(df=df, save_dir=save_dir, filename=filename)
+                    total_saved += 1
+            except Exception as e:
+                print(f"⚠️ Failed to process file '{df_path}'. Reason: {e}")
+                continue
+
+    print(f"{total_saved} single-target datasets were created.")
+
+
 class LogKeys:
     """
     Used for ML scripts only
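train_dataset_orchestrator is the new public helper registered in __all__. A usage sketch built from the signature and docstring above; the directory layout, file names, and target columns are hypothetical:

```python
from ml_tools.utilities import train_dataset_orchestrator

# Hypothetical layout: 'data/batch_a' and 'data/batch_b' each contain CSV
# files that include the target columns below; file names may repeat
# across the two folders.
train_dataset_orchestrator(
    list_of_dirs=["data/batch_a", "data/batch_b"],
    target_columns=["target_1", "target_2"],
    save_dir="data/single_target",
    safe_mode=True,  # prefix outputs with the source folder name to avoid collisions
)
```

With safe_mode=True each output is saved as '<folder>_<target>_<csv name>'; with the default False it is '<target>_<csv name>', which the docstring warns can overwrite files that share a name across sources.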
The remaining files (WHEEL, licenses/LICENSE, licenses/LICENSE-THIRD-PARTY.md, top_level.txt) are unchanged between 3.3.0 and 3.5.0.