dragon-ml-toolbox 4.2.2__py3-none-any.whl → 4.3.0__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
Potentially problematic release.
This version of dragon-ml-toolbox might be problematic.
- {dragon_ml_toolbox-4.2.2.dist-info → dragon_ml_toolbox-4.3.0.dist-info}/METADATA +1 -1
- {dragon_ml_toolbox-4.2.2.dist-info → dragon_ml_toolbox-4.3.0.dist-info}/RECORD +7 -7
- ml_tools/data_exploration.py +41 -28
- {dragon_ml_toolbox-4.2.2.dist-info → dragon_ml_toolbox-4.3.0.dist-info}/WHEEL +0 -0
- {dragon_ml_toolbox-4.2.2.dist-info → dragon_ml_toolbox-4.3.0.dist-info}/licenses/LICENSE +0 -0
- {dragon_ml_toolbox-4.2.2.dist-info → dragon_ml_toolbox-4.3.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
- {dragon_ml_toolbox-4.2.2.dist-info → dragon_ml_toolbox-4.3.0.dist-info}/top_level.txt +0 -0
{dragon_ml_toolbox-4.2.2.dist-info → dragon_ml_toolbox-4.3.0.dist-info}/RECORD

```diff
@@ -1,5 +1,5 @@
-dragon_ml_toolbox-4.
-dragon_ml_toolbox-4.
+dragon_ml_toolbox-4.3.0.dist-info/licenses/LICENSE,sha256=2uUFNy7D0TLgHim1K5s3DIJ4q_KvxEXVilnU20cWliY,1066
+dragon_ml_toolbox-4.3.0.dist-info/licenses/LICENSE-THIRD-PARTY.md,sha256=lY4_rJPnLnMu7YBQaY-_iz1JRDcLdQzNCyeLAF1glJY,1837
 ml_tools/ETL_engineering.py,sha256=P7HN_e3vfmrOqDDK-IenyRSFQPr0N3V9e2gN75QFVWs,39372
 ml_tools/GUI_tools.py,sha256=n4ZZ5kEjwK5rkOCFJE41HeLFfjhpJVLUSzk9Kd9Kr_0,45410
 ml_tools/MICE_imputation.py,sha256=b6ZTs8RedXFifOpuMCzr68xM16mCBVh1Ua6kcGfiVtg,11462
@@ -16,7 +16,7 @@ ml_tools/_logger.py,sha256=TpgYguxO-CWYqqgLW0tqFjtwZ58PE_W2OCfWNGZr0n0,1175
 ml_tools/_pytorch_models.py,sha256=ewPPsTHgmRPzMMWwObZOdH1vxm2Ij2VWZP38NC6zSH4,10135
 ml_tools/_script_info.py,sha256=21r83LV3RubsNZ_RTEUON6RbDf7Mh4_udweNcvdF_Fk,212
 ml_tools/custom_logger.py,sha256=a3ywSCQT7j5ypR-usnKh2l861d_aVJ93ZRVqxrHsBBw,4112
-ml_tools/data_exploration.py,sha256=
+ml_tools/data_exploration.py,sha256=T4nO9YSDGvrpom7JELtoQTyg7XTEmvQz-jG0KKxqTRk,23467
 ml_tools/datasetmaster.py,sha256=_tNC2v98eCQGr3nMW_EFs83TRgRme8Uc7ttg1vosmQU,30106
 ml_tools/ensemble_inference.py,sha256=0SNX3YAz5bpvtwYmqEwqyWeIJP2Pb-v-bemENRSO7qg,9426
 ml_tools/ensemble_learning.py,sha256=Zi1oy6G2FWnTI5hBwjlexwF3JKALFS2FN6F8HAlVi_s,35391
@@ -24,7 +24,7 @@ ml_tools/handle_excel.py,sha256=J9iwIqMZemoxK49J5osSwp9Ge0h9YTKyYGbOm53hcno,1300
 ml_tools/keys.py,sha256=kK9UF-hek2VcPGFILCKl5geoN6flmMOu7IzhdEA6z5Y,1068
 ml_tools/path_manager.py,sha256=Z8e7w3MPqQaN8xmTnKuXZS6CIW59BFwwqGhGc00sdp4,13692
 ml_tools/utilities.py,sha256=mz-M351DzxWxnYVcLX-7ZQ6c-RGoCV9g4VTS9Qif2Es,18348
-dragon_ml_toolbox-4.
-dragon_ml_toolbox-4.
-dragon_ml_toolbox-4.
-dragon_ml_toolbox-4.
+dragon_ml_toolbox-4.3.0.dist-info/METADATA,sha256=7aZO_5P8SDx4tPFTtb3MTAaRgf_vbcOEURaxpT3MGK8,6572
+dragon_ml_toolbox-4.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dragon_ml_toolbox-4.3.0.dist-info/top_level.txt,sha256=wm-oxax3ciyez6VoO4zsFd-gSok2VipYXnbg3TH9PtU,9
+dragon_ml_toolbox-4.3.0.dist-info/RECORD,,
```
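Each RECORD line has the form `path,sha256=<hash>,<size>`, where the hash is the URL-safe base64 encoding of the file's SHA-256 digest with the trailing `=` padding stripped. A minimal sketch of how an entry such as the new `ml_tools/data_exploration.py` line can be re-checked against an unpacked wheel (the `unpacked/` directory is a hypothetical path, not part of this diff):

```python
import base64
import hashlib
from pathlib import Path

def record_hash(path: Path) -> str:
    """URL-safe base64 SHA-256 digest without '=' padding, as used in RECORD."""
    digest = hashlib.sha256(path.read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Assumes the 4.3.0 wheel has been unpacked into ./unpacked/ (hypothetical location).
target = Path("unpacked/ml_tools/data_exploration.py")
ok = (record_hash(target) == "T4nO9YSDGvrpom7JELtoQTyg7XTEmvQz-jG0KKxqTRk"
      and target.stat().st_size == 23467)
print("RECORD entry matches" if ok else "RECORD entry mismatch")
```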
ml_tools/data_exploration.py CHANGED
```diff
@@ -15,9 +15,9 @@ __all__ = [
     "summarize_dataframe",
     "drop_constant_columns",
     "drop_rows_with_missing_data",
-    "split_features_targets",
     "show_null_columns",
     "drop_columns_with_missing_data",
+    "split_features_targets",
     "split_continuous_binary",
     "plot_correlation_heatmap",
     "plot_value_distributions",
```
```diff
@@ -125,7 +125,9 @@ def drop_rows_with_missing_data(df: pd.DataFrame, targets: Optional[list[str]],
 
     # Stage 1: Drop rows with all target columns missing
     if targets is not None:
-
+        # validate targets
+        valid_targets = [target for target in targets if target in df_clean.columns]
+        target_na = df_clean[valid_targets].isnull().all(axis=1)
         if target_na.any():
             print(f"🧹 Dropping {target_na.sum()} rows with all target columns missing.")
             df_clean = df_clean[~target_na]
```
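The new lines above first filter the requested `targets` down to names that actually exist as columns, and only then flag rows where every remaining target is null. A standalone sketch of the same idiom in plain pandas (the toy DataFrame and column names are invented for illustration):

```python
import pandas as pd

df_clean = pd.DataFrame({
    "feature": [1.0, 2.0, 3.0],
    "y1": [None, 5.0, None],
    "y2": [None, None, 7.0],
})
targets = ["y1", "y2", "y_missing"]  # "y_missing" is not a real column

# validate targets: keep only names that exist in the DataFrame
valid_targets = [target for target in targets if target in df_clean.columns]

# flag rows where every valid target is missing, then drop them
target_na = df_clean[valid_targets].isnull().all(axis=1)
if target_na.any():
    print(f"Dropping {target_na.sum()} rows with all target columns missing.")
    df_clean = df_clean[~target_na]
```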
```diff
@@ -150,30 +152,6 @@ def drop_rows_with_missing_data(df: pd.DataFrame, targets: Optional[list[str]],
     return df_clean
 
 
-def split_features_targets(df: pd.DataFrame, targets: list[str]):
-    """
-    Splits a DataFrame's columns into features and targets.
-
-    Args:
-        df (pd.DataFrame): Pandas DataFrame containing the dataset.
-        targets (list[str]): List of column names to be treated as target variables.
-
-    Returns:
-        tuple: A tuple containing:
-            - pd.DataFrame: Features dataframe.
-            - pd.DataFrame: Targets dataframe.
-
-    Prints:
-        - Shape of the original dataframe.
-        - Shape of the features dataframe.
-        - Shape of the targets dataframe.
-    """
-    df_targets = df[targets]
-    df_features = df.drop(columns=targets)
-    print(f"Original shape: {df.shape}\nFeatures shape: {df_features.shape}\nTargets shape: {df_targets.shape}")
-    return df_features, df_targets
-
-
 def show_null_columns(df: pd.DataFrame, round_digits: int = 2):
     """
     Displays a table of columns with missing values, showing both the count and
```
```diff
@@ -202,7 +180,7 @@ def show_null_columns(df: pd.DataFrame, round_digits: int = 2):
     return null_summary
 
 
-def drop_columns_with_missing_data(df: pd.DataFrame, threshold: float = 0.7, show_nulls_after: bool = True) -> pd.DataFrame:
+def drop_columns_with_missing_data(df: pd.DataFrame, threshold: float = 0.7, show_nulls_after: bool = True, skip_columns: Optional[List[str]]=None) -> pd.DataFrame:
     """
     Drops columns with more than `threshold` fraction of missing values.
 
@@ -210,11 +188,22 @@ def drop_columns_with_missing_data(df: pd.DataFrame, threshold: float = 0.7, sho
         df (pd.DataFrame): The input DataFrame.
         threshold (float): Fraction of missing values above which columns are dropped.
         show_nulls_after (bool): Prints `show_null_columns` after dropping columns.
+        skip_columns (list[str] | None): If given, these columns wont be included in the drop process.
 
     Returns:
         pd.DataFrame: A new DataFrame without the dropped columns.
     """
-
+    # If skip_columns is provided, create a list of columns to check.
+    # Otherwise, check all columns.
+    cols_to_check = df.columns
+    if skip_columns:
+        # Use set difference for efficient exclusion
+        cols_to_check = df.columns.difference(skip_columns)
+
+    # Calculate the missing fraction only on the columns to be checked
+    missing_fraction = df[cols_to_check].isnull().mean()
+
+
     cols_to_drop = missing_fraction[missing_fraction > threshold].index
 
     if len(cols_to_drop) > 0:
```
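Based on the signature and body added above, `skip_columns` exempts the listed columns from the missing-value threshold, so a sparse but important column (an identifier, say) is kept. A hedged usage sketch, assuming dragon-ml-toolbox 4.3.0 is installed; the DataFrame and column names are invented:

```python
import pandas as pd
from ml_tools.data_exploration import drop_columns_with_missing_data

df = pd.DataFrame({
    "id": [1.0, None, None, None, None],           # 80% missing, but protected below
    "value": [0.1, 0.2, 0.3, 0.4, 0.5],            # fully populated
    "sparse_note": [None, None, None, None, 9.9],  # 80% missing, not protected
})

# "sparse_note" exceeds the 0.7 threshold and is dropped;
# "id" exceeds it too but is listed in skip_columns, so it survives.
df_kept = drop_columns_with_missing_data(df, threshold=0.7, skip_columns=["id"])
print(df_kept.columns.tolist())  # expected: ['id', 'value']
```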
```diff
@@ -231,6 +220,30 @@
     return df
 
 
+def split_features_targets(df: pd.DataFrame, targets: list[str]):
+    """
+    Splits a DataFrame's columns into features and targets.
+
+    Args:
+        df (pd.DataFrame): Pandas DataFrame containing the dataset.
+        targets (list[str]): List of column names to be treated as target variables.
+
+    Returns:
+        tuple: A tuple containing:
+            - pd.DataFrame: Features dataframe.
+            - pd.DataFrame: Targets dataframe.
+
+    Prints:
+        - Shape of the original dataframe.
+        - Shape of the features dataframe.
+        - Shape of the targets dataframe.
+    """
+    df_targets = df[targets]
+    df_features = df.drop(columns=targets)
+    print(f"Original shape: {df.shape}\nFeatures shape: {df_features.shape}\nTargets shape: {df_targets.shape}")
+    return df_features, df_targets
+
+
 def split_continuous_binary(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
     """
     Split DataFrame into two DataFrames: one with continuous columns, one with binary columns.
```
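The relocated `split_features_targets` is unchanged in behavior: it selects the target columns by name, drops them from the feature frame, prints the three shapes, and returns `(features, targets)`. A short usage sketch under the same installed-package assumption, with invented data:

```python
import pandas as pd
from ml_tools.data_exploration import split_features_targets

df = pd.DataFrame({
    "x1": [1, 2, 3],
    "x2": [4, 5, 6],
    "y":  [0, 1, 0],
})

df_features, df_targets = split_features_targets(df, targets=["y"])
# Prints the original (3, 3), features (3, 2) and targets (3, 1) shapes.
```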
Files without changes: {dragon_ml_toolbox-4.2.2.dist-info → dragon_ml_toolbox-4.3.0.dist-info}/WHEEL, licenses/LICENSE, licenses/LICENSE-THIRD-PARTY.md, top_level.txt.