dragon-ml-toolbox 20.1.1__py3-none-any.whl → 20.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/dragon_ml_toolbox-20.1.1.dist-info/METADATA
+++ b/dragon_ml_toolbox-20.2.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dragon-ml-toolbox
-Version: 20.1.1
+Version: 20.2.0
 Summary: Complete pipelines and helper tools for data science and machine learning projects.
 Author-email: Karl Luigi Loza Vidaurre <luigiloza@gmail.com>
 License-Expression: MIT
--- a/dragon_ml_toolbox-20.1.1.dist-info/RECORD
+++ b/dragon_ml_toolbox-20.2.0.dist-info/RECORD
@@ -1,5 +1,5 @@
-dragon_ml_toolbox-20.1.1.dist-info/licenses/LICENSE,sha256=L35WDmmLZNTlJvxF6Vy7Uy4SYNi6rCfWUqlTHpoRMoU,1081
-dragon_ml_toolbox-20.1.1.dist-info/licenses/LICENSE-THIRD-PARTY.md,sha256=0-HBRMMgKuwtGy6nMJZvIn1fLxhx_ksyyVB2U_iyYZU,2818
+dragon_ml_toolbox-20.2.0.dist-info/licenses/LICENSE,sha256=L35WDmmLZNTlJvxF6Vy7Uy4SYNi6rCfWUqlTHpoRMoU,1081
+dragon_ml_toolbox-20.2.0.dist-info/licenses/LICENSE-THIRD-PARTY.md,sha256=0-HBRMMgKuwtGy6nMJZvIn1fLxhx_ksyyVB2U_iyYZU,2818
 ml_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ml_tools/constants.py,sha256=3br5Rk9cL2IUo638eJuMOGdbGQaWssaUecYEvSeRBLM,3322
 ml_tools/ETL_cleaning/__init__.py,sha256=TytE8RKmtW4KQlkaTxpYKlJAbCu-VAc82eDdHwVD3Jo,427
@@ -42,7 +42,7 @@ ml_tools/ML_configuration/_metrics.py,sha256=PqBGPO1Y_6ImmYI3TEBJhzipULE854vbvE0
 ml_tools/ML_configuration/_models.py,sha256=lvuuqvD6DWUzOa3i06NZfrdfOi9bu2e26T_QO6BGMSw,7629
 ml_tools/ML_configuration/_training.py,sha256=W-x2J0tqBIVqEkmjfvu7frIuK98JB6aUfG8IZBzdECI,5949
 ml_tools/ML_datasetmaster/__init__.py,sha256=30QTwpZBwZRg5M6CNSjCLNc0vnKFLw0LA7u9Wg0DGOg,517
-ml_tools/ML_datasetmaster/_base_datasetmaster.py,sha256=bwP8_dcpQ-6LvW6ijU5jJCubhI2GrdxavietDC7HmR0,13948
+ml_tools/ML_datasetmaster/_base_datasetmaster.py,sha256=c0oXlZb6LrJb11BGvBKXIjQYmEuWdALAdcOxbHAe8Uw,14330
 ml_tools/ML_datasetmaster/_datasetmaster.py,sha256=9B8NdasdeFNMvEMCt5ceq2xRJrB7g6JkgD3KDYFYAJE,18383
 ml_tools/ML_datasetmaster/_imprimir.py,sha256=K-EkqibbpTRWMFJevYbGkjr3KrVkFXuQe4dnlKWylxI,305
 ml_tools/ML_datasetmaster/_sequence_datasetmaster.py,sha256=srzC9BbBpHJGmbUem1AAQF9XXHn7EUJljPz69O2eUUA,17788
@@ -95,7 +95,7 @@ ml_tools/ML_optimization/_imprimir.py,sha256=bCxJyUU-Kxc-duKo3kbZD7j7fjaQ51F0ket
 ml_tools/ML_optimization/_multi_dragon.py,sha256=kfMr252VSTeTakXf6ISFQFPtw81k8OTkbB9ctqeg71M,37456
 ml_tools/ML_optimization/_single_dragon.py,sha256=9OvqMi-HC8Ek1ATUsX2C7tt8ssrW_VSpeFnq2Ij0-bY,8542
 ml_tools/ML_optimization/_single_manual.py,sha256=vh3_rXiqdwLg1SPLKR1HuJ_njYF3wfMADdRuxJns07w,21798
-ml_tools/ML_scaler/_ML_scaler.py,sha256=8Hr_qxo4MJb08N2q3_8ca62WjjgWRO4ScuWv4LM391E,8086
+ml_tools/ML_scaler/_ML_scaler.py,sha256=8L8gTSNLAcOHNJ1y0jva_H3R4YcSLqGOj9J2LfSV2IM,8623
 ml_tools/ML_scaler/__init__.py,sha256=s5L7FD1Sbg8VwdyM8Ef722vHX_a1TNVfgCAIUC40D5I,109
 ml_tools/ML_scaler/_imprimir.py,sha256=QAVN_fR5g25SmYX-XBIVi_A9XA4E36yJFy7tfzFaut8,124
 ml_tools/ML_trainer/__init__.py,sha256=f3vR157OplYFom4U4X_ciULcGKvEYkwVZFN66So0kEg,316
@@ -173,7 +173,7 @@ ml_tools/utilities/__init__.py,sha256=pkR2HxUIlKZMDderP2awYXVIFxkU2Xt3FkJmcmuRIp
 ml_tools/utilities/_imprimir.py,sha256=sV3ASBOsTdVYvGojOTIpZYFyrnd4panS5h_4HcMzob4,432
 ml_tools/utilities/_utility_save_load.py,sha256=7skiiuYGVLVMK_nU9uLfUZw16ePvF3i9ub7G7LMyUgs,16085
 ml_tools/utilities/_utility_tools.py,sha256=bN0J9d1S0W5wNzNntBWqDsJcEAK7-1OgQg3X2fwXns0,6918
-dragon_ml_toolbox-20.1.1.dist-info/METADATA,sha256=075Nmdtqm7RtKasIDpEoL0U5NFx9pdrSfjg_K10Xzf4,7866
-dragon_ml_toolbox-20.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dragon_ml_toolbox-20.1.1.dist-info/top_level.txt,sha256=wm-oxax3ciyez6VoO4zsFd-gSok2VipYXnbg3TH9PtU,9
-dragon_ml_toolbox-20.1.1.dist-info/RECORD,,
+dragon_ml_toolbox-20.2.0.dist-info/METADATA,sha256=UIHNTVykZBpohUtCdzsiPd33ilhKQDLSKNvURIWmrh0,7866
+dragon_ml_toolbox-20.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dragon_ml_toolbox-20.2.0.dist-info/top_level.txt,sha256=wm-oxax3ciyez6VoO4zsFd-gSok2VipYXnbg3TH9PtU,9
+dragon_ml_toolbox-20.2.0.dist-info/RECORD,,
--- a/ml_tools/ML_datasetmaster/_base_datasetmaster.py
+++ b/ml_tools/ML_datasetmaster/_base_datasetmaster.py
@@ -172,6 +172,12 @@ class _BaseDatasetMaker(ABC):
         y_train_arr = y_train.to_numpy() if isinstance(y_train, (pandas.Series, pandas.DataFrame)) else y_train
         y_val_arr = y_val.to_numpy() if isinstance(y_val, (pandas.Series, pandas.DataFrame)) else y_val
         y_test_arr = y_test.to_numpy() if isinstance(y_test, (pandas.Series, pandas.DataFrame)) else y_test
+
+        # --- Ensure targets are 2D (N, 1) if they are currently 1D (N,) ---
+        if y_train_arr.ndim == 1: y_train_arr = y_train_arr.reshape(-1, 1)
+        if y_val_arr.ndim == 1: y_val_arr = y_val_arr.reshape(-1, 1)
+        if y_test_arr.ndim == 1: y_test_arr = y_test_arr.reshape(-1, 1)
+        # ------------------------------------------------------------------
 
         if self.target_scaler is None:
             _LOGGER.info("Fitting a new DragonScaler on training targets.")
--- a/ml_tools/ML_scaler/_ML_scaler.py
+++ b/ml_tools/ML_scaler/_ML_scaler.py
@@ -127,9 +127,14 @@ class DragonScaler:
         if self.mean_ is None or self.std_ is None or self.continuous_feature_indices is None:
             # If not fitted, return as is
             return data
-
+
         data_clone = data.clone()
 
+        # Robust check: If data is 1D, assume it's a single feature/target column and reshape it to (N, 1) for the operation, then reshape back.
+        input_is_1d = (data_clone.ndim == 1)
+        if input_is_1d:
+            data_clone = data_clone.view(-1, 1)
+
         # Ensure mean and std are on the same device as the data
         mean = self.mean_.to(data.device)
         std = self.std_.to(data.device)
@@ -143,6 +148,9 @@ class DragonScaler:
         # Place the scaled features back into the cloned tensor
         data_clone[:, self.continuous_feature_indices] = scaled_features
 
+        if input_is_1d:
+            return data_clone.view(-1)
+
         return data_clone
 
     def inverse_transform(self, data: torch.Tensor) -> torch.Tensor:
@@ -151,9 +159,13 @@ class DragonScaler:
         """
         if self.mean_ is None or self.std_ is None or self.continuous_feature_indices is None:
             return data
-
+
         data_clone = data.clone()
 
+        input_is_1d = (data_clone.ndim == 1)
+        if input_is_1d:
+            data_clone = data_clone.view(-1, 1)
+
         mean = self.mean_.to(data.device)
         std = self.std_.to(data.device)
 
@@ -164,6 +176,9 @@ class DragonScaler:
 
         data_clone[:, self.continuous_feature_indices] = original_scale_features
 
+        if input_is_1d:
+            return data_clone.view(-1)
+
         return data_clone
 
     def _get_state(self):
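In summary, the DragonScaler hunks above make transform and inverse_transform accept 1D tensors by temporarily viewing them as a single (N, 1) column, scaling the selected columns, and restoring the original shape on return. Below is a minimal standalone sketch of that round-trip pattern; the helper name scale_columns, the plain (x - mean) / std formula, and the example values are assumptions for illustration, not the package's API:

import torch

def scale_columns(data: torch.Tensor, mean: torch.Tensor, std: torch.Tensor,
                  continuous_idx: list) -> torch.Tensor:
    """Sketch of the 1D-aware column scaling pattern used in the hunks above."""
    data_clone = data.clone()

    # A 1D input (e.g. a target vector) is viewed as a single (N, 1) column so the
    # column indexing below works; the original 1D shape is restored on return.
    input_is_1d = (data_clone.ndim == 1)
    if input_is_1d:
        data_clone = data_clone.view(-1, 1)

    cols = data_clone[:, continuous_idx]
    data_clone[:, continuous_idx] = (cols - mean) / std

    return data_clone.view(-1) if input_is_1d else data_clone

# Round-trip shape check on a plain 1D target vector.
y = torch.tensor([1.0, 2.0, 3.0, 4.0])
scaled = scale_columns(y, mean=torch.tensor([2.5]), std=torch.tensor([1.0]), continuous_idx=[0])
assert scaled.shape == y.shape  # still (4,)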