py2ls 0.2.4.7__py3-none-any.whl → 0.2.4.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
py2ls/ml2ls.py CHANGED
@@ -1,33 +1,59 @@
1
- from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier,BaggingClassifier
2
- from sklearn.svm import SVC
1
+ from sklearn.ensemble import (
2
+ RandomForestClassifier,
3
+ GradientBoostingClassifier,
4
+ AdaBoostClassifier,
5
+ BaggingClassifier,
6
+ )
7
+ from sklearn.svm import SVC,SVR
3
8
  from sklearn.calibration import CalibratedClassifierCV
4
- from sklearn.model_selection import GridSearchCV,StratifiedKFold
5
- from sklearn.linear_model import LassoCV, LogisticRegression, Lasso, Ridge,RidgeClassifierCV, ElasticNet
9
+ from sklearn.model_selection import GridSearchCV, StratifiedKFold
10
+ from sklearn.linear_model import (
11
+ LassoCV,
12
+ LogisticRegression,LinearRegression,
13
+ Lasso,
14
+ Ridge,
15
+ RidgeClassifierCV,
16
+ ElasticNet,
17
+ )
6
18
  from sklearn.feature_selection import RFE
7
19
  from sklearn.naive_bayes import GaussianNB
8
20
  from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
9
21
  import xgboost as xgb # Make sure you have xgboost installed
10
22
 
11
23
  from sklearn.model_selection import train_test_split, cross_val_score
12
- from sklearn.metrics import (accuracy_score, precision_score, recall_score,
13
- f1_score, roc_auc_score, confusion_matrix,
14
- matthews_corrcoef,roc_curve,auc,
15
- balanced_accuracy_score,precision_recall_curve,average_precision_score)
24
+ from sklearn.metrics import (
25
+ accuracy_score,
26
+ precision_score,
27
+ recall_score,
28
+ f1_score,
29
+ roc_auc_score,
30
+ confusion_matrix,
31
+ matthews_corrcoef,
32
+ roc_curve,
33
+ auc,
34
+ balanced_accuracy_score,
35
+ precision_recall_curve,
36
+ average_precision_score,
37
+ )
16
38
  from imblearn.over_sampling import SMOTE
17
39
  from sklearn.pipeline import Pipeline
18
40
  from collections import defaultdict
19
- from sklearn.preprocessing import StandardScaler
20
- from typing import Dict, Any, Optional,List
41
+ from sklearn.preprocessing import StandardScaler, OneHotEncoder
42
+ from typing import Dict, Any, Optional, List, Union
21
43
  import numpy as np
22
44
  import pandas as pd
23
- from . import ips
45
+ from . import ips
24
46
  from . import plot
25
47
  import matplotlib.pyplot as plt
26
48
  import seaborn as sns
27
- plt.style.use("paper")
49
+
50
+ plt.style.use("paper")
28
51
  import logging
29
52
  import warnings
30
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
53
+
54
+ logging.basicConfig(
55
+ level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
56
+ )
31
57
  logger = logging.getLogger()
32
58
 
33
59
  # Ignore specific warnings (UserWarning in this case)
@@ -35,7 +61,10 @@ warnings.filterwarnings("ignore", category=UserWarning)
35
61
  from sklearn.tree import DecisionTreeClassifier
36
62
  from sklearn.neighbors import KNeighborsClassifier
37
63
 
38
- def features_knn(X_train: pd.DataFrame, y_train: pd.Series, knn_params: dict) -> pd.DataFrame:
64
+
65
+ def features_knn(
66
+ x_train: pd.DataFrame, y_train: pd.Series, knn_params: dict
67
+ ) -> pd.DataFrame:
39
68
  """
40
69
  A distance-based classifier that assigns labels based on the majority label of nearest neighbors.
41
70
  when to use:
@@ -46,76 +75,99 @@ def features_knn(X_train: pd.DataFrame, y_train: pd.Series, knn_params: dict) ->
46
75
  Fits KNeighborsClassifier and approximates feature influence using permutation importance.
47
76
  """
48
77
  knn = KNeighborsClassifier(**knn_params)
49
- knn.fit(X_train, y_train)
50
- importances = permutation_importance(knn, X_train, y_train, n_repeats=30, random_state=1, scoring="accuracy")
51
- return pd.DataFrame({"feature": X_train.columns, "importance": importances.importances_mean}).sort_values(by="importance", ascending=False)
78
+ knn.fit(x_train, y_train)
79
+ importances = permutation_importance(
80
+ knn, x_train, y_train, n_repeats=30, random_state=1, scoring="accuracy"
81
+ )
82
+ return pd.DataFrame(
83
+ {"feature": x_train.columns, "importance": importances.importances_mean}
84
+ ).sort_values(by="importance", ascending=False)
85
+
52
86
 
53
87
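For context, a minimal sketch of the permutation-importance pattern that features_knn wraps: fit a KNeighborsClassifier, then score each feature by how much shuffling it hurts accuracy. The synthetic data and parameter values below are illustrative assumptions, not part of the package; permutation_importance comes from sklearn.inspection.

import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.inspection import permutation_importance
from sklearn.neighbors import KNeighborsClassifier

# Synthetic stand-in for an expression matrix (samples x genes).
X, y = make_classification(n_samples=200, n_features=8, random_state=1)
x_train = pd.DataFrame(X, columns=[f"gene_{i}" for i in range(8)])

knn = KNeighborsClassifier(n_neighbors=5).fit(x_train, y)
imp = permutation_importance(knn, x_train, y, n_repeats=10, random_state=1, scoring="accuracy")
ranking = pd.DataFrame(
    {"feature": x_train.columns, "importance": imp.importances_mean}
).sort_values("importance", ascending=False)
print(ranking.head())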
  #! 1. Linear and Regularized Regression Methods
54
88
  # 1.1 Lasso
55
- def features_lasso(X_train: pd.DataFrame, y_train: pd.Series, lasso_params: dict) -> np.ndarray:
89
+ def features_lasso(
90
+ x_train: pd.DataFrame, y_train: pd.Series, lasso_params: dict
91
+ ) -> np.ndarray:
56
92
  """
57
- Lasso (Least Absolute Shrinkage and Selection Operator):
58
- A regularized linear regression method that uses L1 penalty to shrink coefficients, effectively
93
+ Lasso (Least Absolute Shrinkage and Selection Operator):
94
+ A regularized linear regression method that uses L1 penalty to shrink coefficients, effectively
59
95
  performing feature selection by zeroing out less important ones.
60
96
  """
61
97
  lasso = LassoCV(**lasso_params)
62
- lasso.fit(X_train, y_train)
98
+ lasso.fit(x_train, y_train)
63
99
  # Get non-zero coefficients and their corresponding features
64
100
  coefficients = lasso.coef_
65
- importance_df = pd.DataFrame({
66
- "feature": X_train.columns,
67
- "importance": np.abs(coefficients)
68
- })
69
- return importance_df[importance_df["importance"] > 0].sort_values(by="importance", ascending=False)
101
+ importance_df = pd.DataFrame(
102
+ {"feature": x_train.columns, "importance": np.abs(coefficients)}
103
+ )
104
+ return importance_df[importance_df["importance"] > 0].sort_values(
105
+ by="importance", ascending=False
106
+ )
107
+
70
108
 
71
109
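A hedged sketch of the LassoCV pattern used above: the L1 penalty drives most coefficients to zero, and only features with non-zero |coef_| survive. The synthetic data and alpha grid are assumptions for illustration only.

import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.linear_model import LassoCV

X, y = make_classification(n_samples=150, n_features=20, n_informative=5, random_state=1)
x_train = pd.DataFrame(X, columns=[f"f{i}" for i in range(20)])

# LassoCV picks alpha by cross-validation; y is a 0/1 label treated as a regression
# target here, mirroring how features_lasso is called above.
lasso = LassoCV(alphas=np.logspace(-4, 4, 100), cv=10).fit(x_train, y)
importance_df = pd.DataFrame({"feature": x_train.columns, "importance": np.abs(lasso.coef_)})
print(importance_df[importance_df["importance"] > 0].sort_values("importance", ascending=False))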
  # 1.2 Ridge regression
72
- def features_ridge(X_train: pd.DataFrame, y_train: pd.Series, ridge_params: dict) -> np.ndarray:
110
+ def features_ridge(
111
+ x_train: pd.DataFrame, y_train: pd.Series, ridge_params: dict
112
+ ) -> np.ndarray:
73
113
  """
74
- Ridge Regression: A linear regression technique that applies L2 regularization, reducing coefficient
114
+ Ridge Regression: A linear regression technique that applies L2 regularization, reducing coefficient
75
115
  magnitudes to avoid overfitting, especially with multicollinearity among features.
76
116
  """
77
117
  from sklearn.linear_model import RidgeCV
118
+
78
119
  ridge = RidgeCV(**ridge_params)
79
- ridge.fit(X_train, y_train)
80
-
120
+ ridge.fit(x_train, y_train)
121
+
81
122
  # Get the coefficients
82
123
  coefficients = ridge.coef_
83
-
124
+
84
125
  # Create a DataFrame to hold feature importance
85
- importance_df = pd.DataFrame({
86
- "feature": X_train.columns,
87
- "importance": np.abs(coefficients)
88
- })
89
- return importance_df[importance_df["importance"] > 0].sort_values(by="importance", ascending=False)
126
+ importance_df = pd.DataFrame(
127
+ {"feature": x_train.columns, "importance": np.abs(coefficients)}
128
+ )
129
+ return importance_df[importance_df["importance"] > 0].sort_values(
130
+ by="importance", ascending=False
131
+ )
132
+
90
133
 
91
134
  # 1.3 Elastic Net(Enet)
92
- def features_enet(X_train: pd.DataFrame, y_train: pd.Series, enet_params: dict) -> np.ndarray:
135
+ def features_enet(
136
+ x_train: pd.DataFrame, y_train: pd.Series, enet_params: dict
137
+ ) -> np.ndarray:
93
138
  """
94
- Elastic Net (Enet): Combines L1 and L2 penalties (lasso and ridge) in a linear model, beneficial
139
+ Elastic Net (Enet): Combines L1 and L2 penalties (lasso and ridge) in a linear model, beneficial
95
140
  when features are highly correlated or for datasets with more features than samples.
96
141
  """
97
142
  from sklearn.linear_model import ElasticNetCV
143
+
98
144
  enet = ElasticNetCV(**enet_params)
99
- enet.fit(X_train, y_train)
145
+ enet.fit(x_train, y_train)
100
146
  # Get the coefficients
101
147
  coefficients = enet.coef_
102
148
  # Create a DataFrame to hold feature importance
103
- importance_df = pd.DataFrame({
104
- "feature": X_train.columns,
105
- "importance": np.abs(coefficients)
106
- })
107
- return importance_df[importance_df["importance"] > 0].sort_values(by="importance", ascending=False)
108
- # 1.4 Partial Least Squares Regression for Generalized Linear Models (plsRglm): Combines regression and
149
+ importance_df = pd.DataFrame(
150
+ {"feature": x_train.columns, "importance": np.abs(coefficients)}
151
+ )
152
+ return importance_df[importance_df["importance"] > 0].sort_values(
153
+ by="importance", ascending=False
154
+ )
155
+
156
+
157
+ # 1.4 Partial Least Squares Regression for Generalized Linear Models (plsRglm): Combines regression and
109
158
  # feature reduction, useful for high-dimensional data with correlated features, such as genomics.
110
159
 
111
160
  #! 2.Generalized Linear Models and Extensions
112
- # 2.1
161
+ # 2.1
162
+
113
163
 
114
164
  #!3.Tree-Based and Ensemble Methods
115
165
  # 3.1 Random Forest(RF)
116
- def features_rf(X_train: pd.DataFrame, y_train: pd.Series, rf_params: dict) -> np.ndarray:
166
+ def features_rf(
167
+ x_train: pd.DataFrame, y_train: pd.Series, rf_params: dict
168
+ ) -> np.ndarray:
117
169
  """
118
- An ensemble of decision trees that combines predictions from multiple trees for classification or
170
+ An ensemble of decision trees that combines predictions from multiple trees for classification or
119
171
  regression, effective with high-dimensional, complex datasets.
120
172
  when to use:
121
173
  Handles high-dimensional data well.
@@ -125,36 +177,55 @@ def features_rf(X_train: pd.DataFrame, y_train: pd.Series, rf_params: dict) -> n
125
177
  Recommended Use: Great for classification problems, especially when you have many features (genes).
126
178
  """
127
179
  rf = RandomForestClassifier(**rf_params)
128
- rf.fit(X_train, y_train)
129
- return pd.DataFrame({"feature": X_train.columns, "importance": rf.featuress_}).sort_values(by="importance", ascending=False)
180
+ rf.fit(x_train, y_train)
181
+ return pd.DataFrame(
182
+ {"feature": x_train.columns, "importance": rf.featuress_}
183
+ ).sort_values(by="importance", ascending=False)
184
+
185
+
130
186
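A hedged sketch of the random-forest ranking above, using the standard feature_importances_ attribute on synthetic data (names and sizes are illustrative assumptions).

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=300, n_features=12, n_informative=4, random_state=1)
x_train = pd.DataFrame(X, columns=[f"f{i}" for i in range(12)])

rf = RandomForestClassifier(n_estimators=100, random_state=1).fit(x_train, y)
ranking = pd.DataFrame(
    {"feature": x_train.columns, "importance": rf.feature_importances_}  # impurity-based importances
).sort_values("importance", ascending=False)
print(ranking.head())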
  # 3.2 Gradient Boosting Trees
131
- def features_gradient_boosting(X_train: pd.DataFrame, y_train: pd.Series, gb_params: dict) -> pd.DataFrame:
187
+ def features_gradient_boosting(
188
+ x_train: pd.DataFrame, y_train: pd.Series, gb_params: dict
189
+ ) -> pd.DataFrame:
132
190
  """
133
- An ensemble of decision trees that combines predictions from multiple trees for classification or regression, effective with
191
+ An ensemble of decision trees that combines predictions from multiple trees for classification or regression, effective with
134
192
  high-dimensional, complex datasets.
135
193
  Gradient Boosting
136
194
  Strengths:
137
195
  High predictive accuracy and works well for both classification and regression.
138
196
  Can handle a mixture of numerical and categorical features.
139
- Recommended Use:
197
+ Recommended Use:
140
198
  Effective for complex relationships and when you need a powerful predictive model.
141
199
  Fit Gradient Boosting classifier and return sorted feature importances.
142
200
  Recommended Use: Effective for complex datasets with many features (genes).
143
201
  """
144
202
  gb = GradientBoostingClassifier(**gb_params)
145
- gb.fit(X_train, y_train)
146
- return pd.DataFrame({"feature": X_train.columns, "importance": gb.feature_importances_}).sort_values(by="importance", ascending=False)
203
+ gb.fit(x_train, y_train)
204
+ return pd.DataFrame(
205
+ {"feature": x_train.columns, "importance": gb.feature_importances_}
206
+ ).sort_values(by="importance", ascending=False)
207
+
208
+
147
209
  # 3.3 XGBoost
148
- def features_xgb(X_train: pd.DataFrame, y_train: pd.Series, xgb_params: dict) -> pd.DataFrame:
210
+ def features_xgb(
211
+ x_train: pd.DataFrame, y_train: pd.Series, xgb_params: dict
212
+ ) -> pd.DataFrame:
149
213
  """
150
214
  XGBoost: An advanced gradient boosting technique, faster and more efficient than GBM, with excellent predictive performance on structured data.
151
215
  """
152
216
  import xgboost as xgb
217
+
153
218
  xgb_model = xgb.XGBClassifier(**xgb_params)
154
- xgb_model.fit(X_train, y_train)
155
- return pd.DataFrame({"feature": X_train.columns, "importance": xgb_model.feature_importances_}).sort_values(by="importance", ascending=False)
219
+ xgb_model.fit(x_train, y_train)
220
+ return pd.DataFrame(
221
+ {"feature": x_train.columns, "importance": xgb_model.feature_importances_}
222
+ ).sort_values(by="importance", ascending=False)
223
+
224
+
156
225
  # 3.4.decision tree
157
- def features_decision_tree(X_train: pd.DataFrame, y_train: pd.Series, dt_params: dict) -> pd.DataFrame:
226
+ def features_decision_tree(
227
+ x_train: pd.DataFrame, y_train: pd.Series, dt_params: dict
228
+ ) -> pd.DataFrame:
158
229
  """
159
230
  A single decision tree classifier effective for identifying key decision boundaries in data.
160
231
  when to use:
@@ -162,58 +233,76 @@ def features_decision_tree(X_train: pd.DataFrame, y_train: pd.Series, dt_params:
162
233
  Provides feature importance scores for each feature, though it may overfit on small datasets.
163
234
  Efficient for low to medium-sized datasets, where interpretability of decisions is key.
164
235
  Recommended Use: Useful for interpretable feature importance analysis in smaller or balanced datasets.
165
-
236
+
166
237
  Fits DecisionTreeClassifier and returns sorted feature importances.
167
238
  """
168
239
  dt = DecisionTreeClassifier(**dt_params)
169
- dt.fit(X_train, y_train)
170
- return pd.DataFrame({"feature": X_train.columns, "importance": dt.feature_importances_}).sort_values(by="importance", ascending=False)
240
+ dt.fit(x_train, y_train)
241
+ return pd.DataFrame(
242
+ {"feature": x_train.columns, "importance": dt.feature_importances_}
243
+ ).sort_values(by="importance", ascending=False)
244
+
245
+
171
246
  # 3.5 bagging
172
- def features_bagging(X_train: pd.DataFrame, y_train: pd.Series, bagging_params: dict) -> pd.DataFrame:
247
+ def features_bagging(
248
+ x_train: pd.DataFrame, y_train: pd.Series, bagging_params: dict
249
+ ) -> pd.DataFrame:
173
250
  """
174
- A bagging ensemble of classifiers, often used with weak learners like decision trees, to reduce variance.
251
+ A bagging ensemble of models, often used with weak learners like decision trees, to reduce variance.
175
252
  when to use:
176
253
  Helps reduce overfitting, especially on high-variance models.
177
254
  Effective when the dataset has numerous features and may benefit from ensemble stability.
178
255
  Recommended Use: Beneficial for high-dimensional or noisy datasets needing ensemble stability.
179
-
256
+
180
257
  Fits BaggingClassifier and returns averaged feature importances from underlying estimators if available.
181
258
  """
182
259
  bagging = BaggingClassifier(**bagging_params)
183
- bagging.fit(X_train, y_train)
184
-
260
+ bagging.fit(x_train, y_train)
261
+
185
262
  # Calculate feature importance by averaging importances across estimators, if feature_importances_ is available.
186
263
  if hasattr(bagging.estimators_[0], "feature_importances_"):
187
- importances = np.mean([estimator.feature_importances_ for estimator in bagging.estimators_], axis=0)
188
- return pd.DataFrame({"feature": X_train.columns, "importance": importances}).sort_values(by="importance", ascending=False)
264
+ importances = np.mean(
265
+ [estimator.feature_importances_ for estimator in bagging.estimators_],
266
+ axis=0,
267
+ )
268
+ return pd.DataFrame(
269
+ {"feature": x_train.columns, "importance": importances}
270
+ ).sort_values(by="importance", ascending=False)
189
271
  else:
190
272
  # If the base estimator does not support feature importances, fallback to permutation importance.
191
- importances = permutation_importance(bagging, X_train, y_train, n_repeats=30, random_state=1, scoring="accuracy")
192
- return pd.DataFrame({"feature": X_train.columns, "importance": importances.importances_mean}).sort_values(by="importance", ascending=False)
273
+ importances = permutation_importance(
274
+ bagging, x_train, y_train, n_repeats=30, random_state=1, scoring="accuracy"
275
+ )
276
+ return pd.DataFrame(
277
+ {"feature": x_train.columns, "importance": importances.importances_mean}
278
+ ).sort_values(by="importance", ascending=False)
279
+
193
280
 
194
281
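A hedged sketch of the branch above that averages feature_importances_ across a BaggingClassifier's fitted estimators; with the default decision-tree base estimator every member exposes that attribute. Data and sizes are illustrative assumptions.

import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=1)
x_train = pd.DataFrame(X, columns=[f"f{i}" for i in range(10)])

bagging = BaggingClassifier(n_estimators=50, random_state=1).fit(x_train, y)
# Average the per-tree importances, as in the hasattr(...) branch of features_bagging.
importances = np.mean([est.feature_importances_ for est in bagging.estimators_], axis=0)
ranking = pd.DataFrame({"feature": x_train.columns, "importance": importances}).sort_values(
    "importance", ascending=False
)
print(ranking.head())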
  #! 4.Support Vector Machines
195
- def features_svm(X_train: pd.DataFrame, y_train: pd.Series, rfe_params: dict) -> np.ndarray:
282
+ def features_svm(
283
+ x_train: pd.DataFrame, y_train: pd.Series, rfe_params: dict
284
+ ) -> np.ndarray:
196
285
  """
197
286
  Suitable for classification tasks where the number of features is much larger than the number of samples.
198
287
  1. Effective in high-dimensional spaces and with clear margin of separation.
199
288
  2. Works well for both linear and non-linear classification (using kernel functions).
200
- Select features using RFE with SVM.When combined with SVM, RFE selects features that are most critical for the decision boundary,
289
+ Select features using RFE with SVM. When combined with SVM, RFE selects features that are most critical for the decision boundary,
201
290
  helping reduce the dataset to a more manageable size without losing much predictive power.
202
- SVM (Support Vector Machines),supports various kernels (linear, rbf, poly, and sigmoid), is good at handling high-dimensional
291
+ SVM (Support Vector Machines) supports various kernels (linear, rbf, poly, and sigmoid) and is good at handling high-dimensional
203
292
  data and finding an optimal decision boundary between classes, especially when using the right kernel.
204
293
  kernel: ["linear", "rbf", "poly", "sigmoid"]
205
- 'linear': simplest kernel that attempts to separate data by drawing a straight line (or hyperplane) between classes. It is effective
294
+ 'linear': simplest kernel that attempts to separate data by drawing a straight line (or hyperplane) between classes. It is effective
206
295
  when the data is linearly separable, meaning the classes can be well divided by a straight boundary.
207
296
  Advantages:
208
297
  - Computationally efficient for large datasets.
209
- - Works well when the number of features is high, which is common in genomic data where you may have thousands of genes
298
+ - Works well when the number of features is high, which is common in genomic data where you may have thousands of genes
210
299
  as features.
211
- 'rbf': a nonlinear kernel that maps the input data into a higher-dimensional space to find a decision boundary. It works well for
300
+ 'rbf': a nonlinear kernel that maps the input data into a higher-dimensional space to find a decision boundary. It works well for
212
301
  data that is not linearly separable in its original space.
213
- Advantages:
302
+ Advantages:
214
303
  - Handles nonlinear relationships between features and classes
215
304
  - Often better than a linear kernel when there is no clear linear decision boundary in the data.
216
- 'poly': Polynomial Kernel: computes similarity between data points based on polynomial functions of the input features. It can model
305
+ 'poly': Polynomial Kernel: computes similarity between data points based on polynomial functions of the input features. It can model
217
306
  interactions between features to a certain degree, depending on the polynomial degree chosen.
218
307
  Advantages:
219
308
  - Allows modeling of feature interactions.
@@ -221,58 +310,80 @@ def features_svm(X_train: pd.DataFrame, y_train: pd.Series, rfe_params: dict) ->
221
310
  'sigmoid': similar to the activation function in neural networks, and it works well when the data follows an S-shaped decision boundary.
222
311
  Advantages:
223
312
  - Can approximate the behavior of neural networks.
224
- - Use case: It’s not as widely used as the RBF or linear kernel but can be explored when there is some evidence of non-linear
313
+ - Use case: It’s not as widely used as the RBF or linear kernel but can be explored when there is some evidence of non-linear
225
314
  S-shaped relationships.
226
315
  """
227
316
  # SVM (Support Vector Machines)
228
- svc = SVC(kernel=rfe_params["kernel"]) # ["linear", "rbf", "poly", "sigmoid"]
317
+ svc = SVC(kernel=rfe_params["kernel"]) # ["linear", "rbf", "poly", "sigmoid"]
229
318
  # RFE(Recursive Feature Elimination)
230
319
  selector = RFE(svc, n_features_to_select=rfe_params["n_features_to_select"])
231
- selector.fit(X_train, y_train)
232
- return X_train.columns[selector.support_]
320
+ selector.fit(x_train, y_train)
321
+ return x_train.columns[selector.support_]
322
+
323
+
233
324
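A hedged sketch of the RFE-around-a-linear-SVC combination that features_svm wraps: RFE repeatedly drops the feature with the smallest |coef_| until the requested number remains. Synthetic data, the kernel, and the feature counts are illustrative assumptions.

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, n_features=15, n_informative=5, random_state=1)
x_train = pd.DataFrame(X, columns=[f"f{i}" for i in range(15)])

# A linear kernel exposes coef_, which RFE needs for its elimination ranking.
selector = RFE(SVC(kernel="linear"), n_features_to_select=5).fit(x_train, y)
print(list(x_train.columns[selector.support_]))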
  #! 5.Bayesian and Probabilistic Methods
234
- def features_naive_bayes(X_train: pd.DataFrame, y_train: pd.Series) -> list:
325
+ def features_naive_bayes(x_train: pd.DataFrame, y_train: pd.Series) -> list:
235
326
  """
236
- Naive Bayes: A probabilistic classifier based on Bayes' theorem, assuming independence between features, simple and fast, especially
327
+ Naive Bayes: A probabilistic classifier based on Bayes' theorem, assuming independence between features, simple and fast, especially
237
328
  effective for text classification and other high-dimensional data.
238
329
  """
239
330
  from sklearn.naive_bayes import GaussianNB
331
+
240
332
  nb = GaussianNB()
241
- nb.fit(X_train, y_train)
242
- probabilities = nb.predict_proba(X_train)
243
- return X_train.columns[np.argsort(probabilities.max(axis=1))[:X_train.shape[1] // 2]]
333
+ nb.fit(x_train, y_train)
334
+ probabilities = nb.predict_proba(x_train)
335
+ # Limit the number of features safely, choosing the lesser of half the features or all columns
336
+ n_features = min(x_train.shape[1] // 2, len(x_train.columns))
337
+
338
+ # Sort probabilities, then map to valid column indices
339
+ sorted_indices = np.argsort(probabilities.max(axis=1))[:n_features]
340
+
341
+ # Ensure indices are within the column bounds of x_train
342
+ valid_indices = sorted_indices[sorted_indices < len(x_train.columns)]
343
+
344
+ return x_train.columns[valid_indices]
345
+
346
+
244
347
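A hedged sketch of why the bounds guard added above matters: predict_proba and the argsort run over rows (samples), so the raw indices range over n_samples and must be capped and clipped before they are used to index x_train.columns. The data here is an illustrative assumption.

import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.naive_bayes import GaussianNB

X, y = make_classification(n_samples=50, n_features=6, random_state=1)
x_train = pd.DataFrame(X, columns=[f"f{i}" for i in range(6)])

nb = GaussianNB().fit(x_train, y)
probabilities = nb.predict_proba(x_train)                # shape (n_samples, n_classes)
sorted_indices = np.argsort(probabilities.max(axis=1))   # indices over the 50 samples
n_features = min(x_train.shape[1] // 2, len(x_train.columns))
valid_indices = sorted_indices[:n_features]
valid_indices = valid_indices[valid_indices < len(x_train.columns)]  # keep only in-range column indices
print(x_train.columns[valid_indices])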
  #! 6.Linear Discriminant Analysis (LDA)
245
- def features_lda(X_train: pd.DataFrame, y_train: pd.Series) -> list:
348
+ def features_lda(x_train: pd.DataFrame, y_train: pd.Series) -> list:
246
349
  """
247
- Linear Discriminant Analysis (LDA): Projects data onto a lower-dimensional space to maximize class separability, often used as a dimensionality
350
+ Linear Discriminant Analysis (LDA): Projects data onto a lower-dimensional space to maximize class separability, often used as a dimensionality
248
351
  reduction technique before classification on high-dimensional data.
249
352
  """
250
353
  from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
354
+
251
355
  lda = LinearDiscriminantAnalysis()
252
- lda.fit(X_train, y_train)
356
+ lda.fit(x_train, y_train)
253
357
  coef = lda.coef_.flatten()
254
358
  # Create a DataFrame to hold feature importance
255
- importance_df = pd.DataFrame({
256
- "feature": X_train.columns,
257
- "importance": np.abs(coef)
258
- })
259
-
260
- return importance_df[importance_df["importance"] > 0].sort_values(by="importance", ascending=False)
359
+ importance_df = pd.DataFrame(
360
+ {"feature": x_train.columns, "importance": np.abs(coef)}
361
+ )
261
362
 
262
- def features_adaboost(X_train: pd.DataFrame, y_train: pd.Series, adaboost_params: dict) -> pd.DataFrame:
363
+ return importance_df[importance_df["importance"] > 0].sort_values(
364
+ by="importance", ascending=False
365
+ )
366
+
367
+
368
+ def features_adaboost(
369
+ x_train: pd.DataFrame, y_train: pd.Series, adaboost_params: dict
370
+ ) -> pd.DataFrame:
263
371
  """
264
372
  AdaBoost
265
373
  Strengths:
266
374
  Combines multiple weak learners to create a strong classifier.
267
375
  Focuses on examples that are hard to classify, improving overall performance.
268
- Recommended Use:
269
- Can be effective for boosting weak classifiers in a genomics context.
376
+ Recommended Use:
377
+ Can be effective for boosting weak models in a genomics context.
270
378
  Fit AdaBoost classifier and return sorted feature importances.
271
379
  Recommended Use: Great for classification problems with a large number of features (genes).
272
380
  """
273
381
  ada = AdaBoostClassifier(**adaboost_params)
274
- ada.fit(X_train, y_train)
275
- return pd.DataFrame({"feature": X_train.columns, "importance": ada.feature_importances_}).sort_values(by="importance", ascending=False)
382
+ ada.fit(x_train, y_train)
383
+ return pd.DataFrame(
384
+ {"feature": x_train.columns, "importance": ada.feature_importances_}
385
+ ).sort_values(by="importance", ascending=False)
386
+
276
387
 
277
388
  import torch
278
389
  import torch.nn as nn
@@ -280,32 +391,30 @@ import torch.optim as optim
280
391
  from torch.utils.data import DataLoader, TensorDataset
281
392
  from skorch import NeuralNetClassifier # sklearn compatible
282
393
 
394
+
283
395
  class DNNClassifier(nn.Module):
284
396
  def __init__(self, input_dim, hidden_dim=128, output_dim=2, dropout_rate=0.5):
285
397
  super(DNNClassifier, self).__init__()
286
-
398
+
287
399
  self.hidden_layer1 = nn.Sequential(
288
400
  nn.Linear(input_dim, hidden_dim),
289
401
  nn.ReLU(),
290
402
  nn.Dropout(dropout_rate),
291
403
  nn.Linear(hidden_dim, hidden_dim),
292
- nn.ReLU()
404
+ nn.ReLU(),
293
405
  )
294
-
406
+
295
407
  self.hidden_layer2 = nn.Sequential(
296
- nn.Linear(hidden_dim, hidden_dim),
297
- nn.ReLU(),
298
- nn.Dropout(dropout_rate)
408
+ nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Dropout(dropout_rate)
299
409
  )
300
-
410
+
301
411
  # Adding a residual connection between hidden layers
302
412
  self.residual = nn.Linear(input_dim, hidden_dim)
303
-
413
+
304
414
  self.output_layer = nn.Sequential(
305
- nn.Linear(hidden_dim, output_dim),
306
- nn.Softmax(dim=1)
415
+ nn.Linear(hidden_dim, output_dim), nn.Softmax(dim=1)
307
416
  )
308
-
417
+
309
418
  def forward(self, x):
310
419
  residual = self.residual(x)
311
420
  x = self.hidden_layer1(x)
@@ -314,64 +423,77 @@ class DNNClassifier(nn.Module):
314
423
  x = self.output_layer(x)
315
424
  return x
316
425
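A hedged forward-pass sanity check for the residual MLP defined above. It assumes DNNClassifier is importable from py2ls.ml2ls (the file shown in this diff) and that the module imports cleanly in your environment; the shapes and batch size are illustrative.

import torch
from py2ls.ml2ls import DNNClassifier  # assumed import path; the class is defined in this file

model = DNNClassifier(input_dim=20, hidden_dim=128, output_dim=2, dropout_rate=0.5)
x = torch.randn(8, 20)        # batch of 8 samples with 20 features
probs = model(x)              # Softmax(dim=1) output
print(probs.shape)            # expected: torch.Size([8, 2])
print(probs.sum(dim=1))       # each row sums to ~1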
 
317
- def validate_classifier(clf, X_train: pd.DataFrame, y_train: pd.Series, X_test: pd.DataFrame, y_test: pd.Series, metrics: list=["accuracy", "precision", "recall", "f1", "roc_auc"] , cv_folds: int=5) -> dict:
426
+
427
+ def validate_classifier(
428
+ clf,
429
+ x_train: pd.DataFrame,
430
+ y_train: pd.Series,
431
+ x_test: pd.DataFrame,
432
+ y_test: pd.Series,
433
+ metrics: list = ["accuracy", "precision", "recall", "f1", "roc_auc"],
434
+ cv_folds: int = 5,
435
+ ) -> dict:
318
436
  """
319
437
  Perform cross-validation for a given classifier and return average scores for specified metrics on training data.
320
438
  Then fit the best model on the full training data and evaluate it on the test set.
321
-
439
+
322
440
  Parameters:
323
441
  - clf: The classifier to be validated.
324
- - X_train: Training features.
442
+ - x_train: Training features.
325
443
  - y_train: Training labels.
326
- - X_test: Test features.
444
+ - x_test: Test features.
327
445
  - y_test: Test labels.
328
446
  - metrics: List of metrics to evaluate (e.g., ['accuracy', 'roc_auc']).
329
447
  - cv_folds: Number of cross-validation folds.
330
-
448
+
331
449
  Returns:
332
450
  - results: Dictionary containing average cv_train_scores and cv_test_scores.
333
451
  """
334
452
  cv_train_scores = {metric: [] for metric in metrics}
335
453
  skf = StratifiedKFold(n_splits=cv_folds)
336
- # Perform cross-validation
454
+ # Perform cross-validation
337
455
  for metric in metrics:
338
456
  try:
339
457
  if metric == "roc_auc" and len(set(y_train)) == 2:
340
- scores = cross_val_score(clf, X_train, y_train, cv=skf, scoring="roc_auc")
341
- cv_train_scores[metric] = np.nanmean(scores) if not np.isnan(scores).all() else float('nan')
458
+ scores = cross_val_score(
459
+ clf, x_train, y_train, cv=skf, scoring="roc_auc"
460
+ )
461
+ cv_train_scores[metric] = (
462
+ np.nanmean(scores) if not np.isnan(scores).all() else float("nan")
463
+ )
342
464
  else:
343
- score = cross_val_score(clf, X_train, y_train, cv=skf, scoring=metric)
465
+ score = cross_val_score(clf, x_train, y_train, cv=skf, scoring=metric)
344
466
  cv_train_scores[metric] = score.mean()
345
467
  except Exception as e:
346
- cv_train_scores[metric] = float('nan')
347
- clf.fit(X_train, y_train)
348
-
468
+ cv_train_scores[metric] = float("nan")
469
+ clf.fit(x_train, y_train)
470
+
349
471
  # Evaluate on the test set
350
472
  cv_test_scores = {}
351
473
  for metric in metrics:
352
474
  if metric == "roc_auc" and len(set(y_test)) == 2:
353
475
  try:
354
- y_prob=clf.predict_proba(X_test)[:, 1]
355
- cv_test_scores[metric] = roc_auc_score(y_test,y_prob)
476
+ y_prob = clf.predict_proba(x_test)[:, 1]
477
+ cv_test_scores[metric] = roc_auc_score(y_test, y_prob)
356
478
  except AttributeError:
357
- cv_test_scores[metric]=float('nan')
479
+ cv_test_scores[metric] = float("nan")
358
480
  else:
359
- score_func = globals().get(f'{metric}_score') # Fetching the appropriate scoring function
481
+ score_func = globals().get(
482
+ f"{metric}_score"
483
+ ) # Fetching the appropriate scoring function
360
484
  if score_func:
361
485
  try:
362
- y_pred = clf.predict(X_test)
486
+ y_pred = clf.predict(x_test)
363
487
  cv_test_scores[metric] = score_func(y_test, y_pred)
364
488
  except Exception as e:
365
- cv_test_scores[metric] = float('nan')
489
+ cv_test_scores[metric] = float("nan")
366
490
 
367
491
  # Combine results
368
- results = {
369
- 'cv_train_scores': cv_train_scores,
370
- 'cv_test_scores': cv_test_scores
371
- }
492
+ results = {"cv_train_scores": cv_train_scores, "cv_test_scores": cv_test_scores}
372
493
  return results
373
494
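A hedged usage sketch for validate_classifier above, on synthetic data with a plain random forest; it assumes the function is importable from py2ls.ml2ls and that the module imports cleanly.

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from py2ls.ml2ls import validate_classifier  # assumed import path

X, y = make_classification(n_samples=300, n_features=10, random_state=1)
X, y = pd.DataFrame(X, columns=[f"f{i}" for i in range(10)]), pd.Series(y)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)

scores = validate_classifier(
    RandomForestClassifier(random_state=1),
    x_train, y_train, x_test, y_test,
    metrics=["accuracy", "f1", "roc_auc"],
    cv_folds=5,
)
print(scores["cv_train_scores"])
print(scores["cv_test_scores"])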
 
374
- def get_classifiers(
495
+
496
+ def get_models(
375
497
  random_state=1,
376
498
  cls=[
377
499
  "lasso",
@@ -383,25 +505,36 @@ def get_classifiers(
383
505
  "Support Vector Machine(svm)",
384
506
  "naive bayes",
385
507
  "Linear Discriminant Analysis (lda)",
386
- "adaboost","DecisionTree","KNeighbors","Bagging"
508
+ "adaboost",
509
+ "DecisionTree",
510
+ "KNeighbors",
511
+ "Bagging",
387
512
  ],
388
513
  ):
389
514
  from sklearn.ensemble import (
390
515
  RandomForestClassifier,
391
516
  GradientBoostingClassifier,
392
517
  AdaBoostClassifier,
393
- BaggingClassifier
518
+ BaggingClassifier,
394
519
  )
395
520
  from sklearn.svm import SVC
396
- from sklearn.linear_model import LogisticRegression, Lasso, RidgeClassifierCV, ElasticNet
521
+ from sklearn.linear_model import (
522
+ LogisticRegression,
523
+ Lasso,
524
+ RidgeClassifierCV,
525
+ ElasticNet,
526
+ )
397
527
  from sklearn.naive_bayes import GaussianNB
398
528
  from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
399
529
  import xgboost as xgb
400
530
  from sklearn.tree import DecisionTreeClassifier
401
531
  from sklearn.neighbors import KNeighborsClassifier
532
+
402
533
  res_cls = {}
403
- classifiers_all = {
404
- "Lasso": LogisticRegression(penalty='l1', solver='saga', random_state=random_state),
534
+ model_all = {
535
+ "Lasso": LogisticRegression(
536
+ penalty="l1", solver="saga", random_state=random_state
537
+ ),
405
538
  "Ridge": RidgeClassifierCV(),
406
539
  "Elastic Net (Enet)": ElasticNet(random_state=random_state),
407
540
  "Gradient Boosting": GradientBoostingClassifier(random_state=random_state),
@@ -411,23 +544,25 @@ def get_classifiers(
411
544
  "Naive Bayes": GaussianNB(),
412
545
  "Linear Discriminant Analysis (LDA)": LinearDiscriminantAnalysis(),
413
546
  "AdaBoost": AdaBoostClassifier(random_state=random_state, algorithm="SAMME"),
414
- "DecisionTree":DecisionTreeClassifier(),
547
+ "DecisionTree": DecisionTreeClassifier(),
415
548
  "KNeighbors": KNeighborsClassifier(n_neighbors=5),
416
549
  "Bagging": BaggingClassifier(),
417
550
  }
418
- print("Using default classifiers:")
551
+ print("Using default models:")
419
552
  for cls_name in cls:
420
- cls_name = ips.strcmp(cls_name, list(classifiers_all.keys()))[0]
421
- res_cls[cls_name] = classifiers_all[cls_name]
553
+ cls_name = ips.strcmp(cls_name, list(model_all.keys()))[0]
554
+ res_cls[cls_name] = model_all[cls_name]
422
555
  print(f"- {cls_name}")
423
556
  return res_cls
424
557
 
558
+
425
559
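A hedged usage sketch for get_models above. The names in cls are fuzzy-matched to the canonical keys via ips.strcmp, so the loosely written spellings used by the source's own defaults (e.g. "Random forest (rf)") are accepted; the import path is an assumption.

from py2ls.ml2ls import get_models  # assumed import path

models = get_models(
    random_state=1,
    cls=["lasso", "Random forest (rf)", "XGBoost (xgb)", "KNeighbors"],
)
for name, estimator in models.items():
    print(name, "->", type(estimator).__name__)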
  def get_features(
426
- X: pd.DataFrame,
427
- y: pd.Series,
560
+ X: Union[pd.DataFrame, np.ndarray], # n_samples X n_features
561
+ y: Union[pd.Series, np.ndarray, list], # n_samples X n_features
428
562
  test_size: float = 0.2,
429
563
  random_state: int = 1,
430
564
  n_features: int = 10,
565
+ fill_missing=True,
431
566
  rf_params: Optional[Dict] = None,
432
567
  rfe_params: Optional[Dict] = None,
433
568
  lasso_params: Optional[Dict] = None,
@@ -439,167 +574,327 @@ def get_features(
439
574
  dt_params: Optional[Dict] = None,
440
575
  bagging_params: Optional[Dict] = None,
441
576
  knn_params: Optional[Dict] = None,
442
- cls: list=[
443
- "lasso",
444
- "ridge",
445
- "Elastic Net(Enet)",
446
- "gradient Boosting",
447
- "Random forest (rf)",
448
- "XGBoost (xgb)",
449
- "Support Vector Machine(svm)",
450
- "naive bayes",
451
- "Linear Discriminant Analysis (lda)",
452
- "adaboost","DecisionTree","KNeighbors","Bagging"
453
- ],
577
+ cls: list = [
578
+ "lasso","ridge","Elastic Net(Enet)","gradient Boosting","Random forest (rf)","XGBoost (xgb)","Support Vector Machine(svm)",
579
+ "naive bayes","Linear Discriminant Analysis (lda)","adaboost","DecisionTree","KNeighbors","Bagging"],
454
580
  metrics: Optional[List[str]] = None,
455
581
  cv_folds: int = 5,
456
- strict:bool=False,
457
- n_shared:int=2, # 只要有两个方法有重合,就纳入common genes
582
+ strict: bool = False,
583
+ n_shared: int = 2, # a feature is treated as common once at least two methods select it
458
584
  use_selected_features: bool = True,
459
585
  ) -> dict:
460
586
  """
461
- Master function to perform feature selection and validate classifiers.
587
+ Master function to perform feature selection and validate models.
462
588
  """
589
+ from sklearn.compose import ColumnTransformer
590
+ from sklearn.preprocessing import StandardScaler, OneHotEncoder
591
+
592
+ # Ensure X and y are DataFrames/Series for consistency
593
+ if isinstance(X, np.ndarray):
594
+ X = pd.DataFrame(X)
595
+ if isinstance(y, (np.ndarray, list)):
596
+ y = pd.Series(y)
597
+
598
+ # fill na
599
+ if fill_missing:
600
+ ips.df_fillna(data=X, method="knn", inplace=True, axis=0)
601
+
602
+ # rm missing values
603
+ X.dropna(inplace=True)
604
+ y.dropna(inplace=True)
605
+ y = y.loc[X.index] # Align y with X after dropping rows with missing values in X
606
+
607
+ if X.shape[0] != len(y):
608
+ raise ValueError("X and y must have the same number of samples (rows).")
609
+
610
+ # #! # Check for non-numeric columns in X and apply one-hot encoding if needed
611
+ # Check if any column in X is non-numeric
612
+ if any(not np.issubdtype(dtype, np.number) for dtype in X.dtypes):
613
+ X = pd.get_dummies(X, drop_first=True)
614
+ print(X.shape)
615
+
616
+ # #!alternative: # Identify categorical and numerical columns
617
+ # categorical_cols = X.select_dtypes(include=["object", "category"]).columns
618
+ # numerical_cols = X.select_dtypes(include=["number"]).columns
619
+
620
+ # # Define preprocessing pipeline
621
+ # preprocessor = ColumnTransformer(
622
+ # transformers=[
623
+ # ("num", StandardScaler(), numerical_cols),
624
+ # ("cat", OneHotEncoder(drop="first", handle_unknown="ignore"), categorical_cols),
625
+ # ]
626
+ # )
627
+ # # Preprocess the data
628
+ # X = preprocessor.fit_transform(X)
629
+
463
630
  # Split data into training and test sets
464
- X_train, X_test, y_train, y_test = train_test_split(
631
+ x_train, x_test, y_train, y_test = train_test_split(
465
632
  X, y, test_size=test_size, random_state=random_state
466
633
  )
467
634
  # Standardize features
468
635
  scaler = StandardScaler()
469
- X_train_scaled = scaler.fit_transform(X_train)
470
- X_test_scaled = scaler.transform(X_test)
471
-
636
+ x_train_scaled = scaler.fit_transform(x_train)
637
+ x_test_scaled = scaler.transform(x_test)
638
+
472
639
  # Convert back to DataFrame for consistency
473
- X_train = pd.DataFrame(X_train_scaled, columns=X_train.columns)
474
- X_test = pd.DataFrame(X_test_scaled, columns=X_test.columns)
640
+ x_train = pd.DataFrame(x_train_scaled, columns=x_train.columns)
641
+ x_test = pd.DataFrame(x_test_scaled, columns=x_test.columns)
475
642
 
476
643
  rf_defaults = {"n_estimators": 100, "random_state": random_state}
477
644
  rfe_defaults = {"kernel": "linear", "n_features_to_select": n_features}
478
645
  lasso_defaults = {"alphas": np.logspace(-4, 4, 100), "cv": 10}
479
646
  ridge_defaults = {"alphas": np.logspace(-4, 4, 100), "cv": 10}
480
647
  enet_defaults = {"alphas": np.logspace(-4, 4, 100), "cv": 10}
481
- xgb_defaults = {"n_estimators": 100, "use_label_encoder": False, "eval_metric": "logloss", "random_state": random_state}
648
+ xgb_defaults = {
649
+ "n_estimators": 100,
650
+ "use_label_encoder": False,
651
+ "eval_metric": "logloss",
652
+ "random_state": random_state,
653
+ }
482
654
  gb_defaults = {"n_estimators": 100, "random_state": random_state}
483
655
  adaboost_defaults = {"n_estimators": 50, "random_state": random_state}
484
656
  dt_defaults = {"max_depth": None, "random_state": random_state}
485
657
  bagging_defaults = {"n_estimators": 50, "random_state": random_state}
486
658
  knn_defaults = {"n_neighbors": 5}
487
659
  rf_params, rfe_params = rf_params or rf_defaults, rfe_params or rfe_defaults
488
- lasso_params, ridge_params = lasso_params or lasso_defaults, ridge_params or ridge_defaults
660
+ lasso_params, ridge_params = (
661
+ lasso_params or lasso_defaults,
662
+ ridge_params or ridge_defaults,
663
+ )
489
664
  enet_params, xgb_params = enet_params or enet_defaults, xgb_params or xgb_defaults
490
- gb_params, adaboost_params = gb_params or gb_defaults, adaboost_params or adaboost_defaults
665
+ gb_params, adaboost_params = (
666
+ gb_params or gb_defaults,
667
+ adaboost_params or adaboost_defaults,
668
+ )
491
669
  dt_params = dt_params or dt_defaults
492
670
  bagging_params = bagging_params or bagging_defaults
493
671
  knn_params = knn_params or knn_defaults
494
672
 
495
- cls_ = ["lasso",'ridge','Elastic Net(Enet)',"Gradient Boosting","Random Forest (rf)",
496
- 'XGBoost (xgb)','Support Vector Machine(svm)','Naive Bayes','Linear Discriminant Analysis (lda)','adaboost']
497
- cls=[ips.strcmp(i,cls_)[0] for i in cls]
673
+ cls_ = [
674
+ "lasso",
675
+ "ridge",
676
+ "Elastic Net(Enet)",
677
+ "Gradient Boosting",
678
+ "Random Forest (rf)",
679
+ "XGBoost (xgb)",
680
+ "Support Vector Machine(svm)",
681
+ "Naive Bayes",
682
+ "Linear Discriminant Analysis (lda)",
683
+ "adaboost",
684
+ ]
685
+ cls = [ips.strcmp(i, cls_)[0] for i in cls]
498
686
 
499
687
  # Lasso Feature Selection
500
- lasso_importances = features_lasso(X_train, y_train, lasso_params) if 'lasso'in cls else pd.DataFrame()
501
- lasso_selected_features= lasso_importances.head(n_features)["feature"].values if 'lasso'in cls else []
502
- # Ridge
503
- ridge_importances=features_ridge(X_train, y_train,ridge_params) if 'ridge'in cls else pd.DataFrame()
504
- selected_ridge_features= ridge_importances.head(n_features)["feature"].values if 'ridge'in cls else []
688
+ lasso_importances = (
689
+ features_lasso(x_train, y_train, lasso_params)
690
+ if "lasso" in cls
691
+ else pd.DataFrame()
692
+ )
693
+ lasso_selected_features = (
694
+ lasso_importances.head(n_features)["feature"].values if "lasso" in cls else []
695
+ )
696
+ # Ridge
697
+ ridge_importances = (
698
+ features_ridge(x_train, y_train, ridge_params)
699
+ if "ridge" in cls
700
+ else pd.DataFrame()
701
+ )
702
+ selected_ridge_features = (
703
+ ridge_importances.head(n_features)["feature"].values if "ridge" in cls else []
704
+ )
505
705
  # Elastic Net
506
- enet_importances=features_enet(X_train, y_train,enet_params) if 'Enet'in cls else pd.DataFrame()
507
- selected_enet_features= enet_importances.head(n_features)["feature"].values if 'Enet'in cls else []
508
- # Random Forest Feature Importance
509
- rf_importances = features_rf(X_train, y_train, rf_params) if 'Random Forest'in cls else pd.DataFrame()
510
- top_rf_features = rf_importances.head(n_features)["feature"].values if 'Random Forest'in cls else []
511
- # Gradient Boosting Feature Importance
512
- gb_importances = features_gradient_boosting(X_train, y_train, gb_params) if 'Gradient Boosting'in cls else pd.DataFrame()
513
- top_gb_features = gb_importances.head(n_features)["feature"].values if 'Gradient Boosting'in cls else []
706
+ enet_importances = (
707
+ features_enet(x_train, y_train, enet_params)
708
+ if "Enet" in cls
709
+ else pd.DataFrame()
710
+ )
711
+ selected_enet_features = (
712
+ enet_importances.head(n_features)["feature"].values if "Enet" in cls else []
713
+ )
714
+ # Random Forest Feature Importance
715
+ rf_importances = (
716
+ features_rf(x_train, y_train, rf_params)
717
+ if "Random Forest" in cls
718
+ else pd.DataFrame()
719
+ )
720
+ top_rf_features = (
721
+ rf_importances.head(n_features)["feature"].values
722
+ if "Random Forest" in cls
723
+ else []
724
+ )
725
+ # Gradient Boosting Feature Importance
726
+ gb_importances = (
727
+ features_gradient_boosting(x_train, y_train, gb_params)
728
+ if "Gradient Boosting" in cls
729
+ else pd.DataFrame()
730
+ )
731
+ top_gb_features = (
732
+ gb_importances.head(n_features)["feature"].values
733
+ if "Gradient Boosting" in cls
734
+ else []
735
+ )
514
736
  # xgb
515
- xgb_importances = features_xgb(X_train, y_train,xgb_params) if 'xgb'in cls else pd.DataFrame()
516
- top_xgb_features = xgb_importances.head(n_features)["feature"].values if 'xgb'in cls else []
517
-
518
- # SVM with RFE
519
- selected_svm_features = features_svm(X_train, y_train, rfe_params) if 'svm'in cls else []
737
+ xgb_importances = (
738
+ features_xgb(x_train, y_train, xgb_params) if "xgb" in cls else pd.DataFrame()
739
+ )
740
+ top_xgb_features = (
741
+ xgb_importances.head(n_features)["feature"].values if "xgb" in cls else []
742
+ )
743
+
744
+ # SVM with RFE
745
+ selected_svm_features = (
746
+ features_svm(x_train, y_train, rfe_params) if "svm" in cls else []
747
+ )
520
748
  # Naive Bayes
521
- selected_naive_bayes_features=features_naive_bayes(X_train, y_train) if 'Naive Bayes'in cls else []
749
+ selected_naive_bayes_features = (
750
+ features_naive_bayes(x_train, y_train) if "Naive Bayes" in cls else []
751
+ )
522
752
  # lda: linear discriminant analysis
523
- lda_importances=features_lda(X_train, y_train) if 'lda'in cls else pd.DataFrame()
524
- selected_lda_features= lda_importances.head(n_features)["feature"].values if 'lda'in cls else []
525
- # AdaBoost Feature Importance
526
- adaboost_importances = features_adaboost(X_train, y_train, adaboost_params) if 'AdaBoost'in cls else pd.DataFrame()
527
- top_adaboost_features = adaboost_importances.head(n_features)["feature"].values if 'AdaBoost'in cls else []
753
+ lda_importances = features_lda(x_train, y_train) if "lda" in cls else pd.DataFrame()
754
+ selected_lda_features = (
755
+ lda_importances.head(n_features)["feature"].values if "lda" in cls else []
756
+ )
757
+ # AdaBoost Feature Importance
758
+ adaboost_importances = (
759
+ features_adaboost(x_train, y_train, adaboost_params)
760
+ if "AdaBoost" in cls
761
+ else pd.DataFrame()
762
+ )
763
+ top_adaboost_features = (
764
+ adaboost_importances.head(n_features)["feature"].values
765
+ if "AdaBoost" in cls
766
+ else []
767
+ )
528
768
  # Decision Tree Feature Importance
529
- dt_importances = features_decision_tree(X_train, y_train, dt_params) if 'Decision Tree' in cls else pd.DataFrame()
530
- top_dt_features = dt_importances.head(n_features)["feature"].values if 'Decision Tree' in cls else []
769
+ dt_importances = (
770
+ features_decision_tree(x_train, y_train, dt_params)
771
+ if "Decision Tree" in cls
772
+ else pd.DataFrame()
773
+ )
774
+ top_dt_features = (
775
+ dt_importances.head(n_features)["feature"].values
776
+ if "Decision Tree" in cls
777
+ else []
778
+ )
531
779
  # Bagging Feature Importance
532
- bagging_importances = features_bagging(X_train, y_train, bagging_params) if 'Bagging' in cls else pd.DataFrame()
533
- top_bagging_features = bagging_importances.head(n_features)["feature"].values if 'Bagging' in cls else []
780
+ bagging_importances = (
781
+ features_bagging(x_train, y_train, bagging_params)
782
+ if "Bagging" in cls
783
+ else pd.DataFrame()
784
+ )
785
+ top_bagging_features = (
786
+ bagging_importances.head(n_features)["feature"].values
787
+ if "Bagging" in cls
788
+ else []
789
+ )
534
790
  # KNN Feature Importance via Permutation
535
- knn_importances = features_knn(X_train, y_train, knn_params) if 'KNN' in cls else pd.DataFrame()
536
- top_knn_features = knn_importances.head(n_features)["feature"].values if 'KNN' in cls else []
791
+ knn_importances = (
792
+ features_knn(x_train, y_train, knn_params) if "KNN" in cls else pd.DataFrame()
793
+ )
794
+ top_knn_features = (
795
+ knn_importances.head(n_features)["feature"].values if "KNN" in cls else []
796
+ )
537
797
 
538
798
  #! Find common features
539
- common_features = ips.shared(lasso_selected_features,selected_ridge_features, selected_enet_features,
540
- top_rf_features,top_gb_features,top_xgb_features,
541
- selected_svm_features, selected_naive_bayes_features,selected_lda_features,
542
- top_adaboost_features,top_dt_features, top_bagging_features, top_knn_features,
543
- strict=strict,
544
- n_shared=n_shared
545
- )
799
+ common_features = ips.shared(
800
+ lasso_selected_features,
801
+ selected_ridge_features,
802
+ selected_enet_features,
803
+ top_rf_features,
804
+ top_gb_features,
805
+ top_xgb_features,
806
+ selected_svm_features,
807
+ selected_naive_bayes_features,
808
+ selected_lda_features,
809
+ top_adaboost_features,
810
+ top_dt_features,
811
+ top_bagging_features,
812
+ top_knn_features,
813
+ strict=strict,
814
+ n_shared=n_shared,
815
+ verbose=False
816
+ )
546
817
 
547
818
  # Use selected features or all features for model validation
548
- X_train_selected = X_train[list(common_features)] if use_selected_features else X_train
549
- X_test_selected = X_test[list(common_features)] if use_selected_features else X_test
819
+ x_train_selected = (
820
+ x_train[list(common_features)] if use_selected_features else x_train
821
+ )
822
+ x_test_selected = x_test[list(common_features)] if use_selected_features else x_test
550
823
 
551
824
  if metrics is None:
552
- metrics = ["accuracy", "precision", "recall", "f1", "roc_auc"]
825
+ metrics = ["accuracy", "precision", "recall", "f1", "roc_auc"]
553
826
 
554
827
  # Prepare results DataFrame for selected features
555
- features_df = pd.DataFrame({
556
- 'type':
557
- ['Lasso'] * len(lasso_selected_features)+
558
- ['Ridge'] * len(selected_ridge_features)+
559
- ['Random Forest'] * len(top_rf_features) +
560
- ['Gradient Boosting'] * len(top_gb_features)+
561
- ["Enet"]*len(selected_enet_features)+
562
- ['xgb'] * len(top_xgb_features)+
563
- ['SVM'] * len(selected_svm_features) +
564
- ['Naive Bayes'] * len(selected_naive_bayes_features)+
565
- ['Linear Discriminant Analysis'] * len(selected_lda_features)+
566
- ['AdaBoost'] * len(top_adaboost_features)+
567
- ['Decision Tree'] * len(top_dt_features) +
568
- ['Bagging'] * len(top_bagging_features) +
569
- ['KNN'] * len(top_knn_features),
570
- 'feature': np.concatenate([lasso_selected_features,selected_ridge_features,
571
- top_rf_features,top_gb_features,selected_enet_features,top_xgb_features,
572
- selected_svm_features,selected_naive_bayes_features,
573
- selected_lda_features,top_adaboost_features,top_dt_features,
574
- top_bagging_features, top_knn_features
575
- ])
576
- })
828
+ features_df = pd.DataFrame(
829
+ {
830
+ "type": ["Lasso"] * len(lasso_selected_features)
831
+ + ["Ridge"] * len(selected_ridge_features)
832
+ + ["Random Forest"] * len(top_rf_features)
833
+ + ["Gradient Boosting"] * len(top_gb_features)
834
+ + ["Enet"] * len(selected_enet_features)
835
+ + ["xgb"] * len(top_xgb_features)
836
+ + ["SVM"] * len(selected_svm_features)
837
+ + ["Naive Bayes"] * len(selected_naive_bayes_features)
838
+ + ["Linear Discriminant Analysis"] * len(selected_lda_features)
839
+ + ["AdaBoost"] * len(top_adaboost_features)
840
+ + ["Decision Tree"] * len(top_dt_features)
841
+ + ["Bagging"] * len(top_bagging_features)
842
+ + ["KNN"] * len(top_knn_features),
843
+ "feature": np.concatenate(
844
+ [
845
+ lasso_selected_features,
846
+ selected_ridge_features,
847
+ top_rf_features,
848
+ top_gb_features,
849
+ selected_enet_features,
850
+ top_xgb_features,
851
+ selected_svm_features,
852
+ selected_naive_bayes_features,
853
+ selected_lda_features,
854
+ top_adaboost_features,
855
+ top_dt_features,
856
+ top_bagging_features,
857
+ top_knn_features,
858
+ ]
859
+ ),
860
+ }
861
+ )
577
862
 
578
863
  #! Validate each trained classifier
579
- classifiers=get_classifiers(random_state=random_state,cls=cls)
580
- cv_train_results,cv_test_results = [],[]
581
- for name, clf in classifiers.items():
582
- if not X_train_selected.empty:
583
- cv_scores=validate_classifier(clf,
584
- X_train_selected,
585
- y_train,
586
- X_test_selected,
587
- y_test,
588
- metrics=metrics,
589
- cv_folds=cv_folds)
864
+ models = get_models(random_state=random_state, cls=cls)
865
+ cv_train_results, cv_test_results = [], []
866
+ for name, clf in models.items():
867
+ if not x_train_selected.empty:
868
+ cv_scores = validate_classifier(
869
+ clf,
870
+ x_train_selected,
871
+ y_train,
872
+ x_test_selected,
873
+ y_test,
874
+ metrics=metrics,
875
+ cv_folds=cv_folds,
876
+ )
590
877
 
591
878
  cv_train_score_df = pd.DataFrame(cv_scores["cv_train_scores"], index=[name])
592
879
  cv_test_score_df = pd.DataFrame(cv_scores["cv_test_scores"], index=[name])
593
880
  cv_train_results.append(cv_train_score_df)
594
881
  cv_test_results.append(cv_test_score_df)
595
- if all([cv_train_results,cv_train_results]):
596
- cv_train_results_df = pd.concat(cv_train_results).reset_index().rename(columns={'index': 'Classifier'})
597
- cv_test_results_df = pd.concat(cv_test_results).reset_index().rename(columns={'index': 'Classifier'})
882
+ if all([cv_train_results, cv_test_results]):
883
+ cv_train_results_df = (
884
+ pd.concat(cv_train_results)
885
+ .reset_index()
886
+ .rename(columns={"index": "Classifier"})
887
+ )
888
+ cv_test_results_df = (
889
+ pd.concat(cv_test_results)
890
+ .reset_index()
891
+ .rename(columns={"index": "Classifier"})
892
+ )
598
893
  #! Store results in the main results dictionary
599
894
  results = {
600
895
  "selected_features": features_df,
601
896
  "cv_train_scores": cv_train_results_df,
602
- "cv_test_scores": cv_test_results_df,
897
+ "cv_test_scores": rank_models(cv_test_results_df),
603
898
  "common_features": list(common_features),
604
899
  }
605
900
  else:
@@ -611,71 +906,75 @@ def get_features(
611
906
  }
612
907
  print(f"Warning: 没有找到共同的genes, when n_shared={n_shared}")
613
908
  return results
909
+
910
+
614
911
  #! # usage:
615
912
  # # Get features and common features
616
913
  # results = get_features(X, y)
617
914
  # common_features = results["common_features"]
618
915
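A slightly fuller, hedged version of the usage note above, on synthetic data; the import path and printed keys follow the function as defined in this file, and the sizes are illustrative assumptions.

import pandas as pd
from sklearn.datasets import make_classification
from py2ls.ml2ls import get_features  # assumed import path

X, y = make_classification(n_samples=200, n_features=30, n_informative=8, random_state=1)
X = pd.DataFrame(X, columns=[f"gene_{i}" for i in range(30)])
y = pd.Series(y)

results = get_features(X, y, n_features=10, n_shared=2, cv_folds=5)
print(results["common_features"])        # features selected by at least n_shared methods
print(results["cv_train_scores"])        # cross-validated training metrics per model
print(results["cv_test_scores"])         # ranked held-out metrics per model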
  def validate_features(
619
- X_train: pd.DataFrame,
916
+ x_train: pd.DataFrame,
620
917
  y_train: pd.Series,
621
- X_true: pd.DataFrame,
918
+ x_true: pd.DataFrame,
622
919
  y_true: pd.Series,
623
- common_features:set=None,
624
- classifiers: Optional[Dict[str, Any]] = None,
920
+ common_features: set = None,
921
+ models: Optional[Dict[str, Any]] = None,
625
922
  metrics: Optional[list] = None,
626
923
  random_state: int = 1,
627
924
  smote: bool = False,
925
+ n_jobs: int = -1,
628
926
  plot_: bool = True,
629
927
  class_weight: str = "balanced",
630
928
  ) -> dict:
631
929
  """
632
- Validate classifiers using selected features on the validation dataset.
930
+ Validate models using selected features on the validation dataset.
633
931
 
634
932
  Parameters:
635
- - X_train (pd.DataFrame): Training feature dataset.
933
+ - x_train (pd.DataFrame): Training feature dataset.
636
934
  - y_train (pd.Series): Training target variable.
637
- - X_true (pd.DataFrame): Validation feature dataset.
935
+ - x_true (pd.DataFrame): Validation feature dataset.
638
936
  - y_true (pd.Series): Validation target variable.
639
937
  - common_features (set): Set of common features to use for validation.
640
- - classifiers (dict, optional): Dictionary of classifiers to validate.
938
+ - models (dict, optional): Dictionary of models to validate.
641
939
  - metrics (list, optional): List of metrics to compute.
642
940
  - random_state (int): Random state for reproducibility.
643
941
  - plot_ (bool): Option to plot metrics (to be implemented if needed).
644
942
  - class_weight (str or dict): Class weights to handle imbalance.
645
943
 
646
944
  """
647
-
945
+ from tqdm import tqdm
648
946
  # Ensure common features are selected
649
- common_features = ips.shared(common_features,
650
- X_train.columns,
651
- X_true.columns,
652
- strict=True)
947
+ common_features = ips.shared(common_features, x_train.columns, x_true.columns, strict=True, verbose=False)
653
948
 
654
949
  # Filter the training and validation datasets for the common features
655
- X_train_selected = X_train[common_features]
656
- X_true_selected = X_true[common_features]
950
+ x_train_selected = x_train[common_features]
951
+ x_true_selected = x_true[common_features]
657
952
 
658
- if not X_true_selected.index.equals(y_true.index):
659
- raise ValueError("Index mismatch between validation features and target. Ensure data alignment.")
660
-
661
- y_true= y_true.loc[X_true_selected.index]
953
+ if not x_true_selected.index.equals(y_true.index):
954
+ raise ValueError(
955
+ "Index mismatch between validation features and target. Ensure data alignment."
956
+ )
957
+
958
+ y_true = y_true.loc[x_true_selected.index]
662
959
 
663
960
  # Handle class imbalance using SMOTE
664
961
  if smote:
665
- if y_train.value_counts(normalize=True).max() < 0.8: # Threshold to decide if data is imbalanced
962
+ if (
963
+ y_train.value_counts(normalize=True).max() < 0.8
964
+ ): # Threshold to decide if data is imbalanced
666
965
  smote = SMOTE(random_state=random_state)
667
- X_train_resampled, y_train_resampled = smote.fit_resample(
668
- X_train_selected, y_train
966
+ x_train_resampled, y_train_resampled = smote.fit_resample(
967
+ x_train_selected, y_train
669
968
  )
670
969
  else:
671
970
  # skip SMOTE
672
- X_train_resampled, y_train_resampled = X_train_selected, y_train
971
+ x_train_resampled, y_train_resampled = x_train_selected, y_train
673
972
  else:
674
- X_train_resampled, y_train_resampled = X_train_selected, y_train
973
+ x_train_resampled, y_train_resampled = x_train_selected, y_train
675
974
 
676
- # Default classifiers if not provided
677
- if classifiers is None:
678
- classifiers = {
975
+ # Default models if not provided
976
+ if models is None:
977
+ models = {
679
978
  "Random Forest": RandomForestClassifier(
680
979
  class_weight=class_weight, random_state=random_state
681
980
  ),
@@ -684,86 +983,107 @@ def validate_features(
684
983
  class_weight=class_weight, random_state=random_state
685
984
  ),
686
985
  "Gradient Boosting": GradientBoostingClassifier(random_state=random_state),
687
- "AdaBoost": AdaBoostClassifier(random_state=random_state, algorithm="SAMME"),
688
- "Lasso": LogisticRegression(penalty='l1', solver='saga', random_state=random_state),
689
- "Ridge": LogisticRegression(penalty='l2', solver='saga', random_state=random_state),
690
- "Elastic Net": LogisticRegression(penalty='elasticnet', solver='saga', l1_ratio=0.5, random_state=random_state),
691
- "XGBoost": xgb.XGBClassifier(use_label_encoder=False, eval_metric='logloss'),
986
+ "AdaBoost": AdaBoostClassifier(
987
+ random_state=random_state, algorithm="SAMME"
988
+ ),
989
+ "Lasso": LogisticRegression(
990
+ penalty="l1", solver="saga", random_state=random_state
991
+ ),
992
+ "Ridge": LogisticRegression(
993
+ penalty="l2", solver="saga", random_state=random_state
994
+ ),
995
+ "Elastic Net": LogisticRegression(
996
+ penalty="elasticnet",
997
+ solver="saga",
998
+ l1_ratio=0.5,
999
+ random_state=random_state,
1000
+ ),
1001
+ "XGBoost": xgb.XGBClassifier(eval_metric="logloss"
1002
+ ),
692
1003
  "Naive Bayes": GaussianNB(),
693
- "LDA": LinearDiscriminantAnalysis()
1004
+ "LDA": LinearDiscriminantAnalysis(),
694
1005
  }
695
1006
 
696
- # Hyperparameter grids for tuning
1007
+ # Hyperparameter grids for tuning
697
1008
  param_grids = {
698
1009
  "Random Forest": {
699
- 'n_estimators': [100, 200, 300, 400, 500],
700
- 'max_depth': [None, 3, 5, 10, 20],
701
- 'min_samples_split': [2, 5, 10],
702
- 'min_samples_leaf': [1, 2, 4],
703
- 'class_weight': [None, 'balanced']
1010
+ "n_estimators": [100, 200, 300, 400, 500],
1011
+ "max_depth": [None, 3, 5, 10, 20],
1012
+ "min_samples_split": [2, 5, 10],
1013
+ "min_samples_leaf": [1, 2, 4],
1014
+ "class_weight": [None, "balanced"],
704
1015
  },
705
1016
  "SVM": {
706
- 'C': [0.01, 0.1, 1, 10, 100, 1000],
707
- 'gamma': [0.001, 0.01, 0.1, 'scale', 'auto'],
708
- 'kernel': ['linear', 'rbf', 'poly']
1017
+ "C": [0.01, 0.1, 1, 10, 100, 1000],
1018
+ "gamma": [0.001, 0.01, 0.1, "scale", "auto"],
1019
+ "kernel": ["linear", "rbf", "poly"],
709
1020
  },
710
1021
  "Logistic Regression": {
711
- 'C': [0.01, 0.1, 1, 10, 100],
712
- 'solver': ['liblinear', 'saga', 'newton-cg', 'lbfgs'],
713
- 'penalty': ['l1', 'l2'],
714
- 'max_iter': [100, 200, 300]
1022
+ "C": [0.01, 0.1, 1, 10, 100],
1023
+ "solver": ["liblinear", "saga", "newton-cg", "lbfgs"],
1024
+ "penalty": ["l1", "l2"],
1025
+ "max_iter": [100, 200, 300],
715
1026
  },
716
1027
  "Gradient Boosting": {
717
- 'n_estimators': [100, 200, 300, 400, 500],
718
- 'learning_rate': np.logspace(-3, 0, 4),
719
- 'max_depth': [3, 5, 7, 9],
720
- 'min_samples_split': [2, 5, 10]
1028
+ "n_estimators": [100, 200, 300, 400, 500],
1029
+ "learning_rate": np.logspace(-3, 0, 4),
1030
+ "max_depth": [3, 5, 7, 9],
1031
+ "min_samples_split": [2, 5, 10],
721
1032
  },
722
1033
  "AdaBoost": {
723
- 'n_estimators': [50, 100, 200, 300, 500],
724
- 'learning_rate': np.logspace(-3, 0, 4)
725
- },
726
- "Lasso": {
727
- 'C': np.logspace(-3, 1, 10),
728
- 'max_iter': [100, 200, 300]
729
- },
730
- "Ridge": {
731
- 'C': np.logspace(-3, 1, 10),
732
- 'max_iter': [100, 200, 300]
1034
+ "n_estimators": [50, 100, 200, 300, 500],
1035
+ "learning_rate": np.logspace(-3, 0, 4),
733
1036
  },
1037
+ "Lasso": {"C": np.logspace(-3, 1, 10), "max_iter": [100, 200, 300]},
1038
+ "Ridge": {"C": np.logspace(-3, 1, 10), "max_iter": [100, 200, 300]},
734
1039
  "Elastic Net": {
735
- 'C': np.logspace(-3, 1, 10),
736
- 'l1_ratio': [0.1, 0.5, 0.9],
737
- 'max_iter': [100, 200, 300]
1040
+ "C": np.logspace(-3, 1, 10),
1041
+ "l1_ratio": [0.1, 0.5, 0.9],
1042
+ "max_iter": [100, 200, 300],
738
1043
  },
739
1044
  "XGBoost": {
740
- 'n_estimators': [100, 200],
741
- 'max_depth': [3, 5, 7],
742
- 'learning_rate': [0.01, 0.1, 0.2],
743
- 'subsample': [0.8, 1.0],
744
- 'colsample_bytree': [0.8, 1.0]
1045
+ "n_estimators": [100, 200],
1046
+ "max_depth": [3, 5, 7],
1047
+ "learning_rate": [0.01, 0.1, 0.2],
1048
+ "subsample": [0.8, 1.0],
1049
+ "colsample_bytree": [0.8, 1.0],
745
1050
  },
746
1051
  "Naive Bayes": {},
747
- "LDA": {
748
- 'solver': ['svd', 'lsqr', 'eigen']
749
- }
1052
+ "LDA": {"solver": ["svd", "lsqr", "eigen"]},
750
1053
  }
751
1054
  # Default metrics if not provided
752
1055
  if metrics is None:
753
- metrics = ["accuracy", "precision", "recall", "f1", "roc_auc", "mcc", "specificity", "balanced_accuracy", "pr_auc"]
1056
+ metrics = [
1057
+ "accuracy",
1058
+ "precision",
1059
+ "recall",
1060
+ "f1",
1061
+ "roc_auc",
1062
+ "mcc",
1063
+ "specificity",
1064
+ "balanced_accuracy",
1065
+ "pr_auc",
1066
+ ]
754
1067
 
755
1068
  results = {}
756
1069
 
757
1070
  # Validate each classifier with GridSearchCV
758
- for name, clf in classifiers.items():
1071
+ for name, clf in tqdm(
1072
+ models.items(),
1073
+ desc="for metric in metrics",
1074
+ colour="green",
1075
+ bar_format="{l_bar}{bar} {n_fmt}/{total_fmt}",
1076
+ ):
759
1077
  print(f"\nValidating {name} on the validation dataset:")
760
1078
 
761
1079
  # Check if `predict_proba` method exists; if not, use CalibratedClassifierCV
762
1080
  # For classifiers without predict_proba, CalibratedClassifierCV can still provide calibrated probability estimates. To keep the code flexible, we check
763
1081
  # whether the predict_proba method exists when the classifier is created; if it is missing and the user wants roc_auc or pr_auc, CalibratedClassifierCV is used
764
1082
  if not hasattr(clf, "predict_proba"):
765
- print(f"Using CalibratedClassifierCV for {name} due to lack of probability estimates.")
766
- calibrated_clf = CalibratedClassifierCV(clf, method='sigmoid', cv='prefit')
1083
+ print(
1084
+ f"Using CalibratedClassifierCV for {name} due to lack of probability estimates."
1085
+ )
1086
+ calibrated_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
767
1087
  else:
768
1088
  calibrated_clf = clf
769
1089
  # Stratified K-Fold for cross-validation
@@ -771,28 +1091,30 @@ def validate_features(
771
1091
 
772
1092
  # Create GridSearchCV object
773
1093
  gs = GridSearchCV(
774
- estimator= calibrated_clf,
1094
+ estimator=calibrated_clf,
775
1095
  param_grid=param_grids[name],
776
1096
  scoring="roc_auc", # Optimize for ROC AUC
777
1097
  cv=skf, # Stratified K-Folds cross-validation
778
- n_jobs=-1,
1098
+ n_jobs=n_jobs,
779
1099
  verbose=1,
780
1100
  )
781
1101
 
782
1102
  # Fit the model using GridSearchCV
783
- gs.fit(X_train_resampled, y_train_resampled)
1103
+ gs.fit(x_train_resampled, y_train_resampled)
784
1104
  # Best estimator from grid search
785
1105
  best_clf = gs.best_estimator_
786
1106
  # Make predictions on the validation set
787
- y_pred = best_clf.predict(X_true_selected)
1107
+ y_pred = best_clf.predict(x_true_selected)
788
1108
  # Calculate probabilities for ROC AUC if possible
789
1109
  if hasattr(best_clf, "predict_proba"):
790
- y_pred_proba = best_clf.predict_proba(X_true_selected)[:, 1]
1110
+ y_pred_proba = best_clf.predict_proba(x_true_selected)[:, 1]
791
1111
  elif hasattr(best_clf, "decision_function"):
792
1112
  # If predict_proba is not available, use decision_function (e.g., for SVM)
793
- y_pred_proba = best_clf.decision_function(X_true_selected)
1113
+ y_pred_proba = best_clf.decision_function(x_true_selected)
794
1114
  # Ensure y_pred_proba is within 0 and 1 bounds
795
- y_pred_proba = (y_pred_proba - y_pred_proba.min()) / (y_pred_proba.max() - y_pred_proba.min())
1115
+ y_pred_proba = (y_pred_proba - y_pred_proba.min()) / (
1116
+ y_pred_proba.max() - y_pred_proba.min()
1117
+ )
796
1118
  else:
797
1119
  y_pred_proba = None # No probability output for certain models
798
1120
 
@@ -802,11 +1124,15 @@ def validate_features(
802
1124
  if metric == "accuracy":
803
1125
  validation_scores[metric] = accuracy_score(y_true, y_pred)
804
1126
  elif metric == "precision":
805
- validation_scores[metric] = precision_score(y_true, y_pred, average='weighted')
1127
+ validation_scores[metric] = precision_score(
1128
+ y_true, y_pred, average="weighted"
1129
+ )
806
1130
  elif metric == "recall":
807
- validation_scores[metric] = recall_score(y_true, y_pred, average='weighted')
1131
+ validation_scores[metric] = recall_score(
1132
+ y_true, y_pred, average="weighted"
1133
+ )
808
1134
  elif metric == "f1":
809
- validation_scores[metric] = f1_score(y_true, y_pred, average='weighted')
1135
+ validation_scores[metric] = f1_score(y_true, y_pred, average="weighted")
810
1136
  elif metric == "roc_auc" and y_pred_proba is not None:
811
1137
  validation_scores[metric] = roc_auc_score(y_true, y_pred_proba)
812
1138
  elif metric == "mcc":
@@ -816,32 +1142,35 @@ def validate_features(
816
1142
  validation_scores[metric] = tn / (tn + fp) # Specificity calculation
817
1143
  elif metric == "balanced_accuracy":
818
1144
  validation_scores[metric] = balanced_accuracy_score(y_true, y_pred)
819
- elif metric == "pr_auc" and y_pred_proba is not None:
1145
+ elif metric == "pr_auc" and y_pred_proba is not None:
820
1146
  precision, recall, _ = precision_recall_curve(y_true, y_pred_proba)
821
- validation_scores[metric] = average_precision_score(y_true, y_pred_proba)
822
-
1147
+ validation_scores[metric] = average_precision_score(
1148
+ y_true, y_pred_proba
1149
+ )
1150
+
823
1151
  # Calculate ROC curve
824
- #https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
1152
+ # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
825
1153
  if y_pred_proba is not None:
826
1154
  # fpr, tpr, roc_auc = dict(), dict(), dict()
827
1155
  fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
828
- lower_ci, upper_ci = cal_auc_ci(y_true, y_pred_proba)
829
- roc_auc=auc(fpr, tpr)
830
- roc_info={
1156
+ lower_ci, upper_ci = cal_auc_ci(y_true, y_pred_proba,verbose=False)
1157
+ roc_auc = auc(fpr, tpr)
1158
+ roc_info = {
831
1159
  "fpr": fpr.tolist(),
832
1160
  "tpr": tpr.tolist(),
833
- "auc":roc_auc,
834
- "ci95":(lower_ci, upper_ci)
1161
+ "auc": roc_auc,
1162
+ "ci95": (lower_ci, upper_ci),
835
1163
  }
836
1164
  # precision-recall curve
837
- precision_, recall_, _ = precision_recall_curve(y_true, y_pred_proba)
1165
+ precision_, recall_, _ = precision_recall_curve(y_true, y_pred_proba)
838
1166
  avg_precision_ = average_precision_score(y_true, y_pred_proba)
839
- pr_info = {"precision": precision_,
840
- "recall":recall_,
841
- "avg_precision":avg_precision_
842
- }
1167
+ pr_info = {
1168
+ "precision": precision_,
1169
+ "recall": recall_,
1170
+ "avg_precision": avg_precision_,
1171
+ }
843
1172
  else:
844
- roc_info,pr_info=None,None
1173
+ roc_info, pr_info = None, None
845
1174
  results[name] = {
846
1175
  "best_params": gs.best_params_,
847
1176
  "scores": validation_scores,
@@ -849,24 +1178,93 @@ def validate_features(
849
1178
  "pr_curve": pr_info,
850
1179
  "confusion_matrix": confusion_matrix(y_true, y_pred),
851
1180
  }
852
-
1181
+
853
1182
  df_results = pd.DataFrame.from_dict(results, orient="index")
854
1183
 
855
1184
  return df_results
856
1185
 
857
- #! usage validate_features()
858
- # Validate classifiers using the validation dataset (X_val, y_val)
1186
+
1187
+ #! usage validate_features()
1188
+ # Validate models using the validation dataset (X_val, y_val)
859
1189
  # validation_results = validate_features(X, y, X_val, y_val, common_features)
860
1190
 
861
1191
  # # If you want to access validation scores
862
1192
  # print(validation_results)
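  # A minimal sketch of reading the returned DataFrame (assuming "validation_results" from the call above):
  # for model_name in validation_results.index:
  #     scores = validation_results.loc[model_name, "scores"]  # dict of the metrics listed above
  #     print(model_name, scores.get("roc_auc"), scores.get("f1"))
  # # per-model "roc_curve", "pr_curve" and "confusion_matrix" entries are available the same way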
1193
+ def plot_validate_features(res_val):
1194
+ """
1195
+ plot the results of 'validate_features()'
1196
+ """
1197
+ colors = plot.get_color(len(ips.flatten(res_val["pr_curve"].index)))
1198
+ if res_val.shape[0]>5:
1199
+ alpha=0
1200
+ figsize=[8,10]
1201
+ subplot_layout=[1,2]
1202
+ ncols=2
1203
+ bbox_to_anchor=[1.5,0.6]
1204
+ else:
1205
+ alpha=0.03
1206
+ figsize=[10,6]
1207
+ subplot_layout=[1,1]
1208
+ ncols=1
1209
+ bbox_to_anchor=[1,1]
1210
+ nexttile = plot.subplot(figsize=figsize)
1211
+ ax = nexttile(subplot_layout[0],subplot_layout[1])
1212
+ for i, model_name in enumerate(ips.flatten(res_val["pr_curve"].index)):
1213
+ fpr = res_val["roc_curve"][model_name]["fpr"]
1214
+ tpr = res_val["roc_curve"][model_name]["tpr"]
1215
+ (lower_ci, upper_ci) = res_val["roc_curve"][model_name]["ci95"]
1216
+ mean_auc = res_val["roc_curve"][model_name]["auc"]
1217
+ plot_roc_curve(
1218
+ fpr,tpr,mean_auc,lower_ci,upper_ci,model_name=model_name,
1219
+ lw=1.5,color=colors[i],alpha=alpha,ax=ax)
1220
+ plot.figsets(sp=2,legend=dict(loc="upper right", ncols=ncols, fontsize=8, bbox_to_anchor=[1.5,0.6],markerscale=0.8))
1221
+ # plot.split_legend(ax,n=2, loc=["upper left", "lower left"],bbox=[[1,0.5],[1,0.5]],ncols=2,labelcolor="k",fontsize=8)
1222
+
1223
+ ax = nexttile(subplot_layout[0],subplot_layout[1])
1224
+ for i, model_name in enumerate(ips.flatten(res_val["pr_curve"].index)):
1225
+ plot_pr_curve(
1226
+ recall=res_val["pr_curve"][model_name]["recall"],
1227
+ precision=res_val["pr_curve"][model_name]["precision"],
1228
+ avg_precision=res_val["pr_curve"][model_name]["avg_precision"],
1229
+ model_name=model_name,
1230
+ color=colors[i],lw=1.5,alpha=alpha,ax=ax)
1231
+ plot.figsets(sp=2,legend=dict(loc="upper right", ncols=1, fontsize=8, bbox_to_anchor=[1.5,0.5]))
1232
+ # plot.split_legend(ax,n=2, loc=["upper left", "lower left"],bbox=[[1,0.5],[1,0.5]],ncols=2,labelcolor="k",fontsize=8)
863
1233
 
864
-
865
- def cal_auc_ci(y_true, y_pred, n_bootstraps=1000, ci=0.95,random_state=1):
1234
+ def plot_validate_features_single(res_val,figsize=None):
1235
+ if figsize is None:
1236
+ nexttile = plot.subplot(len(ips.flatten(res_val["pr_curve"].index)), 3)
1237
+ else:
1238
+ nexttile = plot.subplot(len(ips.flatten(res_val["pr_curve"].index)), 3,figsize=figsize)
1239
+ for model_name in ips.flatten(res_val["pr_curve"].index):
1240
+ fpr = res_val["roc_curve"][model_name]["fpr"]
1241
+ tpr = res_val["roc_curve"][model_name]["tpr"]
1242
+ (lower_ci, upper_ci) = res_val["roc_curve"][model_name]["ci95"]
1243
+ mean_auc = res_val["roc_curve"][model_name]["auc"]
1244
+
1245
+ # Plotting
1246
+ plot_roc_curve(fpr, tpr, mean_auc, lower_ci, upper_ci, ax=nexttile())
1247
+ plot.figsets(title=model_name, sp=2)
1248
+
1249
+ plot_pr_curve(
1250
+ recall=res_val["pr_curve"][model_name]["recall"],
1251
+ precision=res_val["pr_curve"][model_name]["precision"],
1252
+ avg_precision=res_val["pr_curve"][model_name]["avg_precision"],
1253
+ model_name=model_name,
1254
+ ax=nexttile(),
1255
+ )
1256
+ plot.figsets(title=model_name, sp=2)
1257
+
1258
+ # plot cm
1259
+ plot_cm(res_val["confusion_matrix"][model_name], ax=nexttile(), normalize=False)
1260
+ plot.figsets(title=model_name, sp=2)
1261
+
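+ # * illustrative usage (a sketch; "res_val" is assumed to be the DataFrame returned by validate_features):
+ # plot_validate_features(res_val)  # overlay all models: one ROC panel and one PR panel
+ # plot_validate_features_single(res_val)  # one row per model: ROC, PR and confusion matrix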
1262
+ def cal_auc_ci(y_true, y_pred, n_bootstraps=1000, ci=0.95, random_state=1,verbose=True):
866
1263
  y_true = np.asarray(y_true)
867
1264
  y_pred = np.asarray(y_pred)
868
1265
  bootstrapped_scores = []
869
- print("auroc score:", roc_auc_score(y_true, y_pred))
1266
+ if verbose:
1267
+ print("auroc score:", roc_auc_score(y_true, y_pred))
870
1268
  rng = np.random.RandomState(random_state)
871
1269
  for i in range(n_bootstraps):
872
1270
  # bootstrap by sampling with replacement on the prediction indices
@@ -887,21 +1285,24 @@ def cal_auc_ci(y_true, y_pred, n_bootstraps=1000, ci=0.95,random_state=1):
887
1285
  # Computing the lower and upper bound of the 90% confidence interval
888
1286
  # You can change the bounds percentiles to 0.025 and 0.975 to get
889
1287
  # a 95% confidence interval instead.
890
- confidence_lower = sorted_scores[int((1-ci) * len(sorted_scores))]
1288
+ confidence_lower = sorted_scores[int((1 - ci) * len(sorted_scores))]
891
1289
  confidence_upper = sorted_scores[int(ci * len(sorted_scores))]
892
- print(
1290
+ if verbose:
1291
+ print(
893
1292
  "Confidence interval for the score: [{:0.3f} - {:0.3}]".format(
894
1293
  confidence_lower, confidence_upper
895
1294
  )
896
1295
  )
897
1296
  return confidence_lower, confidence_upper
898
1297
 
1298
+
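  # * illustrative usage of cal_auc_ci (a sketch with made-up arrays):
  # y_true = np.array([0, 1, 1, 0, 1, 0, 1, 0])
  # y_score = np.array([0.2, 0.8, 0.7, 0.3, 0.9, 0.4, 0.6, 0.1])
  # lower, upper = cal_auc_ci(y_true, y_score, n_bootstraps=500, ci=0.95, verbose=False)
  # # with ci=0.95 the bounds are the 5th and 95th bootstrap percentiles (a 90% central interval)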
899
1299
  def plot_roc_curve(
900
1300
  fpr=None,
901
1301
  tpr=None,
902
1302
  mean_auc=None,
903
1303
  lower_ci=None,
904
1304
  upper_ci=None,
1305
+ model_name=None,
905
1306
  color="#FF8F00",
906
1307
  lw=2,
907
1308
  alpha=0.1,
@@ -913,24 +1314,23 @@ def plot_roc_curve(
913
1314
  diagonal_color="0.5",
914
1315
  figsize=(5, 5),
915
1316
  ax=None,
916
- **kwargs
1317
+ **kwargs,
917
1318
  ):
918
1319
  if ax is None:
919
1320
  fig, ax = plt.subplots(figsize=figsize)
920
1321
  if mean_auc is not None:
1322
+ model_name = "ROC curve" if model_name is None else model_name
921
1323
  if ci_display:
922
- label = (
923
- f"ROC curve (AUC = {mean_auc:.3f})\n95% CI: {lower_ci:.3f} - {upper_ci:.3f}"
924
- )
1324
+ label = f"{model_name} (AUC = {mean_auc:.3f})\n95% CI: {lower_ci:.3f} - {upper_ci:.3f}"
925
1325
  else:
926
- label = f"ROC curve (AUC = {mean_auc:.3f})"
1326
+ label = f"{model_name} (AUC = {mean_auc:.3f})"
927
1327
  else:
928
1328
  label = None
929
1329
 
930
1330
  # Plot ROC curve and the diagonal reference line
931
1331
  ax.fill_between(fpr, tpr, alpha=alpha, color=color)
932
- ax.plot([0, 1], [0, 1], color=diagonal_color, linestyle="--")
933
- ax.plot(fpr, tpr, color=color, lw=lw, label=label,**kwargs)
1332
+ ax.plot([0, 1], [0, 1], color=diagonal_color, clip_on=False, linestyle="--")
1333
+ ax.plot(fpr, tpr, color=color, lw=lw, label=label,clip_on=False, **kwargs)
934
1334
  # Setting plot limits, labels, and title
935
1335
  ax.set_xlim([-0.01, 1.0])
936
1336
  ax.set_ylim([0.0, 1.0])
@@ -939,7 +1339,9 @@ def plot_roc_curve(
939
1339
  ax.set_title(title)
940
1340
  ax.legend(loc=legend_loc)
941
1341
  return ax
942
- #* usage: ml2ls.plot_roc_curve(fpr, tpr, mean_auc, lower_ci, upper_ci)
1342
+
1343
+
1344
+ # * usage: ml2ls.plot_roc_curve(fpr, tpr, mean_auc, lower_ci, upper_ci)
943
1345
  # for model_name in flatten(validation_results["roc_curve"].keys())[2:]:
944
1346
  # fpr = validation_results["roc_curve"][model_name]["fpr"]
945
1347
  # tpr = validation_results["roc_curve"][model_name]["tpr"]
@@ -950,6 +1352,7 @@ def plot_roc_curve(
950
1352
  # ml2ls.plot_roc_curve(fpr, tpr, mean_auc, lower_ci, upper_ci)
951
1353
  # figsets(title=model_name)
952
1354
 
1355
+
953
1356
  def plot_pr_curve(
954
1357
  recall=None,
955
1358
  precision=None,
@@ -961,21 +1364,24 @@ def plot_pr_curve(
961
1364
  xlabel="Recall",
962
1365
  ylabel="Precision",
963
1366
  alpha=0.1,
964
- color="#FF8F00",
1367
+ color="#FF8F00",
965
1368
  legend_loc="lower left",
966
1369
  ax=None,
967
- **kwargs
1370
+ **kwargs,
968
1371
  ):
969
1372
  if ax is None:
970
1373
  fig, ax = plt.subplots(figsize=figsize)
971
-
1374
+ model_name = "PR curve" if model_name is None else model_name
972
1375
  # Plot Precision-Recall curve
973
- ax.plot(recall,
974
- precision,
975
- lw=lw,
976
- color=color,
977
- label=( f"PR curve (AUC={avg_precision:.2f})"),
978
- **kwargs)
1376
+ ax.plot(
1377
+ recall,
1378
+ precision,
1379
+ lw=lw,
1380
+ color=color,
1381
+ label=(f"{model_name} (AUC={avg_precision:.2f})"),
1382
+ clip_on=False,
1383
+ **kwargs,
1384
+ )
979
1385
  # Fill area under the curve
980
1386
  ax.fill_between(recall, precision, alpha=alpha, color=color)
981
1387
 
@@ -985,10 +1391,12 @@ def plot_pr_curve(
985
1391
  ax.set_ylabel(ylabel)
986
1392
  ax.set_xlim([-0.01, 1.0])
987
1393
  ax.set_ylim([0.0, 1.0])
988
- ax.grid(False)
1394
+ ax.grid(False)
989
1395
  ax.legend(loc=legend_loc)
990
1396
  return ax
991
- #* usage: ml2ls.plot_pr_curve()
1397
+
1398
+
1399
+ # * usage: ml2ls.plot_pr_curve()
992
1400
  # for md_name in flatten(validation_results["pr_curve"].keys()):
993
1401
  # ml2ls.plot_pr_curve(
994
1402
  # recall=validation_results["pr_curve"][md_name]["recall"],
@@ -1000,6 +1408,7 @@ def plot_pr_curve(
1000
1408
  # color="r",
1001
1409
  # )
1002
1410
 
1411
+
1003
1412
  def plot_cm(
1004
1413
  cm,
1005
1414
  labels_name=None,
@@ -1016,7 +1425,9 @@ def plot_cm(
1016
1425
  if ax is None:
1017
1426
  fig, ax = plt.subplots(figsize=figsize)
1018
1427
 
1019
- cm_normalized = np.round(cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] * 100, 2)
1428
+ cm_normalized = np.round(
1429
+ cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] * 100, 2
1430
+ )
1020
1431
  cm_value = cm_normalized if normalize else cm.astype("int")
1021
1432
  # Plot the heatmap
1022
1433
  cax = ax.imshow(cm_normalized, interpolation="nearest", cmap=cmap)
@@ -1026,14 +1437,13 @@ def plot_cm(
1026
1437
  # Define tick labels based on provided labels
1027
1438
  num_local = np.arange(len(labels_name)) if labels_name is not None else range(2)
1028
1439
  if axis_labels is None:
1029
- axis_labels = labels_name if labels_name is not None else ["No","Yes"]
1440
+ axis_labels = labels_name if labels_name is not None else ["No", "Yes"]
1030
1441
  ax.set_xticks(num_local)
1031
1442
  ax.set_xticklabels(axis_labels)
1032
1443
  ax.set_yticks(num_local)
1033
1444
  ax.set_yticklabels(axis_labels)
1034
1445
  ax.set_ylabel(ylabel)
1035
1446
  ax.set_xlabel(xlabel)
1036
- plot.figsets(ax=ax, xtickloc="tl", boxloc="none")
1037
1447
 
1038
1448
  # Add TN, FP, FN, TP annotations specifically for binary classification (2x2 matrix)
1039
1449
  if labels_name is None or len(labels_name) == 2:
@@ -1050,29 +1460,53 @@ def plot_cm(
1050
1460
  tp_label = "TP"
1051
1461
 
1052
1462
  # Adjust positions slightly for TN, FP, FN, TP labels
1053
- ax.text(0,0,
1054
- f"{tn_label}:{cm_normalized[0, 0]:.2f}%" if normalize else f"{tn_label}:{cm_value[0, 0]}",
1463
+ ax.text(
1464
+ 0,
1465
+ 0,
1466
+ (
1467
+ f"{tn_label}:{cm_normalized[0, 0]:.2f}%"
1468
+ if normalize
1469
+ else f"{tn_label}:{cm_value[0, 0]}"
1470
+ ),
1055
1471
  ha="center",
1056
1472
  va="center",
1057
1473
  color="white" if cm_normalized[0, 0] > thresh * 100 else "black",
1058
1474
  fontsize=fontsize,
1059
1475
  )
1060
- ax.text(1,0,
1061
- f"{fp_label}:{cm_normalized[0, 1]:.2f}%" if normalize else f"{tn_label}:{cm_value[0, 1]}",
1476
+ ax.text(
1477
+ 1,
1478
+ 0,
1479
+ (
1480
+ f"{fp_label}:{cm_normalized[0, 1]:.2f}%"
1481
+ if normalize
1482
+ else f"{fp_label}:{cm_value[0, 1]}"
1483
+ ),
1062
1484
  ha="center",
1063
1485
  va="center",
1064
1486
  color="white" if cm_normalized[0, 1] > thresh * 100 else "black",
1065
1487
  fontsize=fontsize,
1066
1488
  )
1067
- ax.text(0,1,
1068
- f"{fn_label}:{cm_normalized[1, 0]:.2f}%" if normalize else f"{tn_label}:{cm_value[1, 0]}",
1489
+ ax.text(
1490
+ 0,
1491
+ 1,
1492
+ (
1493
+ f"{fn_label}:{cm_normalized[1, 0]:.2f}%"
1494
+ if normalize
1495
+ else f"{fn_label}:{cm_value[1, 0]}"
1496
+ ),
1069
1497
  ha="center",
1070
1498
  va="center",
1071
1499
  color="white" if cm_normalized[1, 0] > thresh * 100 else "black",
1072
1500
  fontsize=fontsize,
1073
1501
  )
1074
- ax.text(1,1,
1075
- f"{tp_label}:{cm_normalized[1, 1]:.2f}%" if normalize else f"{tn_label}:{cm_value[1, 1]}",
1502
+ ax.text(
1503
+ 1,
1504
+ 1,
1505
+ (
1506
+ f"{tp_label}:{cm_normalized[1, 1]:.2f}%"
1507
+ if normalize
1508
+ else f"{tp_label}:{cm_value[1, 1]}"
1509
+ ),
1076
1510
  ha="center",
1077
1511
  va="center",
1078
1512
  color="white" if cm_normalized[1, 1] > thresh * 100 else "black",
@@ -1084,11 +1518,1028 @@ def plot_cm(
1084
1518
  for j in range(len(labels_name)):
1085
1519
  val = cm_normalized[i, j]
1086
1520
  color = "white" if val > thresh * 100 else "black"
1087
- ax.text(j,i,
1521
+ ax.text(
1522
+ j,
1523
+ i,
1088
1524
  f"{val:.2f}%",
1089
1525
  ha="center",
1090
1526
  va="center",
1091
1527
  color=color,
1092
1528
  fontsize=fontsize,
1093
1529
  )
1530
+
1531
+ plot.figsets(ax=ax,
1532
+ boxloc="none"
1533
+ )
1094
1534
  return ax
1535
+
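+ # * illustrative usage of plot_cm (a sketch; y_true/y_pred are assumed label arrays):
+ # from sklearn.metrics import confusion_matrix
+ # cm = confusion_matrix(y_true, y_pred)
+ # plot_cm(cm, labels_name=["No", "Yes"], normalize=True)  # percentages; normalize=False shows counts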
1536
+ def rank_models(
1537
+ cv_test_scores,
1538
+ rm_outlier=False,
1539
+ metric_weights=None,
1540
+ plot_=True,
1541
+ ):
1542
+ """
1543
+ Selects the best model based on a multi-metric scoring approach, with outlier handling, optional visualization,
1544
+ and additional performance metrics.
1545
+
1546
+ Parameters:
1547
+ - cv_test_scores (pd.DataFrame): DataFrame with cross-validation results across multiple metrics.
1548
+ Assumes columns are 'Classifier', 'accuracy', 'precision', 'recall', 'f1', 'roc_auc'.
1549
+ - metric_weights (dict): Dictionary specifying weights for each metric (e.g., {'accuracy': 0.2, 'precision': 0.3, ...}).
1550
+ If None, default weights are applied equally across available metrics.
1551
+ a. equal_weights (standard approach): all metrics are treated as equally important
1552
+ e.g., {"accuracy": 0.2, "precision": 0.2, "recall": 0.2, "f1": 0.2, "roc_auc": 0.2}
1553
+ b. accuracy_focused: when classification correctness matters most (e.g., in balanced datasets), accuracy might be weighted more heavily.
1554
+ e.g., {"accuracy": 0.4, "precision": 0.2, "recall": 0.2, "f1": 0.1, "roc_auc": 0.1}
1555
+ c. Precision and Recall Emphasis: In cases where false positives and false negatives are particularly important (such as
1556
+ in medical applications or fraud detection), precision and recall may be weighted more heavily.
1557
+ e.g., {"accuracy": 0.2, "precision": 0.3, "recall": 0.3, "f1": 0.1, "roc_auc": 0.1}
1558
+ d. F1-Focused: When balance between precision and recall is crucial (e.g., in imbalanced datasets)
1559
+ e.g., {"accuracy": 0.2, "precision": 0.2, "recall": 0.2, "f1": 0.3, "roc_auc": 0.1}
1560
+ e. ROC-AUC Emphasis: In some cases, ROC AUC may be prioritized, particularly in classification tasks where class imbalance
1561
+ is present, as ROC AUC accounts for the model's performance across all classification thresholds.
1562
+ e.g., {"accuracy": 0.1, "precision": 0.2, "recall": 0.2, "f1": 0.3, "roc_auc": 0.3}
1563
+
1564
+ - rm_outlier (bool): If True, removes outlier scores (via ips.df_outlier) before ranking.
1565
+ - plot_ (bool): If True, generates visualizations (a bar plot and a radar chart).
1566
+ Metric scores are min-max normalized to [0, 1] with MinMaxScaler before weighting.
1567
+ The weighted sum of the normalized metrics is used as the 'combined_score' for ranking.
1568
+
1569
+ Returns:
1570
+ - cv_test_scores (pd.DataFrame): the input scores reordered so that the model with the
1571
+ highest weighted combined score comes first (the first row is the best model).
1572
+ A bar plot and a radar chart of the ranking are drawn when plot_=True.
1573
+ """
1574
+ from sklearn.preprocessing import MinMaxScaler
1575
+ import seaborn as sns
1576
+ import matplotlib.pyplot as plt
1577
+ from py2ls import plot
1578
+
1579
+ # Check for missing metrics and set default weights if not provided
1580
+ available_metrics = cv_test_scores.columns[1:] # Exclude 'Classifier' column
1581
+ if metric_weights is None:
1582
+ metric_weights = {
1583
+ metric: 1 / len(available_metrics) for metric in available_metrics
1584
+ } # Equal weight if not specified
1585
+ elif metric_weights == "a":
1586
+ metric_weights = {
1587
+ "accuracy": 0.2,
1588
+ "precision": 0.2,
1589
+ "recall": 0.2,
1590
+ "f1": 0.2,
1591
+ "roc_auc": 0.2,
1592
+ }
1593
+ elif metric_weights == "b":
1594
+ metric_weights = {
1595
+ "accuracy": 0.4,
1596
+ "precision": 0.2,
1597
+ "recall": 0.2,
1598
+ "f1": 0.1,
1599
+ "roc_auc": 0.1,
1600
+ }
1601
+ elif metric_weights == "c":
1602
+ metric_weights = {
1603
+ "accuracy": 0.2,
1604
+ "precision": 0.3,
1605
+ "recall": 0.3,
1606
+ "f1": 0.1,
1607
+ "roc_auc": 0.1,
1608
+ }
1609
+ elif metric_weights == "d":
1610
+ metric_weights = {
1611
+ "accuracy": 0.2,
1612
+ "precision": 0.2,
1613
+ "recall": 0.2,
1614
+ "f1": 0.3,
1615
+ "roc_auc": 0.1,
1616
+ }
1617
+ elif metric_weights == "e":
1618
+ metric_weights = {
1619
+ "accuracy": 0.1,
1620
+ "precision": 0.2,
1621
+ "recall": 0.2,
1622
+ "f1": 0.3,
1623
+ "roc_auc": 0.3,
1624
+ }
1625
+ else:
1626
+ metric_weights = {
1627
+ metric: 1 / len(available_metrics) for metric in available_metrics
1628
+ }
1629
+
1630
+ # Normalize weights if they don’t sum to 1
1631
+ total_weight = sum(metric_weights.values())
1632
+ metric_weights = {
1633
+ metric: weight / total_weight for metric, weight in metric_weights.items()
1634
+ }
1635
+ if rm_outlier:
1636
+ cv_test_scores_ = ips.df_outlier(cv_test_scores)
1637
+ else:
1638
+ cv_test_scores_=cv_test_scores
1639
+
1640
+ # Normalize the scores of metrics if normalize is True
1641
+ scaler = MinMaxScaler()
1642
+ normalized_scores = pd.DataFrame(
1643
+ scaler.fit_transform(cv_test_scores_[available_metrics]),
1644
+ columns=available_metrics,
1645
+ )
1646
+ cv_test_scores_ = pd.concat(
1647
+ [cv_test_scores_[["Classifier"]], normalized_scores], axis=1
1648
+ )
1649
+
1650
+ # Calculate weighted scores for each model
1651
+ cv_test_scores_["combined_score"] = sum(
1652
+ cv_test_scores_[metric] * weight for metric, weight in metric_weights.items()
1653
+ )
1654
+ top_models = cv_test_scores_.sort_values(by="combined_score", ascending=False)
1655
+ cv_test_scores = cv_test_scores.loc[top_models.index]
1656
+ top_models.reset_index(drop=True, inplace=True)
1657
+ cv_test_scores.reset_index(drop=True, inplace=True)
1658
+
1659
+ if plot_:
1660
+
1661
+ def generate_bar_plot(ax, cv_test_scores):
1662
+ ax = plot.plotxy(
1663
+ y="Classifier", x="combined_score", data=cv_test_scores, kind="bar"
1664
+ )
1665
+ plt.title("Classifier Performance")
1666
+ plt.tight_layout()
1667
+ return plt
1668
+
1669
+ nexttile = plot.subplot(2, 2, figsize=[10, 7])
1670
+ generate_bar_plot(nexttile(), top_models.dropna())
1671
+ plot.radar(
1672
+ ax=nexttile(projection="polar"),
1673
+ data=cv_test_scores.set_index("Classifier"),
1674
+ ylim=[0.5, 1],
1675
+ color=plot.get_color(10),
1676
+ alpha=0.05,
1677
+ circular=1,
1678
+ )
1679
+ return cv_test_scores
1680
+
1681
+
1682
+ # # Example Usage:
1683
+ # metric_weights = {
1684
+ # "accuracy": 0.2,
1685
+ # "precision": 0.3,
1686
+ # "recall": 0.2,
1687
+ # "f1": 0.2,
1688
+ # "roc_auc": 0.1,
1689
+ # }
1690
+ # cv_test_scores = res["cv_test_scores"].copy()
1691
+ # ranked = rank_models(
1692
+ # cv_test_scores, metric_weights=metric_weights, plot_=True
1693
+ # )
1694
+
1695
+ # figsave("classifier_performance.pdf")
1696
+
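+ # A worked example of the weighted combination in rank_models (made-up, already-normalized scores):
+ # with metric_weights {"accuracy": 0.2, "precision": 0.3, "recall": 0.2, "f1": 0.2, "roc_auc": 0.1}
+ # and scores 0.90, 0.80, 0.70, 0.75, 0.85, the combined_score is
+ # 0.2*0.90 + 0.3*0.80 + 0.2*0.70 + 0.2*0.75 + 0.1*0.85 = 0.795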
1697
+ def predict(
1698
+ x_train: pd.DataFrame,
1699
+ y_train: pd.Series,
1700
+ x_true: pd.DataFrame=None,
1701
+ y_true: Optional[pd.Series] = None,
1702
+ common_features: set = None,
1703
+ purpose: str = "classification", # 'classification' or 'regression'
1704
+ cls: Optional[Dict[str, Any]] = None,
1705
+ metrics: Optional[List[str]] = None,
1706
+ random_state: int = 1,
1707
+ smote: bool = False,
1708
+ n_jobs:int = -1,
1709
+ plot_: bool = True,
1710
+ test_size:float=0.2,# specific only when x_true is None
1711
+ cv_folds:int=5,# more cv_folds gives more stable estimates, though the AUC may come out lower
1712
+ cv_level:str="l",#"s":'low',"m":'medium',"l":"high"
1713
+ class_weight: str = "balanced",
1714
+ verbose:bool=False,
1715
+ dir_save:str="./"
1716
+ ) -> pd.DataFrame:
1717
+ """
1718
+ 1. split x_train into train/test subsets and validate on the held-out part:
1719
+ predict(x_train, y_train)
1720
+ 2. use x_train and y_train to fit the models, then predict on x_true:
1721
+ predict(x_train, y_train, x_true)
1722
+ 3. use x_train and y_train to fit the models, then validate against x_true and y_true:
1723
+ predict(x_train, y_train, x_true, y_true)
1724
+
1725
+ Advanced master predictor function with grid search for hyperparameter tuning.
1726
+
1727
+ Parameters:
1728
+ - x_train, y_train: Training dataset.
1729
+ - x_true, y_true: Dataset for validation or prediction (y_true=None for prediction).
1730
+ - common_features (set): Common features to use for validation.
1731
+ - purpose (str): Task type - 'classification' or 'regression'.
1732
+ - cls (str or list): Model name(s) to fit; if None, all built-in models are used.
1733
+ - metrics (list): Metrics to compute.
1734
+ - random_state (int): Seed for reproducibility.
1735
+ - smote (bool): Use SMOTE for class imbalance (classification only).
1736
+ - class_weight (str): Class weights to handle imbalance.
1737
+
1738
+ Returns:
1739
+ - df_results (pd.DataFrame): DataFrame with performance metrics and hyperparameters.
1740
+ """
1741
+ from tqdm import tqdm
1742
+ from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor, BaggingClassifier, BaggingRegressor, AdaBoostClassifier, AdaBoostRegressor
1743
+ from sklearn.svm import SVC, SVR
1744
+ from sklearn.tree import DecisionTreeRegressor
1745
+ from sklearn.linear_model import LogisticRegression, ElasticNet, ElasticNetCV, LinearRegression, Lasso,RidgeClassifierCV, Perceptron, SGDClassifier
1746
+ from sklearn.neighbors import KNeighborsClassifier,KNeighborsRegressor
1747
+ from sklearn.naive_bayes import GaussianNB,BernoulliNB
1748
+ from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
1749
+ import xgboost as xgb
1750
+ import lightgbm as lgb
1751
+ import catboost as cb
1752
+ from sklearn.neural_network import MLPClassifier, MLPRegressor
1753
+ from sklearn.model_selection import GridSearchCV, StratifiedKFold, KFold
1754
+ from sklearn.discriminant_analysis import LinearDiscriminantAnalysis,QuadraticDiscriminantAnalysis
1755
+ from sklearn.preprocessing import PolynomialFeatures
1756
+
1757
+
1758
+ # fuzzy-match the purpose string against the supported options
1759
+ purpose=ips.strcmp(purpose,['classification','regression'])[0]
1760
+ print(f"{purpose} processing...")
1761
+ # Default models or regressors if not provided
1762
+ if purpose == "classification":
1763
+ model_ = {
1764
+ "Random Forest": RandomForestClassifier(random_state=random_state, class_weight=class_weight),
1765
+
1766
+ # SVC (Support Vector Classification)
1767
+ "SVM": SVC(kernel="rbf",probability=True,class_weight=class_weight,random_state=random_state),
1768
+
1769
+ # fit the best model without enforcing sparsity, which means it does not directly perform feature selection.
1770
+ "Logistic Regression": LogisticRegression(class_weight=class_weight, random_state=random_state),
1771
+
1772
+ # Logistic Regression with L1 Regularization (Lasso)
1773
+ "Lasso Logistic Regression": LogisticRegression(penalty="l1", solver="saga", random_state=random_state),
1774
+ "Gradient Boosting": GradientBoostingClassifier(random_state=random_state),
1775
+ "XGBoost": xgb.XGBClassifier(eval_metric="logloss",random_state=random_state,),
1776
+ "KNN": KNeighborsClassifier(n_neighbors=5),
1777
+ "Naive Bayes": GaussianNB(),
1778
+ "Linear Discriminant Analysis": LinearDiscriminantAnalysis(),
1779
+ "AdaBoost": AdaBoostClassifier(algorithm='SAMME', random_state=random_state),
1780
+ # "LightGBM": lgb.LGBMClassifier(random_state=random_state, class_weight=class_weight),
1781
+ "CatBoost": cb.CatBoostClassifier(verbose=0, random_state=random_state),
1782
+ "Extra Trees": ExtraTreesClassifier(random_state=random_state, class_weight=class_weight),
1783
+ "Bagging": BaggingClassifier(random_state=random_state),
1784
+ "Neural Network": MLPClassifier(max_iter=500, random_state=random_state),
1785
+ "DecisionTree": DecisionTreeClassifier(),
1786
+ "Quadratic Discriminant Analysis": QuadraticDiscriminantAnalysis(),
1787
+ "Ridge": RidgeClassifierCV(class_weight=class_weight, store_cv_results=True),
1788
+ "Perceptron": Perceptron(random_state=random_state),
1789
+ "Bernoulli Naive Bayes": BernoulliNB(),
1790
+ "SGDClassifier": SGDClassifier(random_state=random_state),
1791
+ }
1792
+ elif purpose == "regression":
1793
+ model_ = {
1794
+ "Random Forest": RandomForestRegressor(random_state=random_state),
1795
+ "SVM": SVR(),# SVR (Support Vector Regression)
1796
+ # "Lasso": Lasso(random_state=random_state), # 它和LassoCV相同(必须要提供alpha参数),
1797
+ "LassoCV": LassoCV(cv=cv_folds, random_state=random_state),#LassoCV自动找出最适alpha,优于Lasso
1798
+ "Gradient Boosting": GradientBoostingRegressor(random_state=random_state),
1799
+ "XGBoost": xgb.XGBRegressor(eval_metric="rmse",random_state=random_state),
1800
+ "Linear Regression": LinearRegression(),
1801
+ "Lasso": Lasso(random_state=random_state),
1802
+ "AdaBoost": AdaBoostRegressor(random_state=random_state),
1803
+ # "LightGBM": lgb.LGBMRegressor(random_state=random_state),
1804
+ "CatBoost": cb.CatBoostRegressor(verbose=0, random_state=random_state),
1805
+ "Extra Trees": ExtraTreesRegressor(random_state=random_state),
1806
+ "Bagging": BaggingRegressor(random_state=random_state),
1807
+ "Neural Network": MLPRegressor(max_iter=500, random_state=random_state),
1808
+ "ElasticNet": ElasticNet(random_state=random_state),
1809
+ "Ridge": Ridge(),
1810
+ "KNN":KNeighborsRegressor()
1811
+ }
1812
+ # indicate cls:
1813
+ if ips.run_once_within(30):# avoid printing the supported-model list on every call
1814
+ print(f"supported models: {list(model_.keys())}")
1815
+ if cls is None:
1816
+ models=model_
1817
+ else:
1818
+ if not isinstance(cls, list):
1819
+ cls=[cls]
1820
+ models={}
1821
+ for cls_ in cls:
1822
+ cls_ = ips.strcmp(cls_, list(model_.keys()))[0]
1823
+ models[cls_] = model_[cls_]
1824
+ if 'LightGBM' in models:
1825
+ x_train=ips.df_special_characters_cleaner(x_train)
1826
+ x_true=ips.df_special_characters_cleaner(x_true) if x_true is not None else None
1827
+
1828
+ if isinstance(y_train, str) and y_train in x_train.columns:
1829
+ y_train_col_name=y_train
1830
+ y_train=x_train[y_train]
1831
+ y_train=ips.df_encoder(pd.DataFrame(y_train),method='dummy')
1832
+ x_train = x_train.drop(y_train_col_name,axis=1)
1833
+ else:
1834
+ y_train=ips.df_encoder(pd.DataFrame(y_train),method='dummy').values.ravel()
1835
+
1836
+ if x_true is None:
1837
+ x_train, x_true, y_train, y_true = train_test_split(
1838
+ x_train,
1839
+ y_train,
1840
+ test_size=test_size,
1841
+ random_state=random_state,
1842
+ stratify=y_train if purpose == "classification" else None
1843
+ )
1844
+ if isinstance(y_train, str) and y_train in x_train.columns:
1845
+ y_train_col_name=y_train
1846
+ y_train=x_train[y_train]
1847
+ y_train=ips.df_encoder(pd.DataFrame(y_train),method='dummy')
1848
+ x_train = x_train.drop(y_train_col_name,axis=1)
1849
+ else:
1850
+ y_train=ips.df_encoder(pd.DataFrame(y_train),method='dummy').values.ravel()
1851
+ if y_true is not None:
1852
+ if isinstance(y_true, str) and y_true in x_true.columns:
1853
+ y_true_col_name=y_true
1854
+ y_true=x_true[y_true]
1855
+ y_true=ips.df_encoder(pd.DataFrame(y_true),method='dummy')
1856
+ x_true = x_true.drop(y_true_col_name,axis=1)
1857
+ else:
1858
+ y_true=ips.df_encoder(pd.DataFrame(y_true),method='dummy').values.ravel()
1859
+
1860
+ # flatten a 2D column vector (like [[1], [0], [1], ...]) into a 1D array ([1, 0, 1, ...])
1861
+ y_train=y_train.values.ravel() if y_train is not None else None
1862
+ y_true=y_true.values.ravel() if y_true is not None else None
1863
+
1864
+ # Ensure common features are selected
1865
+ if common_features is not None:
1866
+ x_train, x_true = x_train[common_features], x_true[common_features]
1867
+ else:
1868
+ share_col_names = ips.shared(x_train.columns, x_true.columns,verbose=verbose)
1869
+ x_train, x_true =x_train[share_col_names], x_true[share_col_names]
1870
+
1871
+ x_train, x_true = ips.df_scaler(x_train), ips.df_scaler(x_true)
1872
+ x_train, x_true = ips.df_encoder(x_train, method="dummy"), ips.df_encoder(
1873
+ x_true, method="dummy"
1874
+ )
1875
+
1876
+ # Handle class imbalance using SMOTE (only for classification)
1877
+ if (
1878
+ smote
1879
+ and purpose == "classification"
1880
+ and y_train.value_counts(normalize=True).max() < 0.8
1881
+ ):
1882
+ from imblearn.over_sampling import SMOTE
1883
+
1884
+ smote_sampler = SMOTE(random_state=random_state)
1885
+ x_train, y_train = smote_sampler.fit_resample(x_train, y_train)
1886
+
1887
+ # Hyperparameter grids for tuning
1888
+ if cv_level in ["low",'simple','s','l']:
1889
+ param_grids = {
1890
+ "Random Forest": {
1891
+ "n_estimators": [100], # One basic option
1892
+ "max_depth": [None, 10],
1893
+ "min_samples_split": [2],
1894
+ "min_samples_leaf": [1],
1895
+ "class_weight": [None],
1896
+ } if purpose == "classification" else {
1897
+ "n_estimators": [100], # One basic option
1898
+ "max_depth": [None, 10],
1899
+ "min_samples_split": [2],
1900
+ "min_samples_leaf": [1],
1901
+ "max_features": [None],
1902
+ "bootstrap": [True], # Only one option for simplicity
1903
+ },
1904
+ "SVM": {
1905
+ "C": [1],
1906
+ "gamma": ['scale'],
1907
+ "kernel": ['rbf'],
1908
+ },
1909
+ "Lasso": {
1910
+ "alpha": [0.1],
1911
+ },
1912
+ "LassoCV": {
1913
+ "alphas": [[0.1]],
1914
+ },
1915
+ "Logistic Regression": {
1916
+ "C": [1],
1917
+ "solver": ['lbfgs'],
1918
+ "penalty": ['l2'],
1919
+ "max_iter": [500],
1920
+ },
1921
+ "Gradient Boosting": {
1922
+ "n_estimators": [100],
1923
+ "learning_rate": [0.1],
1924
+ "max_depth": [3],
1925
+ "min_samples_split": [2],
1926
+ "subsample": [0.8],
1927
+ },
1928
+ "XGBoost": {
1929
+ "n_estimators": [100],
1930
+ "max_depth": [3],
1931
+ "learning_rate": [0.1],
1932
+ "subsample": [0.8],
1933
+ "colsample_bytree": [0.8],
1934
+ },
1935
+ "KNN": {
1936
+ "n_neighbors": [3],
1937
+ "weights": ['uniform'],
1938
+ "algorithm": ['auto'],
1939
+ "p": [2],
1940
+ } if purpose == 'classification' else {
1941
+ 'n_neighbors': [3],
1942
+ 'weights': ['uniform'],
1943
+ 'metric': ['euclidean'],
1944
+ 'leaf_size': [30],
1945
+ 'p': [2],
1946
+ },
1947
+ "Naive Bayes": {
1948
+ "var_smoothing": [1e-9],
1949
+ },
1950
+ "SVR": {
1951
+ "C": [1],
1952
+ "gamma": ['scale'],
1953
+ "kernel": ['rbf'],
1954
+ },
1955
+ "Linear Regression": {
1956
+ "fit_intercept": [True],
1957
+ },
1958
+ "Extra Trees": {
1959
+ "n_estimators": [100],
1960
+ "max_depth": [None, 10],
1961
+ "min_samples_split": [2],
1962
+ "min_samples_leaf": [1],
1963
+ },
1964
+ "CatBoost": {
1965
+ "iterations": [100],
1966
+ "learning_rate": [0.1],
1967
+ "depth": [3],
1968
+ "l2_leaf_reg": [1],
1969
+ },
1970
+ "LightGBM": {
1971
+ "n_estimators": [100],
1972
+ "num_leaves": [31],
1973
+ "max_depth": [10],
1974
+ 'min_data_in_leaf': [20],
1975
+ 'min_gain_to_split': [0.01],
1976
+ 'scale_pos_weight': [10],
1977
+ },
1978
+ "Bagging": {
1979
+ "n_estimators": [50],
1980
+ "max_samples": [0.7],
1981
+ "max_features": [0.7],
1982
+ },
1983
+ "Neural Network": {
1984
+ "hidden_layer_sizes": [(50,)],
1985
+ "activation": ["relu"],
1986
+ "solver": ["adam"],
1987
+ "alpha": [0.0001],
1988
+ },
1989
+ "Decision Tree": {
1990
+ "max_depth": [None, 10],
1991
+ "min_samples_split": [2],
1992
+ "min_samples_leaf": [1],
1993
+ "criterion": ["gini"],
1994
+ },
1995
+ "AdaBoost": {
1996
+ "n_estimators": [50],
1997
+ "learning_rate": [0.5],
1998
+ },
1999
+ "Linear Discriminant Analysis": {
2000
+ "solver": ["svd"],
2001
+ "shrinkage": [None],
2002
+ },
2003
+ "Quadratic Discriminant Analysis": {
2004
+ 'reg_param': [0.0],
2005
+ 'priors': [None],
2006
+ 'tol': [1e-4],
2007
+ },
2008
+ "Ridge": {'class_weight': [None, 'balanced']} if purpose == "classification" else {
2009
+ 'alpha': [0.1, 1, 10],
2010
+ },
2011
+ "Perceptron": {
2012
+ 'alpha': [1e-3],
2013
+ 'penalty': ['l2'],
2014
+ 'max_iter': [1000],
2015
+ 'eta0': [1.0],
2016
+ },
2017
+ "Bernoulli Naive Bayes": {
2018
+ 'alpha': [0.1, 1, 10],
2019
+ 'binarize': [0.0],
2020
+ 'fit_prior': [True],
2021
+ },
2022
+ "SGDClassifier": {
2023
+ 'eta0': [0.01],
2024
+ 'loss': ['hinge'],
2025
+ 'penalty': ['l2'],
2026
+ 'alpha': [1e-3],
2027
+ 'max_iter': [1000],
2028
+ 'tol': [1e-3],
2029
+ 'random_state': [random_state],
2030
+ 'learning_rate': ['constant'],
2031
+ },
2032
+ }
2033
+ elif cv_level in ['high','advanced','h']:
2034
+ param_grids = {
2035
+ "Random Forest": {
2036
+ "n_estimators": [100, 200, 500, 700, 1000],
2037
+ "max_depth": [None, 3, 5, 10, 15, 20, 30],
2038
+ "min_samples_split": [2, 5, 10, 20],
2039
+ "min_samples_leaf": [1, 2, 4],
2040
+ "class_weight": [None, "balanced"] if purpose == "classification" else {},
2041
+ } if purpose == "classification" else {
2042
+ "n_estimators": [100, 200, 500, 700, 1000],
2043
+ "max_depth": [None, 3, 5, 10, 15, 20, 30],
2044
+ "min_samples_split": [2, 5, 10, 20],
2045
+ "min_samples_leaf": [1, 2, 4],
2046
+ "max_features": ['auto', 'sqrt', 'log2'], # Number of features to consider when looking for the best split
2047
+ "bootstrap": [True, False], # Whether bootstrap samples are used when building trees
2048
+ },
2049
+ "SVM": {
2050
+ "C": [0.001, 0.01, 0.1, 1, 10, 100, 1000],
2051
+ "gamma": ["scale", "auto", 0.001, 0.01, 0.1],
2052
+ "kernel": ["linear", "rbf", "poly"],
2053
+ },
2054
+ "Logistic Regression": {
2055
+ "C": [0.001, 0.01, 0.1, 1, 10, 100, 1000],
2056
+ "solver": ["liblinear", "saga", "newton-cg", "lbfgs"],
2057
+ "penalty": ["l1", "l2", "elasticnet"],
2058
+ "max_iter": [100, 200, 300, 500],
2059
+ },
2060
+ "Lasso":{
2061
+ "alpha": [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0],
2062
+ "max_iter": [500, 1000, 2000, 5000],
2063
+ "tol": [1e-4, 1e-5, 1e-6],
2064
+ "selection": ["cyclic", "random"]
2065
+ },
2066
+ "LassoCV":{
2067
+ "alphas": [[0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0]],
2068
+ "max_iter": [500, 1000, 2000, 5000],
2069
+ "cv": [3, 5, 10],
2070
+ "tol": [1e-4, 1e-5, 1e-6]
2071
+ },
2072
+ "Gradient Boosting": {
2073
+ "n_estimators": [100, 200, 300, 400, 500, 700, 1000],
2074
+ "learning_rate": [0.001, 0.01, 0.1, 0.2, 0.3, 0.5],
2075
+ "max_depth": [3, 5, 7, 9, 15],
2076
+ "min_samples_split": [2, 5, 10, 20],
2077
+ "subsample": [0.8, 1.0],
2078
+ },
2079
+ "XGBoost": {
2080
+ "n_estimators": [100, 200, 500, 700],
2081
+ "max_depth": [3, 5, 7, 10],
2082
+ "learning_rate": [0.01, 0.1, 0.2, 0.3],
2083
+ "subsample": [0.8, 1.0],
2084
+ "colsample_bytree": [0.8, 0.9, 1.0],
2085
+ },
2086
+ "KNN": {
2087
+ "n_neighbors": [1, 3, 5, 10, 15, 20],
2088
+ "weights": ["uniform", "distance"],
2089
+ "algorithm": ["auto", "ball_tree", "kd_tree", "brute"],
2090
+ "p": [1, 2], # 1 for Manhattan, 2 for Euclidean distance
2091
+ } if purpose=='classification' else {
2092
+ 'n_neighbors': [3, 5, 7, 9, 11], # Number of neighbors
2093
+ 'weights': ['uniform', 'distance'], # Weight function used in prediction
2094
+ 'metric': ['euclidean', 'manhattan', 'minkowski'], # Distance metric
2095
+ 'leaf_size': [20, 30, 40, 50], # Leaf size for KDTree or BallTree algorithms
2096
+ 'p': [1, 2] # Power parameter for the Minkowski metric (1 = Manhattan, 2 = Euclidean)
2097
+ },
2098
+ "Naive Bayes": {
2099
+ "var_smoothing": [1e-10, 1e-9, 1e-8, 1e-7],
2100
+ },
2101
+ "AdaBoost": {
2102
+ "n_estimators": [50, 100, 200, 300, 500],
2103
+ "learning_rate": [0.001, 0.01, 0.1, 0.5, 1.0],
2104
+ },
2105
+ "SVR": {
2106
+ "C": [0.01, 0.1, 1, 10, 100, 1000],
2107
+ "gamma": [0.001, 0.01, 0.1, "scale", "auto"],
2108
+ "kernel": ["linear", "rbf", "poly"],
2109
+ },
2110
+ "Linear Regression": {
2111
+ "fit_intercept": [True, False],
2112
+ },
2113
+ "Lasso":{
2114
+ "alpha": [0.001, 0.01, 0.1, 1.0, 10.0, 100.0],
2115
+ "max_iter": [1000, 2000] # Higher iteration limit for fine-tuning
2116
+ },
2117
+ "Extra Trees": {
2118
+ "n_estimators": [100, 200, 500, 700, 1000],
2119
+ "max_depth": [None, 5, 10, 15, 20, 30],
2120
+ "min_samples_split": [2, 5, 10, 20],
2121
+ "min_samples_leaf": [1, 2, 4]
2122
+ },
2123
+ "CatBoost": {
2124
+ "iterations": [100, 200, 500],
2125
+ "learning_rate": [0.001, 0.01, 0.1, 0.2],
2126
+ "depth": [3, 5, 7, 10],
2127
+ "l2_leaf_reg": [1, 3, 5, 7, 10],
2128
+ "border_count": [32, 64, 128],
2129
+ },
2130
+ "LightGBM": {
2131
+ "n_estimators": [100, 200, 500, 700, 1000],
2132
+ "learning_rate": [0.001, 0.01, 0.1, 0.2],
2133
+ "num_leaves": [31, 50, 100, 200],
2134
+ "max_depth": [-1, 5, 10, 20, 30],
2135
+ "min_child_samples": [5, 10, 20],
2136
+ "subsample": [0.8, 1.0],
2137
+ "colsample_bytree": [0.8, 0.9, 1.0],
2138
+ },
2139
+ "Neural Network": {
2140
+ "hidden_layer_sizes": [(50,), (100,), (100, 50), (200, 100)],
2141
+ "activation": ["relu", "tanh", "logistic"],
2142
+ "solver": ["adam", "sgd", "lbfgs"],
2143
+ "alpha": [0.0001, 0.001, 0.01],
2144
+ "learning_rate": ["constant", "adaptive"],
2145
+ },
2146
+ "Decision Tree": {
2147
+ "max_depth": [None, 5, 10, 20, 30],
2148
+ "min_samples_split": [2, 5, 10, 20],
2149
+ "min_samples_leaf": [1, 2, 5, 10],
2150
+ "criterion": ["gini", "entropy"],
2151
+ "splitter": ["best", "random"],
2152
+ },
2153
+ "Linear Discriminant Analysis": {
2154
+ "solver": ["svd", "lsqr", "eigen"],
2155
+ "shrinkage": [None, "auto", 0.1, 0.5, 1.0], # shrinkage levels for 'lsqr' and 'eigen'
2156
+ },
2157
+ 'Ridge': {'class_weight': [None, 'balanced']} if purpose == "classification" else {
2158
+ 'alpha': [0.1, 1, 10, 100, 1000],
2159
+ 'solver': ['auto', 'svd', 'cholesky', 'lsqr', 'lbfgs'],
2160
+ 'fit_intercept': [True, False], # Whether to calculate the intercept
2161
+ 'normalize': [True, False] # If True, the regressors X will be normalized
2162
+ }
2163
+ }
2164
+ else: # medium level
2165
+ param_grids = {
2166
+ "Random Forest": {
2167
+ "n_estimators": [100, 200, 500],
2168
+ "max_depth": [None, 10, 20, 30],
2169
+ "min_samples_split": [2, 5, 10],
2170
+ "min_samples_leaf": [1, 2, 4],
2171
+ "class_weight": [None, "balanced"]
2172
+ } if purpose == "classification" else {
2173
+ "n_estimators": [100, 200, 500],
2174
+ "max_depth": [None, 10, 20, 30],
2175
+ "min_samples_split": [2, 5, 10],
2176
+ "min_samples_leaf": [1, 2, 4],
2177
+ "max_features": ['auto', 'sqrt', 'log2'], # Number of features to consider when looking for the best split
2178
+ "bootstrap": [True, False], # Whether bootstrap samples are used when building trees
2179
+ },
2180
+ "SVM": {
2181
+ "C": [0.1, 1, 10, 100], # Regularization strength
2182
+ "gamma": ['scale', 'auto'], # Common gamma values
2183
+ "kernel": ['rbf', 'linear', 'poly'],
2184
+ },
2185
+ "Logistic Regression": {
2186
+ "C": [0.1, 1, 10, 100], # Regularization strength
2187
+ "solver": ['lbfgs', 'liblinear', 'saga'], # Common solvers
2188
+ "penalty": ['l2'], # L2 penalty is most common
2189
+ "max_iter": [500, 1000, 2000], # Increased max_iter for better convergence
2190
+ },
2191
+ "Lasso":{
2192
+ "alpha": [0.001, 0.01, 0.1, 1.0, 10.0, 100.0],
2193
+ "max_iter": [500, 1000, 2000]
2194
+ },
2195
+ "LassoCV":{
2196
+ "alphas": [[0.001, 0.01, 0.1, 1.0, 10.0, 100.0]],
2197
+ "max_iter": [500, 1000, 2000]
2198
+ },
2199
+ "Gradient Boosting": {
2200
+ "n_estimators": [100, 200, 500],
2201
+ "learning_rate": [0.01, 0.1, 0.2],
2202
+ "max_depth": [3, 5, 7],
2203
+ "min_samples_split": [2, 5, 10],
2204
+ "subsample": [0.8, 1.0],
2205
+ },
2206
+ "XGBoost": {
2207
+ "n_estimators": [100, 200, 500],
2208
+ "max_depth": [3, 5, 7],
2209
+ "learning_rate": [0.01, 0.1, 0.2],
2210
+ "subsample": [0.8, 1.0],
2211
+ "colsample_bytree": [0.8, 1.0],
2212
+ },
2213
+ "KNN": {
2214
+ "n_neighbors": [3, 5, 7, 10],
2215
+ "weights": ['uniform', 'distance'],
2216
+ "algorithm": ['auto', 'ball_tree', 'kd_tree', 'brute'],
2217
+ "p": [1, 2],
2218
+ } if purpose=='classification' else {
2219
+ 'n_neighbors': [3, 5, 7, 9, 11], # Number of neighbors
2220
+ 'weights': ['uniform', 'distance'], # Weight function used in prediction
2221
+ 'metric': ['euclidean', 'manhattan', 'minkowski'], # Distance metric
2222
+ 'leaf_size': [20, 30, 40, 50], # Leaf size for KDTree or BallTree algorithms
2223
+ 'p': [1, 2] # Power parameter for the Minkowski metric (1 = Manhattan, 2 = Euclidean)
2224
+ },
2225
+ "Naive Bayes": {
2226
+ "var_smoothing": [1e-9, 1e-8, 1e-7],
2227
+ },
2228
+ "SVR": {
2229
+ "C": [0.1, 1, 10, 100],
2230
+ "gamma": ['scale', 'auto'],
2231
+ "kernel": ['rbf', 'linear'],
2232
+ },
2233
+ "Linear Regression": {
2234
+ "fit_intercept": [True, False],
2235
+ },
2236
+ "Lasso": {
2237
+ "alpha": [0.1, 1.0, 10.0],
2238
+ "max_iter": [1000, 2000], # Sufficient iterations for convergence
2239
+ },
2240
+ "Extra Trees": {
2241
+ "n_estimators": [100, 200, 500],
2242
+ "max_depth": [None, 10, 20, 30],
2243
+ "min_samples_split": [2, 5, 10],
2244
+ "min_samples_leaf": [1, 2, 4],
2245
+ },
2246
+ "CatBoost": {
2247
+ "iterations": [100, 200],
2248
+ "learning_rate": [0.01, 0.1],
2249
+ "depth": [3, 6, 10],
2250
+ "l2_leaf_reg": [1, 3, 5, 7],
2251
+ },
2252
+ "LightGBM": {
2253
+ "n_estimators": [100, 200, 500],
2254
+ "learning_rate": [0.01, 0.1],
2255
+ "num_leaves": [31, 50, 100],
2256
+ "max_depth": [-1, 10, 20],
2257
+ 'min_data_in_leaf': [20], # Minimum samples in each leaf
2258
+ 'min_gain_to_split': [0.01], # Minimum gain to allow a split
2259
+ 'scale_pos_weight': [10], # Address class imbalance
2260
+ },
2261
+ "Bagging": {
2262
+ "n_estimators": [10, 50, 100],
2263
+ "max_samples": [0.5, 0.7, 1.0],
2264
+ "max_features": [0.5, 0.7, 1.0],
2265
+ },
2266
+ "Neural Network": {
2267
+ "hidden_layer_sizes": [(50,), (100,), (100, 50)],
2268
+ "activation": ["relu", "tanh"],
2269
+ "solver": ["adam", "sgd"],
2270
+ "alpha": [0.0001, 0.001],
2271
+ },
2272
+ "Decision Tree": {
2273
+ "max_depth": [None, 10, 20],
2274
+ "min_samples_split": [2, 10],
2275
+ "min_samples_leaf": [1, 4],
2276
+ "criterion": ["gini", "entropy"],
2277
+ },
2278
+ "AdaBoost": {
2279
+ "n_estimators": [50, 100],
2280
+ "learning_rate": [0.5, 1.0],
2281
+ },
2282
+ "Linear Discriminant Analysis": {
2283
+ "solver": ["svd", "lsqr", "eigen"],
2284
+ "shrinkage": [None, "auto"],
2285
+ }, "Quadratic Discriminant Analysis":{
2286
+ 'reg_param': [0.0, 0.1, 0.5, 1.0], # Regularization parameter
2287
+ 'priors': [None, [0.5, 0.5], [0.3, 0.7]], # Class priors
2288
+ 'tol': [1e-4, 1e-3, 1e-2] # Tolerance value for the convergence of the algorithm
2289
+ },
2290
+ "Perceptron":{
2291
+ 'alpha': [1e-4, 1e-3, 1e-2], # Regularization parameter
2292
+ 'penalty': ['l2', 'l1', 'elasticnet'], # Regularization penalty
2293
+ 'max_iter': [1000, 2000], # Maximum number of iterations
2294
+ 'eta0': [1.0, 0.1], # Learning rate for gradient descent
2295
+ 'tol': [1e-3, 1e-4, 1e-5], # Tolerance for stopping criteria
2296
+ 'random_state': [random_state] # Random state for reproducibility
2297
+ },
2298
+ "Bernoulli Naive Bayes":{
2299
+ 'alpha': [0.1, 1.0, 10.0], # Additive (Laplace) smoothing parameter
2300
+ 'binarize': [0.0, 0.5, 1.0], # Threshold for binarizing the input features
2301
+ 'fit_prior': [True, False] # Whether to learn class prior probabilities
2302
+ },
2303
+ "SGDClassifier":{
2304
+ 'eta0': [0.01, 0.1, 1.0],
2305
+ 'loss': ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'], # Loss function
2306
+ 'penalty': ['l2', 'l1', 'elasticnet'], # Regularization penalty
2307
+ 'alpha': [1e-4, 1e-3, 1e-2], # Regularization strength
2308
+ 'l1_ratio': [0.15, 0.5, 0.85], # L1 ratio for elasticnet penalty
2309
+ 'max_iter': [1000, 2000], # Maximum number of iterations
2310
+ 'tol': [1e-3, 1e-4], # Tolerance for stopping criteria
2311
+ 'random_state': [random_state], # Random state for reproducibility
2312
+ 'learning_rate': ['constant', 'optimal', 'invscaling', 'adaptive'], # Learning rate schedule
2313
+ },
2314
+ 'Ridge': {'class_weight': [None, 'balanced']} if purpose == "classification" else {
2315
+ 'alpha': [0.1, 1, 10, 100],
2316
+ 'solver': ['auto', 'svd', 'cholesky', 'lsqr'] # Solver for optimization
2317
+ }
2318
+ }
2319
+
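+ # For scale (a rough illustration): with cv_level="l" the Random Forest grid above has only
+ # 1*2*1*1*1 = 2 candidate settings, while cv_level="h" searches 5*7*4*3*2 = 840, each refit
+ # cv_folds times, so the level mainly trades search thoroughness against runtime.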
2320
+ results = {}
2321
+ # Use StratifiedKFold for classification and KFold for regression
2322
+ cv = (
2323
+ StratifiedKFold(n_splits=cv_folds, shuffle=True, random_state=random_state)
2324
+ if purpose == "classification"
2325
+ else KFold(n_splits=cv_folds, shuffle=True, random_state=random_state)
2326
+ )
2327
+
2328
+ # Train and validate each model
2329
+ for name, clf in tqdm(
2330
+ models.items(),
2331
+ desc="models",
2332
+ colour="green",
2333
+ bar_format="{l_bar}{bar} {n_fmt}/{total_fmt}",
2334
+ ):
2335
+ if verbose:
2336
+ print(f"\nTraining and validating {name}:")
2337
+
2338
+ # Grid search with KFold or StratifiedKFold
2339
+ gs = GridSearchCV(
2340
+ clf,
2341
+ param_grid=param_grids.get(name, {}),
2342
+ scoring=(
2343
+ "roc_auc" if purpose == "classification" else "neg_mean_squared_error"
2344
+ ),
2345
+ cv=cv,
2346
+ n_jobs=n_jobs,
2347
+ verbose=verbose,
2348
+ )
2349
+ gs.fit(x_train, y_train)
2350
+ best_clf = gs.best_estimator_
2351
+ # make sure x_true has the same columns (names and order) as x_train
2352
+ x_true = x_true.reindex(columns=x_train.columns, fill_value=0)
2353
+ y_pred = best_clf.predict(x_true)
2354
+
2355
+ # y_pred_proba
2356
+ if hasattr(best_clf, "predict_proba"):
2357
+ y_pred_proba = best_clf.predict_proba(x_true)[:, 1]
2358
+ elif hasattr(best_clf, "decision_function"):
2359
+ # If predict_proba is not available, use decision_function (e.g., for SVM)
2360
+ y_pred_proba = best_clf.decision_function(x_true)
2361
+ # Ensure y_pred_proba is within 0 and 1 bounds
2362
+ y_pred_proba = (y_pred_proba - y_pred_proba.min()) / (
2363
+ y_pred_proba.max() - y_pred_proba.min()
2364
+ )
2365
+ else:
2366
+ y_pred_proba = None # No probability output for certain models
2367
+
2368
+
2369
+ validation_scores = {}
2370
+ if y_true is not None:
2371
+ validation_scores = cal_metrics(y_true, y_pred, y_pred_proba=y_pred_proba, purpose=purpose, average="weighted")
2372
+
2373
+ # Calculate ROC curve
2374
+ # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
2375
+ if y_pred_proba is not None:
2376
+ # fpr, tpr, roc_auc = dict(), dict(), dict()
2377
+ fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
2378
+ lower_ci, upper_ci = cal_auc_ci(y_true, y_pred_proba,verbose=False)
2379
+ roc_auc = auc(fpr, tpr)
2380
+ roc_info = {
2381
+ "fpr": fpr.tolist(),
2382
+ "tpr": tpr.tolist(),
2383
+ "auc": roc_auc,
2384
+ "ci95": (lower_ci, upper_ci),
2385
+ }
2386
+ # precision-recall curve
2387
+ precision_, recall_, _ = precision_recall_curve(y_true, y_pred_proba)
2388
+ avg_precision_ = average_precision_score(y_true, y_pred_proba)
2389
+ pr_info = {
2390
+ "precision": precision_,
2391
+ "recall": recall_,
2392
+ "avg_precision": avg_precision_,
2393
+ }
2394
+ else:
2395
+ roc_info, pr_info = None, None
2396
+ if purpose=="classification":
2397
+ results[name] = {
2398
+ "best_clf": gs.best_estimator_,
2399
+ "best_params": gs.best_params_,
2400
+ "auc_indiv":[gs.cv_results_[f'split{i}_test_score'][gs.best_index_] for i in range(cv_folds)],
2401
+ "scores": validation_scores,
2402
+ "roc_curve": roc_info,
2403
+ "pr_curve": pr_info,
2404
+ "confusion_matrix": confusion_matrix(y_true, y_pred),
2405
+ "predictions": y_pred.tolist(),
2406
+ "predictions_proba": (
2407
+ y_pred_proba.tolist() if y_pred_proba is not None else None
2408
+ ),
2409
+ }
2410
+ else: # "regression"
2411
+ results[name] = {
2412
+ "best_clf": gs.best_estimator_,
2413
+ "best_params": gs.best_params_,
2414
+ "scores": validation_scores, # e.g., neg_MSE, R², etc.
2415
+ "predictions": y_pred.tolist(),
2416
+ "predictions_proba": (
2417
+ y_pred_proba.tolist() if y_pred_proba is not None else None
2418
+ ),
2419
+ }
2420
+
2421
+ else:
2422
+ results[name] = {
2423
+ "best_clf": gs.best_estimator_,
2424
+ "best_params": gs.best_params_,
2425
+ "scores": validation_scores,
2426
+ "predictions": y_pred.tolist(),
2427
+ "predictions_proba": (
2428
+ y_pred_proba.tolist() if y_pred_proba is not None else None
2429
+ ),
2430
+ }
2431
+
2432
+ # Convert results to DataFrame
2433
+ df_results = pd.DataFrame.from_dict(results, orient="index")
2434
+
2435
+ # sort models by validation roc_auc (best first)
2436
+ if y_true is not None and purpose == "classification":
2437
+ df_scores = pd.DataFrame(
2438
+ df_results["scores"].tolist(), index=df_results["scores"].index
2439
+ ).sort_values(by="roc_auc", ascending=False)
2440
+ df_results = df_results.loc[df_scores.index]
2441
+
2442
+ if plot_:
2443
+ nexttile = plot.subplot(figsize=[12, 10])
2444
+ plot.heatmap(df_scores, kind="direct",ax=nexttile())
2445
+ plot.figsets(xangle=30)
2446
+ if dir_save:
2447
+ ips.figsave(dir_save+"scores_sorted_heatmap.pdf")
2448
+ plot.heatmap(df_scores, kind="direct",cluster=True)
2449
+ plot.figsets(xangle=30)
2450
+ if dir_save:
2451
+ ips.figsave(dir_save+"scores_clus.pdf")
2452
+ if all([plot_, y_true is not None, purpose == "classification"]):
2453
+ try:
2454
+ if len(models) > 3:
2455
+ plot_validate_features(df_results)
2456
+ else:
2457
+ plot_validate_features_single(df_results, figsize=(12, 4 * len(models)))
2458
+ if dir_save:
2459
+ ips.figsave(dir_save+"validate_features.pdf")
2460
+ except Exception as e:
2461
+ print(f"Error: 在画图的过程中出现了问题:{e}")
2462
+ return df_results
2463
+
2464
+
2465
+ def cal_metrics(y_true, y_pred, y_pred_proba=None, purpose="regression", average="weighted"):
2466
+ """
2467
+ Calculate regression or classification metrics based on the purpose.
2468
+
2469
+ Parameters:
2470
+ - y_true: Array of true values.
2471
+ - y_pred: Array of predicted labels for classification or predicted values for regression.
2472
+ - y_pred_proba: Array of predicted probabilities for classification (optional).
2473
+ - purpose: str, "regression" or "classification".
2474
+ - average: str, averaging method for multi-class classification ("binary", "micro", "macro", "weighted", etc.).
2475
+
2476
+ Returns:
2477
+ - validation_scores: dict of computed metrics.
2478
+ """
2479
+ from sklearn.metrics import (
2480
+ mean_squared_error,
2481
+ mean_absolute_error,
2482
+ mean_absolute_percentage_error,
2483
+ explained_variance_score,
2484
+ r2_score,
2485
+ mean_squared_log_error,
2486
+ accuracy_score,
2487
+ precision_score,
2488
+ recall_score,
2489
+ f1_score,
2490
+ roc_auc_score,
2491
+ matthews_corrcoef,
2492
+ confusion_matrix,
2493
+ balanced_accuracy_score,
2494
+ average_precision_score,
2495
+ precision_recall_curve
2496
+ )
2497
+ validation_scores = {}
2498
+
2499
+ if purpose == "regression":
2500
+ y_true = np.asarray(y_true)
2501
+ y_true = y_true.ravel()
2502
+ y_pred = np.asarray(y_pred)
2503
+ y_pred = y_pred.ravel()
2504
+ # Regression metrics
2505
+ validation_scores = {
2506
+ "mse": mean_squared_error(y_true, y_pred),
2507
+ "rmse": np.sqrt(mean_squared_error(y_true, y_pred)),
2508
+ "mae": mean_absolute_error(y_true, y_pred),
2509
+ "r2": r2_score(y_true, y_pred),
2510
+ "mape": mean_absolute_percentage_error(y_true, y_pred),
2511
+ "explained_variance": explained_variance_score(y_true, y_pred),
2512
+ "mbd": np.mean(y_pred - y_true) # Mean Bias Deviation
2513
+ }
2514
+ # Check if MSLE can be calculated
2515
+ if np.all(y_true >= 0) and np.all(y_pred >= 0): # Ensure no negative values
2516
+ validation_scores["msle"] = mean_squared_log_error(y_true, y_pred)
2517
+ else:
2518
+ validation_scores["msle"] = "Cannot be calculated due to negative values"
2519
+
2520
+ elif purpose == "classification":
2521
+ # Classification metrics
2522
+ validation_scores = {
2523
+ "accuracy": accuracy_score(y_true, y_pred),
2524
+ "precision": precision_score(y_true, y_pred, average=average),
2525
+ "recall": recall_score(y_true, y_pred, average=average),
2526
+ "f1": f1_score(y_true, y_pred, average=average),
2527
+ "mcc": matthews_corrcoef(y_true, y_pred),
2528
+ "specificity": None,
2529
+ "balanced_accuracy": balanced_accuracy_score(y_true, y_pred)
2530
+ }
2531
+
2532
+ # Confusion matrix to calculate specificity
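+ # (.ravel() into tn/fp/fn/tp assumes a binary problem, i.e. a 2x2 confusion matrix)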
2533
+ tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
2534
+ validation_scores["specificity"] = tn / (tn + fp) if (tn + fp) > 0 else 0 # Specificity calculation
2535
+
2536
+ if y_pred_proba is not None:
2537
+ # Calculate ROC-AUC
2538
+ validation_scores["roc_auc"] = roc_auc_score(y_true, y_pred_proba)
2539
+ # PR-AUC (Precision-Recall AUC) calculation
2540
+ validation_scores["pr_auc"] = average_precision_score(y_true, y_pred_proba)
2541
+ else:
2542
+ raise ValueError("Invalid purpose specified. Choose 'regression' or 'classification'.")
2543
+
2544
+ return validation_scores
2545
+
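+ # Usage sketch (hypothetical toy arrays, for illustration only):
+ #   cal_metrics([0, 1, 1, 0], [0, 1, 0, 0], y_pred_proba=[0.2, 0.9, 0.4, 0.1],
+ #               purpose="classification", average="binary")
+ #   -> dict with accuracy, precision, recall, f1, mcc, specificity,
+ #      balanced_accuracy, roc_auc and pr_auc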