unifiedbooster 0.5.0__tar.gz → 0.7.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/PKG-INFO +1 -1
  2. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/README.md +3 -1
  3. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/setup.py +1 -1
  4. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster/gbdt.py +9 -5
  5. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster/gbdt_classification.py +43 -11
  6. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster/gbdt_regression.py +45 -12
  7. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster/gpoptimization.py +126 -61
  8. unifiedbooster-0.7.0/unifiedbooster/nonconformist/__init__.py +36 -0
  9. unifiedbooster-0.7.0/unifiedbooster/nonconformist/acp.py +381 -0
  10. unifiedbooster-0.7.0/unifiedbooster/nonconformist/base.py +156 -0
  11. unifiedbooster-0.7.0/unifiedbooster/nonconformist/cp.py +172 -0
  12. unifiedbooster-0.7.0/unifiedbooster/nonconformist/evaluation.py +486 -0
  13. unifiedbooster-0.7.0/unifiedbooster/nonconformist/icp.py +442 -0
  14. unifiedbooster-0.7.0/unifiedbooster/nonconformist/nc.py +610 -0
  15. unifiedbooster-0.7.0/unifiedbooster/nonconformist/util.py +9 -0
  16. unifiedbooster-0.7.0/unifiedbooster/predictioninterval/__init__.py +3 -0
  17. unifiedbooster-0.7.0/unifiedbooster/predictioninterval/predictioninterval.py +314 -0
  18. unifiedbooster-0.7.0/unifiedbooster/predictionset/__init__.py +3 -0
  19. unifiedbooster-0.7.0/unifiedbooster/predictionset/predictionset.py +111 -0
  20. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster.egg-info/PKG-INFO +1 -1
  21. unifiedbooster-0.7.0/unifiedbooster.egg-info/SOURCES.txt +27 -0
  22. unifiedbooster-0.5.0/unifiedbooster.egg-info/SOURCES.txt +0 -15
  23. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/LICENSE +0 -0
  24. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/setup.cfg +0 -0
  25. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster/__init__.py +0 -0
  26. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster.egg-info/dependency_links.txt +0 -0
  27. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster.egg-info/entry_points.txt +0 -0
  28. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster.egg-info/not-zip-safe +0 -0
  29. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster.egg-info/requires.txt +0 -0
  30. {unifiedbooster-0.5.0 → unifiedbooster-0.7.0}/unifiedbooster.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: unifiedbooster
- Version: 0.5.0
+ Version: 0.7.0
  Summary: Unified interface for Gradient Boosted Decision Trees
  Home-page: https://github.com/thierrymoudiki/unifiedbooster
  Author: T. Moudiki
@@ -7,6 +7,8 @@ Unified interface for Gradient Boosted Decision Trees algorithms
  
  ## Examples
  
+ See also https://thierrymoudiki.github.io/blog/2024/08/05/python/r/unibooster
+
  ### classification
  
  ```python
@@ -90,4 +92,4 @@ for dataset in datasets:
  print(f"Regression Root Mean Squared Error xgboost: {mse1:.2f}")
  print(f"Regression Root Mean Squared Error catboost: {mse2:.2f}")
  print(f"Regression Root Mean Squared Error lightgbm: {mse3:.2f}")
- ```
+ ```
@@ -9,7 +9,7 @@ from os import path
  
  subprocess.check_call(['pip', 'install', 'Cython'])
  
- __version__ = "0.5.0"
+ __version__ = "0.7.0"
  
  here = path.abspath(path.dirname(__file__))
  
@@ -35,7 +35,6 @@ class GBDT(BaseEstimator):
  **kwargs: dict
  additional parameters to be passed to the class
  """
-
  def __init__(
  self,
  model_type="xgboost",
@@ -44,6 +43,8 @@ class GBDT(BaseEstimator):
  max_depth=3,
  rowsample=1.0,
  colsample=1.0,
+ level=None,
+ pi_method=None,
  verbose=0,
  seed=123,
  **kwargs
@@ -55,6 +56,8 @@ class GBDT(BaseEstimator):
  self.max_depth = max_depth
  self.rowsample = rowsample
  self.colsample = colsample
+ self.level = level
+ self.pi_method = pi_method
  self.verbose = verbose
  self.seed = seed
  
@@ -91,7 +94,7 @@ class GBDT(BaseEstimator):
  "verbose": self.verbose,
  "random_seed": self.seed,
  "boosting_type": "Plain",
- "leaf_estimation_iterations": 1,
+ "leaf_estimation_iterations": 1,
  "bootstrap_type": "Bernoulli",
  **kwargs,
  }
@@ -126,7 +129,6 @@ class GBDT(BaseEstimator):
  
  self: object
  """
-
  if getattr(self, "type_fit") == "classification":
  self.classes_ = np.unique(y) # for compatibility with sklearn
  self.n_classes_ = len(
@@ -152,5 +154,7 @@ class GBDT(BaseEstimator):
  
  model predictions: {array-like}
  """
-
- return getattr(self, "model").predict(X)
+ if self.level is not None and self.type_fit == "regression":
+ return getattr(self, "model").predict(X, return_pi=True)
+ else:
+ return getattr(self, "model").predict(X)
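This change makes `predict` forward `return_pi=True` to the conformal wrapper whenever a regressor was built with a `level`. A minimal usage sketch of the new dispatch (the shape of the returned interval object is an assumption, not something this diff shows):

```python
# Sketch only: with level set on a regressor, predict() is routed through
# predict(X, return_pi=True); without it, behavior is unchanged from 0.5.0.
import unifiedbooster as ub
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split

X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)

reg = ub.GBDTRegressor(model_type="gradientboosting")  # level=None
reg.fit(X_train, y_train)
y_hat = reg.predict(X_test)  # plain ndarray, as before

reg_pi = ub.GBDTRegressor(model_type="gradientboosting", level=95)
reg_pi.fit(X_train, y_train)
res = reg_pi.predict(X_test)  # interval object, not a bare array (assumed)
```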
@@ -1,5 +1,6 @@
  from .gbdt import GBDT
  from sklearn.base import ClassifierMixin
+ from .predictionset import PredictionSet
  
  try:
  from xgboost import XGBClassifier
@@ -39,6 +40,12 @@ class GBDTClassifier(GBDT, ClassifierMixin):
  
  colsample: float
  percentage of features to use at each node split
+
+ level: float
+ confidence level for prediction sets
+
+ pi_method: str
+ method for constructing the prediction sets: 'icp' (inductive conformal), 'tcp' (transductive conformal)
  
  verbose: int
  controls verbosity (default=0)
@@ -88,7 +95,6 @@ class GBDTClassifier(GBDT, ClassifierMixin):
  print(f"Classification Accuracy lightgbm: {accuracy3:.2f}")
  ```
  """
-
  def __init__(
  self,
  model_type="xgboost",
@@ -97,6 +103,8 @@ class GBDTClassifier(GBDT, ClassifierMixin):
  max_depth=3,
  rowsample=1.0,
  colsample=1.0,
+ level=None,
+ pi_method="icp",
  verbose=0,
  seed=123,
  **kwargs,
@@ -111,21 +119,46 @@ class GBDTClassifier(GBDT, ClassifierMixin):
  max_depth=max_depth,
  rowsample=rowsample,
  colsample=colsample,
+ level=level,
+ pi_method=pi_method,
  verbose=verbose,
  seed=seed,
  **kwargs,
  )
  
- if model_type == "xgboost":
- self.model = XGBClassifier(**self.params)
- elif model_type == "catboost":
- self.model = CatBoostClassifier(**self.params)
- elif model_type == "lightgbm":
- self.model = LGBMClassifier(**self.params)
- elif model_type == "gradientboosting":
- self.model = GradientBoostingClassifier(**self.params)
+ if self.level is not None:
+
+ if model_type == "xgboost":
+ self.model = PredictionSet(XGBClassifier(**self.params),
+ level=self.level,
+ method=self.pi_method)
+ elif model_type == "catboost":
+ self.model = PredictionSet(CatBoostClassifier(**self.params),
+ level=self.level,
+ method=self.pi_method)
+ elif model_type == "lightgbm":
+ self.model = PredictionSet(LGBMClassifier(**self.params),
+ level=self.level,
+ method=self.pi_method)
+ elif model_type == "gradientboosting":
+ self.model = PredictionSet(GradientBoostingClassifier(**self.params),
+ level=self.level,
+ method=self.pi_method)
+ else:
+ raise ValueError(f"Unknown model_type: {model_type}")
+
  else:
- raise ValueError(f"Unknown model_type: {model_type}")
+
+ if model_type == "xgboost":
+ self.model = XGBClassifier(**self.params)
+ elif model_type == "catboost":
+ self.model = CatBoostClassifier(**self.params)
+ elif model_type == "lightgbm":
+ self.model = LGBMClassifier(**self.params)
+ elif model_type == "gradientboosting":
+ self.model = GradientBoostingClassifier(**self.params)
+ else:
+ raise ValueError(f"Unknown model_type: {model_type}")
  
  def predict_proba(self, X):
  """Predict probabilities for test data X.
@@ -143,5 +176,4 @@ class GBDTClassifier(GBDT, ClassifierMixin):
  
  probability estimates for test data: {array-like}
  """
-
  return self.model.predict_proba(X)
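Taken together with the `PredictionSet` import above, setting a `level` turns the point classifier into a set-valued one. A hedged usage sketch (the encoding of the returned sets is an assumption; the diff only shows the wrapper being swapped in):

```python
import unifiedbooster as ub
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)

# level=95 routes construction through PredictionSet; pi_method="icp"
# (inductive conformal) is the classifier default
clf = ub.GBDTClassifier(model_type="gradientboosting", level=95, pi_method="icp")
clf.fit(X_train, y_train)
pred_sets = clf.predict(X_test)    # sets of plausible labels (encoding assumed)
proba = clf.predict_proba(X_test)  # still delegated to the wrapped model
```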
@@ -1,5 +1,6 @@
  from .gbdt import GBDT
  from sklearn.base import RegressorMixin
+ from .predictioninterval import PredictionInterval
  
  try:
  from xgboost import XGBRegressor
@@ -39,6 +40,12 @@ class GBDTRegressor(GBDT, RegressorMixin):
  
  colsample: float
  percentage of features to use at each node split
+
+ level: float
+ confidence level for prediction intervals
+
+ pi_method: str
+ method for constructing the prediction intervals: 'splitconformal', 'localconformal'
  
  verbose: int
  controls verbosity (default=0)
@@ -88,7 +95,6 @@ class GBDTRegressor(GBDT, RegressorMixin):
  print(f"Regression Mean Squared Error lightgbm: {mse3:.2f}")
  ```
  """
-
  def __init__(
  self,
  model_type="xgboost",
@@ -97,12 +103,14 @@ class GBDTRegressor(GBDT, RegressorMixin):
  max_depth=3,
  rowsample=1.0,
  colsample=1.0,
+ level=None,
+ pi_method="splitconformal",
  verbose=0,
  seed=123,
  **kwargs,
  ):
  
- self.type_fit = "regression"
+ self.type_fit = "regression"
  
  super().__init__(
  model_type=model_type,
@@ -111,18 +119,43 @@ class GBDTRegressor(GBDT, RegressorMixin):
  max_depth=max_depth,
  rowsample=rowsample,
  colsample=colsample,
+ level=level,
+ pi_method=pi_method,
  verbose=verbose,
  seed=seed,
  **kwargs,
  )
  
- if model_type == "xgboost":
- self.model = XGBRegressor(**self.params)
- elif model_type == "catboost":
- self.model = CatBoostRegressor(**self.params)
- elif model_type == "lightgbm":
- self.model = LGBMRegressor(**self.params)
- elif model_type == "gradientboosting":
- self.model = GradientBoostingRegressor(**self.params)
- else:
- raise ValueError(f"Unknown model_type: {model_type}")
+ if self.level is not None:
+
+ if model_type == "xgboost":
+ self.model = PredictionInterval(XGBRegressor(**self.params),
+ level=self.level,
+ method=self.pi_method)
+ elif model_type == "catboost":
+ self.model = PredictionInterval(CatBoostRegressor(**self.params),
+ level=self.level,
+ method=self.pi_method)
+ elif model_type == "lightgbm":
+ self.model = PredictionInterval(LGBMRegressor(**self.params),
+ level=self.level,
+ method=self.pi_method)
+ elif model_type == "gradientboosting":
+ self.model = PredictionInterval(GradientBoostingRegressor(**self.params),
+ level=self.level,
+ method=self.pi_method)
+ else:
+ raise ValueError(f"Unknown model_type: {model_type}")
+
+ else:
+
+ if model_type == "xgboost":
+ self.model = XGBRegressor(**self.params)
+ elif model_type == "catboost":
+ self.model = CatBoostRegressor(**self.params)
+ elif model_type == "lightgbm":
+ self.model = LGBMRegressor(**self.params)
+ elif model_type == "gradientboosting":
+ self.model = GradientBoostingRegressor(**self.params)
+ else:
+ raise ValueError(f"Unknown model_type: {model_type}")
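Mirroring the classifier, the regressor swaps in a `PredictionInterval` wrapper when `level` is set. A sketch of an empirical coverage check, assuming (not shown in this diff) that the returned object exposes `lower` and `upper` bounds:

```python
import numpy as np
import unifiedbooster as ub
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split

X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)

reg = ub.GBDTRegressor(model_type="gradientboosting",
                       level=95, pi_method="splitconformal")
reg.fit(X_train, y_train)
res = reg.predict(X_test)

# `lower`/`upper` attribute names are assumed; a 95% interval should cover
# roughly 95% of held-out targets
coverage = np.mean((y_test >= res.lower) & (y_test <= res.upper))
print(f"empirical coverage: {coverage:.3f}")
```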
@@ -18,7 +18,7 @@ def cross_val_optim(
  model_type="xgboost",
  type_fit="classification",
  scoring="accuracy",
- n_estimators=100,
+ n_estimators=None,
  surrogate_obj=None,
  cv=5,
  n_jobs=None,
@@ -59,7 +59,7 @@ def cross_val_optim(
  scoring metric; see https://scikit-learn.org/stable/modules/model_evaluation.html#the-scoring-parameter-defining-model-evaluation-rules
  
  n_estimators: int
- maximum number of trees that can be built
+ maximum number of trees that can be built (default is None; if None, the parameter is tuned)
  
  surrogate_obj: an object;
  An ML model for estimating the uncertainty around the objective function
@@ -168,63 +168,128 @@ def cross_val_optim(
  ).mean()
  
  # objective function for hyperparams tuning
- def crossval_objective(xx):
- return gbdt_cv(
- X_train=X_train,
- y_train=y_train,
- model_type=model_type,
- n_estimators=n_estimators,
- learning_rate=10 ** xx[0],
- max_depth=int(xx[1]),
- rowsample=xx[2],
- colsample=xx[3],
- cv=cv,
- n_jobs=n_jobs,
- type_fit=type_fit,
- scoring=scoring,
- seed=seed,
- )
+ if n_estimators is not None:
  
- if surrogate_obj is None:
- gp_opt = gp.GPOpt(
- objective_func=crossval_objective,
- lower_bound=np.array([-6, 1, 0.5, 0.5]),
- upper_bound=np.array([0, 16, 1.0, 1.0]),
- params_names=[
- "learning_rate",
- "max_depth",
- "rowsample",
- "colsample",
- ],
- method="bayesian",
- n_init=n_init,
- n_iter=n_iter,
- seed=seed,
- )
- else:
- gp_opt = gp.GPOpt(
- objective_func=crossval_objective,
- lower_bound=np.array([-6, 1, 0.5, 0.5]),
- upper_bound=np.array([0, 16, 1.0, 1.0]),
- params_names=[
- "learning_rate",
- "max_depth",
- "rowsample",
- "colsample",
- ],
- acquisition="ucb",
- method="splitconformal",
- surrogate_obj=ns.PredictionInterval(
- obj=surrogate_obj, method="splitconformal"
- ),
- n_init=n_init,
- n_iter=n_iter,
- seed=seed,
- )
+ def crossval_objective(xx):
+ return gbdt_cv(
+ X_train=X_train,
+ y_train=y_train,
+ model_type=model_type,
+ n_estimators=n_estimators,
+ learning_rate=10 ** xx[0],
+ max_depth=int(xx[1]),
+ rowsample=xx[2],
+ colsample=xx[3],
+ cv=cv,
+ n_jobs=n_jobs,
+ type_fit=type_fit,
+ scoring=scoring,
+ seed=seed,
+ )
+
+ else: # n_estimators is None
+
+ def crossval_objective(xx):
+ return gbdt_cv(
+ X_train=X_train,
+ y_train=y_train,
+ model_type=model_type,
+ n_estimators=int(10 ** xx[4]),
+ learning_rate=10 ** xx[0],
+ max_depth=int(xx[1]),
+ rowsample=xx[2],
+ colsample=xx[3],
+ cv=cv,
+ n_jobs=n_jobs,
+ type_fit=type_fit,
+ scoring=scoring,
+ seed=seed,
+ )
+
+ if n_estimators is not None:
+ if surrogate_obj is None:
+ gp_opt = gp.GPOpt(
+ objective_func=crossval_objective,
+ lower_bound=np.array([-6, 1, 0.5, 0.5]),
+ upper_bound=np.array([0, 16, 1.0, 1.0]),
+ params_names=[
+ "learning_rate",
+ "max_depth",
+ "rowsample",
+ "colsample",
+ ],
+ method="bayesian",
+ n_init=n_init,
+ n_iter=n_iter,
+ seed=seed,
+ )
+ else:
+ gp_opt = gp.GPOpt(
+ objective_func=crossval_objective,
+ lower_bound=np.array([-6, 1, 0.5, 0.5]),
+ upper_bound=np.array([0, 16, 1.0, 1.0]),
+ params_names=[
+ "learning_rate",
+ "max_depth",
+ "rowsample",
+ "colsample",
+ ],
+ acquisition="ucb",
+ method="splitconformal",
+ surrogate_obj=ns.PredictionInterval(
+ obj=surrogate_obj, method="splitconformal"
+ ),
+ n_init=n_init,
+ n_iter=n_iter,
+ seed=seed,
+ )
+ else: # n_estimators is None
+ if surrogate_obj is None:
+ gp_opt = gp.GPOpt(
+ objective_func=crossval_objective,
+ lower_bound=np.array([-6, 1, 0.5, 0.5, 2]),
+ upper_bound=np.array([0, 16, 1.0, 1.0, 3]),
+ params_names=[
+ "learning_rate",
+ "max_depth",
+ "rowsample",
+ "colsample",
+ "n_estimators",
+ ],
+ method="bayesian",
+ n_init=n_init,
+ n_iter=n_iter,
+ seed=seed,
+ )
+ else:
+ gp_opt = gp.GPOpt(
+ objective_func=crossval_objective,
+ lower_bound=np.array([-6, 1, 0.5, 0.5, 2]),
+ upper_bound=np.array([0, 16, 1.0, 1.0, 3]),
+ params_names=[
+ "learning_rate",
+ "max_depth",
+ "rowsample",
+ "colsample",
+ "n_estimators",
+ ],
+ acquisition="ucb",
+ method="splitconformal",
+ surrogate_obj=ns.PredictionInterval(
+ obj=surrogate_obj, method="splitconformal"
+ ),
+ n_init=n_init,
+ n_iter=n_iter,
+ seed=seed,
+ )
  
  res = gp_opt.optimize(verbose=verbose, abs_tol=abs_tol)
  res.best_params["model_type"] = model_type
- res.best_params["n_estimators"] = int(n_estimators)
+ res.best_params["n_estimators"] = (
+ int(n_estimators)
+ if n_estimators is not None
+ else int(10 ** res.best_params["n_estimators"])
+ )
  res.best_params["learning_rate"] = 10 ** res.best_params["learning_rate"]
  res.best_params["max_depth"] = int(res.best_params["max_depth"])
  res.best_params["rowsample"] = res.best_params["rowsample"]
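With `n_estimators=None`, the tree count joins the search space as a fifth coordinate on a log10 scale in [2, 3] (roughly 100 to 1000 trees) and is decoded back with `int(10 ** x)`. A hedged calling sketch, assuming `cross_val_optim` is reachable from the package root as the docstring examples suggest:

```python
import unifiedbooster as ub
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.2, random_state=123)

res = ub.cross_val_optim(
    X_train=X_train,
    y_train=y_train,
    model_type="gradientboosting",
    type_fit="classification",
    scoring="accuracy",
    n_estimators=None,  # new default: tune the number of trees too
    cv=5,
    n_init=10,
    n_iter=50,
)
print(res.best_params)  # n_estimators already decoded via int(10 ** x)
```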
@@ -256,7 +321,7 @@ def lazy_cross_val_optim(
  type_fit="classification",
  scoring="accuracy",
  customize=False,
- n_estimators=100,
+ n_estimators=None,
  cv=5,
  n_jobs=None,
  n_init=10,
@@ -297,9 +362,9 @@ def lazy_cross_val_optim(
  
  customize: boolean
  if True, the surrogate is transformed into a quasi-randomized network (default is False)
-
+
  n_estimators: int
- maximum number of trees that can be built
+ maximum number of trees that can be built (default is None; if None, the parameter is tuned)
  
  cv: int;
  number of cross-validation folds
  
@@ -325,7 +390,7 @@ def lazy_cross_val_optim(
  Examples:
  
  ```python
- import os
+ import os
  import unifiedbooster as ub
  from sklearn.datasets import load_breast_cancer
  from sklearn.model_selection import train_test_split
@@ -396,7 +461,7 @@ def lazy_cross_val_optim(
  if customize == True:
  print(f"\n surrogate: CustomRegressor({est[0]})")
  surr_obj = ns.CustomRegressor(obj=est[1]())
- else:
+ else:
  print(f"\n surrogate: {est[0]}")
  surr_obj = est[1]()
  res = cross_val_optim(
@@ -421,7 +486,7 @@ def lazy_cross_val_optim(
  if customize == True:
  results.append((f"CustomRegressor({est[0]})", res))
  else:
- results.append((est[0], res))
+ results.append((est[0], res))
  except:
  pass
  
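The lazy variant gets the same relaxed default. A hedged sketch of a call under `n_estimators=None`, assuming (as the docstring example above suggests) that the function is reachable as `ub.lazy_cross_val_optim` and that the accumulated `(surrogate_name, result)` pairs are returned:

```python
import unifiedbooster as ub
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.2, random_state=123)

results = ub.lazy_cross_val_optim(
    X_train=X_train,
    y_train=y_train,
    model_type="gradientboosting",
    type_fit="classification",
    scoring="accuracy",
    customize=False,   # True would wrap each surrogate in ns.CustomRegressor
    n_estimators=None, # tree count is tuned per surrogate
    cv=5,
    n_init=10,
)
for name, res in results:  # return value assumed, not shown in this diff
    print(name, res.best_params)
```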
@@ -0,0 +1,36 @@
+ #!/usr/bin/env python
+
+ """
+ docstring
+ """
+
+ # Authors: Henrik Linusson
+ # Yaniv Romano modified np.py file to include CQR
+ # T. Moudiki modified __init__.py to import classes
+
+ # __version__ = '2.1.0'
+
+ from .nc import (
+ AbsErrorErrFunc,
+ QuantileRegErrFunc,
+ RegressorNc,
+ RegressorNormalizer,
+ )
+ from .cp import IcpRegressor, TcpClassifier
+ from .icp import IcpClassifier
+ from .nc import ClassifierNc, MarginErrFunc
+ from .base import RegressorAdapter, ClassifierAdapter
+
+ __all__ = [
+ "AbsErrorErrFunc",
+ "MarginErrFunc",
+ "QuantileRegErrFunc",
+ "RegressorAdapter",
+ "ClassifierAdapter",
+ "RegressorNc",
+ "ClassifierNc",
+ "RegressorNormalizer",
+ "IcpRegressor",
+ "IcpClassifier",
+ "TcpClassifier"
+ ]
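The vendored `nonconformist` subpackage (files 8-15 in the list above) is Henrik Linusson's conformal-prediction library. A sketch of using the exported names directly, assuming the vendored copy keeps the upstream fit/calibrate/predict API (constructor signatures are not shown in this diff):

```python
# Assumed to follow upstream nonconformist:
# RegressorAdapter -> RegressorNc -> IcpRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
from unifiedbooster.nonconformist import (
    AbsErrorErrFunc, IcpRegressor, RegressorAdapter, RegressorNc,
)

X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)
X_fit, X_cal, y_fit, y_cal = train_test_split(X_train, y_train, random_state=123)

nc = RegressorNc(RegressorAdapter(GradientBoostingRegressor()), AbsErrorErrFunc())
icp = IcpRegressor(nc)
icp.fit(X_fit, y_fit)        # train the underlying model on the proper set
icp.calibrate(X_cal, y_cal)  # compute calibration scores on held-out data
intervals = icp.predict(X_test, significance=0.05)  # (n, 2) lower/upper bounds
```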