ins-pricing 0.2.9__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. ins_pricing/CHANGELOG.md +93 -0
  2. ins_pricing/README.md +11 -0
  3. ins_pricing/cli/bayesopt_entry_runner.py +626 -499
  4. ins_pricing/cli/utils/evaluation_context.py +320 -0
  5. ins_pricing/cli/utils/import_resolver.py +350 -0
  6. ins_pricing/modelling/core/bayesopt/PHASE2_REFACTORING_SUMMARY.md +449 -0
  7. ins_pricing/modelling/core/bayesopt/PHASE3_REFACTORING_SUMMARY.md +406 -0
  8. ins_pricing/modelling/core/bayesopt/REFACTORING_SUMMARY.md +247 -0
  9. ins_pricing/modelling/core/bayesopt/config_components.py +351 -0
  10. ins_pricing/modelling/core/bayesopt/config_preprocess.py +3 -4
  11. ins_pricing/modelling/core/bayesopt/core.py +153 -94
  12. ins_pricing/modelling/core/bayesopt/models/model_ft_trainer.py +118 -31
  13. ins_pricing/modelling/core/bayesopt/trainers/trainer_base.py +294 -139
  14. ins_pricing/modelling/core/bayesopt/utils/__init__.py +86 -0
  15. ins_pricing/modelling/core/bayesopt/utils/constants.py +183 -0
  16. ins_pricing/modelling/core/bayesopt/utils/distributed_utils.py +186 -0
  17. ins_pricing/modelling/core/bayesopt/utils/io_utils.py +126 -0
  18. ins_pricing/modelling/core/bayesopt/utils/metrics_and_devices.py +540 -0
  19. ins_pricing/modelling/core/bayesopt/utils/torch_trainer_mixin.py +587 -0
  20. ins_pricing/modelling/core/bayesopt/utils.py +98 -1496
  21. ins_pricing/modelling/core/bayesopt/utils_backup.py +1503 -0
  22. ins_pricing/setup.py +1 -1
  23. {ins_pricing-0.2.9.dist-info → ins_pricing-0.3.0.dist-info}/METADATA +162 -149
  24. {ins_pricing-0.2.9.dist-info → ins_pricing-0.3.0.dist-info}/RECORD +26 -13
  25. {ins_pricing-0.2.9.dist-info → ins_pricing-0.3.0.dist-info}/WHEEL +0 -0
  26. {ins_pricing-0.2.9.dist-info → ins_pricing-0.3.0.dist-info}/top_level.txt +0 -0
ins_pricing/modelling/core/bayesopt/utils/metrics_and_devices.py (new file)
@@ -0,0 +1,540 @@
+ """Metrics computation, GPU management, device selection, CV utilities, and plotting.
+
+ This module contains:
+ - get_logger() for package-level logging
+ - MetricFactory for consistent metric computation
+ - GPUMemoryManager for CUDA memory management (imported from package utils)
+ - DeviceManager for device selection and model placement (imported from package utils)
+ - CVStrategyResolver for cross-validation strategy selection
+ - PlotUtils for lift chart plotting
+ - Backward compatibility wrappers for plotting functions
+ """
+
+ from __future__ import annotations
+
+ import gc
+ import logging
+ import os
+ from contextlib import contextmanager
+ from functools import lru_cache
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import numpy as np
+ import pandas as pd
+ import torch
+ import torch.nn as nn
+ from torch.nn.parallel import DistributedDataParallel as DDP
+ from sklearn.metrics import log_loss, mean_tweedie_deviance
+ from sklearn.model_selection import KFold, GroupKFold, TimeSeriesSplit, StratifiedKFold
+
+ # Try to import plotting dependencies
+ try:
+     import matplotlib
+     if os.name != "nt" and not os.environ.get("DISPLAY") and not os.environ.get("MPLBACKEND"):
+         matplotlib.use("Agg")
+     import matplotlib.pyplot as plt
+     _MPL_IMPORT_ERROR: Optional[BaseException] = None
+ except Exception as exc:
+     matplotlib = None
+     plt = None
+     _MPL_IMPORT_ERROR = exc
+
+ try:
+     from ....plotting import curves as plot_curves_common
+ except Exception:
+     try:
+         from ins_pricing.plotting import curves as plot_curves_common
+     except Exception:
+         plot_curves_common = None
+
+ from .constants import EPS
+
+ # Import DeviceManager and GPUMemoryManager from package-level utils
+ # (Eliminates ~230 lines of code duplication)
+ from ins_pricing.utils import DeviceManager, GPUMemoryManager
+ from .io_utils import IOUtils
+
+
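The backend guard above only forces the non-interactive Agg backend on non-Windows hosts where neither DISPLAY nor MPLBACKEND is set. A minimal sketch of overriding that choice from the caller's side (module path taken from the file list above; matplotlib reads MPLBACKEND itself, so the guard leaves an explicit setting alone):

import os
os.environ["MPLBACKEND"] = "Agg"  # set before the module is imported

from ins_pricing.modelling.core.bayesopt.utils import metrics_and_devices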
+ # =============================================================================
+ # Logging System
+ # =============================================================================
+
+ @lru_cache(maxsize=1)
+ def _get_package_logger() -> logging.Logger:
+     """Get or create the package-level logger with consistent formatting."""
+     logger = logging.getLogger("ins_pricing")
+     if not logger.handlers:
+         handler = logging.StreamHandler()
+         formatter = logging.Formatter(
+             "[%(levelname)s][%(name)s] %(message)s"
+         )
+         handler.setFormatter(formatter)
+         logger.addHandler(handler)
+     # Default to INFO, can be changed via environment variable
+     level = os.environ.get("INS_PRICING_LOG_LEVEL", "INFO").upper()
+     logger.setLevel(getattr(logging, level, logging.INFO))
+     return logger
+
+
+ def get_logger(name: str = "ins_pricing") -> logging.Logger:
+     """Get a logger with the given name, inheriting package-level settings.
+
+     Args:
+         name: Logger name, typically module name like 'ins_pricing.trainer'
+
+     Returns:
+         Configured logger instance
+
+     Example:
+         >>> logger = get_logger("ins_pricing.trainer.ft")
+         >>> logger.info("Training started")
+     """
+     _get_package_logger()
+     return logging.getLogger(name)
+
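A minimal usage sketch for the logging helpers, assuming the wheel is installed. INS_PRICING_LOG_LEVEL is read once, when the cached package logger is first created:

import os
os.environ["INS_PRICING_LOG_LEVEL"] = "DEBUG"  # must be set before the first get_logger() call

from ins_pricing.modelling.core.bayesopt.utils.metrics_and_devices import get_logger

logger = get_logger("ins_pricing.trainer.ft")
logger.debug("fold 1 started")  # prints: [DEBUG][ins_pricing.trainer.ft] fold 1 started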
+
+ # =============================================================================
+ # Metric Computation Factory
+ # =============================================================================
+
+ class MetricFactory:
+     """Factory for computing evaluation metrics consistently across all trainers.
+
+     This class centralizes metric computation logic that was previously duplicated
+     across FTTrainer, ResNetTrainer, GNNTrainer, XGBTrainer, and GLMTrainer.
+
+     Example:
+         >>> factory = MetricFactory(task_type='regression', tweedie_power=1.5)
+         >>> score = factory.compute(y_true, y_pred, sample_weight)
+     """
+
+     def __init__(
+         self,
+         task_type: str = "regression",
+         tweedie_power: float = 1.5,
+         clip_min: float = 1e-8,
+         clip_max: float = 1 - 1e-8,
+     ):
+         """Initialize the metric factory.
+
+         Args:
+             task_type: Either 'regression' or 'classification'
+             tweedie_power: Power parameter for Tweedie deviance (1.0-2.0)
+             clip_min: Minimum value for clipping predictions
+             clip_max: Maximum value for clipping predictions (for classification)
+         """
+         self.task_type = task_type
+         self.tweedie_power = tweedie_power
+         self.clip_min = clip_min
+         self.clip_max = clip_max
+
+     def compute(
+         self,
+         y_true: np.ndarray,
+         y_pred: np.ndarray,
+         sample_weight: Optional[np.ndarray] = None,
+     ) -> float:
+         """Compute the appropriate metric based on task type.
+
+         Args:
+             y_true: Ground truth values
+             y_pred: Predicted values
+             sample_weight: Optional sample weights
+
+         Returns:
+             Computed metric value (lower is better)
+         """
+         y_pred = np.asarray(y_pred)
+         y_true = np.asarray(y_true)
+
+         if self.task_type == "classification":
+             y_pred_clipped = np.clip(y_pred, self.clip_min, self.clip_max)
+             return float(log_loss(y_true, y_pred_clipped, sample_weight=sample_weight))
+
+         # Regression: use Tweedie deviance
+         y_pred_safe = np.maximum(y_pred, self.clip_min)
+         return float(mean_tweedie_deviance(
+             y_true,
+             y_pred_safe,
+             sample_weight=sample_weight,
+             power=self.tweedie_power,
+         ))
+
+     def update_power(self, power: float) -> None:
+         """Update the Tweedie power parameter.
+
+         Args:
+             power: New power value (1.0-2.0)
+         """
+         self.tweedie_power = power
+
+
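A short sketch of MetricFactory with synthetic severity-style data (array values are illustrative only). With task_type="regression" the score is a weighted mean Tweedie deviance, so lower is better and the power can be retuned mid-search via update_power():

import numpy as np
from ins_pricing.modelling.core.bayesopt.utils.metrics_and_devices import MetricFactory

y_true = np.array([0.0, 120.0, 0.0, 450.0])   # zero-inflated target, typical for pure premium
y_pred = np.array([35.0, 90.0, 10.0, 380.0])
w = np.array([1.0, 1.0, 0.5, 2.0])            # exposure weights

factory = MetricFactory(task_type="regression", tweedie_power=1.5)
score = factory.compute(y_true, y_pred, sample_weight=w)

factory.update_power(1.9)                     # try a more Gamma-like power
score_heavier = factory.compute(y_true, y_pred, sample_weight=w)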
+ # =============================================================================
+ # GPU Memory Manager and Device Manager
+ # =============================================================================
+ # NOTE: These classes are imported from ins_pricing.utils (see top of file)
+ # This eliminates ~230 lines of duplicate code while maintaining backward compatibility
+
+
+ # =============================================================================
+ # Cross-Validation Strategy Resolver
+ # =============================================================================
+
+ class CVStrategyResolver:
+     """Resolver for cross-validation splitting strategies.
+
+     This class consolidates CV strategy resolution logic that was previously
+     duplicated across trainer_base.py and trainer_ft.py.
+
+     Supported strategies:
+     - 'random': Standard KFold
+     - 'stratified': StratifiedKFold (for classification)
+     - 'group': GroupKFold (requires group column)
+     - 'time': TimeSeriesSplit (requires time column)
+
+     Example:
+         >>> resolver = CVStrategyResolver(
+         ...     strategy='group',
+         ...     n_splits=5,
+         ...     group_col='policy_id',
+         ...     data=train_df,
+         ... )
+         >>> splitter, groups = resolver.get_splitter()
+         >>> for train_idx, val_idx in splitter.split(X, y, groups):
+         ...     pass
+     """
+
+     VALID_STRATEGIES = {"random", "stratified", "group", "grouped", "time", "timeseries", "temporal"}
+
+     def __init__(
+         self,
+         strategy: str = "random",
+         n_splits: int = 5,
+         shuffle: bool = True,
+         random_state: Optional[int] = None,
+         group_col: Optional[str] = None,
+         time_col: Optional[str] = None,
+         time_ascending: bool = True,
+         data: Optional[pd.DataFrame] = None,
+     ):
+         """Initialize the CV strategy resolver.
+
+         Args:
+             strategy: CV strategy name
+             n_splits: Number of CV folds
+             shuffle: Whether to shuffle for random/stratified
+             random_state: Random seed for reproducibility
+             group_col: Column name for group-based splitting
+             time_col: Column name for time-based splitting
+             time_ascending: Sort order for time-based splitting
+             data: DataFrame containing group/time columns
+         """
+         self.strategy = strategy.strip().lower()
+         self.n_splits = max(2, int(n_splits))
+         self.shuffle = shuffle
+         self.random_state = random_state
+         self.group_col = group_col
+         self.time_col = time_col
+         self.time_ascending = time_ascending
+         self.data = data
+
+         if self.strategy not in self.VALID_STRATEGIES:
+             raise ValueError(
+                 f"Invalid strategy '{strategy}'. "
+                 f"Valid options: {sorted(self.VALID_STRATEGIES)}"
+             )
+
+     def get_splitter(self) -> Tuple[Any, Optional[pd.Series]]:
+         """Get the appropriate splitter and groups.
+
+         Returns:
+             Tuple of (splitter, groups) where groups may be None
+
+         Raises:
+             ValueError: If required columns are missing
+         """
+         if self.strategy in {"group", "grouped"}:
+             return self._get_group_splitter()
+         elif self.strategy in {"time", "timeseries", "temporal"}:
+             return self._get_time_splitter()
+         elif self.strategy == "stratified":
+             return self._get_stratified_splitter()
+         else:
+             return self._get_random_splitter()
+
+     def _get_random_splitter(self) -> Tuple[KFold, None]:
+         """Get a random KFold splitter."""
+         splitter = KFold(
+             n_splits=self.n_splits,
+             shuffle=self.shuffle,
+             random_state=self.random_state if self.shuffle else None,
+         )
+         return splitter, None
+
+     def _get_stratified_splitter(self) -> Tuple[StratifiedKFold, None]:
+         """Get a stratified KFold splitter."""
+         splitter = StratifiedKFold(
+             n_splits=self.n_splits,
+             shuffle=self.shuffle,
+             random_state=self.random_state if self.shuffle else None,
+         )
+         return splitter, None
+
+     def _get_group_splitter(self) -> Tuple[GroupKFold, pd.Series]:
+         """Get a group-based KFold splitter."""
+         if not self.group_col:
+             raise ValueError("group_col is required for group strategy")
+         if self.data is None:
+             raise ValueError("data DataFrame is required for group strategy")
+         if self.group_col not in self.data.columns:
+             raise KeyError(f"group_col '{self.group_col}' not found in data")
+
+         groups = self.data[self.group_col]
+         splitter = GroupKFold(n_splits=self.n_splits)
+         return splitter, groups
+
+     def _get_time_splitter(self) -> Tuple[Any, None]:
+         """Get a time-series splitter."""
+         if not self.time_col:
+             raise ValueError("time_col is required for time strategy")
+         if self.data is None:
+             raise ValueError("data DataFrame is required for time strategy")
+         if self.time_col not in self.data.columns:
+             raise KeyError(f"time_col '{self.time_col}' not found in data")
+
+         splitter = TimeSeriesSplit(n_splits=self.n_splits)
+
+         # Create an ordered wrapper that sorts by time column
+         order_index = self.data[self.time_col].sort_values(
+             ascending=self.time_ascending
+         ).index
+         order = self.data.index.get_indexer(order_index)
+
+         return _OrderedSplitter(splitter, order), None
+
+
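A sketch of the time strategy (the column name uw_month is hypothetical). get_splitter() returns (splitter, None) here, and the wrapped splitter yields positional indices into the original, unsorted frame:

import numpy as np
import pandas as pd
from ins_pricing.modelling.core.bayesopt.utils.metrics_and_devices import CVStrategyResolver

train_df = pd.DataFrame({"uw_month": np.arange(12), "x": np.random.rand(12)})

resolver = CVStrategyResolver(strategy="time", n_splits=3, time_col="uw_month", data=train_df)
splitter, groups = resolver.get_splitter()  # groups is None for the time strategy
for train_idx, val_idx in splitter.split(train_df):
    # every training row precedes every validation row in uw_month order
    assert train_df.iloc[train_idx]["uw_month"].max() < train_df.iloc[val_idx]["uw_month"].min()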
+ class _OrderedSplitter:
+     """Wrapper for splitters that need to respect a specific ordering."""
+
+     def __init__(self, base_splitter, order: np.ndarray):
+         self.base_splitter = base_splitter
+         self.order = order
+
+     def split(self, X, y=None, groups=None):
+         """Split with ordering applied."""
+         n = len(X)
+         X_ordered = np.arange(n)[self.order]
+         for train_idx, val_idx in self.base_splitter.split(X_ordered):
+             yield self.order[train_idx], self.order[val_idx]
+
+     def get_n_splits(self, X=None, y=None, groups=None):
+         return self.base_splitter.get_n_splits()
+
+
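To see how the wrapper maps folds back to the original row order, a tiny standalone check (the order array stands in for what _get_time_splitter() builds from the time column):

import numpy as np
from sklearn.model_selection import TimeSeriesSplit
from ins_pricing.modelling.core.bayesopt.utils.metrics_and_devices import _OrderedSplitter

order = np.array([2, 0, 3, 1])  # original row positions, sorted by time
wrapped = _OrderedSplitter(TimeSeriesSplit(n_splits=2), order)
for train_idx, val_idx in wrapped.split(np.zeros(4)):
    print(train_idx, val_idx)
# [2 0] [3]
# [2 0 3] [1]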
+ # =============================================================================
+ # Plot Utils
+ # =============================================================================
+
+ def _plot_skip(label: str) -> None:
+     """Print message when plot is skipped due to missing matplotlib."""
+     if _MPL_IMPORT_ERROR is not None:
+         print(f"[Plot] Skip {label}: matplotlib unavailable ({_MPL_IMPORT_ERROR}).", flush=True)
+     else:
+         print(f"[Plot] Skip {label}: matplotlib unavailable.", flush=True)
+
+
+ class PlotUtils:
+     """Plotting utilities for lift charts."""
+
+     @staticmethod
+     def split_data(data: pd.DataFrame, col_nme: str, wgt_nme: str, n_bins: int = 10) -> pd.DataFrame:
+         """Split data into bins by cumulative weight."""
+         data_sorted = data.sort_values(by=col_nme, ascending=True).copy()
+         data_sorted['cum_weight'] = data_sorted[wgt_nme].cumsum()
+         w_sum = data_sorted[wgt_nme].sum()
+         if w_sum <= EPS:
+             data_sorted['bins'] = 0
+         else:
+             data_sorted['bins'] = np.floor(
+                 data_sorted['cum_weight'] * float(n_bins) / w_sum
+             )
+         data_sorted.loc[(data_sorted['bins'] == n_bins),
+                         'bins'] = n_bins - 1
+         return data_sorted.groupby(['bins'], observed=True).sum(numeric_only=True)
+
+     @staticmethod
+     def plot_lift_ax(ax, plot_data, title, pred_label='Predicted', act_label='Actual', weight_label='Earned Exposure'):
+         """Plot lift chart on given axes."""
+         ax.plot(plot_data.index, plot_data['act_v'],
+                 label=act_label, color='red')
+         ax.plot(plot_data.index, plot_data['exp_v'],
+                 label=pred_label, color='blue')
+         ax.set_title(title, fontsize=8)
+         ax.set_xticks(plot_data.index)
+         ax.set_xticklabels(plot_data.index, rotation=90, fontsize=6)
+         ax.tick_params(axis='y', labelsize=6)
+         ax.legend(loc='upper left', fontsize=5, frameon=False)
+         ax.margins(0.05)
+         ax2 = ax.twinx()
+         ax2.bar(plot_data.index, plot_data['weight'],
+                 alpha=0.5, color='seagreen',
+                 label=weight_label)
+         ax2.tick_params(axis='y', labelsize=6)
+         ax2.legend(loc='upper right', fontsize=5, frameon=False)
+
+     @staticmethod
+     def plot_dlift_ax(ax, plot_data, title, label1, label2, act_label='Actual', weight_label='Earned Exposure'):
+         """Plot double lift chart on given axes."""
+         ax.plot(plot_data.index, plot_data['act_v'],
+                 label=act_label, color='red')
+         ax.plot(plot_data.index, plot_data['exp_v1'],
+                 label=label1, color='blue')
+         ax.plot(plot_data.index, plot_data['exp_v2'],
+                 label=label2, color='black')
+         ax.set_title(title, fontsize=8)
+         ax.set_xticks(plot_data.index)
+         ax.set_xticklabels(plot_data.index, rotation=90, fontsize=6)
+         ax.set_xlabel(f'{label1} / {label2}', fontsize=6)
+         ax.tick_params(axis='y', labelsize=6)
+         ax.legend(loc='upper left', fontsize=5, frameon=False)
+         ax.margins(0.1)
+         ax2 = ax.twinx()
+         ax2.bar(plot_data.index, plot_data['weight'],
+                 alpha=0.5, color='seagreen',
+                 label=weight_label)
+         ax2.tick_params(axis='y', labelsize=6)
+         ax2.legend(loc='upper right', fontsize=5, frameon=False)
+
+     @staticmethod
+     def plot_lift_list(pred_model, w_pred_list, w_act_list,
+                        weight_list, tgt_nme, n_bins: int = 10,
+                        fig_nme: str = 'Lift Chart'):
+         """Plot lift chart for model predictions."""
+         if plot_curves_common is not None:
+             save_path = os.path.join(
+                 os.getcwd(), 'plot', f'05_{tgt_nme}_{fig_nme}.png')
+             plot_curves_common.plot_lift_curve(
+                 pred_model,
+                 w_act_list,
+                 weight_list,
+                 n_bins=n_bins,
+                 title=f'Lift Chart of {tgt_nme}',
+                 pred_label='Predicted',
+                 act_label='Actual',
+                 weight_label='Earned Exposure',
+                 pred_weighted=False,
+                 actual_weighted=True,
+                 save_path=save_path,
+                 show=False,
+             )
+             return
+         if plt is None:
+             _plot_skip("lift plot")
+             return
+         lift_data = pd.DataFrame({
+             'pred': pred_model,
+             'w_pred': w_pred_list,
+             'act': w_act_list,
+             'weight': weight_list
+         })
+         plot_data = PlotUtils.split_data(lift_data, 'pred', 'weight', n_bins)
+         plot_data['exp_v'] = plot_data['w_pred'] / plot_data['weight']
+         plot_data['act_v'] = plot_data['act'] / plot_data['weight']
+         plot_data.reset_index(inplace=True)
+
+         fig = plt.figure(figsize=(7, 5))
+         ax = fig.add_subplot(111)
+         PlotUtils.plot_lift_ax(ax, plot_data, f'Lift Chart of {tgt_nme}')
+         plt.subplots_adjust(wspace=0.3)
+
+         save_path = os.path.join(
+             os.getcwd(), 'plot', f'05_{tgt_nme}_{fig_nme}.png')
+         IOUtils.ensure_parent_dir(save_path)
+         plt.savefig(save_path, dpi=300)
+         plt.close(fig)
+
+     @staticmethod
+     def plot_dlift_list(pred_model_1, pred_model_2,
+                         model_nme_1, model_nme_2,
+                         tgt_nme,
+                         w_list, w_act_list, n_bins: int = 10,
+                         fig_nme: str = 'Double Lift Chart'):
+         """Plot double lift chart comparing two models."""
+         if plot_curves_common is not None:
+             save_path = os.path.join(
+                 os.getcwd(), 'plot', f'06_{tgt_nme}_{fig_nme}.png')
+             plot_curves_common.plot_double_lift_curve(
+                 pred_model_1,
+                 pred_model_2,
+                 w_act_list,
+                 w_list,
+                 n_bins=n_bins,
+                 title=f'Double Lift Chart of {tgt_nme}',
+                 label1=model_nme_1,
+                 label2=model_nme_2,
+                 pred1_weighted=False,
+                 pred2_weighted=False,
+                 actual_weighted=True,
+                 save_path=save_path,
+                 show=False,
+             )
+             return
+         if plt is None:
+             _plot_skip("double lift plot")
+             return
+         lift_data = pd.DataFrame({
+             'pred1': pred_model_1,
+             'pred2': pred_model_2,
+             'act': w_act_list,
+             'weight': w_list
+         })
+         lift_data['diff_ly'] = lift_data['pred1'] / lift_data['pred2']
+         lift_data['w_pred1'] = lift_data['pred1'] * lift_data['weight']
+         lift_data['w_pred2'] = lift_data['pred2'] * lift_data['weight']
+         plot_data = PlotUtils.split_data(
+             lift_data, 'diff_ly', 'weight', n_bins)
+         plot_data['exp_v1'] = plot_data['w_pred1'] / plot_data['act']
+         plot_data['exp_v2'] = plot_data['w_pred2'] / plot_data['act']
+         plot_data['act_v'] = plot_data['act'] / plot_data['act']
+         plot_data.reset_index(inplace=True)
+
+         fig = plt.figure(figsize=(7, 5))
+         ax = fig.add_subplot(111)
+         PlotUtils.plot_dlift_ax(
+             ax, plot_data, f'Double Lift Chart of {tgt_nme}', model_nme_1, model_nme_2)
+         plt.subplots_adjust(bottom=0.25, top=0.95, right=0.8)
+
+         save_path = os.path.join(
+             os.getcwd(), 'plot', f'06_{tgt_nme}_{fig_nme}.png')
+         IOUtils.ensure_parent_dir(save_path)
+         plt.savefig(save_path, dpi=300)
+         plt.close(fig)
+
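A usage sketch for PlotUtils with synthetic arrays. Both lift methods take weighted actuals (w_act_list) but an unweighted prediction ranking (pred_model), and the chart is written under ./plot/ relative to the working directory:

import numpy as np
from ins_pricing.modelling.core.bayesopt.utils.metrics_and_devices import PlotUtils

rng = np.random.default_rng(0)
n = 1000
weight = rng.uniform(0.5, 1.5, n)            # earned exposure
pred = rng.gamma(2.0, 100.0, n)              # predicted pure premium
actual = pred * rng.lognormal(0.0, 0.3, n)   # noisy "observed" values

PlotUtils.plot_lift_list(
    pred_model=pred,
    w_pred_list=pred * weight,   # weighted predictions, summed per bin
    w_act_list=actual * weight,  # weighted actuals
    weight_list=weight,
    tgt_nme="pure_premium",
    n_bins=10,
)  # writes plot/05_pure_premium_Lift Chart.png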
+
+ # =============================================================================
+ # Backward Compatibility Wrappers
+ # =============================================================================
+
+ def split_data(data, col_nme, wgt_nme, n_bins=10):
+     """Legacy function wrapper for PlotUtils.split_data()."""
+     return PlotUtils.split_data(data, col_nme, wgt_nme, n_bins)
+
+
+ def plot_lift_list(pred_model, w_pred_list, w_act_list,
+                    weight_list, tgt_nme, n_bins=10,
+                    fig_nme='Lift Chart'):
+     """Legacy function wrapper for PlotUtils.plot_lift_list()."""
+     return PlotUtils.plot_lift_list(pred_model, w_pred_list, w_act_list,
+                                     weight_list, tgt_nme, n_bins, fig_nme)
+
+
+ def plot_dlift_list(pred_model_1, pred_model_2,
+                     model_nme_1, model_nme_2,
+                     tgt_nme,
+                     w_list, w_act_list, n_bins=10,
+                     fig_nme='Double Lift Chart'):
+     """Legacy function wrapper for PlotUtils.plot_dlift_list()."""
+     return PlotUtils.plot_dlift_list(pred_model_1, pred_model_2,
+                                      model_nme_1, model_nme_2,
+                                      tgt_nme, w_list, w_act_list,
+                                      n_bins, fig_nme)
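These wrappers keep pre-0.3.0 call sites working against the split-up module. A hedged migration sketch (whether the slimmed utils.py re-exports these names is not visible in this hunk, so the flat-import line is shown only as the legacy pattern):

# Legacy (pre-0.3.0) call sites imported from the flat utils module:
#   from ins_pricing.modelling.core.bayesopt.utils import plot_lift_list
# New code can target the split module directly:
from ins_pricing.modelling.core.bayesopt.utils.metrics_and_devices import (
    PlotUtils,       # preferred entry point going forward
    plot_lift_list,  # legacy wrapper delegating to PlotUtils.plot_lift_list
)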