ins_pricing-0.1.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. ins_pricing/README.md +60 -0
  2. ins_pricing/__init__.py +102 -0
  3. ins_pricing/governance/README.md +18 -0
  4. ins_pricing/governance/__init__.py +20 -0
  5. ins_pricing/governance/approval.py +93 -0
  6. ins_pricing/governance/audit.py +37 -0
  7. ins_pricing/governance/registry.py +99 -0
  8. ins_pricing/governance/release.py +159 -0
  9. ins_pricing/modelling/BayesOpt.py +146 -0
  10. ins_pricing/modelling/BayesOpt_USAGE.md +925 -0
  11. ins_pricing/modelling/BayesOpt_entry.py +575 -0
  12. ins_pricing/modelling/BayesOpt_incremental.py +731 -0
  13. ins_pricing/modelling/Explain_Run.py +36 -0
  14. ins_pricing/modelling/Explain_entry.py +539 -0
  15. ins_pricing/modelling/Pricing_Run.py +36 -0
  16. ins_pricing/modelling/README.md +33 -0
  17. ins_pricing/modelling/__init__.py +44 -0
  18. ins_pricing/modelling/bayesopt/__init__.py +98 -0
  19. ins_pricing/modelling/bayesopt/config_preprocess.py +303 -0
  20. ins_pricing/modelling/bayesopt/core.py +1476 -0
  21. ins_pricing/modelling/bayesopt/models.py +2196 -0
  22. ins_pricing/modelling/bayesopt/trainers.py +2446 -0
  23. ins_pricing/modelling/bayesopt/utils.py +1021 -0
  24. ins_pricing/modelling/cli_common.py +136 -0
  25. ins_pricing/modelling/explain/__init__.py +55 -0
  26. ins_pricing/modelling/explain/gradients.py +334 -0
  27. ins_pricing/modelling/explain/metrics.py +176 -0
  28. ins_pricing/modelling/explain/permutation.py +155 -0
  29. ins_pricing/modelling/explain/shap_utils.py +146 -0
  30. ins_pricing/modelling/notebook_utils.py +284 -0
  31. ins_pricing/modelling/plotting/__init__.py +45 -0
  32. ins_pricing/modelling/plotting/common.py +63 -0
  33. ins_pricing/modelling/plotting/curves.py +572 -0
  34. ins_pricing/modelling/plotting/diagnostics.py +139 -0
  35. ins_pricing/modelling/plotting/geo.py +362 -0
  36. ins_pricing/modelling/plotting/importance.py +121 -0
  37. ins_pricing/modelling/run_logging.py +133 -0
  38. ins_pricing/modelling/tests/conftest.py +8 -0
  39. ins_pricing/modelling/tests/test_cross_val_generic.py +66 -0
  40. ins_pricing/modelling/tests/test_distributed_utils.py +18 -0
  41. ins_pricing/modelling/tests/test_explain.py +56 -0
  42. ins_pricing/modelling/tests/test_geo_tokens_split.py +49 -0
  43. ins_pricing/modelling/tests/test_graph_cache.py +33 -0
  44. ins_pricing/modelling/tests/test_plotting.py +63 -0
  45. ins_pricing/modelling/tests/test_plotting_library.py +150 -0
  46. ins_pricing/modelling/tests/test_preprocessor.py +48 -0
  47. ins_pricing/modelling/watchdog_run.py +211 -0
  48. ins_pricing/pricing/README.md +44 -0
  49. ins_pricing/pricing/__init__.py +27 -0
  50. ins_pricing/pricing/calibration.py +39 -0
  51. ins_pricing/pricing/data_quality.py +117 -0
  52. ins_pricing/pricing/exposure.py +85 -0
  53. ins_pricing/pricing/factors.py +91 -0
  54. ins_pricing/pricing/monitoring.py +99 -0
  55. ins_pricing/pricing/rate_table.py +78 -0
  56. ins_pricing/production/__init__.py +21 -0
  57. ins_pricing/production/drift.py +30 -0
  58. ins_pricing/production/monitoring.py +143 -0
  59. ins_pricing/production/scoring.py +40 -0
  60. ins_pricing/reporting/README.md +20 -0
  61. ins_pricing/reporting/__init__.py +11 -0
  62. ins_pricing/reporting/report_builder.py +72 -0
  63. ins_pricing/reporting/scheduler.py +45 -0
  64. ins_pricing/setup.py +41 -0
  65. ins_pricing v2/__init__.py +23 -0
  66. ins_pricing v2/governance/__init__.py +20 -0
  67. ins_pricing v2/governance/approval.py +93 -0
  68. ins_pricing v2/governance/audit.py +37 -0
  69. ins_pricing v2/governance/registry.py +99 -0
  70. ins_pricing v2/governance/release.py +159 -0
  71. ins_pricing v2/modelling/Explain_Run.py +36 -0
  72. ins_pricing v2/modelling/Pricing_Run.py +36 -0
  73. ins_pricing v2/modelling/__init__.py +151 -0
  74. ins_pricing v2/modelling/cli_common.py +141 -0
  75. ins_pricing v2/modelling/config.py +249 -0
  76. ins_pricing v2/modelling/config_preprocess.py +254 -0
  77. ins_pricing v2/modelling/core.py +741 -0
  78. ins_pricing v2/modelling/data_container.py +42 -0
  79. ins_pricing v2/modelling/explain/__init__.py +55 -0
  80. ins_pricing v2/modelling/explain/gradients.py +334 -0
  81. ins_pricing v2/modelling/explain/metrics.py +176 -0
  82. ins_pricing v2/modelling/explain/permutation.py +155 -0
  83. ins_pricing v2/modelling/explain/shap_utils.py +146 -0
  84. ins_pricing v2/modelling/features.py +215 -0
  85. ins_pricing v2/modelling/model_manager.py +148 -0
  86. ins_pricing v2/modelling/model_plotting.py +463 -0
  87. ins_pricing v2/modelling/models.py +2203 -0
  88. ins_pricing v2/modelling/notebook_utils.py +294 -0
  89. ins_pricing v2/modelling/plotting/__init__.py +45 -0
  90. ins_pricing v2/modelling/plotting/common.py +63 -0
  91. ins_pricing v2/modelling/plotting/curves.py +572 -0
  92. ins_pricing v2/modelling/plotting/diagnostics.py +139 -0
  93. ins_pricing v2/modelling/plotting/geo.py +362 -0
  94. ins_pricing v2/modelling/plotting/importance.py +121 -0
  95. ins_pricing v2/modelling/run_logging.py +133 -0
  96. ins_pricing v2/modelling/tests/conftest.py +8 -0
  97. ins_pricing v2/modelling/tests/test_cross_val_generic.py +66 -0
  98. ins_pricing v2/modelling/tests/test_distributed_utils.py +18 -0
  99. ins_pricing v2/modelling/tests/test_explain.py +56 -0
  100. ins_pricing v2/modelling/tests/test_geo_tokens_split.py +49 -0
  101. ins_pricing v2/modelling/tests/test_graph_cache.py +33 -0
  102. ins_pricing v2/modelling/tests/test_plotting.py +63 -0
  103. ins_pricing v2/modelling/tests/test_plotting_library.py +150 -0
  104. ins_pricing v2/modelling/tests/test_preprocessor.py +48 -0
  105. ins_pricing v2/modelling/trainers.py +2447 -0
  106. ins_pricing v2/modelling/utils.py +1020 -0
  107. ins_pricing v2/modelling/watchdog_run.py +211 -0
  108. ins_pricing v2/pricing/__init__.py +27 -0
  109. ins_pricing v2/pricing/calibration.py +39 -0
  110. ins_pricing v2/pricing/data_quality.py +117 -0
  111. ins_pricing v2/pricing/exposure.py +85 -0
  112. ins_pricing v2/pricing/factors.py +91 -0
  113. ins_pricing v2/pricing/monitoring.py +99 -0
  114. ins_pricing v2/pricing/rate_table.py +78 -0
  115. ins_pricing v2/production/__init__.py +21 -0
  116. ins_pricing v2/production/drift.py +30 -0
  117. ins_pricing v2/production/monitoring.py +143 -0
  118. ins_pricing v2/production/scoring.py +40 -0
  119. ins_pricing v2/reporting/__init__.py +11 -0
  120. ins_pricing v2/reporting/report_builder.py +72 -0
  121. ins_pricing v2/reporting/scheduler.py +45 -0
  122. ins_pricing v2/scripts/BayesOpt_incremental.py +722 -0
  123. ins_pricing v2/scripts/Explain_entry.py +545 -0
  124. ins_pricing v2/scripts/__init__.py +1 -0
  125. ins_pricing v2/scripts/train.py +568 -0
  126. ins_pricing v2/setup.py +55 -0
  127. ins_pricing v2/smoke_test.py +28 -0
  128. ins_pricing-0.1.6.dist-info/METADATA +78 -0
  129. ins_pricing-0.1.6.dist-info/RECORD +169 -0
  130. ins_pricing-0.1.6.dist-info/WHEEL +5 -0
  131. ins_pricing-0.1.6.dist-info/top_level.txt +4 -0
  132. user_packages/__init__.py +105 -0
  133. user_packages legacy/BayesOpt.py +5659 -0
  134. user_packages legacy/BayesOpt_entry.py +513 -0
  135. user_packages legacy/BayesOpt_incremental.py +685 -0
  136. user_packages legacy/Pricing_Run.py +36 -0
  137. user_packages legacy/Try/BayesOpt Legacy251213.py +3719 -0
  138. user_packages legacy/Try/BayesOpt Legacy251215.py +3758 -0
  139. user_packages legacy/Try/BayesOpt lagecy251201.py +3506 -0
  140. user_packages legacy/Try/BayesOpt lagecy251218.py +3992 -0
  141. user_packages legacy/Try/BayesOpt legacy.py +3280 -0
  142. user_packages legacy/Try/BayesOpt.py +838 -0
  143. user_packages legacy/Try/BayesOptAll.py +1569 -0
  144. user_packages legacy/Try/BayesOptAllPlatform.py +909 -0
  145. user_packages legacy/Try/BayesOptCPUGPU.py +1877 -0
  146. user_packages legacy/Try/BayesOptSearch.py +830 -0
  147. user_packages legacy/Try/BayesOptSearchOrigin.py +829 -0
  148. user_packages legacy/Try/BayesOptV1.py +1911 -0
  149. user_packages legacy/Try/BayesOptV10.py +2973 -0
  150. user_packages legacy/Try/BayesOptV11.py +3001 -0
  151. user_packages legacy/Try/BayesOptV12.py +3001 -0
  152. user_packages legacy/Try/BayesOptV2.py +2065 -0
  153. user_packages legacy/Try/BayesOptV3.py +2209 -0
  154. user_packages legacy/Try/BayesOptV4.py +2342 -0
  155. user_packages legacy/Try/BayesOptV5.py +2372 -0
  156. user_packages legacy/Try/BayesOptV6.py +2759 -0
  157. user_packages legacy/Try/BayesOptV7.py +2832 -0
  158. user_packages legacy/Try/BayesOptV8Codex.py +2731 -0
  159. user_packages legacy/Try/BayesOptV8Gemini.py +2614 -0
  160. user_packages legacy/Try/BayesOptV9.py +2927 -0
  161. user_packages legacy/Try/BayesOpt_entry legacy.py +313 -0
  162. user_packages legacy/Try/ModelBayesOptSearch.py +359 -0
  163. user_packages legacy/Try/ResNetBayesOptSearch.py +249 -0
  164. user_packages legacy/Try/XgbBayesOptSearch.py +121 -0
  165. user_packages legacy/Try/xgbbayesopt.py +523 -0
  166. user_packages legacy/__init__.py +19 -0
  167. user_packages legacy/cli_common.py +124 -0
  168. user_packages legacy/notebook_utils.py +228 -0
  169. user_packages legacy/watchdog_run.py +202 -0
@@ -0,0 +1,2927 @@
+ # Data transfer between CPU and GPU is expensive; multiple CUDA streams can overlap transfers with compute to support larger datasets.
+
+ import copy
+ import gc
+ import math
+ import os
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+ import csv
+
+ import joblib
+ import matplotlib.pyplot as plt
+ import numpy as np  # 1.26.2
+ import optuna  # 4.3.0
+ import pandas as pd  # 2.2.3
+ import shap
+ import statsmodels.api as sm
+
+ import torch  # version: 1.10.1+cu111
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import xgboost as xgb  # 1.7.0
+
+ from torch.utils.data import Dataset, DataLoader, TensorDataset, DistributedSampler
+ from torch.cuda.amp import autocast, GradScaler
+ from torch.nn.utils import clip_grad_norm_
+ from torch.nn.parallel import DistributedDataParallel as DDP
+ import torch.distributed as dist
+ from sklearn.model_selection import ShuffleSplit, cross_val_score  # 1.2.2
+ from sklearn.preprocessing import StandardScaler
+ from sklearn.metrics import log_loss, make_scorer, mean_tweedie_deviance
+
+ # Constants and utility modules
+ # =============================================================================
+ torch.backends.cudnn.benchmark = True
+ EPS = 1e-8
+
+
+ class IOUtils:
+     # Small helpers for file and path handling.
+
+     @staticmethod
+     def csv_to_dict(file_path: str) -> List[Dict[str, Any]]:
+         with open(file_path, mode='r', encoding='utf-8') as file:
+             reader = csv.DictReader(file)
+             return [
+                 dict(filter(lambda item: item[0] != '', row.items()))
+                 for row in reader
+             ]
+
+     @staticmethod
+     def ensure_parent_dir(file_path: str) -> None:
+         # Create the target file's parent directory if it does not exist
+         directory = os.path.dirname(file_path)
+         if directory:
+             os.makedirs(directory, exist_ok=True)
+
+
+ class TrainingUtils:
+     # Small helper functions used throughout training.
+
+     @staticmethod
+     def compute_batch_size(data_size: int, learning_rate: float, batch_num: int, minimum: int) -> int:
+         estimated = int((learning_rate / 1e-4) ** 0.5 *
+                         (data_size / max(batch_num, 1)))
+         return max(1, min(data_size, max(minimum, estimated)))
+
+     @staticmethod
+     def tweedie_loss(pred, target, p=1.5, eps=1e-6, max_clip=1e6):
+         # Clamp predictions to positive values for numerical stability
+         pred_clamped = torch.clamp(pred, min=eps)
+         if p == 1:
+             term1 = target * torch.log(target / pred_clamped + eps)  # Poisson
+             term2 = target - pred_clamped  # so 2*(term1 - term2) = 2*(t*log(t/mu) - t + mu) >= 0
+             term3 = 0
+         elif p == 0:
+             term1 = 0.5 * torch.pow(target - pred_clamped, 2)  # Gaussian
+             term2 = 0
+             term3 = 0
+         elif p == 2:
+             term1 = torch.log(pred_clamped / target + eps)  # Gamma
+             term2 = -target / pred_clamped + 1
+             term3 = 0
+         else:
+             term1 = torch.pow(target, 2 - p) / ((1 - p) * (2 - p))
+             term2 = target * torch.pow(pred_clamped, 1 - p) / (1 - p)
+             term3 = torch.pow(pred_clamped, 2 - p) / (2 - p)
+         return torch.nan_to_num(  # Tweedie negative log-likelihood (constant terms dropped)
+             2 * (term1 - term2 + term3),
+             nan=eps,
+             posinf=max_clip,
+             neginf=-max_clip
+         )
+
+     @staticmethod
+     def free_cuda() -> None:
+         print(">>> Moving all models to CPU...")
+         for obj in gc.get_objects():
+             try:
+                 if hasattr(obj, "to") and callable(obj.to):
+                     obj.to("cpu")
+             except Exception:
+                 pass
+
+         print(">>> Deleting tensors, optimizers, dataloaders...")
+         gc.collect()
+
+         print(">>> Emptying CUDA cache...")
+         torch.cuda.empty_cache()
+         torch.cuda.synchronize()
+
+         print(">>> CUDA memory freed.")
+
+
+ class DistributedUtils:
+     @staticmethod
+     def setup_ddp():
+         """Initialize DDP process group."""
+         if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
+             rank = int(os.environ["RANK"])
+             world_size = int(os.environ["WORLD_SIZE"])
+             local_rank = int(os.environ["LOCAL_RANK"])
+
+             if torch.cuda.is_available():
+                 torch.cuda.set_device(local_rank)
+
+             dist.init_process_group(backend="nccl", init_method="env://")
+             print(
+                 f">>> DDP Initialized: Rank {rank}/{world_size}, Local Rank {local_rank}")
+             return True, local_rank, rank, world_size
+         else:
+             print(
+                 f">>> DDP Setup Failed: RANK or WORLD_SIZE not found in env. Keys found: {list(os.environ.keys())}")
+             return False, 0, 0, 1
+
+     @staticmethod
+     def cleanup_ddp():
+         """Destroy DDP process group."""
+         if dist.is_initialized():
+             dist.destroy_process_group()
+
+     @staticmethod
+     def is_main_process():
+         return not dist.is_initialized() or dist.get_rank() == 0
+
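`setup_ddp` only reads what a launcher has already exported; a typical invocation (a sketch, assuming a hypothetical `train.py` entry script) is `torchrun --nproc_per_node=2 train.py`, after which each worker process sees `RANK`, `WORLD_SIZE`, `LOCAL_RANK` plus `MASTER_ADDR`/`MASTER_PORT`, and the helpers wire DDP up or fall back to single-process mode:

    # Inside each worker started by torchrun:
    is_ddp, local_rank, rank, world_size = DistributedUtils.setup_ddp()
    try:
        pass  # build model / DataLoader with DistributedSampler / train
    finally:
        DistributedUtils.cleanup_ddp()  # safe even if DDP never initialized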
+
+ class PlotUtils:
+     # Plotting helpers shared across model types.
+
+     @staticmethod
+     def split_data(data: pd.DataFrame, col_nme: str, wgt_nme: str, n_bins: int = 10) -> pd.DataFrame:
+         data_sorted = data.sort_values(by=col_nme, ascending=True).copy()
+         data_sorted['cum_weight'] = data_sorted[wgt_nme].cumsum()
+         w_sum = data_sorted[wgt_nme].sum()
+         if w_sum <= EPS:
+             data_sorted.loc[:, 'bins'] = 0
+         else:
+             data_sorted.loc[:, 'bins'] = np.floor(
+                 data_sorted['cum_weight'] * float(n_bins) / w_sum
+             )
+             data_sorted.loc[(data_sorted['bins'] == n_bins),
+                             'bins'] = n_bins - 1
+         return data_sorted.groupby(['bins'], observed=True).sum(numeric_only=True)
+
+     @staticmethod
+     def plot_lift_ax(ax, plot_data, title, pred_label='Predicted', act_label='Actual', weight_label='Earned Exposure'):
+         ax.plot(plot_data.index, plot_data['act_v'],
+                 label=act_label, color='red')
+         ax.plot(plot_data.index, plot_data['exp_v'],
+                 label=pred_label, color='blue')
+         ax.set_title(title, fontsize=8)
+         ax.set_xticks(plot_data.index)
+         ax.set_xticklabels(plot_data.index, rotation=90, fontsize=6)
+         ax.tick_params(axis='y', labelsize=6)
+         ax.legend(loc='upper left', fontsize=5, frameon=False)
+         ax.margins(0.05)
+         ax2 = ax.twinx()
+         ax2.bar(plot_data.index, plot_data['weight'],
+                 alpha=0.5, color='seagreen',
+                 label=weight_label)
+         ax2.tick_params(axis='y', labelsize=6)
+         ax2.legend(loc='upper right', fontsize=5, frameon=False)
+
+     @staticmethod
+     def plot_dlift_ax(ax, plot_data, title, label1, label2, act_label='Actual', weight_label='Earned Exposure'):
+         ax.plot(plot_data.index, plot_data['act_v'],
+                 label=act_label, color='red')
+         ax.plot(plot_data.index, plot_data['exp_v1'],
+                 label=label1, color='blue')
+         ax.plot(plot_data.index, plot_data['exp_v2'],
+                 label=label2, color='black')
+         ax.set_title(title, fontsize=8)
+         ax.set_xticks(plot_data.index)
+         ax.set_xticklabels(plot_data.index, rotation=90, fontsize=6)
+         ax.set_xlabel(f'{label1} / {label2}', fontsize=6)
+         ax.tick_params(axis='y', labelsize=6)
+         ax.legend(loc='upper left', fontsize=5, frameon=False)
+         ax.margins(0.1)
+         ax2 = ax.twinx()
+         ax2.bar(plot_data.index, plot_data['weight'],
+                 alpha=0.5, color='seagreen',
+                 label=weight_label)
+         ax2.tick_params(axis='y', labelsize=6)
+         ax2.legend(loc='upper right', fontsize=5, frameon=False)
+
+     @staticmethod
+     def plot_lift_list(pred_model, w_pred_list, w_act_list,
+                        weight_list, tgt_nme, n_bins: int = 10,
+                        fig_nme: str = 'Lift Chart'):
+         lift_data = pd.DataFrame()
+         lift_data.loc[:, 'pred'] = pred_model
+         lift_data.loc[:, 'w_pred'] = w_pred_list
+         lift_data.loc[:, 'act'] = w_act_list
+         lift_data.loc[:, 'weight'] = weight_list
+         plot_data = PlotUtils.split_data(lift_data, 'pred', 'weight', n_bins)
+         plot_data['exp_v'] = plot_data['w_pred'] / plot_data['weight']
+         plot_data['act_v'] = plot_data['act'] / plot_data['weight']
+         plot_data.reset_index(inplace=True)
+
+         fig = plt.figure(figsize=(7, 5))
+         ax = fig.add_subplot(111)
+         PlotUtils.plot_lift_ax(ax, plot_data, f'Lift Chart of {tgt_nme}')
+         plt.subplots_adjust(wspace=0.3)
+
+         save_path = os.path.join(
+             os.getcwd(), 'plot', f'05_{tgt_nme}_{fig_nme}.png')
+         IOUtils.ensure_parent_dir(save_path)
+         plt.savefig(save_path, dpi=300)
+         plt.close(fig)
+
+     @staticmethod
+     def plot_dlift_list(pred_model_1, pred_model_2,
+                         model_nme_1, model_nme_2,
+                         tgt_nme,
+                         w_list, w_act_list, n_bins: int = 10,
+                         fig_nme: str = 'Double Lift Chart'):
+         lift_data = pd.DataFrame()
+         lift_data.loc[:, 'pred1'] = pred_model_1
+         lift_data.loc[:, 'pred2'] = pred_model_2
+         lift_data.loc[:, 'diff_ly'] = lift_data['pred1'] / lift_data['pred2']
+         lift_data.loc[:, 'act'] = w_act_list
+         lift_data.loc[:, 'weight'] = w_list
+         lift_data.loc[:, 'w_pred1'] = lift_data['pred1'] * lift_data['weight']
+         lift_data.loc[:, 'w_pred2'] = lift_data['pred2'] * lift_data['weight']
+         plot_data = PlotUtils.split_data(
+             lift_data, 'diff_ly', 'weight', n_bins)
+         plot_data['exp_v1'] = plot_data['w_pred1'] / plot_data['act']
+         plot_data['exp_v2'] = plot_data['w_pred2'] / plot_data['act']
+         plot_data['act_v'] = plot_data['act'] / plot_data['act']
+         plot_data.reset_index(inplace=True)
+
+         fig = plt.figure(figsize=(7, 5))
+         ax = fig.add_subplot(111)
+         PlotUtils.plot_dlift_ax(
+             ax, plot_data, f'Double Lift Chart of {tgt_nme}', model_nme_1, model_nme_2)
+         plt.subplots_adjust(bottom=0.25, top=0.95, right=0.8)
+
+         save_path = os.path.join(
+             os.getcwd(), 'plot', f'06_{tgt_nme}_{fig_nme}.png')
+         IOUtils.ensure_parent_dir(save_path)
+         plt.savefig(save_path, dpi=300)
+         plt.close(fig)
+
+
+ # Backwards-compatible functional wrappers
+ def csv_to_dict(file_path: str) -> List[Dict[str, Any]]:
+     return IOUtils.csv_to_dict(file_path)
+
+
+ def ensure_parent_dir(file_path: str) -> None:
+     IOUtils.ensure_parent_dir(file_path)
+
+
+ def compute_batch_size(data_size: int, learning_rate: float, batch_num: int, minimum: int) -> int:
+     return TrainingUtils.compute_batch_size(data_size, learning_rate, batch_num, minimum)
+
+
+ # Tweedie deviance loss for the PyTorch side
+ # Reference: https://scikit-learn.org/stable/modules/model_evaluation.html#mean-poisson-gamma-and-tweedie-deviances
+ def tweedie_loss(pred, target, p=1.5, eps=1e-6, max_clip=1e6):
+     return TrainingUtils.tweedie_loss(pred, target, p=p, eps=eps, max_clip=max_clip)
+
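A quick cross-check of the deviance branches against scikit-learn's reference implementation (a sketch, not part of the package; tiny eps-level differences come from the internal clamping):

    pred = torch.tensor([1.0, 2.0, 0.5])    # hypothetical predictions
    target = torch.tensor([1.0, 3.0, 0.5])  # strictly positive so power=2 is legal
    for p in (1.0, 1.5, 2.0):
        ours = TrainingUtils.tweedie_loss(pred, target, p=p).mean().item()
        ref = mean_tweedie_deviance(target.numpy(), pred.numpy(), power=p)
        assert abs(ours - ref) < 1e-3  # unweighted mean deviance agrees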
+
+ # Helper that frees CUDA memory
+ def free_cuda():
+     TrainingUtils.free_cuda()
+
+
+ class TorchTrainerMixin:
+     # Shared utilities for Torch tabular trainers.
+
+     def _device_type(self) -> str:
+         return getattr(self, "device", torch.device("cpu")).type
+
+     def _build_dataloader(self,
+                           dataset,
+                           N: int,
+                           base_bs_gpu: tuple,
+                           base_bs_cpu: tuple,
+                           min_bs: int = 64,
+                           target_effective_cuda: int = 8192,
+                           target_effective_cpu: int = 4096,
+                           large_threshold: int = 200_000,
+                           mid_threshold: int = 50_000):
+         batch_size = TrainingUtils.compute_batch_size(
+             data_size=len(dataset),
+             learning_rate=self.learning_rate,
+             batch_num=self.batch_num,
+             minimum=min_bs
+         )
+         gpu_large, gpu_mid, gpu_small = base_bs_gpu
+         cpu_mid, cpu_small = base_bs_cpu
+
+         if self._device_type() == 'cuda':
+             device_count = torch.cuda.device_count()
+             # With multiple GPUs, raise the minimum batch size so every card gets enough data
+             if device_count > 1:
+                 min_bs = min_bs * device_count
+                 print(
+                     f">>> Multi-GPU detected: {device_count} devices. Adjusted min_bs to {min_bs}.")
+
+             if N > large_threshold:
+                 base_bs = gpu_large * device_count
+             elif N > mid_threshold:
+                 base_bs = gpu_mid * device_count
+             else:
+                 base_bs = gpu_small * device_count
+         else:
+             base_bs = cpu_mid if N > mid_threshold else cpu_small
+
+         # Recompute batch_size so it is no smaller than the adjusted min_bs
+         batch_size = TrainingUtils.compute_batch_size(
+             data_size=len(dataset),
+             learning_rate=self.learning_rate,
+             batch_num=self.batch_num,
+             minimum=min_bs
+         )
+         batch_size = min(batch_size, base_bs, N)
+
+         target_effective_bs = target_effective_cuda if self._device_type(
+         ) == 'cuda' else target_effective_cpu
+         accum_steps = max(1, target_effective_bs // batch_size)
+
+         print(
+             f">>> DataLoader config: Batch Size={batch_size}, Accum Steps={accum_steps}, Workers={min(8, os.cpu_count() or 1)}")
+
+         # Linux (posix) forks workers cheaply; Windows (nt) spawns them, which costs more.
+         if os.name == 'nt':
+             workers = 0
+         else:
+             workers = min(8, os.cpu_count() or 1)
+
+         sampler = None
+         if dist.is_initialized():
+             sampler = DistributedSampler(dataset, shuffle=True)
+             shuffle = False  # Sampler handles shuffling
+         else:
+             shuffle = True
+
+         dataloader = DataLoader(
+             dataset,
+             batch_size=batch_size,
+             shuffle=shuffle,
+             sampler=sampler,
+             num_workers=workers,
+             pin_memory=(self._device_type() == 'cuda'),
+             persistent_workers=workers > 0,
+         )
+         return dataloader, accum_steps
+
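The accumulation arithmetic, in isolation (hypothetical sizes): with a 2048-row micro-batch on CUDA and the default target of 8192, the optimizer steps once every four batches, so the effective batch equals the target:

    batch_size = 2048                          # chosen by compute_batch_size and the caps above
    accum_steps = max(1, 8192 // batch_size)   # -> 4
    effective_bs = accum_steps * batch_size    # -> 8192 rows per optimizer step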
+     def _compute_weighted_loss(self, y_pred, y_true, weights, apply_softplus: bool = False):
+         task = getattr(self, "task_type", "regression")
+         if task == 'classification':
+             loss_fn = nn.BCEWithLogitsLoss(reduction='none')
+             losses = loss_fn(y_pred, y_true).view(-1)
+         else:
+             if apply_softplus:
+                 y_pred = F.softplus(y_pred)
+             y_pred = torch.clamp(y_pred, min=1e-6)
+             power = getattr(self, "tw_power", 1.5)
+             losses = tweedie_loss(y_pred, y_true, p=power).view(-1)
+         weighted_loss = (losses * weights.view(-1)).sum() / \
+             torch.clamp(weights.sum(), min=EPS)
+         return weighted_loss
+
+     def _early_stop_update(self, val_loss, best_loss, best_state, patience_counter, model):
+         if val_loss < best_loss:
+             return val_loss, copy.deepcopy(model.state_dict()), 0, False
+         patience_counter += 1
+         should_stop = best_state is not None and patience_counter >= getattr(
+             self, "patience", 0)
+         return best_loss, best_state, patience_counter, should_stop
+
+     def _train_model(self,
+                      model,
+                      dataloader,
+                      accum_steps,
+                      optimizer,
+                      scaler,
+                      forward_fn,
+                      val_forward_fn=None,
+                      apply_softplus: bool = False,
+                      clip_fn=None,
+                      trial: Optional[optuna.trial.Trial] = None):
+         device_type = self._device_type()
+         best_loss = float('inf')
+         best_state = None
+         patience_counter = 0
+         stop_training = False
+
+         for epoch in range(1, getattr(self, "epochs", 1) + 1):
+             if hasattr(self, 'dataloader_sampler') and self.dataloader_sampler is not None:
+                 self.dataloader_sampler.set_epoch(epoch)
+
+             model.train()
+             optimizer.zero_grad()
+
+             for step, batch in enumerate(dataloader):
+                 with autocast(enabled=(device_type == 'cuda')):
+                     y_pred, y_true, w = forward_fn(batch)
+                     weighted_loss = self._compute_weighted_loss(
+                         y_pred, y_true, w, apply_softplus=apply_softplus)
+                     loss_for_backward = weighted_loss / accum_steps
+
+                 scaler.scale(loss_for_backward).backward()
+
+                 if ((step + 1) % accum_steps == 0) or ((step + 1) == len(dataloader)):
+                     if clip_fn is not None:
+                         clip_fn()
+                     scaler.step(optimizer)
+                     scaler.update()
+                     optimizer.zero_grad()
+
+             if val_forward_fn is not None:
+                 model.eval()
+                 with torch.no_grad(), autocast(enabled=(device_type == 'cuda')):
+                     val_result = val_forward_fn()
+                     if isinstance(val_result, tuple) and len(val_result) == 3:
+                         y_val_pred, y_val_true, w_val = val_result
+                         val_weighted_loss = self._compute_weighted_loss(
+                             y_val_pred, y_val_true, w_val, apply_softplus=apply_softplus)
+                     else:
+                         val_weighted_loss = val_result
+
+                 best_loss, best_state, patience_counter, stop_training = self._early_stop_update(
+                     val_weighted_loss, best_loss, best_state, patience_counter, model)
+
+                 # Optuna pruning: stop the trial early when the metric is worse than history
+                 if trial is not None:
+                     trial.report(val_weighted_loss, epoch)
+                     if trial.should_prune():
+                         raise optuna.TrialPruned()
+
+                 if stop_training:
+                     break
+
+         return best_state
+
+
+ # =============================================================================
+ # Plotting helpers
+ # =============================================================================
+
+ def split_data(data, col_nme, wgt_nme, n_bins=10):
+     return PlotUtils.split_data(data, col_nme, wgt_nme, n_bins)
+
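A small worked example of the weighted binning (hypothetical numbers): rows are sorted by the score column, and each row lands in bin floor(cum_weight * n_bins / total_weight), with the final edge folded back into the last bin:

    df = pd.DataFrame({'pred': [0.1, 0.4, 0.2, 0.9, 0.5],
                       'weight': [10.0, 20.0, 30.0, 25.0, 15.0]})
    binned = split_data(df, 'pred', 'weight', n_bins=2)
    # Sorted weights cumulate to 10, 40, 60, 75, 100 -> bins 0, 0, 1, 1, 1,
    # so the two bins carry 40 and 60 units of weight respectively.
    print(binned['weight'].tolist())  # [40.0, 60.0]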
+ # Lift-chart plotting function
+
+
+ def plot_lift_list(pred_model, w_pred_list, w_act_list,
+                    weight_list, tgt_nme, n_bins=10,
+                    fig_nme='Lift Chart'):
+     return PlotUtils.plot_lift_list(pred_model, w_pred_list, w_act_list,
+                                     weight_list, tgt_nme, n_bins, fig_nme)
+
+ # Double-lift-chart plotting function
+
+
+ def plot_dlift_list(pred_model_1, pred_model_2,
+                     model_nme_1, model_nme_2,
+                     tgt_nme,
+                     w_list, w_act_list, n_bins=10,
+                     fig_nme='Double Lift Chart'):
+     return PlotUtils.plot_dlift_list(pred_model_1, pred_model_2,
+                                      model_nme_1, model_nme_2,
+                                      tgt_nme, w_list, w_act_list,
+                                      n_bins, fig_nme)
+
+
+ # =============================================================================
+ # ResNet model and sklearn-style wrapper
+ # =============================================================================
+
+ # ResNet model definition
+ # Residual block: two linear layers + ReLU + a skip connection
+ # ResBlock subclasses nn.Module
+ class ResBlock(nn.Module):
+     def __init__(self, dim: int, dropout: float = 0.1,
+                  use_layernorm: bool = False, residual_scale: float = 0.1
+                  ):
+         super().__init__()
+         self.use_layernorm = use_layernorm
+
+         if use_layernorm:
+             Norm = nn.LayerNorm  # normalizes over the last dimension
+         else:
+             def Norm(d): return nn.BatchNorm1d(d)  # kept as a switch for trying BatchNorm
+
+         self.norm1 = Norm(dim)
+         self.fc1 = nn.Linear(dim, dim, bias=True)
+         self.act = nn.ReLU(inplace=True)
+         self.dropout = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
+         self.norm2 = Norm(dim)
+         self.fc2 = nn.Linear(dim, dim, bias=True)
+
+         # Residual scaling keeps the trunk from blowing up early in training
+         self.res_scale = nn.Parameter(
+             torch.tensor(residual_scale, dtype=torch.float32)
+         )
+
+     def forward(self, x):
+         # Pre-activation layout
+         out = self.norm1(x)
+         out = self.fc1(out)
+         out = self.act(out)
+         out = self.dropout(out)
+         out = self.norm2(out)
+         out = self.fc2(out)
+         # Scale the residual branch, then add
+         return F.relu(x + self.res_scale * out)
+
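A shape-preserving smoke test for the block (a sketch with arbitrary sizes):

    blk = ResBlock(dim=32, dropout=0.0, use_layernorm=True)
    x = torch.randn(8, 32)
    assert blk(x).shape == (8, 32)  # residual blocks keep (batch, dim)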
+ # ResNetSequential subclasses nn.Module and defines the full network
+
+
+ class ResNetSequential(nn.Module):
+     # Input tensor shape: (batch, input_dim)
+     # Architecture: linear + norm + ReLU, then a stack of residual blocks, with a Softplus output
+
+     def __init__(self, input_dim: int, hidden_dim: int = 64, block_num: int = 2,
+                  use_layernorm: bool = True, dropout: float = 0.1,
+                  residual_scale: float = 0.1, task_type: str = 'regression'):
+         super(ResNetSequential, self).__init__()
+
+         self.net = nn.Sequential()
+         self.net.add_module('fc1', nn.Linear(input_dim, hidden_dim))
+
+         if use_layernorm:
+             self.net.add_module('norm1', nn.LayerNorm(hidden_dim))
+         else:
+             self.net.add_module('norm1', nn.BatchNorm1d(hidden_dim))
+
+         self.net.add_module('relu1', nn.ReLU(inplace=True))
+
+         # Stack of residual blocks
+         for i in range(block_num):
+             self.net.add_module(
+                 f'ResBlk_{i+1}',
+                 ResBlock(
+                     hidden_dim,
+                     dropout=dropout,
+                     use_layernorm=use_layernorm,
+                     residual_scale=residual_scale)
+             )
+
+         self.net.add_module('fc_out', nn.Linear(hidden_dim, 1))
+
+         if task_type == 'classification':
+             self.net.add_module('softplus', nn.Identity())
+         else:
+             self.net.add_module('softplus', nn.Softplus())
+
+     def forward(self, x):
+         if self.training and not hasattr(self, '_printed_device'):
+             print(f">>> ResNetSequential executing on device: {x.device}")
+             self._printed_device = True
+         return self.net(x)
+
+ # Scikit-learn-style interface for the ResNet model
+
+
+ class ResNetSklearn(TorchTrainerMixin, nn.Module):
+     def __init__(self, model_nme: str, input_dim: int, hidden_dim: int = 64,
+                  block_num: int = 2, batch_num: int = 100, epochs: int = 100,
+                  task_type: str = 'regression',
+                  tweedie_power: float = 1.5, learning_rate: float = 0.01, patience: int = 10,
+                  use_layernorm: bool = True, dropout: float = 0.1,
+                  residual_scale: float = 0.1,
+                  use_data_parallel: bool = True,
+                  use_ddp: bool = False):
+         super(ResNetSklearn, self).__init__()
+
+         self.use_ddp = use_ddp
+         self.is_ddp_enabled, self.local_rank, self.rank, self.world_size = (
+             False, 0, 0, 1)
+
+         if self.use_ddp:
+             self.is_ddp_enabled, self.local_rank, self.rank, self.world_size = DistributedUtils.setup_ddp()
+
+         self.input_dim = input_dim
+         self.hidden_dim = hidden_dim
+         self.block_num = block_num
+         self.batch_num = batch_num
+         self.epochs = epochs
+         self.task_type = task_type
+         self.model_nme = model_nme
+         self.learning_rate = learning_rate
+         self.patience = patience
+         self.use_layernorm = use_layernorm
+         self.dropout = dropout
+         self.residual_scale = residual_scale
+
+         # Device priority: cuda > mps > cpu
+         if self.is_ddp_enabled:
+             self.device = torch.device(f'cuda:{self.local_rank}')
+         elif torch.cuda.is_available():
+             self.device = torch.device('cuda')
+         elif torch.backends.mps.is_available():
+             self.device = torch.device('mps')
+         else:
+             self.device = torch.device('cpu')
+
+         # Tweedie power selection (unused for classification)
+         if self.task_type == 'classification':
+             self.tw_power = None
+         elif 'f' in self.model_nme:
+             self.tw_power = 1
+         elif 's' in self.model_nme:
+             self.tw_power = 2
+         else:
+             self.tw_power = tweedie_power
+
+         # Build the network (on CPU first)
+         core = ResNetSequential(
+             self.input_dim,
+             self.hidden_dim,
+             self.block_num,
+             use_layernorm=self.use_layernorm,
+             dropout=self.dropout,
+             residual_scale=self.residual_scale,
+             task_type=self.task_type
+         )
+
+         # ===== Multi-GPU support: DataParallel vs DistributedDataParallel =====
+         if self.is_ddp_enabled:
+             core = core.to(self.device)
+             core = DDP(core, device_ids=[
+                        self.local_rank], output_device=self.local_rank)
+         elif use_data_parallel and (self.device.type == 'cuda') and (torch.cuda.device_count() > 1):
+             core = nn.DataParallel(core, device_ids=list(
+                 range(torch.cuda.device_count())))
+             # DataParallel scatters inputs across the GPUs, but the master device stays cuda:0
+             self.device = torch.device('cuda')
+
+         self.resnet = core.to(self.device)
+
+     # ================ Internal helpers ================
+     def _build_train_val_tensors(self, X_train, y_train, w_train, X_val, y_val, w_val):
+         X_tensor = torch.tensor(X_train.values, dtype=torch.float32)
+         y_tensor = torch.tensor(
+             y_train.values, dtype=torch.float32).view(-1, 1)
+         w_tensor = torch.tensor(w_train.values, dtype=torch.float32).view(
+             -1, 1) if w_train is not None else torch.ones_like(y_tensor)
+
+         has_val = X_val is not None and y_val is not None
+         if has_val:
+             X_val_tensor = torch.tensor(X_val.values, dtype=torch.float32)
+             y_val_tensor = torch.tensor(
+                 y_val.values, dtype=torch.float32).view(-1, 1)
+             w_val_tensor = torch.tensor(w_val.values, dtype=torch.float32).view(
+                 -1, 1) if w_val is not None else torch.ones_like(y_val_tensor)
+         else:
+             X_val_tensor = y_val_tensor = w_val_tensor = None
+         return X_tensor, y_tensor, w_tensor, X_val_tensor, y_val_tensor, w_val_tensor, has_val
+
+     def forward(self, x):
+         # Accept NumPy input (e.g. from SHAP)
+         if isinstance(x, np.ndarray):
+             x_tensor = torch.tensor(x, dtype=torch.float32)
+         else:
+             x_tensor = x
+
+         x_tensor = x_tensor.to(self.device)
+         y_pred = self.resnet(x_tensor)
+         return y_pred
+
+     # ---------------- Training ----------------
+
+     def fit(self, X_train, y_train, w_train=None,
+             X_val=None, y_val=None, w_val=None, trial=None):
+
+         X_tensor, y_tensor, w_tensor, X_val_tensor, y_val_tensor, w_val_tensor, has_val = \
+             self._build_train_val_tensors(
+                 X_train, y_train, w_train, X_val, y_val, w_val)
+
+         dataset = TensorDataset(X_tensor, y_tensor, w_tensor)
+         dataloader, accum_steps = self._build_dataloader(
+             dataset,
+             N=X_tensor.shape[0],
+             base_bs_gpu=(65536, 32768, 16384),
+             base_bs_cpu=(1024, 512),
+             min_bs=64,
+             target_effective_cuda=8192,
+             target_effective_cpu=4096
+         )
+
+         # Record the sampler so its epoch is set before each epoch, keeping shuffling random
+         if self.is_ddp_enabled and hasattr(dataloader.sampler, 'set_epoch'):
+             self.dataloader_sampler = dataloader.sampler
+         else:
+             self.dataloader_sampler = None
+
+         # === Optimizer and AMP ===
+         self.optimizer = torch.optim.Adam(
+             self.resnet.parameters(), lr=self.learning_rate)
+         self.scaler = GradScaler(enabled=(self.device.type == 'cuda'))
+
+         X_val_dev = y_val_dev = w_val_dev = None
+         val_dataloader = None
+         if has_val:
+             # Build the validation DataLoader
+             val_dataset = TensorDataset(
+                 X_val_tensor, y_val_tensor, w_val_tensor)
+             # Validation does no backprop, so a larger batch raises throughput
+             val_bs = accum_steps * dataloader.batch_size
+
+             # Validation workers follow the same allocation logic
+             if os.name == 'nt':
+                 val_workers = 0
+             else:
+                 val_workers = min(4, os.cpu_count() or 1)
+
+             val_dataloader = DataLoader(
+                 val_dataset,
+                 batch_size=val_bs,
+                 shuffle=False,
+                 num_workers=val_workers,
+                 pin_memory=(self.device.type == 'cuda'),
+                 persistent_workers=val_workers > 0,
+             )
+             # Validation usually needs no DDP sampler, since results are checked or
+             # aggregated on the main process; for simplicity this stays
+             # single-GPU / main-process validation
+
+         is_data_parallel = isinstance(self.resnet, nn.DataParallel)
+
+         def forward_fn(batch):
+             X_batch, y_batch, w_batch = batch
+
+             if not is_data_parallel:
+                 X_batch = X_batch.to(self.device, non_blocking=True)
+             # Targets and weights always live on the master device for the loss computation
+             y_batch = y_batch.to(self.device, non_blocking=True)
+             w_batch = w_batch.to(self.device, non_blocking=True)
+
+             y_pred = self.resnet(X_batch)
+             return y_pred, y_batch, w_batch
+
+         def val_forward_fn():
+             total_loss = 0.0
+             total_weight = 0.0
+             for batch in val_dataloader:
+                 X_b, y_b, w_b = batch
+                 if not is_data_parallel:
+                     X_b = X_b.to(self.device, non_blocking=True)
+                 y_b = y_b.to(self.device, non_blocking=True)
+                 w_b = w_b.to(self.device, non_blocking=True)
+
+                 y_pred = self.resnet(X_b)
+
+                 # Compute each batch's weighted loss by hand so totals can be summed exactly
+                 task = getattr(self, "task_type", "regression")
+                 if task == 'classification':
+                     loss_fn = nn.BCEWithLogitsLoss(reduction='none')
+                     losses = loss_fn(y_pred, y_b).view(-1)
+                 else:
+                     # No extra softplus here: training runs with apply_softplus=False
+                     # because the forward pass already yields positive outputs
+                     y_pred_clamped = torch.clamp(y_pred, min=1e-6)
+                     power = getattr(self, "tw_power", 1.5)
+                     losses = tweedie_loss(
+                         y_pred_clamped, y_b, p=power).view(-1)
+
+                 batch_weight_sum = torch.clamp(w_b.sum(), min=EPS)
+                 batch_weighted_loss_sum = (losses * w_b.view(-1)).sum()
+
+                 total_loss += batch_weighted_loss_sum.item()
+                 total_weight += batch_weight_sum.item()
+
+             return total_loss / max(total_weight, EPS)
+
+         clip_fn = None
+         if self.device.type == 'cuda':
+             def clip_fn(): return (self.scaler.unscale_(self.optimizer),
+                                    clip_grad_norm_(self.resnet.parameters(), max_norm=1.0))
+
+         # Under DDP, only the main process logs and saves the model
+         if self.is_ddp_enabled and not DistributedUtils.is_main_process():
+             # Non-main processes skip printing in the validation callback
+             # (controlled inside _train_model; omitted here)
+             pass
+
+         best_state = self._train_model(
+             self.resnet,
+             dataloader,
+             accum_steps,
+             self.optimizer,
+             self.scaler,
+             forward_fn,
+             val_forward_fn if has_val else None,
+             apply_softplus=False,
+             clip_fn=clip_fn,
+             trial=trial
+         )
+
+         if has_val and best_state is not None:
+             self.resnet.load_state_dict(best_state)
+
+     # ---------------- Prediction ----------------
+
+     def predict(self, X_test):
+         self.resnet.eval()
+         if isinstance(X_test, pd.DataFrame):
+             X_np = X_test.values.astype(np.float32)
+         else:
+             X_np = X_test
+
+         with torch.no_grad():
+             y_pred = self(X_np).cpu().numpy()
+
+         if self.task_type == 'classification':
+             y_pred = 1 / (1 + np.exp(-y_pred))  # sigmoid turns logits into probabilities
+         else:
+             y_pred = np.clip(y_pred, 1e-6, None)
+         return y_pred.flatten()
+
+     # ---------------- Parameter setting ----------------
+
+     def set_params(self, params):
+         for key, value in params.items():
+             if hasattr(self, key):
+                 setattr(self, key, value)
+             else:
+                 raise ValueError(f"Parameter {key} not found in model.")
+         return self
+
+
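A minimal end-to-end sketch of the wrapper on synthetic data (all names and sizes hypothetical; the 's' in the model name selects the gamma power per the rule in __init__):

    X = pd.DataFrame(np.random.rand(256, 12).astype(np.float32))
    y = pd.Series(np.random.gamma(2.0, 1.0, size=256))
    w = pd.Series(np.ones(256))
    model = ResNetSklearn('demo_s', input_dim=12, hidden_dim=32,
                          block_num=2, epochs=5, use_data_parallel=False)
    model.fit(X, y, w)        # no validation set: runs all epochs, keeps final weights
    y_hat = model.predict(X)  # positive predictions, shape (256,)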
+ # =============================================================================
+ # FT-Transformer model and sklearn-style wrapper
+ # =============================================================================
+ # FT-Transformer model definition
+
+
+ class FeatureTokenizer(nn.Module):
+     # Maps numeric and categorical features into tokens of shape (batch, token_num, d_model)
+     # Conventions:
+     # - X_num: numeric features, shape=(batch, num_numeric)
+     # - X_cat: categorical features, shape=(batch, num_categorical); each column is an integer label in [0, card-1]
+
+     def __init__(self, num_numeric: int, cat_cardinalities, d_model: int):
+         super().__init__()
+
+         self.num_numeric = num_numeric
+         self.has_numeric = num_numeric > 0
+
+         if self.has_numeric:
+             self.num_linear = nn.Linear(num_numeric, d_model)
+
+         self.embeddings = nn.ModuleList([
+             nn.Embedding(card, d_model) for card in cat_cardinalities
+         ])
+
+     def forward(self, X_num, X_cat):
+         tokens = []
+
+         if self.has_numeric:
+             # All numeric features are mapped into a single token
+             # shape = (batch, d_model)
+             num_token = self.num_linear(X_num)
+             tokens.append(num_token)
+
+         # Each categorical feature yields one embedding token
+         for i, emb in enumerate(self.embeddings):
+             # shape = (batch, d_model)
+             tok = emb(X_cat[:, i])
+             tokens.append(tok)
+
+         # Stacked result: (batch, token_num, d_model)
+         x = torch.stack(tokens, dim=1)
+         return x
+
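The token count follows directly from the inputs: one token for the numeric block (when num_numeric > 0) plus one per categorical column. For example (hypothetical cardinalities):

    tok = FeatureTokenizer(num_numeric=3, cat_cardinalities=[5, 12], d_model=16)
    X_num = torch.randn(4, 3)
    X_cat = torch.randint(0, 5, (4, 2))  # codes must stay below each column's cardinality
    assert tok(X_num, X_cat).shape == (4, 3, 16)  # 1 numeric token + 2 embedding tokens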
+ # Encoder layer with residual scaling
+
+
+ class ScaledTransformerEncoderLayer(nn.Module):
+     def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048,
+                  dropout: float = 0.1, residual_scale_attn: float = 1.0,
+                  residual_scale_ffn: float = 1.0, norm_first: bool = True,
+                  ):
+         super().__init__()
+         self.self_attn = nn.MultiheadAttention(
+             embed_dim=d_model,
+             num_heads=nhead,
+             dropout=dropout,
+             batch_first=True
+         )
+
+         # Feed-forward block
+         self.linear1 = nn.Linear(d_model, dim_feedforward)
+         self.dropout = nn.Dropout(dropout)
+         self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+         # Normalization and dropout
+         self.norm1 = nn.LayerNorm(d_model)
+         self.norm2 = nn.LayerNorm(d_model)
+         self.dropout1 = nn.Dropout(dropout)
+         self.dropout2 = nn.Dropout(dropout)
+
+         self.activation = nn.GELU()
+         # self.activation = nn.ReLU()
+         self.norm_first = norm_first
+
+         # Residual scaling coefficients
+         self.res_scale_attn = residual_scale_attn
+         self.res_scale_ffn = residual_scale_ffn
+
+     def forward(self, src, src_mask=None, src_key_padding_mask=None):
+         # Input tensor shape: (batch, seq_len, d_model)
+         x = src
+
+         if self.norm_first:
+             # Pre-norm: normalize, then attend
+             x = x + self._sa_block(self.norm1(x), src_mask,
+                                    src_key_padding_mask)
+             x = x + self._ff_block(self.norm2(x))
+         else:
+             # Post-norm (normally disabled)
+             x = self.norm1(
+                 x + self._sa_block(x, src_mask, src_key_padding_mask))
+             x = self.norm2(x + self._ff_block(x))
+
+         return x
+
+     def _sa_block(self, x, attn_mask, key_padding_mask):
+         # Self-attention with residual scaling
+         attn_out, _ = self.self_attn(
+             x, x, x,
+             attn_mask=attn_mask,
+             key_padding_mask=key_padding_mask,
+             need_weights=False
+         )
+         return self.res_scale_attn * self.dropout1(attn_out)
+
+     def _ff_block(self, x):
+         # Feed-forward with residual scaling
+         x2 = self.linear2(self.dropout(self.activation(self.linear1(x))))
+         return self.res_scale_ffn * self.dropout2(x2)
+
+ # FT-Transformer core model
+
+
+ class FTTransformerCore(nn.Module):
+     # A minimal workable FT-Transformer in three parts:
+     # 1) FeatureTokenizer: turns numeric/categorical features into tokens;
+     # 2) TransformerEncoder: models interactions between features;
+     # 3) pooling + MLP + Softplus: positive outputs suited to Tweedie/Gamma tasks.
+
+     def __init__(self, num_numeric: int, cat_cardinalities, d_model: int = 64,
+                  n_heads: int = 8, n_layers: int = 4, dropout: float = 0.1,
+                  task_type: str = 'regression'
+                  ):
+         super().__init__()
+
+         self.tokenizer = FeatureTokenizer(
+             num_numeric=num_numeric,
+             cat_cardinalities=cat_cardinalities,
+             d_model=d_model
+         )
+         scale = 1.0 / math.sqrt(n_layers)  # a sensible default
+         encoder_layer = ScaledTransformerEncoderLayer(
+             d_model=d_model,
+             nhead=n_heads,
+             dim_feedforward=d_model * 4,
+             dropout=dropout,
+             residual_scale_attn=scale,
+             residual_scale_ffn=scale,
+             norm_first=True,
+         )
+         self.encoder = nn.TransformerEncoder(
+             encoder_layer,
+             num_layers=n_layers
+         )
+         self.n_layers = n_layers
+
+         layers = [
+             nn.LayerNorm(d_model),
+             nn.Linear(d_model, d_model),
+             nn.GELU(),
+             nn.Linear(d_model, 1),
+         ]
+
+         if task_type == 'classification':
+             # Classification emits logits, which pairs with BCEWithLogitsLoss
+             layers.append(nn.Identity())
+         else:
+             # Regression output must stay positive to suit Tweedie/Gamma
+             layers.append(nn.Softplus())
+
+         self.head = nn.Sequential(*layers)
+
+     def forward(self, X_num, X_cat):
+
+         # Inputs:
+         #   X_num -> float32 tensor of shape (batch, n_numeric)
+         #   X_cat -> long tensor of shape (batch, n_categorical)
+
+         if self.training and not hasattr(self, '_printed_device'):
+             print(f">>> FTTransformerCore executing on device: {X_num.device}")
+             self._printed_device = True
+
+         tokens = self.tokenizer(X_num, X_cat)  # => (batch, token_num, d_model)
+         x = self.encoder(tokens)  # => (batch, token_num, d_model)
+
+         # Mean-pool the tokens, then apply the head
+         x = x.mean(dim=1)  # => (batch, d_model)
+
+         out = self.head(x)  # => (batch, 1), positive via Softplus
+         return out
+
+ # TabularDataset definition
+
+
+ class TabularDataset(Dataset):
+     def __init__(self, X_num, X_cat, y, w):
+
+         # Input tensors:
+         #   X_num: torch.float32, shape=(N, n_numeric)
+         #   X_cat: torch.long,    shape=(N, n_categorical)
+         #   y:     torch.float32, shape=(N, 1)
+         #   w:     torch.float32, shape=(N, 1)
+
+         self.X_num = X_num
+         self.X_cat = X_cat
+         self.y = y
+         self.w = w
+
+     def __len__(self):
+         return self.y.shape[0]
+
+     def __getitem__(self, idx):
+         return (
+             self.X_num[idx],
+             self.X_cat[idx],
+             self.y[idx],
+             self.w[idx],
+         )
+
+ # Scikit-learn-style interface for the FT-Transformer
+
+
+ class FTTransformerSklearn(TorchTrainerMixin, nn.Module):
+
+     # sklearn-style wrapper:
+     # - num_cols: list of numeric column names
+     # - cat_cols: list of categorical column names (label-encoded beforehand, values in [0, n_classes-1])
+
+     def __init__(self, model_nme: str, num_cols, cat_cols, d_model: int = 64, n_heads: int = 8,
+                  n_layers: int = 4, dropout: float = 0.1, batch_num: int = 100, epochs: int = 100,
+                  task_type: str = 'regression',
+                  tweedie_power: float = 1.5, learning_rate: float = 1e-3, patience: int = 10,
+                  use_data_parallel: bool = True,
+                  use_ddp: bool = False
+                  ):
+         super().__init__()
+
+         self.use_ddp = use_ddp
+         self.is_ddp_enabled, self.local_rank, self.rank, self.world_size = (
+             False, 0, 0, 1)
+         if self.use_ddp:
+             self.is_ddp_enabled, self.local_rank, self.rank, self.world_size = DistributedUtils.setup_ddp()
+
+         self.model_nme = model_nme
+         self.num_cols = list(num_cols)
+         self.cat_cols = list(cat_cols)
+         self.d_model = d_model
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.dropout = dropout
+         self.batch_num = batch_num
+         self.epochs = epochs
+         self.learning_rate = learning_rate
+         self.task_type = task_type
+         self.patience = patience
+         if self.task_type == 'classification':
+             self.tw_power = None  # Tweedie power unused for classification
+         elif 'f' in self.model_nme:
+             self.tw_power = 1.0
+         elif 's' in self.model_nme:
+             self.tw_power = 2.0
+         else:
+             self.tw_power = tweedie_power
+
+         if self.is_ddp_enabled:
+             self.device = torch.device(f"cuda:{self.local_rank}")
+         elif torch.cuda.is_available():
+             self.device = torch.device("cuda")
+         elif torch.backends.mps.is_available():
+             self.device = torch.device("mps")
+         else:
+             self.device = torch.device("cpu")
+         self.cat_cardinalities = None
+         self.cat_categories = {}
+         self.ft = None
+         self.use_data_parallel = torch.cuda.device_count() > 1 and use_data_parallel
+
+     def _build_model(self, X_train):
+         num_numeric = len(self.num_cols)
+         cat_cardinalities = []
+
+         for col in self.cat_cols:
+             cats = X_train[col].astype('category')
+             categories = cats.cat.categories
+             self.cat_categories[col] = categories  # remember the training-set category universe
+
+             card = len(categories) + 1  # reserve one extra class for unknown/missing
+             cat_cardinalities.append(card)
+
+         self.cat_cardinalities = cat_cardinalities
+
+         core = FTTransformerCore(
+             num_numeric=num_numeric,
+             cat_cardinalities=cat_cardinalities,
+             d_model=self.d_model,
+             n_heads=self.n_heads,
+             n_layers=self.n_layers,
+             dropout=self.dropout,
+             task_type=self.task_type
+         )
+         if self.is_ddp_enabled:
+             core = core.to(self.device)
+             core = DDP(core, device_ids=[
+                        self.local_rank], output_device=self.local_rank)
+         elif self.use_data_parallel:
+             core = nn.DataParallel(core, device_ids=list(
+                 range(torch.cuda.device_count())))
+             self.device = torch.device("cuda")
+         self.ft = core.to(self.device)
+
+     def _encode_cats(self, X):
+         # The input DataFrame must contain every categorical column
+         # Returns an int64 array of shape (N, n_categorical)
+
+         if not self.cat_cols:
+             return np.zeros((len(X), 0), dtype='int64')
+
+         X_cat_list = []
+         for col in self.cat_cols:
+             # Use the category universe recorded during training
+             categories = self.cat_categories[col]
+             # Build a Categorical over the fixed categories
+             cats = pd.Categorical(X[col], categories=categories)
+             codes = cats.codes.astype('int64', copy=True)  # -1 marks unknown or missing
+             # Map unknown/missing to the extra "unknown" index len(categories)
+             codes[codes < 0] = len(categories)
+             X_cat_list.append(codes)
+
+         X_cat_np = np.stack(X_cat_list, axis=1)  # shape (N, n_categorical)
+         return X_cat_np
+
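Unseen or missing labels at scoring time fall into the reserved extra slot. A sketch (the fitted category state is set by hand here, as `_build_model` would normally do):

    enc = FTTransformerSklearn('demo', num_cols=[], cat_cols=['region'])
    enc.cat_categories['region'] = pd.Index(['N', 'S'])
    codes = enc._encode_cats(pd.DataFrame({'region': ['S', 'W', None]}))
    print(codes.ravel())  # [1 2 2]: 'W' and the missing value both map to len(categories) == 2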
+     def _build_train_tensors(self, X_train, y_train, w_train):
+         return self._tensorize_split(X_train, y_train, w_train)
+
+     def _build_val_tensors(self, X_val, y_val, w_val):
+         return self._tensorize_split(X_val, y_val, w_val, allow_none=True)
+
+     def _tensorize_split(self, X, y, w, allow_none: bool = False):
+         if X is None:
+             if allow_none:
+                 return None, None, None, None, False
+             raise ValueError("Input features X must not be None.")
+
+         X_num = torch.tensor(
+             X[self.num_cols].to_numpy(dtype=np.float32, copy=True),
+             dtype=torch.float32
+         )
+         if self.cat_cols:
+             X_cat = torch.tensor(self._encode_cats(X), dtype=torch.long)
+         else:
+             X_cat = torch.zeros((X_num.shape[0], 0), dtype=torch.long)
+
+         y_tensor = torch.tensor(
+             y.values, dtype=torch.float32).view(-1, 1) if y is not None else None
+         if y_tensor is None:
+             w_tensor = None
+         elif w is not None:
+             w_tensor = torch.tensor(
+                 w.values, dtype=torch.float32).view(-1, 1)
+         else:
+             w_tensor = torch.ones_like(y_tensor)
+         return X_num, X_cat, y_tensor, w_tensor, y is not None
+
+     def fit(self, X_train, y_train, w_train=None,
+             X_val=None, y_val=None, w_val=None, trial=None):
+
+         # Build the underlying model on first fit
+         if self.ft is None:
+             self._build_model(X_train)
+
+         X_num_train, X_cat_train, y_tensor, w_tensor, _ = self._build_train_tensors(
+             X_train, y_train, w_train)
+         X_num_val, X_cat_val, y_val_tensor, w_val_tensor, has_val = self._build_val_tensors(
+             X_val, y_val, w_val)
+
+         # --- Build the DataLoader ---
+         dataset = TabularDataset(
+             X_num_train, X_cat_train, y_tensor, w_tensor
+         )
+
+         dataloader, accum_steps = self._build_dataloader(
+             dataset,
+             N=X_num_train.shape[0],
+             base_bs_gpu=(65536, 32768, 16384),
+             base_bs_cpu=(256, 128),
+             min_bs=64,
+             target_effective_cuda=4096,
+             target_effective_cpu=2048
+         )
+
+         if self.is_ddp_enabled and hasattr(dataloader.sampler, 'set_epoch'):
+             self.dataloader_sampler = dataloader.sampler
+         else:
+             self.dataloader_sampler = None
+
+         optimizer = torch.optim.Adam(
+             self.ft.parameters(), lr=self.learning_rate)
+         scaler = GradScaler(enabled=(self.device.type == 'cuda'))
+
+         X_num_val_dev = X_cat_val_dev = y_val_dev = w_val_dev = None
+         val_dataloader = None
+         if has_val:
+             val_dataset = TabularDataset(
+                 X_num_val, X_cat_val, y_val_tensor, w_val_tensor
+             )
+             val_bs = accum_steps * dataloader.batch_size
+
+             if os.name == 'nt':
+                 val_workers = 0
+             else:
+                 val_workers = min(4, os.cpu_count() or 1)
+
+             val_dataloader = DataLoader(
+                 val_dataset,
+                 batch_size=val_bs,
+                 shuffle=False,
+                 num_workers=val_workers,
+                 pin_memory=(self.device.type == 'cuda'),
+                 persistent_workers=val_workers > 0,
+             )
+
+         is_data_parallel = isinstance(self.ft, nn.DataParallel)
+
+         def forward_fn(batch):
+             X_num_b, X_cat_b, y_b, w_b = batch
+
+             if not is_data_parallel:
+                 X_num_b = X_num_b.to(self.device, non_blocking=True)
+                 X_cat_b = X_cat_b.to(self.device, non_blocking=True)
+             y_b = y_b.to(self.device, non_blocking=True)
+             w_b = w_b.to(self.device, non_blocking=True)
+
+             y_pred = self.ft(X_num_b, X_cat_b)
+             return y_pred, y_b, w_b
+
+         def val_forward_fn():
+             total_loss = 0.0
+             total_weight = 0.0
+             for batch in val_dataloader:
+                 X_num_b, X_cat_b, y_b, w_b = batch
+                 if not is_data_parallel:
+                     X_num_b = X_num_b.to(self.device, non_blocking=True)
+                     X_cat_b = X_cat_b.to(self.device, non_blocking=True)
+                 y_b = y_b.to(self.device, non_blocking=True)
+                 w_b = w_b.to(self.device, non_blocking=True)
+
+                 y_pred = self.ft(X_num_b, X_cat_b)
+
+                 # Compute the validation loss by hand
+                 task = getattr(self, "task_type", "regression")
+                 if task == 'classification':
+                     loss_fn = nn.BCEWithLogitsLoss(reduction='none')
+                     losses = loss_fn(y_pred, y_b).view(-1)
+                 else:
+                     # The model output already went through Softplus; do not reapply it
+                     y_pred_clamped = torch.clamp(y_pred, min=1e-6)
+                     power = getattr(self, "tw_power", 1.5)
+                     losses = tweedie_loss(
+                         y_pred_clamped, y_b, p=power).view(-1)
+
+                 batch_weight_sum = torch.clamp(w_b.sum(), min=EPS)
+                 batch_weighted_loss_sum = (losses * w_b.view(-1)).sum()
+
+                 total_loss += batch_weighted_loss_sum.item()
+                 total_weight += batch_weight_sum.item()
+
+             return total_loss / max(total_weight, EPS)
+
+         clip_fn = None
+         if self.device.type == 'cuda':
+             def clip_fn(): return (scaler.unscale_(optimizer),
+                                    clip_grad_norm_(self.ft.parameters(), max_norm=1.0))
+
+         best_state = self._train_model(
+             self.ft,
+             dataloader,
+             accum_steps,
+             optimizer,
+             scaler,
+             forward_fn,
+             val_forward_fn if has_val else None,
+             apply_softplus=False,
+             clip_fn=clip_fn,
+             trial=trial
+         )
+
+         if has_val and best_state is not None:
+             self.ft.load_state_dict(best_state)
+
+     def predict(self, X_test):
+         # X_test must contain all numeric and categorical columns
+
+         self.ft.eval()
+         X_num, X_cat, _, _, _ = self._tensorize_split(
+             X_test, None, None, allow_none=True)
+
+         with torch.no_grad():
+             X_num = X_num.to(self.device, non_blocking=True)
+             X_cat = X_cat.to(self.device, non_blocking=True)
+             y_pred = self.ft(X_num, X_cat).cpu().numpy()
+
+         if self.task_type == 'classification':
+             # Convert logits to probabilities
+             y_pred = 1 / (1 + np.exp(-y_pred))
+         else:
+             # The model already applies softplus; optional log-exp smoothing: y_pred = log(1 + exp(y_pred))
+             y_pred = np.clip(y_pred, 1e-6, None)
+         return y_pred.ravel()
+
+     def set_params(self, params: dict):
+
+         # Keeps to the sklearn convention.
+         # Note: structural parameters (e.g. d_model / n_heads) only take effect after refitting.
+
+         for key, value in params.items():
+             if hasattr(self, key):
+                 setattr(self, key, value)
+             else:
+                 raise ValueError(f"Parameter {key} not found in model.")
+         return self
+
+
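A minimal fit/predict sketch for the FT wrapper on synthetic data (hypothetical names; one numeric and one categorical column):

    df = pd.DataFrame({
        'age': np.random.rand(512).astype(np.float32),
        'region': np.random.choice(['N', 'S', 'E'], size=512),
        'loss': np.random.gamma(1.2, 0.8, size=512),
    })
    ft = FTTransformerSklearn('demo', num_cols=['age'], cat_cols=['region'],
                              d_model=16, n_heads=4, n_layers=2, epochs=3)
    ft.fit(df[['age', 'region']], df['loss'])
    pred = ft.predict(df[['age', 'region']])  # positive, shape (512,)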
+ # ===== Core building blocks and training wrappers ===========================
+
+ # =============================================================================
+ # Configuration, preprocessing, and trainer base classes
+ # =============================================================================
+ @dataclass
+ class BayesOptConfig:
+     model_nme: str
+     resp_nme: str
+     weight_nme: str
+     factor_nmes: List[str]
+     task_type: str = 'regression'
+     binary_resp_nme: Optional[str] = None
+     cate_list: Optional[List[str]] = None
+     prop_test: float = 0.25
+     rand_seed: Optional[int] = None
+     epochs: int = 100
+     use_gpu: bool = True
+     use_resn_data_parallel: bool = True
+     use_ft_data_parallel: bool = True
+     use_resn_ddp: bool = False
+     use_ft_ddp: bool = False
+
+
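A sketch of a filled-in config (column names hypothetical):

    config = BayesOptConfig(
        model_nme='demo_s',
        resp_nme='severity',
        weight_nme='exposure',
        factor_nmes=['age', 'region', 'veh_power'],
        cate_list=['region'],
        epochs=50,
    )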
1382
+ class OutputManager:
1383
+ # 统一管理结果、图表与模型的输出路径
1384
+
1385
+ def __init__(self, root: Optional[str] = None, model_name: str = "model") -> None:
1386
+ self.root = Path(root or os.getcwd())
1387
+ self.model_name = model_name
1388
+ self.plot_dir = self.root / 'plot'
1389
+ self.result_dir = self.root / 'Results'
1390
+ self.model_dir = self.root / 'model'
1391
+
1392
+ def _prepare(self, path: Path) -> str:
1393
+ ensure_parent_dir(str(path))
1394
+ return str(path)
1395
+
1396
+ def plot_path(self, filename: str) -> str:
1397
+ return self._prepare(self.plot_dir / filename)
1398
+
1399
+ def result_path(self, filename: str) -> str:
1400
+ return self._prepare(self.result_dir / filename)
1401
+
1402
+ def model_path(self, filename: str) -> str:
1403
+ return self._prepare(self.model_dir / filename)
1404
+
1405
+
1406
+ class DatasetPreprocessor:
1407
+ # 为各训练器准备通用的训练/测试数据视图
1408
+
1409
+ def __init__(self, train_df: pd.DataFrame, test_df: pd.DataFrame,
1410
+ config: BayesOptConfig) -> None:
1411
+ self.config = config
1412
+ self.train_data = train_df.copy(deep=True)
1413
+ self.test_data = test_df.copy(deep=True)
1414
+ self.num_features: List[str] = []
1415
+ self.train_oht_scl_data: Optional[pd.DataFrame] = None
1416
+ self.test_oht_scl_data: Optional[pd.DataFrame] = None
1417
+ self.var_nmes: List[str] = []
1418
+ self.cat_categories_for_shap: Dict[str, List[Any]] = {}
1419
+
1420
+ def run(self) -> "DatasetPreprocessor":
1421
+ cfg = self.config
1422
+ # Precompute weighted actuals; downstream plotting and validation depend on this column.
1423
+ self.train_data.loc[:, 'w_act'] = self.train_data[cfg.resp_nme] * \
1424
+ self.train_data[cfg.weight_nme]
1425
+ self.test_data.loc[:, 'w_act'] = self.test_data[cfg.resp_nme] * \
1426
+ self.test_data[cfg.weight_nme]
1427
+ if cfg.binary_resp_nme:
1428
+ self.train_data.loc[:, 'w_binary_act'] = self.train_data[cfg.binary_resp_nme] * \
1429
+ self.train_data[cfg.weight_nme]
1430
+ self.test_data.loc[:, 'w_binary_act'] = self.test_data[cfg.binary_resp_nme] * \
1431
+ self.test_data[cfg.weight_nme]
1432
+ # High-quantile clipping absorbs outliers; removing it lets extreme points dominate the loss.
+ q999 = self.train_data[cfg.resp_nme].quantile(0.999)
+ self.train_data[cfg.resp_nme] = self.train_data[cfg.resp_nme].clip(
+ upper=q999)
1436
+ cate_list = list(cfg.cate_list or [])
1437
+ if cate_list:
1438
+ for cate in cate_list:
1439
+ self.train_data[cate] = self.train_data[cate].astype(
1440
+ 'category')
1441
+ self.test_data[cate] = self.test_data[cate].astype('category')
1442
+ cats = self.train_data[cate].cat.categories
1443
+ self.cat_categories_for_shap[cate] = list(cats)
1444
+ self.num_features = [
1445
+ nme for nme in cfg.factor_nmes if nme not in cate_list]
1446
+ train_oht = self.train_data[cfg.factor_nmes +
1447
+ [cfg.weight_nme] + [cfg.resp_nme]].copy()
1448
+ test_oht = self.test_data[cfg.factor_nmes +
1449
+ [cfg.weight_nme] + [cfg.resp_nme]].copy()
1450
+ train_oht = pd.get_dummies(
1451
+ train_oht,
1452
+ columns=cate_list,
1453
+ drop_first=True,
1454
+ dtype=np.int8
1455
+ )
1456
+ test_oht = pd.get_dummies(
1457
+ test_oht,
1458
+ columns=cate_list,
1459
+ drop_first=True,
1460
+ dtype=np.int8
1461
+ )
1462
+ for num_chr in self.num_features:
1463
+ # Standardize column by column so every feature is on the same scale; otherwise the networks converge poorly.
1464
+ scaler = StandardScaler()
1465
+ train_oht[num_chr] = scaler.fit_transform(
1466
+ train_oht[num_chr].values.reshape(-1, 1))
1467
+ test_oht[num_chr] = scaler.transform(
1468
+ test_oht[num_chr].values.reshape(-1, 1))
1469
+ # Reindex fills missing dummy columns with zeros so test columns match the training set.
1470
+ test_oht = test_oht.reindex(columns=train_oht.columns, fill_value=0)
1471
+ self.train_oht_scl_data = train_oht
1472
+ self.test_oht_scl_data = test_oht
1473
+ # Keep the original column order (a raw set difference would make feature order nondeterministic).
+ self.var_nmes = [
+ c for c in train_oht.columns
+ if c not in (cfg.weight_nme, cfg.resp_nme)
+ ]
1476
+ return self
1477
+
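+ # A toy sketch of the dummy-alignment trick used in run() (data invented here):
+ #
+ #     train = pd.get_dummies(pd.DataFrame({'c': ['a', 'b']}), columns=['c'], drop_first=True)
+ #     test = pd.get_dummies(pd.DataFrame({'c': ['a']}), columns=['c'], drop_first=True)
+ #     test = test.reindex(columns=train.columns, fill_value=0)  # restores the missing 'c_b' as zeros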
1478
+ # =============================================================================
1479
+ # Trainer hierarchy
1480
+ # =============================================================================
1481
+
1482
+
1483
+ class TrainerBase:
1484
+ def __init__(self, context: "BayesOptModel", label: str, model_name_prefix: str) -> None:
1485
+ self.ctx = context
1486
+ self.label = label
1487
+ self.model_name_prefix = model_name_prefix
1488
+ self.model = None
1489
+ self.best_params: Optional[Dict[str, Any]] = None
1490
+ self.best_trial = None
1491
+
1492
+ @property
1493
+ def config(self) -> BayesOptConfig:
1494
+ return self.ctx.config
1495
+
1496
+ @property
1497
+ def output(self) -> OutputManager:
1498
+ return self.ctx.output_manager
1499
+
1500
+ def _get_model_filename(self) -> str:
1501
+ ext = 'pkl' if self.label in ['Xgboost', 'GLM'] else 'pth'
1502
+ return f'01_{self.ctx.model_nme}_{self.model_name_prefix}.{ext}'
1503
+
1504
+ def tune(self, max_evals: int, objective_fn=None) -> None:
1505
+ # Generic Optuna tuning loop.
1506
+ if objective_fn is None:
1507
+ # If the subclass supplies no objective_fn, default to cross_val as the objective.
1508
+ objective_fn = self.cross_val
1509
+
1510
+ def objective_wrapper(trial: optuna.trial.Trial) -> float:
1511
+ try:
1512
+ result = objective_fn(trial)
1513
+ finally:
1514
+ self._clean_gpu()
1515
+ return result
1516
+
1517
+ study = optuna.create_study(
1518
+ direction='minimize',
1519
+ sampler=optuna.samplers.TPESampler(seed=self.ctx.rand_seed)
1520
+ )
1521
+ study.optimize(objective_wrapper, n_trials=max_evals)
1522
+ self.best_params = study.best_params
1523
+ self.best_trial = study.best_trial
1524
+
1525
+ # Persist the best parameters as CSV for easy reproduction.
1526
+ params_path = self.output.result_path(
1527
+ f'{self.ctx.model_nme}_bestparams_{self.label.lower()}.csv'
1528
+ )
1529
+ pd.DataFrame(self.best_params, index=[0]).to_csv(params_path)
1530
+
1531
+ def train(self) -> None:
1532
+ raise NotImplementedError
1533
+
1534
+ def save(self) -> None:
1535
+ if self.model is None:
1536
+ print(f"[save] Warning: No model to save for {self.label}")
1537
+ return
1538
+
1539
+ path = self.output.model_path(self._get_model_filename())
1540
+ if self.label in ['Xgboost', 'GLM']:
1541
+ joblib.dump(self.model, path)
1542
+ else:
1543
+ # A Torch model can be saved as a bare state_dict or serialized whole.
+ # Kept for backward compatibility: ResNetTrainer saves a state_dict, FTTrainer saves the full object.
1545
+ if hasattr(self.model, 'resnet'): # ResNetSklearn
1546
+ torch.save(self.model.resnet.state_dict(), path)
1547
+ else: # FTTransformerSklearn or others
1548
+ torch.save(self.model, path)
1549
+
1550
+ def load(self) -> None:
1551
+ path = self.output.model_path(self._get_model_filename())
1552
+ if not os.path.exists(path):
1553
+ print(f"[load] Warning: Model file not found: {path}")
1554
+ return
1555
+
1556
+ if self.label in ['Xgboost', 'GLM']:
1557
+ self.model = joblib.load(path)
1558
+ else:
1559
+ # Loading a Torch model depends on how its architecture was saved.
1560
+ if self.label == 'ResNet' or self.label == 'ResNetClassifier':
1561
+ # ResNet must be rebuilt from scratch with structural params from ctx, so the subclass handles it.
1562
+ pass
1563
+ else:
1564
+ # The FT-Transformer was serialized whole; load it directly and move it to the target device.
1565
+ loaded = torch.load(path, map_location='cpu')
1566
+ self._move_to_device(loaded)
1567
+ self.model = loaded
1568
+
1569
+ def _move_to_device(self, model_obj):
1570
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
1571
+ if hasattr(model_obj, 'device'):
1572
+ model_obj.device = device
1573
+ if hasattr(model_obj, 'to'):
1574
+ model_obj.to(device)
1575
+ # If the object carries ft/resnet submodules, move those to the device as well.
1576
+ if hasattr(model_obj, 'ft'):
1577
+ model_obj.ft.to(device)
1578
+ if hasattr(model_obj, 'resnet'):
1579
+ model_obj.resnet.to(device)
1580
+
1581
+ def _clean_gpu(self):
1582
+ gc.collect()
1583
+ if torch.cuda.is_available():
1584
+ torch.cuda.empty_cache()
1585
+
1586
+ # Prediction + caching logic
1587
+ def _predict_and_cache(self,
1588
+ model,
1589
+ pred_prefix: str,
1590
+ use_oht: bool = False,
1591
+ design_fn=None) -> None:
1592
+ if design_fn:
1593
+ X_train = design_fn(train=True)
1594
+ X_test = design_fn(train=False)
1595
+ elif use_oht:
1596
+ X_train = self.ctx.train_oht_scl_data[self.ctx.var_nmes]
1597
+ X_test = self.ctx.test_oht_scl_data[self.ctx.var_nmes]
1598
+ else:
1599
+ X_train = self.ctx.train_data[self.ctx.factor_nmes]
1600
+ X_test = self.ctx.test_data[self.ctx.factor_nmes]
1601
+
1602
+ preds_train = model.predict(X_train)
1603
+ preds_test = model.predict(X_test)
1604
+
1605
+ self.ctx.train_data[f'pred_{pred_prefix}'] = preds_train
1606
+ self.ctx.test_data[f'pred_{pred_prefix}'] = preds_test
1607
+ self.ctx.train_data[f'w_pred_{pred_prefix}'] = (
1608
+ self.ctx.train_data[f'pred_{pred_prefix}'] *
1609
+ self.ctx.train_data[self.ctx.weight_nme]
1610
+ )
1611
+ self.ctx.test_data[f'w_pred_{pred_prefix}'] = (
1612
+ self.ctx.test_data[f'pred_{pred_prefix}'] *
1613
+ self.ctx.test_data[self.ctx.weight_nme]
1614
+ )
1615
+
1616
+ def _fit_predict_cache(self,
1617
+ model,
1618
+ X_train,
1619
+ y_train,
1620
+ sample_weight,
1621
+ pred_prefix: str,
1622
+ use_oht: bool = False,
1623
+ design_fn=None,
1624
+ fit_kwargs: Optional[Dict[str, Any]] = None,
1625
+ sample_weight_arg: Optional[str] = 'sample_weight') -> None:
1626
+ fit_kwargs = fit_kwargs.copy() if fit_kwargs else {}
1627
+ if sample_weight is not None and sample_weight_arg:
1628
+ fit_kwargs.setdefault(sample_weight_arg, sample_weight)
1629
+ model.fit(X_train, y_train, **fit_kwargs)
1630
+ self.ctx.model_label.append(self.label)
1631
+ self._predict_and_cache(
1632
+ model, pred_prefix, use_oht=use_oht, design_fn=design_fn)
1633
+
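+ # Typical trainer lifecycle, as a sketch (`bo` stands for a BayesOptModel instance):
+ #
+ #     trainer = bo.trainers['xgb']
+ #     trainer.tune(max_evals=100)  # Optuna search; writes *_bestparams_xgboost.csv
+ #     trainer.train()              # refit on the full training data with the best params
+ #     trainer.save()               # joblib for Xgboost/GLM, torch.save for the nets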
1634
+
1635
+ class XGBTrainer(TrainerBase):
1636
+ def __init__(self, context: "BayesOptModel") -> None:
1637
+ super().__init__(context, 'Xgboost', 'Xgboost')
1638
+ self.model: Optional[xgb.XGBRegressor] = None
1639
+
1640
+ def _build_estimator(self) -> xgb.XGBRegressor:
1641
+ params = dict(
1642
+ objective=self.ctx.obj,
1643
+ random_state=self.ctx.rand_seed,
1644
+ subsample=0.9,
1645
+ tree_method='gpu_hist' if self.ctx.use_gpu else 'hist',
1646
+ enable_categorical=True,
1647
+ predictor='gpu_predictor' if self.ctx.use_gpu else 'cpu_predictor'
1648
+ )
1649
+ if self.ctx.use_gpu:
1650
+ params['gpu_id'] = 0
1651
+ print(f">>> XGBoost using GPU ID: 0 (Single GPU Mode)")
1652
+ return xgb.XGBRegressor(**params)
1653
+
1654
+ def cross_val(self, trial: optuna.trial.Trial) -> float:
1655
+ learning_rate = trial.suggest_float(
1656
+ 'learning_rate', 1e-5, 1e-1, log=True)
1657
+ gamma = trial.suggest_float('gamma', 0, 10000)
1658
+ max_depth = trial.suggest_int('max_depth', 3, 25)
1659
+ n_estimators = trial.suggest_int('n_estimators', 10, 500, step=10)
1660
+ min_child_weight = trial.suggest_int(
1661
+ 'min_child_weight', 100, 10000, step=100)
1662
+ reg_alpha = trial.suggest_float('reg_alpha', 1e-10, 1, log=True)
1663
+ reg_lambda = trial.suggest_float('reg_lambda', 1e-10, 1, log=True)
1664
+ if self.ctx.obj == 'reg:tweedie':
1665
+ tweedie_variance_power = trial.suggest_float(
1666
+ 'tweedie_variance_power', 1, 2)
1667
+ elif self.ctx.obj == 'count:poisson':
1668
+ tweedie_variance_power = 1
1669
+ elif self.ctx.obj == 'reg:gamma':
1670
+ tweedie_variance_power = 2
1671
+ else:
1672
+ tweedie_variance_power = 1.5
1673
+ clf = self._build_estimator()
1674
+ params = {
1675
+ 'learning_rate': learning_rate,
1676
+ 'gamma': gamma,
1677
+ 'max_depth': max_depth,
1678
+ 'n_estimators': n_estimators,
1679
+ 'min_child_weight': min_child_weight,
1680
+ 'reg_alpha': reg_alpha,
1681
+ 'reg_lambda': reg_lambda
1682
+ }
1683
+ if self.ctx.obj == 'reg:tweedie':
1684
+ params['tweedie_variance_power'] = tweedie_variance_power
1685
+ clf.set_params(**params)
1686
+ n_jobs = 1 if self.ctx.use_gpu else int(1 / self.ctx.prop_test)
1687
+ acc = cross_val_score(
1688
+ clf,
1689
+ self.ctx.train_data[self.ctx.factor_nmes],
1690
+ self.ctx.train_data[self.ctx.resp_nme].values,
1691
+ fit_params=self.ctx.fit_params,
1692
+ cv=self.ctx.cv,
1693
+ scoring=make_scorer(
1694
+ mean_tweedie_deviance,
1695
+ power=tweedie_variance_power,
1696
+ greater_is_better=False),
1697
+ error_score='raise',
1698
+ n_jobs=n_jobs
1699
+ ).mean()
1700
+ return -acc
1701
+
1702
+ def train(self) -> None:
1703
+ if not self.best_params:
1704
+ raise RuntimeError('Run tune() first to obtain the best XGBoost parameters.')
1705
+ self.model = self._build_estimator()
1706
+ self.model.set_params(**self.best_params)
1707
+ self._fit_predict_cache(
1708
+ self.model,
1709
+ self.ctx.train_data[self.ctx.factor_nmes],
1710
+ self.ctx.train_data[self.ctx.resp_nme].values,
1711
+ sample_weight=None,
1712
+ pred_prefix='xgb',
1713
+ fit_kwargs=self.ctx.fit_params,
1714
+ sample_weight_arg=None # sample weights are already passed through fit_kwargs
1715
+ )
1716
+ self.ctx.xgb_best = self.model
1717
+
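+ # Sign-convention sketch for the scorer above: with greater_is_better=False,
+ # make_scorer negates the metric, so cross_val_score(...).mean() is the negative
+ # mean Tweedie deviance and `-acc` is the positive deviance that Optuna minimizes:
+ #
+ #     scorer = make_scorer(mean_tweedie_deviance, power=1.5, greater_is_better=False)
+ #     scorer(est, X, y) == -mean_tweedie_deviance(y, est.predict(X), power=1.5)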
1718
+
1719
+ class GLMTrainer(TrainerBase):
1720
+ def __init__(self, context: "BayesOptModel") -> None:
1721
+ super().__init__(context, 'GLM', 'GLM')
1722
+ self.model = None
1723
+
1724
+ def _select_family(self, tweedie_power: Optional[float] = None):
1725
+ if self.ctx.task_type == 'classification':
1726
+ return sm.families.Binomial()
1727
+ if self.ctx.obj == 'count:poisson':
1728
+ return sm.families.Poisson()
1729
+ if self.ctx.obj == 'reg:gamma':
1730
+ return sm.families.Gamma()
1731
+ power = tweedie_power if tweedie_power is not None else 1.5
1732
+ return sm.families.Tweedie(var_power=power, link=sm.families.links.log())
1733
+
1734
+ def _prepare_design(self, data: pd.DataFrame) -> pd.DataFrame:
1735
+ # Add an intercept column to the statsmodels design matrix.
1736
+ X = data[self.ctx.var_nmes]
1737
+ return sm.add_constant(X, has_constant='add')
1738
+
1739
+ def _metric_power(self, family, tweedie_power: Optional[float]) -> float:
1740
+ if isinstance(family, sm.families.Poisson):
1741
+ return 1.0
1742
+ if isinstance(family, sm.families.Gamma):
1743
+ return 2.0
1744
+ if isinstance(family, sm.families.Tweedie):
1745
+ return tweedie_power if tweedie_power is not None else getattr(family, 'var_power', 1.5)
1746
+ return 1.5
1747
+
1748
+ def cross_val(self, trial: optuna.trial.Trial) -> float:
1749
+ alpha = trial.suggest_float('alpha', 1e-6, 1e2, log=True)
1750
+ l1_ratio = trial.suggest_float('l1_ratio', 0.0, 1.0)
1751
+ tweedie_power = None
1752
+ if self.ctx.task_type == 'regression' and self.ctx.obj == 'reg:tweedie':
1753
+ tweedie_power = trial.suggest_float('tweedie_power', 1.01, 1.99)
1754
+
1755
+ X_all = self._prepare_design(self.ctx.train_oht_scl_data)
1756
+ y_all = self.ctx.train_oht_scl_data[self.ctx.resp_nme]
1757
+ w_all = self.ctx.train_oht_scl_data[self.ctx.weight_nme]
1758
+
1759
+ scores = []
1760
+ for train_idx, val_idx in self.ctx.cv.split(X_all):
1761
+ X_train, X_val = X_all.iloc[train_idx], X_all.iloc[val_idx]
1762
+ y_train, y_val = y_all.iloc[train_idx], y_all.iloc[val_idx]
1763
+ w_train, w_val = w_all.iloc[train_idx], w_all.iloc[val_idx]
1764
+
1765
+ family = self._select_family(tweedie_power)
1766
+ glm = sm.GLM(y_train, X_train, family=family,
1767
+ freq_weights=w_train)
1768
+ result = glm.fit_regularized(
1769
+ alpha=alpha, L1_wt=l1_ratio, maxiter=200)
1770
+
1771
+ y_pred = result.predict(X_val)
1772
+ if self.ctx.task_type == 'classification':
1773
+ y_pred = np.clip(y_pred, EPS, 1 - EPS)
1774
+ fold_score = log_loss(
1775
+ y_val, y_pred, sample_weight=w_val)
1776
+ else:
1777
+ y_pred = np.maximum(y_pred, EPS)
1778
+ fold_score = mean_tweedie_deviance(
1779
+ y_val,
1780
+ y_pred,
1781
+ sample_weight=w_val,
1782
+ power=self._metric_power(family, tweedie_power)
1783
+ )
1784
+ scores.append(fold_score)
1785
+
1786
+ return float(np.mean(scores))
1787
+
1788
+ def train(self) -> None:
1789
+ if not self.best_params:
1790
+ raise RuntimeError('Run tune() first to obtain the best GLM parameters.')
1791
+ tweedie_power = self.best_params.get('tweedie_power')
1792
+ family = self._select_family(tweedie_power)
1793
+
1794
+ X_train = self._prepare_design(self.ctx.train_oht_scl_data)
1795
+ y_train = self.ctx.train_oht_scl_data[self.ctx.resp_nme]
1796
+ w_train = self.ctx.train_oht_scl_data[self.ctx.weight_nme]
1797
+
1798
+ glm = sm.GLM(y_train, X_train, family=family,
1799
+ freq_weights=w_train)
1800
+ self.model = glm.fit_regularized(
1801
+ alpha=self.best_params['alpha'],
1802
+ L1_wt=self.best_params['l1_ratio'],
1803
+ maxiter=300
1804
+ )
1805
+
1806
+ self.ctx.glm_best = self.model
1807
+ self.ctx.model_label += [self.label]
1808
+ self._predict_and_cache(
1809
+ self.model,
1810
+ 'glm',
1811
+ design_fn=lambda train: self._prepare_design(
1812
+ self.ctx.train_oht_scl_data if train else self.ctx.test_oht_scl_data
1813
+ )
1814
+ )
1815
+
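+ # For reference, a sketch of the elastic-net penalty fit_regularized applies
+ # (per the statsmodels docs; stated here as an aid, not package code):
+ #
+ #     objective = -loglike/n + alpha * (L1_wt * |beta|_1 + (1 - L1_wt) * |beta|_2^2 / 2)
+ #
+ # so l1_ratio == L1_wt interpolates between ridge (0.0) and lasso (1.0).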
1816
+
1817
+ class ResNetTrainer(TrainerBase):
1818
+ def __init__(self, context: "BayesOptModel") -> None:
1819
+ if context.task_type == 'classification':
1820
+ super().__init__(context, 'ResNetClassifier', 'ResNet')
1821
+ else:
1822
+ super().__init__(context, 'ResNet', 'ResNet')
1823
+ self.model: Optional[ResNetSklearn] = None
1824
+
1825
+ # ========= Cross-validation (used by BayesOpt) =========
1826
+ def cross_val(self, trial: optuna.trial.Trial) -> float:
1827
+ # Cross-validation flow for ResNet, with GPU memory as the main concern:
+ # - build a fresh ResNetSklearn per fold and release it as soon as the fold ends;
+ # - move the model to CPU afterwards, delete it, then run gc/empty_cache;
+ # - optionally subsample the training set during BayesOpt to lower memory pressure.
1831
+
1832
+ # 1. Hyperparameter space (largely carried over from the earlier setup)
1833
+ learning_rate = trial.suggest_float(
1834
+ 'learning_rate', 1e-6, 1e-2, log=True
1835
+ )
1836
+ # hidden_dim = trial.suggest_int('hidden_dim', 32, 256, step=32) # should not be too large
1837
+ hidden_dim = trial.suggest_int('hidden_dim', 8, 32, step=2)
1838
+ block_num = trial.suggest_int('block_num', 2, 10)
1839
+
1840
+ if self.ctx.task_type == 'regression':
1841
+ if self.ctx.obj == 'reg:tweedie':
1842
+ tw_power = trial.suggest_float('tw_power', 1.0, 2.0)
1843
+ elif self.ctx.obj == 'count:poisson':
1844
+ tw_power = 1.0
1845
+ elif self.ctx.obj == 'reg:gamma':
1846
+ tw_power = 2.0
1847
+ else:
1848
+ tw_power = 1.5
1849
+ else: # classification
1850
+ tw_power = None # Not used
1851
+
1852
+ fold_losses = []
1853
+
1854
+ # 2. (Optional) run the BayesOpt CV on a subsample to cut GPU memory and time
+ data_for_cv = self.ctx.train_oht_scl_data
+ max_rows_for_resnet_bo = min(100000, int(
+ len(data_for_cv)/5)) # shrink to fit the GPU, e.g. 50_000 on an A30
1858
+ if len(data_for_cv) > max_rows_for_resnet_bo:
1859
+ data_for_cv = data_for_cv.sample(
1860
+ max_rows_for_resnet_bo,
1861
+ random_state=self.ctx.rand_seed
1862
+ )
1863
+
1864
+ X_all = data_for_cv[self.ctx.var_nmes]
1865
+ y_all = data_for_cv[self.ctx.resp_nme]
1866
+ w_all = data_for_cv[self.ctx.weight_nme]
1867
+
1868
+ # Use a local ShuffleSplit so indices stay consistent under subsampling.
1869
+ cv_local = ShuffleSplit(
1870
+ n_splits=int(1 / self.ctx.prop_test),
1871
+ test_size=self.ctx.prop_test,
1872
+ random_state=self.ctx.rand_seed
1873
+ )
1874
+
1875
+ # Use hold-out validation instead of K-fold CV for speed:
+ # take a single split only.
1877
+ train_idx, val_idx = next(cv_local.split(X_all))
1878
+
1879
+ X_train_fold = X_all.iloc[train_idx]
1880
+ y_train_fold = y_all.iloc[train_idx]
1881
+ w_train_fold = w_all.iloc[train_idx]
1882
+
1883
+ X_val_fold = X_all.iloc[val_idx]
1884
+ y_val_fold = y_all.iloc[val_idx]
1885
+ w_val_fold = w_all.iloc[val_idx]
1886
+
1887
+ # 3. Build the ResNet model
1888
+ cv_net = ResNetSklearn(
1889
+ model_nme=self.ctx.model_nme,
1890
+ input_dim=X_all.shape[1],
1891
+ hidden_dim=hidden_dim,
1892
+ block_num=block_num,
1893
+ task_type=self.ctx.task_type,
1894
+ epochs=self.ctx.epochs,
1895
+ tweedie_power=tw_power,
1896
+ learning_rate=learning_rate,
1897
+ patience=5,
1898
+ use_layernorm=True,
1899
+ dropout=0.1,
1900
+ residual_scale=0.1,
1901
+ use_data_parallel=self.ctx.config.use_resn_data_parallel,
1902
+ use_ddp=self.ctx.config.use_resn_ddp
1903
+ )
1904
+
1905
+ try:
1906
+ # 4. Train
1907
+ cv_net.fit(
1908
+ X_train_fold,
1909
+ y_train_fold,
1910
+ w_train_fold,
1911
+ X_val_fold,
1912
+ y_val_fold,
1913
+ w_val_fold,
1914
+ trial=trial
1915
+ )
1916
+
1917
+ # 5. Predict on the validation set
1918
+ y_pred_fold = cv_net.predict(X_val_fold)
1919
+
1920
+ # 6. Evaluate with Tweedie deviance (evaluation only; the training loss is untouched)
1921
+ if self.ctx.task_type == 'regression':
1922
+ loss = mean_tweedie_deviance(
1923
+ y_val_fold,
1924
+ y_pred_fold,
1925
+ sample_weight=w_val_fold,
1926
+ power=tw_power
1927
+ )
1928
+ else: # classification
1929
+ from sklearn.metrics import log_loss
1930
+ loss = log_loss(
1931
+ y_val_fold,
1932
+ y_pred_fold,
1933
+ sample_weight=w_val_fold,
1934
+ )
1935
+ fold_losses.append(loss)
1936
+ finally:
1937
+ # 7. Release GPU resources when done
1938
+ try:
1939
+ if hasattr(cv_net, "resnet"):
1940
+ cv_net.resnet.to("cpu")
1941
+ except Exception:
1942
+ pass
1943
+ del cv_net
1944
+ self._clean_gpu()
1945
+
1946
+ return np.mean(fold_losses)
1947
+
1948
+ # ========= Train the final ResNet with the best hyperparameters =========
1949
+ def train(self) -> None:
1950
+ if not self.best_params:
1951
+ raise RuntimeError('Run tune() first to obtain the best ResNet parameters.')
1952
+
1953
+ self.model = ResNetSklearn(
1954
+ model_nme=self.ctx.model_nme,
1955
+ input_dim=self.ctx.train_oht_scl_data[self.ctx.var_nmes].shape[1],
1956
+ task_type=self.ctx.task_type,
1957
+ use_data_parallel=self.ctx.config.use_resn_data_parallel,
1958
+ use_ddp=self.ctx.config.use_resn_ddp
1959
+ )
1960
+ self.model.set_params(self.best_params)
1961
+
1962
+ self._fit_predict_cache(
1963
+ self.model,
1964
+ self.ctx.train_oht_scl_data[self.ctx.var_nmes],
1965
+ self.ctx.train_oht_scl_data[self.ctx.resp_nme],
1966
+ sample_weight=self.ctx.train_oht_scl_data[self.ctx.weight_nme],
1967
+ pred_prefix='resn',
1968
+ use_oht=True,
1969
+ sample_weight_arg='w_train'
1970
+ )
1971
+
1972
+ # Expose the model for external callers
1973
+ self.ctx.resn_best = self.model
1974
+
1975
+ # ========= Save / load =========
+ # ResNet is saved as a state_dict and needs bespoke load logic, so load stays here;
+ # save is already handled in TrainerBase (it checks for the .resnet attribute).
1978
+
1979
+ def load(self) -> None:
1980
+ # Load the ResNet weights from disk onto the current device, consistent with the context.
1981
+ path = self.output.model_path(self._get_model_filename())
1982
+ if os.path.exists(path):
1983
+ resn_loaded = ResNetSklearn(
1984
+ model_nme=self.ctx.model_nme,
1985
+ input_dim=self.ctx.train_oht_scl_data[self.ctx.var_nmes].shape[1],
1986
+ task_type=self.ctx.task_type,
1987
+ use_data_parallel=self.ctx.config.use_resn_data_parallel,
1988
+ use_ddp=self.ctx.config.use_resn_ddp
1989
+ )
1990
+ state_dict = torch.load(path, map_location='cpu')
1991
+ resn_loaded.resnet.load_state_dict(state_dict)
1992
+
1993
+ self._move_to_device(resn_loaded)
1994
+ self.model = resn_loaded
1995
+ self.ctx.resn_best = self.model
1996
+ else:
1997
+ print(f"[ResNetTrainer.load] 未找到模型文件:{path}")
1998
+
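+ # The state_dict round trip used above, as a sketch:
+ #
+ #     torch.save(model.resnet.state_dict(), path)   # TrainerBase.save
+ #     sd = torch.load(path, map_location='cpu')
+ #     fresh.resnet.load_state_dict(sd)              # requires an identical architecture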
1999
+
2000
+ class FTTrainer(TrainerBase):
2001
+ def __init__(self, context: "BayesOptModel") -> None:
2002
+ if context.task_type == 'classification':
2003
+ super().__init__(context, 'FTTransformerClassifier', 'FTTransformer')
2004
+ else:
2005
+ super().__init__(context, 'FTTransformer', 'FTTransformer')
2006
+ self.model: Optional[FTTransformerSklearn] = None
2007
+
2008
+ def cross_val(self, trial: optuna.trial.Trial) -> float:
2009
+ # Cross-validation for the FT-Transformer, again dominated by GPU-memory control:
+ # - shrink the hyperparameter search space to rule out needlessly huge models;
+ # - free GPU memory right after each fold so the next trial can proceed.
+ # The search space below is kept slightly narrow to avoid very large models.
2013
+ learning_rate = trial.suggest_float(
2014
+ 'learning_rate', 1e-5, 5e-4, log=True
2015
+ )
2016
+ d_model = trial.suggest_int('d_model', 32, 256, step=32)
2017
+ # n_heads = trial.suggest_categorical('n_heads', [2, 4]) # widened to avoid underfitting
2018
+ n_heads = trial.suggest_categorical('n_heads', [2, 4, 8])
2019
+ # n_layers = trial.suggest_int('n_layers', 2, 4) # widened to avoid underfitting
2020
+ n_layers = trial.suggest_int('n_layers', 2, 8)
2021
+ dropout = trial.suggest_float('dropout', 0.0, 0.2)
2022
+
2023
+ if self.ctx.task_type == 'regression':
2024
+ if self.ctx.obj == 'reg:tweedie':
2025
+ tw_power = trial.suggest_float('tw_power', 1.0, 2.0)
2026
+ elif self.ctx.obj == 'count:poisson':
2027
+ tw_power = 1.0
2028
+ elif self.ctx.obj == 'reg:gamma':
2029
+ tw_power = 2.0
2030
+ else:
2031
+ tw_power = 1.5
2032
+ else: # classification
2033
+ tw_power = None # Not used
2034
+
2035
+ fold_losses = []
2036
+
2037
+ # Optional: run BO on a subsample only, so large datasets do not overwhelm GPU memory
2038
+ data_for_cv = self.ctx.train_data
2039
+ max_rows_for_ft_bo = min(1000000, int(
2040
+ len(data_for_cv)/2)) # raise or lower depending on available GPU memory
2041
+ if len(data_for_cv) > max_rows_for_ft_bo:
2042
+ data_for_cv = data_for_cv.sample(
2043
+ max_rows_for_ft_bo,
2044
+ random_state=self.ctx.rand_seed
2045
+ )
2046
+
2047
+ # Use a local ShuffleSplit so indices stay consistent under subsampling.
2048
+ cv_local = ShuffleSplit(
2049
+ n_splits=int(1 / self.ctx.prop_test),
2050
+ test_size=self.ctx.prop_test,
2051
+ random_state=self.ctx.rand_seed
2052
+ )
2053
+
2054
+ # Use hold-out validation instead of K-fold CV for speed:
+ # take a single split only.
2056
+ train_idx, val_idx = next(cv_local.split(
2057
+ data_for_cv[self.ctx.factor_nmes]))
2058
+
2059
+ X_train_fold = data_for_cv.iloc[train_idx][self.ctx.factor_nmes]
2060
+ y_train_fold = data_for_cv.iloc[train_idx][self.ctx.resp_nme]
2061
+ w_train_fold = data_for_cv.iloc[train_idx][self.ctx.weight_nme]
2062
+ X_val_fold = data_for_cv.iloc[val_idx][self.ctx.factor_nmes]
2063
+ y_val_fold = data_for_cv.iloc[val_idx][self.ctx.resp_nme]
2064
+ w_val_fold = data_for_cv.iloc[val_idx][self.ctx.weight_nme]
2065
+
2066
+ cv_ft = FTTransformerSklearn(
2067
+ model_nme=self.ctx.model_nme,
2068
+ num_cols=self.ctx.num_features,
2069
+ cat_cols=self.ctx.cate_list,
2070
+ d_model=d_model,
2071
+ n_heads=n_heads,
2072
+ n_layers=n_layers,
2073
+ dropout=dropout,
2074
+ task_type=self.ctx.task_type,
2075
+ # batch_num=batch_num,
2076
+ epochs=self.ctx.epochs,
2077
+ tweedie_power=tw_power,
2078
+ learning_rate=learning_rate,
2079
+ patience=5,
2080
+ use_data_parallel=self.ctx.config.use_ft_data_parallel,
2081
+ use_ddp=self.ctx.config.use_ft_ddp
2082
+ )
2083
+
2084
+ try:
2085
+ cv_ft.fit(
2086
+ X_train_fold, y_train_fold, w_train_fold,
2087
+ X_val_fold, y_val_fold, w_val_fold,
2088
+ trial=trial
2089
+ )
2090
+ y_pred_fold = cv_ft.predict(X_val_fold)
2091
+ if self.ctx.task_type == 'regression':
2092
+ loss = mean_tweedie_deviance(
2093
+ y_val_fold,
2094
+ y_pred_fold,
2095
+ sample_weight=w_val_fold,
2096
+ power=tw_power
2097
+ )
2098
+ else: # classification
2099
+ from sklearn.metrics import log_loss
2100
+ loss = log_loss(
2101
+ y_val_fold,
2102
+ y_pred_fold,
2103
+ sample_weight=w_val_fold,
2104
+ )
2105
+ fold_losses.append(loss)
2106
+ finally:
2107
+ # Free GPU resources immediately afterwards
2108
+ try:
2109
+ # If the model sits on the GPU, move it back to CPU first
2110
+ if hasattr(cv_ft, "ft"):
2111
+ cv_ft.ft.to("cpu")
2112
+ except Exception:
2113
+ pass
2114
+ del cv_ft
2115
+ self._clean_gpu()
2116
+
2117
+ return np.mean(fold_losses)
2118
+
2119
+ def train(self) -> None:
2120
+ if not self.best_params:
2121
+ raise RuntimeError('Run tune() first to obtain the best FT-Transformer parameters.')
2122
+ self.model = FTTransformerSklearn(
2123
+ model_nme=self.ctx.model_nme,
2124
+ num_cols=self.ctx.num_features,
2125
+ cat_cols=self.ctx.cate_list,
2126
+ task_type=self.ctx.task_type,
2127
+ use_data_parallel=self.ctx.config.use_ft_data_parallel,
2128
+ use_ddp=self.ctx.config.use_ft_ddp
2129
+ )
2130
+ self.model.set_params(self.best_params)
2131
+ self._fit_predict_cache(
2132
+ self.model,
2133
+ self.ctx.train_data[self.ctx.factor_nmes],
2134
+ self.ctx.train_data[self.ctx.resp_nme],
2135
+ sample_weight=self.ctx.train_data[self.ctx.weight_nme],
2136
+ pred_prefix='ft',
2137
+ sample_weight_arg='w_train'
2138
+ )
2139
+ self.ctx.ft_best = self.model
2140
+
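+ # The single hold-out split both NN trainers rely on, as a sketch (sizes illustrative):
+ #
+ #     cv = ShuffleSplit(n_splits=4, test_size=0.25, random_state=0)
+ #     train_idx, val_idx = next(cv.split(X))  # only the first of the 4 splits is consumed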
2141
+
2142
+ # =============================================================================
2143
+ # BayesOpt orchestration & SHAP utilities
2144
+ # =============================================================================
2145
+ class BayesOptModel:
2146
+ def __init__(self, train_data, test_data,
2147
+ model_nme, resp_nme, weight_nme, factor_nmes, task_type='regression',
2148
+ binary_resp_nme=None,
2149
+ cate_list=None, prop_test=0.25, rand_seed=None,
2150
+ epochs=100, use_gpu=True,
2151
+ use_resn_data_parallel: bool = False, use_ft_data_parallel: bool = False,
2152
+ use_resn_ddp: bool = False, use_ft_ddp: bool = False):
2153
+ cfg = BayesOptConfig(
2154
+ model_nme=model_nme,
2155
+ task_type=task_type,
2156
+ resp_nme=resp_nme,
2157
+ weight_nme=weight_nme,
2158
+ factor_nmes=list(factor_nmes),
2159
+ binary_resp_nme=binary_resp_nme,
2160
+ cate_list=list(cate_list) if cate_list else None,
2161
+ prop_test=prop_test,
2162
+ rand_seed=rand_seed,
2163
+ epochs=epochs,
2164
+ use_gpu=use_gpu,
2165
+ use_resn_data_parallel=use_resn_data_parallel,
2166
+ use_ft_data_parallel=use_ft_data_parallel,
2167
+ use_resn_ddp=use_resn_ddp,
2168
+ use_ft_ddp=use_ft_ddp
2169
+ )
2170
+ self.config = cfg
2171
+ self.model_nme = cfg.model_nme
2172
+ self.task_type = cfg.task_type
2173
+ self.resp_nme = cfg.resp_nme
2174
+ self.weight_nme = cfg.weight_nme
2175
+ self.factor_nmes = cfg.factor_nmes
2176
+ self.binary_resp_nme = cfg.binary_resp_nme
2177
+ self.cate_list = list(cfg.cate_list or [])
2178
+ self.prop_test = cfg.prop_test
2179
+ self.epochs = cfg.epochs
2180
+ self.rand_seed = cfg.rand_seed if cfg.rand_seed is not None else np.random.randint(
2181
+ 1, 10000)
2182
+ self.use_gpu = bool(cfg.use_gpu and torch.cuda.is_available())
2183
+ self.output_manager = OutputManager(os.getcwd(), self.model_nme)
2184
+
2185
+ preprocessor = DatasetPreprocessor(train_data, test_data, cfg).run()
2186
+ self.train_data = preprocessor.train_data
2187
+ self.test_data = preprocessor.test_data
2188
+ self.train_oht_scl_data = preprocessor.train_oht_scl_data
2189
+ self.test_oht_scl_data = preprocessor.test_oht_scl_data
2190
+ self.var_nmes = preprocessor.var_nmes
2191
+ self.num_features = preprocessor.num_features
2192
+ self.cat_categories_for_shap = preprocessor.cat_categories_for_shap
2193
+
2194
+ self.cv = ShuffleSplit(n_splits=int(1/self.prop_test),
2195
+ test_size=self.prop_test,
2196
+ random_state=self.rand_seed)
2197
+ if self.task_type == 'classification':
2198
+ self.obj = 'binary:logistic'
2199
+ else: # regression
+ # Model-name heuristic (insurance convention, presumably: 'f' frequency -> Poisson,
+ # 's' severity -> Gamma, 'bc' burning cost -> Tweedie; Tweedie otherwise).
+ if 'f' in self.model_nme:
2201
+ self.obj = 'count:poisson'
2202
+ elif 's' in self.model_nme:
2203
+ self.obj = 'reg:gamma'
2204
+ elif 'bc' in self.model_nme:
2205
+ self.obj = 'reg:tweedie'
2206
+ else:
2207
+ self.obj = 'reg:tweedie'
2208
+ self.fit_params = {
2209
+ 'sample_weight': self.train_data[self.weight_nme].values
2210
+ }
2211
+ self.model_label: List[str] = []
2212
+
2213
+ # Register the per-model trainers; they are accessed by key later, which makes adding models easy.
2214
+ self.trainers: Dict[str, TrainerBase] = {
2215
+ 'glm': GLMTrainer(self),
2216
+ 'xgb': XGBTrainer(self),
2217
+ 'resn': ResNetTrainer(self),
2218
+ 'ft': FTTrainer(self)
2219
+ }
2220
+ self.xgb_best = None
2221
+ self.resn_best = None
2222
+ self.glm_best = None
2223
+ self.ft_best = None
2224
+ self.best_xgb_params = None
2225
+ self.best_resn_params = None
2226
+ self.best_ft_params = None
2227
+ self.best_xgb_trial = None
2228
+ self.best_resn_trial = None
2229
+ self.best_ft_trial = None
2230
+ self.best_glm_params = None
2231
+ self.best_glm_trial = None
2232
+ self.xgb_load = None
2233
+ self.resn_load = None
2234
+ self.ft_load = None
2235
+
2236
+ # One-way (single-factor) plotting helper
2237
+ def plot_oneway(self, n_bins=10):
2238
+ for c in self.factor_nmes:
2239
+ fig = plt.figure(figsize=(7, 5))
2240
+ if c in self.cate_list:
2241
+ group_col = c
2242
+ plot_source = self.train_data
2243
+ else:
2244
+ group_col = f'{c}_bins'
2245
+ bins = pd.qcut(
2246
+ self.train_data[c],
2247
+ n_bins,
2248
+ duplicates='drop' # note: duplicated quantiles drop bins rather than raising
2249
+ )
2250
+ plot_source = self.train_data.assign(**{group_col: bins})
2251
+ plot_data = plot_source.groupby(
2252
+ [group_col], observed=True).sum(numeric_only=True)
2253
+ plot_data.reset_index(inplace=True)
2254
+ plot_data['act_v'] = plot_data['w_act'] / \
2255
+ plot_data[self.weight_nme]
2256
+ ax = fig.add_subplot(111)
2258
+ ax.plot(plot_data.index, plot_data['act_v'],
2259
+ label='Actual', color='red')
2260
+ ax.set_title(
2261
+ 'Analysis of %s : Train Data' % group_col,
2262
+ fontsize=8)
2263
+ plt.xticks(plot_data.index,
2264
+ list(plot_data[group_col].astype(str)),
2265
+ rotation=90)
2266
+ if len(list(plot_data[group_col].astype(str))) > 50:
2267
+ plt.xticks(fontsize=3)
2268
+ else:
2269
+ plt.xticks(fontsize=6)
2270
+ plt.yticks(fontsize=6)
2271
+ ax2 = ax.twinx()
2272
+ ax2.bar(plot_data.index,
2273
+ plot_data[self.weight_nme],
2274
+ alpha=0.5, color='seagreen')
2275
+ plt.yticks(fontsize=6)
2276
+ plt.margins(0.05)
2277
+ plt.subplots_adjust(wspace=0.3)
2278
+ save_path = self.output_manager.plot_path(
2279
+ f'00_{self.model_nme}_{group_col}_oneway.png')
2280
+ plt.savefig(save_path, dpi=300)
2281
+ plt.close(fig)
2282
+
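+ # qcut sketch for the binning in plot_oneway above (toy data): heavily tied values
+ # yield fewer bins instead of raising, thanks to duplicates='drop'.
+ #
+ #     pd.qcut(pd.Series([0, 0, 0, 0, 1, 2]), 4, duplicates='drop')  # < 4 bins, no error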
2283
+ # Generic optimization entry point
2284
+ def optimize_model(self, model_key: str, max_evals: int = 100):
2285
+ if model_key not in self.trainers:
2286
+ print(f"Warning: Unknown model key: {model_key}")
2287
+ return
2288
+
2289
+ trainer = self.trainers[model_key]
2290
+ trainer.tune(max_evals)
2291
+ trainer.train()
2292
+
2293
+ # Update context attributes for backward compatibility
2294
+ setattr(self, f"{model_key}_best", trainer.model)
2295
+ setattr(self, f"best_{model_key}_params", trainer.best_params)
2296
+ setattr(self, f"best_{model_key}_trial", trainer.best_trial)
2297
+
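+ # Dispatch sketch: optimize_model('xgb', 100) runs the trainer pipeline and then
+ # mirrors its results onto the legacy attributes, roughly:
+ #
+ #     self.trainers['xgb'].tune(100)
+ #     self.trainers['xgb'].train()
+ #     self.xgb_best = trainer.model; self.best_xgb_params = trainer.best_params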
2298
+ # Bayesian optimization for the GLM
2299
+ def bayesopt_glm(self, max_evals=50):
2300
+ self.optimize_model('glm', max_evals)
2301
+
2302
+ # Bayesian optimization for XGBoost
2303
+ def bayesopt_xgb(self, max_evals=100):
2304
+ self.optimize_model('xgb', max_evals)
2305
+
2306
+ # Bayesian optimization for the ResNet
2307
+ def bayesopt_resnet(self, max_evals=100):
2308
+ self.optimize_model('resn', max_evals)
2309
+
2310
+ # Bayesian optimization for the FT-Transformer
2311
+ def bayesopt_ft(self, max_evals=50):
2312
+ self.optimize_model('ft', max_evals)
2313
+
2314
+ # Plot the lift curve
2315
+ def plot_lift(self, model_label, pred_nme, n_bins=10):
2316
+ model_map = {
2317
+ 'Xgboost': 'pred_xgb',
2318
+ 'ResNet': 'pred_resn',
2319
+ 'ResNetClassifier': 'pred_resn',
2320
+ 'FTTransformer': 'pred_ft',
2321
+ 'FTTransformerClassifier': 'pred_ft',
2322
+ 'GLM': 'pred_glm'
2323
+ }
2324
+ for k, v in model_map.items():
2325
+ if model_label.startswith(k):
2326
+ pred_nme = v
2327
+ break
2328
+
2329
+ fig = plt.figure(figsize=(11, 5))
2330
+ for pos, (title, data) in zip([121, 122],
2331
+ [('Lift Chart on Train Data', self.train_data),
2332
+ ('Lift Chart on Test Data', self.test_data)]):
2333
+ lift_df = pd.DataFrame({
2334
+ 'pred': data[pred_nme].values,
2335
+ 'w_pred': data[f'w_{pred_nme}'].values,
2336
+ 'act': data['w_act'].values,
2337
+ 'weight': data[self.weight_nme].values
2338
+ })
2339
+ plot_data = PlotUtils.split_data(lift_df, 'pred', 'weight', n_bins)
2340
+ denom = np.maximum(plot_data['weight'], EPS)
2341
+ plot_data['exp_v'] = plot_data['w_pred'] / denom
2342
+ plot_data['act_v'] = plot_data['act'] / denom
2343
+ plot_data = plot_data.reset_index()
2344
+
2345
+ ax = fig.add_subplot(pos)
2346
+ PlotUtils.plot_lift_ax(ax, plot_data, title)
2347
+
2348
+ plt.subplots_adjust(wspace=0.3)
2349
+ save_path = self.output_manager.plot_path(
2350
+ f'01_{self.model_nme}_{model_label}_lift.png')
2351
+ plt.savefig(save_path, dpi=300)
2352
+ plt.show()
2353
+ plt.close(fig)
2354
+
2355
+ # Plot the double-lift curve
2356
+ def plot_dlift(self, model_comp: List[str] = ['xgb', 'resn'], n_bins: int = 10) -> None:
2357
+ # Draw a double-lift chart comparing two models bin by bin.
+ # Args:
+ # model_comp: short names of the two models to compare (e.g. ['xgb', 'resn']; 'xgb'/'resn'/'ft' are supported).
+ # n_bins: number of bins, controlling the granularity of the lift curves.
2361
+ if len(model_comp) != 2:
2362
+ raise ValueError("`model_comp` must contain exactly two models to compare.")
2363
+
2364
+ model_name_map = {
2365
+ 'xgb': 'Xgboost',
2366
+ 'resn': 'ResNet',
2367
+ 'ft': 'FTTransformer',
2368
+ 'glm': 'GLM'
2369
+ }
2370
+
2371
+ name1, name2 = model_comp
2372
+ if name1 not in model_name_map or name2 not in model_name_map:
2373
+ raise ValueError(f"不支持的模型简称。请从 {list(model_name_map.keys())} 中选择。")
2374
+
2375
+ fig, axes = plt.subplots(1, 2, figsize=(11, 5))
2376
+ datasets = {
2377
+ 'Train Data': self.train_data,
2378
+ 'Test Data': self.test_data
2379
+ }
2380
+
2381
+ for ax, (data_name, data) in zip(axes, datasets.items()):
2382
+ pred1_col = f'w_pred_{name1}'
2383
+ pred2_col = f'w_pred_{name2}'
2384
+
2385
+ if pred1_col not in data.columns or pred2_col not in data.columns:
2386
+ print(
+ f"Warning: prediction column {pred1_col} or {pred2_col} missing in {data_name}; skipping plot.")
2388
+ continue
2389
+
2390
+ lift_data = pd.DataFrame({
2391
+ 'pred1': data[pred1_col].values,
2392
+ 'pred2': data[pred2_col].values,
2393
+ 'diff_ly': data[pred1_col].values / np.maximum(data[pred2_col].values, EPS),
2394
+ 'act': data['w_act'].values,
2395
+ 'weight': data[self.weight_nme].values
2396
+ })
2397
+ plot_data = PlotUtils.split_data(
2398
+ lift_data, 'diff_ly', 'weight', n_bins)
2399
+ denom = np.maximum(plot_data['act'], EPS)
2400
+ plot_data['exp_v1'] = plot_data['pred1'] / denom
2401
+ plot_data['exp_v2'] = plot_data['pred2'] / denom
2402
+ plot_data['act_v'] = plot_data['act'] / denom
2403
+ plot_data.reset_index(inplace=True)
2404
+
2405
+ label1 = model_name_map[name1]
2406
+ label2 = model_name_map[name2]
2407
+
2408
+ PlotUtils.plot_dlift_ax(
2409
+ ax, plot_data, f'Double Lift Chart on {data_name}', label1, label2)
2410
+
2411
+ plt.subplots_adjust(bottom=0.25, top=0.95, right=0.8, wspace=0.3)
2412
+ save_path = self.output_manager.plot_path(
2413
+ f'02_{self.model_nme}_dlift_{name1}_vs_{name2}.png')
2414
+ plt.savefig(save_path, dpi=300)
2415
+ plt.show()
2416
+ plt.close(fig)
2417
+
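+ # Reading the double-lift chart, in brief: rows are binned by the prediction
+ # ratio diff_ly = pred1 / pred2 and each curve is normalized by actuals
+ # (exp_v = pred / act), so the model whose curve stays closer to 1.0 across
+ # bins is conventionally read as the better-calibrated of the pair.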
2418
+ # Plot the conversion-rate lift curve
2419
+ def plot_conversion_lift(self, model_pred_col: str, n_bins: int = 20):
2420
+ if not self.binary_resp_nme:
2421
+ print("错误: 未在 BayesOptModel 初始化时提供 `binary_resp_nme`。无法绘制成交率曲线。")
2422
+ return
2423
+
2424
+ fig, axes = plt.subplots(1, 2, figsize=(14, 6), sharey=True)
2425
+ datasets = {
2426
+ 'Train Data': self.train_data,
2427
+ 'Test Data': self.test_data
2428
+ }
2429
+
2430
+ for ax, (data_name, data) in zip(axes, datasets.items()):
2431
+ if model_pred_col not in data.columns:
2432
+ print(f"警告: 在 {data_name} 中找不到预测列 '{model_pred_col}'。跳过绘图。")
2433
+ continue
2434
+
2435
+ # Sort by model score and compute weighted bins
2436
+ plot_data = data.sort_values(by=model_pred_col).copy()
2437
+ plot_data['cum_weight'] = plot_data[self.weight_nme].cumsum()
2438
+ total_weight = plot_data[self.weight_nme].sum()
2439
+
2440
+ if total_weight > EPS:
2441
+ plot_data['bin'] = pd.cut(
2442
+ plot_data['cum_weight'],
2443
+ bins=n_bins,
2444
+ labels=False,
2445
+ right=False
2446
+ )
2447
+ else:
2448
+ plot_data['bin'] = 0
2449
+
2450
+ # Aggregate by bin
2451
+ lift_agg = plot_data.groupby('bin').agg(
2452
+ total_weight=(self.weight_nme, 'sum'),
2453
+ actual_conversions=(self.binary_resp_nme, 'sum'),
2454
+ weighted_conversions=('w_binary_act', 'sum'),
2455
+ avg_pred=(model_pred_col, 'mean')
2456
+ ).reset_index()
2457
+
2458
+ # Conversion rate per bin
2459
+ lift_agg['conversion_rate'] = lift_agg['weighted_conversions'] / \
2460
+ lift_agg['total_weight']
2461
+
2462
+ # Overall average conversion rate
2463
+ overall_conversion_rate = data['w_binary_act'].sum(
2464
+ ) / data[self.weight_nme].sum()
2465
+ ax.axhline(y=overall_conversion_rate, color='gray', linestyle='--',
2466
+ label=f'Overall Avg Rate ({overall_conversion_rate:.2%})')
2467
+
2468
+ ax.plot(lift_agg['bin'], lift_agg['conversion_rate'],
2469
+ marker='o', linestyle='-', label='Actual Conversion Rate')
2470
+ ax.set_title(f'Conversion Rate Lift Chart on {data_name}')
2471
+ ax.set_xlabel(f'Model Score Decile (based on {model_pred_col})')
2472
+ ax.set_ylabel('Conversion Rate')
2473
+ ax.grid(True, linestyle='--', alpha=0.6)
2474
+ ax.legend()
2475
+
2476
+ plt.tight_layout()
2477
+ plt.show()
2478
+
2479
+ # Save models
2480
+ def save_model(self, model_name=None):
2481
+ keys = [model_name] if model_name else self.trainers.keys()
2482
+ for key in keys:
2483
+ if key in self.trainers:
2484
+ self.trainers[key].save()
2485
+ else:
2486
+ if model_name: # Only warn if specific model requested
2487
+ print(f"[save_model] Warning: Unknown model key {key}")
2488
+
2489
+ def load_model(self, model_name=None):
2490
+ keys = [model_name] if model_name else self.trainers.keys()
2491
+ for key in keys:
2492
+ if key in self.trainers:
2493
+ self.trainers[key].load()
2494
+ # Update context attributes
2495
+ trainer = self.trainers[key]
2496
+ if trainer.model is not None:
2497
+ setattr(self, f"{key}_best", trainer.model)
2498
+ # Also update xxx_load for backward compatibility if needed
2499
+ # Original code had xgb_load, resn_load, ft_load but not glm_load
2500
+ if key in ['xgb', 'resn', 'ft']:
2501
+ setattr(self, f"{key}_load", trainer.model)
2502
+ else:
2503
+ if model_name:
2504
+ print(f"[load_model] Warning: Unknown model key {key}")
2505
+
2506
+ def _sample_rows(self, data: pd.DataFrame, n: int) -> pd.DataFrame:
2507
+ if len(data) == 0:
2508
+ return data
2509
+ return data.sample(min(len(data), n), random_state=self.rand_seed)
2510
+
2511
+ @staticmethod
2512
+ def _shap_nsamples(arr: np.ndarray, max_nsamples: int = 300) -> int:
2513
+ min_needed = arr.shape[1] + 2
2514
+ return max(min_needed, min(max_nsamples, arr.shape[0] * arr.shape[1]))
2515
+
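+ # Worked example for _shap_nsamples (values illustrative): with 200 rows and 10
+ # features, min_needed = 12 and nsamples = max(12, min(300, 2000)) = 300; tiny
+ # explain sets fall back to the n_features + 2 floor.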
2516
+ def _build_ft_shap_matrix(self, data: pd.DataFrame) -> np.ndarray:
2517
+
2518
+ # Convert the raw feature DataFrame (holding self.factor_nmes) into a purely
+ # numeric matrix: numeric columns as float64, categorical columns as integer codes (stored as float64).
+ # Column order follows self.factor_nmes.
2521
+
2522
+ matrices = []
2523
+
2524
+ for col in self.factor_nmes:
2525
+ s = data[col]
2526
+
2527
+ if col in self.cate_list:
2528
+ # Categorical column: encode against the full category set seen in training
2529
+ cats = pd.Categorical(
2530
+ s,
2531
+ categories=self.cat_categories_for_shap[col]
2532
+ )
2533
+ # cats.codes is an Index / ndarray; wrap it with np.asarray before reshaping
2534
+ codes = np.asarray(cats.codes, dtype=np.float64).reshape(-1, 1)
2535
+ matrices.append(codes)
2536
+ else:
2537
+ # Numeric column: Series -> numpy -> reshape
2538
+ vals = pd.to_numeric(s, errors="coerce")
2539
+ arr = vals.to_numpy(dtype=np.float64, copy=True).reshape(-1, 1)
2540
+ matrices.append(arr)
2541
+
2542
+ X_mat = np.concatenate(matrices, axis=1) # (N, F)
2543
+ return X_mat
2544
+
2545
+ def _decode_ft_shap_matrix_to_df(self, X_mat: np.ndarray) -> pd.DataFrame:
2546
+
2547
+ # Restore SHAP's numeric matrix (N, F) to the original feature DataFrame:
+ # numeric columns as float, categorical columns back to pandas category dtype,
+ # so the result feeds both XGBoost with enable_categorical=True and the FT-Transformer.
+ # Column order = self.factor_nmes
2551
+
2552
+ data_dict = {}
2553
+
2554
+ for j, col in enumerate(self.factor_nmes):
2555
+ col_vals = X_mat[:, j]
2556
+
2557
+ if col in self.cate_list:
2558
+ cats = self.cat_categories_for_shap[col]
2559
+
2560
+ # SHAP perturbs values into fractions; round them back to integer codes
2561
+ codes = np.round(col_vals).astype(int)
2562
+ # Clamp to [-1, len(cats)-1]
2563
+ codes = np.clip(codes, -1, len(cats) - 1)
2564
+
2565
+ # Via pandas.Categorical.from_codes:
+ # - code -1 is treated as missing (NaN)
+ # - all other indices map to the corresponding categories in cats
2568
+ cat_series = pd.Categorical.from_codes(
2569
+ codes,
2570
+ categories=cats
2571
+ )
2572
+ # Stored as a Categorical, not object dtype
2573
+ data_dict[col] = cat_series
2574
+ else:
2575
+ # Numeric column: plain float
2576
+ data_dict[col] = col_vals.astype(float)
2577
+
2578
+ df = pd.DataFrame(data_dict, columns=self.factor_nmes)
2579
+
2580
+ # Belt and braces: make sure every categorical column's dtype really is category
2581
+ for col in self.cate_list:
2582
+ if col in df.columns:
2583
+ df[col] = df[col].astype("category")
2584
+ return df
2585
+
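+ # from_codes sketch (toy values): pd.Categorical.from_codes([0, -1], categories=['a', 'b'])
+ # yields ['a', NaN] -- code -1 is pandas' missing-value sentinel, matching the clip above.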
2586
+ def _build_glm_design(self, data: pd.DataFrame) -> pd.DataFrame:
2587
+ # Match the GLM training stage: add an intercept on top of the one-hot + standardized features
2588
+ X = data[self.var_nmes]
2589
+ return sm.add_constant(X, has_constant='add')
2590
+
2591
+ def _compute_shap_core(self,
2592
+ model_key: str,
2593
+ n_background: int,
2594
+ n_samples: int,
2595
+ on_train: bool,
2596
+ X_df: pd.DataFrame,
2597
+ prep_fn,
2598
+ predict_fn,
2599
+ cleanup_fn=None):
2600
+ # Core SHAP logic shared by all models: pick background samples, build the explainer, return results.
2601
+ if model_key not in self.trainers or self.trainers[model_key].model is None:
2602
+ raise RuntimeError(f"Model {model_key} not trained.")
2603
+
2604
+ if cleanup_fn:
2605
+ cleanup_fn()
2606
+
2607
+ # Background
2608
+ bg_df = self._sample_rows(X_df, n_background)
2609
+ bg_mat = prep_fn(bg_df)
2610
+
2611
+ # Explainer
2612
+ explainer = shap.KernelExplainer(predict_fn, bg_mat)
2613
+
2614
+ # Explain data
2615
+ ex_df = self._sample_rows(X_df, n_samples)
2616
+ ex_mat = prep_fn(ex_df)
2617
+
2618
+ nsample_eff = self._shap_nsamples(ex_mat)
2619
+ shap_values = explainer.shap_values(ex_mat, nsamples=nsample_eff)
2620
+
2621
+ # Base value
2622
+ bg_pred = predict_fn(bg_mat)
2623
+ base_value = float(np.asarray(bg_pred).mean())
2624
+
2625
+ return {
2626
+ "explainer": explainer,
2627
+ "X_explain": ex_df,
2628
+ "shap_values": shap_values,
2629
+ "base_value": base_value
2630
+ }
2631
+
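+ # Result-layout sketch: each compute_shap_* below returns a dict with keys
+ # 'explainer', 'X_explain' (the sampled rows), 'shap_values' (an N x F array for
+ # single-output models) and 'base_value' (mean background prediction); per-row
+ # attributions sum to prediction - base_value up to KernelExplainer's sampling error.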
2632
+ # ========= XGBoost SHAP =========
2633
+ def compute_shap_xgb(self, n_background: int = 500,
2634
+ n_samples: int = 200,
2635
+ on_train: bool = True):
2636
+ data = self.train_data if on_train else self.test_data
2637
+ X_raw = data[self.factor_nmes]
2638
+
2639
+ def predict_wrapper(x_mat):
2640
+ df_input = self._decode_ft_shap_matrix_to_df(x_mat)
2641
+ return self.xgb_best.predict(df_input)
2642
+
2643
+ self.shap_xgb = self._compute_shap_core(
2644
+ 'xgb', n_background, n_samples, on_train,
2645
+ X_df=X_raw,
2646
+ prep_fn=lambda df: self._build_ft_shap_matrix(
2647
+ df).astype(np.float64),
2648
+ predict_fn=predict_wrapper
2649
+ )
2650
+ return self.shap_xgb
2651
+
2652
+ # ========= ResNet SHAP =========
2653
+ def _resn_predict_wrapper(self, X_np):
2654
+ # Force evaluation on CPU
2655
+ model = self.resn_best.resnet.to("cpu")
2656
+ with torch.no_grad():
2657
+ X_tensor = torch.tensor(X_np, dtype=torch.float32)
2658
+ y_pred = model(X_tensor).cpu().numpy()
2659
+ y_pred = np.clip(y_pred, 1e-6, None)
2660
+ return y_pred.reshape(-1)
2661
+
2662
+ def compute_shap_resn(self, n_background: int = 500,
2663
+ n_samples: int = 200,
2664
+ on_train: bool = True):
2665
+ data = self.train_oht_scl_data if on_train else self.test_oht_scl_data
2666
+ X = data[self.var_nmes]
2667
+
2668
+ def cleanup():
2669
+ self.resn_best.device = torch.device("cpu")
2670
+ self.resn_best.resnet.to("cpu")
2671
+ if torch.cuda.is_available():
2672
+ torch.cuda.empty_cache()
2673
+
2674
+ self.shap_resn = self._compute_shap_core(
2675
+ 'resn', n_background, n_samples, on_train,
2676
+ X_df=X,
2677
+ prep_fn=lambda df: df.to_numpy(dtype=np.float64),
2678
+ predict_fn=lambda x: self._resn_predict_wrapper(x),
2679
+ cleanup_fn=cleanup
2680
+ )
2681
+ return self.shap_resn
2682
+
2683
+ # ========= FT-Transformer SHAP =========
2684
+ def _ft_shap_predict_wrapper(self, X_mat: np.ndarray) -> np.ndarray:
2685
+ df_input = self._decode_ft_shap_matrix_to_df(X_mat)
2686
+ y_pred = self.ft_best.predict(df_input)
2687
+ return np.asarray(y_pred, dtype=np.float64).reshape(-1)
2688
+
2689
+ def compute_shap_ft(self, n_background: int = 500,
2690
+ n_samples: int = 200,
2691
+ on_train: bool = True):
2692
+ data = self.train_data if on_train else self.test_data
2693
+ X_raw = data[self.factor_nmes]
2694
+
2695
+ def cleanup():
2696
+ self.ft_best.device = torch.device("cpu")
2697
+ self.ft_best.ft.to("cpu")
2698
+ if torch.cuda.is_available():
2699
+ torch.cuda.empty_cache()
2700
+
2701
+ self.shap_ft = self._compute_shap_core(
2702
+ 'ft', n_background, n_samples, on_train,
2703
+ X_df=X_raw,
2704
+ prep_fn=lambda df: self._build_ft_shap_matrix(
2705
+ df).astype(np.float64),
2706
+ predict_fn=self._ft_shap_predict_wrapper,
2707
+ cleanup_fn=cleanup
2708
+ )
2709
+ return self.shap_ft
2710
+
2711
+ # ========= GLM SHAP =========
2909
+ def compute_shap_glm(self, n_background: int = 500,
2910
+ n_samples: int = 200,
2911
+ on_train: bool = True):
2912
+ data = self.train_oht_scl_data if on_train else self.test_oht_scl_data
2913
+ design_all = self._build_glm_design(data)
2914
+ design_cols = list(design_all.columns)
2915
+
2916
+ def predict_wrapper(x_np):
2917
+ x_df = pd.DataFrame(x_np, columns=design_cols)
2918
+ y_pred = self.glm_best.predict(x_df)
2919
+ return np.asarray(y_pred, dtype=np.float64).reshape(-1)
2920
+
2921
+ res = self._compute_shap_core(
2922
+ 'glm', n_background, n_samples, on_train,
2923
+ X_df=design_all,
2924
+ prep_fn=lambda df: df.to_numpy(dtype=np.float64),
2925
+ predict_fn=predict_wrapper
2926
+ )
2927
+ return res